Initial import of my local openpandora.oe.git Open Embedded overlay to the GIT server...
authorDavid-John Willis <John.Willis@Distant-earth.com>
Tue, 27 Jan 2009 09:00:41 +0000 (09:00 +0000)
committerDavid-John Willis <John.Willis@Distant-earth.com>
Tue, 27 Jan 2009 09:00:41 +0000 (09:00 +0000)
This just contains files that are used in the current images. All in-development stuff has been unstaged for now to set a sensible baseline and will be added back in as tested.

104 files changed:
conf/machine/omap3-pandora.conf.notused [new file with mode: 0755]
packages/images/pandora-core-image.bb [new file with mode: 0755]
packages/images/pandora-desktop-image.bb [new file with mode: 0755]
packages/images/pandora-gui-image.bb [new file with mode: 0755]
packages/images/pandora-satogui-image.bb [new file with mode: 0755]
packages/images/pandora-validation-image.bb [new file with mode: 0755]
packages/linux/linux-omap.inc [new file with mode: 0755]
packages/linux/linux.inc [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/0001-Implement-downsampling-with-debugs.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/0001-Removed-resolution-check-that-prevents-scaling-when.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/cache-display-fix.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/defconfig [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/defconfig.bak [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/dvb-fix-dma.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/evm-mcspi-ts.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/fix-install.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/fix-irq33.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-256MB.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-add-clk-get-parent.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-enable-overlay-optimalization.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-fix-display-panning.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-fix-timings.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-improve-pixclock-config.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-make-dpll4-m4-ck-programmable.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/mru-make-video-timings-selectable.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-dma-iso-in.eml [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-fix-ISO-in-unlink.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-fix-dbrownell.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-fix-endpoints.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-fix-multiple-bulk-transfers.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-mru-otgfix.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/musb-support-high-bandwidth.patch.eml [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/nand.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/no-cortex-deadlock.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/no-empty-flash-warnings.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/no-harry-potter.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/omap-2430-lcd.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/oprofile-0.9.3.armv7.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/pvr/dispc.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/pvr/nokia-TI.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/pvr/pvr-add.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/read_die_ids.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/sitecomwl168-support.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/strongly-ordered-memory.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/timer-suppression.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi/touchscreen.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel-wifi_2.6.27-pandora.bb [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/0001-Implement-downsampling-with-debugs.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/0001-Removed-resolution-check-that-prevents-scaling-when.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/cache-display-fix.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/defconfig [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/defconfig.bak [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/dvb-fix-dma.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/evm-mcspi-ts.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/fix-install.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/fix-irq33.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-256MB.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-add-clk-get-parent.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-enable-overlay-optimalization.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-fix-display-panning.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-fix-timings.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-improve-pixclock-config.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-make-dpll4-m4-ck-programmable.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/mru-make-video-timings-selectable.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-dma-iso-in.eml [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-fix-ISO-in-unlink.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-fix-dbrownell.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-fix-endpoints.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-fix-multiple-bulk-transfers.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-mru-otgfix.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/musb-support-high-bandwidth.patch.eml [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/nand.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/no-cortex-deadlock.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/no-empty-flash-warnings.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/no-harry-potter.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/omap-2430-lcd.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/oprofile-0.9.3.armv7.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/pvr/dispc.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/pvr/nokia-TI.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/pvr/pvr-add.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/read_die_ids.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/sitecomwl168-support.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/strongly-ordered-memory.diff [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/timer-suppression.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel/touchscreen.patch [new file with mode: 0755]
packages/linux/omap3-pandora-kernel_2.6.27-pandora.bb [new file with mode: 0755]
packages/omap3-deviceid/files/Makefile [new file with mode: 0755]
packages/omap3-deviceid/files/mem.c [new file with mode: 0755]
packages/omap3-deviceid/files/omap3-deviceid.sh [new file with mode: 0755]
packages/omap3-deviceid/omap3-deviceid_1.0.bb [new file with mode: 0755]
packages/pandora-system/pandora-firmware.bb [new file with mode: 0755]
packages/pandora-system/pandora-firmware/Fw1251r1c.bin [new file with mode: 0755]
packages/pandora-system/pandora-firmware/README.txt [new file with mode: 0755]
packages/pandora-system/pandora-firmware/brf6300.bin [new file with mode: 0755]
packages/pandora-system/pandora-matchbox-gtk-theme.bb [new file with mode: 0755]
packages/pandora-system/pandora-set-root-password.bb [new file with mode: 0755]
packages/pandora-system/pandora-wifi-tools/wlan_cu_makefile.patch [new file with mode: 0755]
packages/pandora-system/pandora-wifi-tools_git.bb [new file with mode: 0755]
packages/pandora-system/pandora-wifi.inc [new file with mode: 0755]
packages/pandora-system/pandora-wifi_git.bb [new file with mode: 0755]
packages/tasks/task-pandora-core.bb [new file with mode: 0755]
packages/tasks/task-pandora-desktop.bb [new file with mode: 0755]
packages/tasks/task-pandora-gui.bb [new file with mode: 0755]
packages/tasks/task-pandora-satogui.bb [new file with mode: 0755]

diff --git a/conf/machine/omap3-pandora.conf.notused b/conf/machine/omap3-pandora.conf.notused
new file mode 100755 (executable)
index 0000000..f892ec2
--- /dev/null
@@ -0,0 +1,51 @@
+#@TYPE: Machine
+#@NAME: OMAP3 based Pandora Handheld Console
+#@DESCRIPTION: Machine configuration for the Pandora http://www.openpandora.org/ Handheld Console
+
+TARGET_ARCH = "arm"
+include conf/machine/include/tune-cortexa8.inc
+
+PREFERRED_PROVIDER_virtual/xserver = "xserver-xorg"
+
+XSERVER = " \
+       xserver-xorg \
+       xf86-input-evdev \
+       xf86-input-mouse \
+       xf86-input-tslib \
+       xf86-video-omapfb \
+       xf86-input-keyboard \
+"
+
+#What is the correct one, both seem in use?
+MACHINE_GUI_CLASS = "bigscreen"
+GUI_MACHINE_CLASS = "bigscreen"
+
+#800 * 480 Widescreen
+MACHINE_DISPLAY_WIDTH_PIXELS = "800"
+MACHINE_DISPLAY_HEIGHT_PIXELS = "480"
+
+# Ship all kernel modules
+MACHINE_EXTRA_RRECOMMENDS = "omap3-sgx-modules kernel-modules"
+
+# Make sure firmware is installed for BT and WiFi.
+MACHINE_EXTRA_RDEPENDS = "pandora-firmware"
+
+IMAGE_FSTYPES += "jffs2 tar.bz2"
+EXTRA_IMAGECMD_jffs2 = "-lnp "
+
+SERIAL_CONSOLE = "115200 ttyS0"
+
+#PREFERRED_PROVIDER_virtual/kernel = "linux-omap"
+PREFERRED_PROVIDER_virtual/kernel = "${MACHINE}-kernel"
+
+KERNEL_IMAGETYPE = "uImage"
+
+UBOOT_ENTRYPOINT = "0x80008000"
+UBOOT_LOADADDRESS = "0x80008000"
+UBOOT_ARCH = "arm"
+UBOOT_MACHINE = "omap3_pandora_config"
+
+PREFERRED_VERSION_u-boot = "git"
+#EXTRA_IMAGEDEPENDS += "u-boot x-load"
+
+MACHINE_FEATURES = "kernel26 apm alsa usbgadget usbhost keyboard vfat ext2 screen touchscreen bluetooth wifi"
diff --git a/packages/images/pandora-core-image.bb b/packages/images/pandora-core-image.bb
new file mode 100755 (executable)
index 0000000..56d71fb
--- /dev/null
@@ -0,0 +1,27 @@
+# Console image for Pandora handheld console
+
+inherit image
+
+IMAGE_LINGUAS = "en-gb en-us"
+
+DEPENDS = "task-base"
+
+IMAGE_INSTALL += " \
+       task-pandora-core \
+"
+#  pandora-set-root-password \ 
+
+IMAGE_PREPROCESS_COMMAND = "create_etc_timestamp"
+
+#ROOTFS_POSTPROCESS_COMMAND += '${@base_conditional("DISTRO_TYPE", "release", "zap_root_password; ", "",d)}'
+#ROOTFS_POSTPROCESS_COMMAND += "set_image_autologin; "
+
+
+# Helper to say what image we built, include GIT tag and image name.
+PANDORA_VERSION_FILE = "${IMAGE_ROOTFS}/${sysconfdir}/op-version"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Tag Name: `git tag|tail -n 1`> ${PANDORA_VERSION_FILE};cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo VERSION: `git-log -n1 --pretty=oneline|awk '{print $1}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Branch: ` git branch |awk '/*/{print $2}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "echo Build Host: `cat /etc/hostname` >> ${PANDORA_VERSION_FILE};"
+ROOTFS_POSTPROCESS_COMMAND += "echo Time Stamp: `date -R` >> ${PANDORA_VERSION_FILE};"
diff --git a/packages/images/pandora-desktop-image.bb b/packages/images/pandora-desktop-image.bb
new file mode 100755 (executable)
index 0000000..f2d8669
--- /dev/null
@@ -0,0 +1,27 @@
+# E17 demo image for Pandora handheld console
+
+inherit image
+
+IMAGE_LINGUAS = "en-gb en-us"
+
+DEPENDS = "task-base"
+
+IMAGE_INSTALL += " \
+       task-pandora-desktop \
+"
+
+#  pandora-set-root-password \ 
+
+IMAGE_PREPROCESS_COMMAND = "create_etc_timestamp"
+
+#ROOTFS_POSTPROCESS_COMMAND += '${@base_conditional("DISTRO_TYPE", "release", "zap_root_password; ", "",d)}'
+#ROOTFS_POSTPROCESS_COMMAND += "set_image_autologin; "
+
+
+# Helper to say what image we built, include GIT tag and image name.
+PANDORA_VERSION_FILE = "${IMAGE_ROOTFS}/${sysconfdir}/op-version"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Tag Name: `git tag|tail -n 1`> ${PANDORA_VERSION_FILE};cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo VERSION: `git-log -n1 --pretty=oneline|awk '{print $1}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Branch: ` git branch |awk '/*/{print $2}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "echo Build Host: `cat /etc/hostname` >> ${PANDORA_VERSION_FILE};"
+ROOTFS_POSTPROCESS_COMMAND += "echo Time Stamp: `date -R` >> ${PANDORA_VERSION_FILE};"
diff --git a/packages/images/pandora-gui-image.bb b/packages/images/pandora-gui-image.bb
new file mode 100755 (executable)
index 0000000..a6b1aeb
--- /dev/null
@@ -0,0 +1,27 @@
+# Default matchbox image for the Pandora
+
+inherit image
+
+IMAGE_LINGUAS = "en-gb en-us"
+
+DEPENDS = "task-base"
+
+IMAGE_INSTALL = " \
+       task-pandora-gui \
+"
+
+#  pandora-set-root-password \ 
+
+IMAGE_PREPROCESS_COMMAND = "create_etc_timestamp"
+
+#ROOTFS_POSTPROCESS_COMMAND += '${@base_conditional("DISTRO_TYPE", "release", "zap_root_password; ", "",d)}'
+#ROOTFS_POSTPROCESS_COMMAND += "set_image_autologin; "
+
+
+# Helper to say what image we built, include GIT tag and image name.
+PANDORA_VERSION_FILE = "${IMAGE_ROOTFS}/${sysconfdir}/op-version"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Tag Name: `git tag|tail -n 1`> ${PANDORA_VERSION_FILE};cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo VERSION: `git-log -n1 --pretty=oneline|awk '{print $1}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Branch: ` git branch |awk '/*/{print $2}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "echo Build Host: `cat /etc/hostname` >> ${PANDORA_VERSION_FILE};"
+ROOTFS_POSTPROCESS_COMMAND += "echo Time Stamp: `date -R` >> ${PANDORA_VERSION_FILE};"
diff --git a/packages/images/pandora-satogui-image.bb b/packages/images/pandora-satogui-image.bb
new file mode 100755 (executable)
index 0000000..1af71a5
--- /dev/null
@@ -0,0 +1,27 @@
+# Default matchbox image for the Pandora
+
+inherit image
+
+IMAGE_LINGUAS = "en-gb en-us"
+
+DEPENDS = "task-base"
+
+IMAGE_INSTALL = " \
+       task-pandora-satogui \
+"
+
+#  pandora-set-root-password \ 
+
+IMAGE_PREPROCESS_COMMAND = "create_etc_timestamp"
+
+#ROOTFS_POSTPROCESS_COMMAND += '${@base_conditional("DISTRO_TYPE", "release", "zap_root_password; ", "",d)}'
+#ROOTFS_POSTPROCESS_COMMAND += "set_image_autologin; "
+
+
+# Helper to say what image we built, include GIT tag and image name.
+PANDORA_VERSION_FILE = "${IMAGE_ROOTFS}/${sysconfdir}/op-version"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Tag Name: `git tag|tail -n 1`> ${PANDORA_VERSION_FILE};cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo VERSION: `git-log -n1 --pretty=oneline|awk '{print $1}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Branch: ` git branch |awk '/*/{print $2}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "echo Build Host: `cat /etc/hostname` >> ${PANDORA_VERSION_FILE};"
+ROOTFS_POSTPROCESS_COMMAND += "echo Time Stamp: `date -R` >> ${PANDORA_VERSION_FILE};"
diff --git a/packages/images/pandora-validation-image.bb b/packages/images/pandora-validation-image.bb
new file mode 100755 (executable)
index 0000000..f3b38f2
--- /dev/null
@@ -0,0 +1,30 @@
+# Validation image for Pandora handheld console
+# for hardware testing and flashing images to NAND if copied to the SD in the correct places.
+
+inherit image
+
+IMAGE_LINGUAS = "en-gb en-us"
+
+DEPENDS = "task-base"
+
+IMAGE_INSTALL += " \
+       task-pandora-core \
+       task-pandora-validation \
+"
+#  pandora-set-root-password \ 
+
+IMAGE_PREPROCESS_COMMAND = "create_etc_timestamp"
+
+#ROOTFS_POSTPROCESS_COMMAND += '${@base_conditional("DISTRO_TYPE", "release", "zap_root_password; ", "",d)}'
+#ROOTFS_POSTPROCESS_COMMAND += "set_image_autologin; "
+
+
+# Helper to say what image we built, include GIT tag and image name.
+PANDORA_VERSION_FILE = "${IMAGE_ROOTFS}/${sysconfdir}/op-version"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Tag Name: `git tag|tail -n 1`> ${PANDORA_VERSION_FILE};cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo VERSION: `git-log -n1 --pretty=oneline|awk '{print $1}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "OLD_PWD=$PWD; cd `dirname '${FILE_DIRNAME}'`; echo Branch: ` git branch |awk '/*/{print $2}'` >> ${PANDORA_VERSION_FILE}; cd $OLD_PWD;"
+ROOTFS_POSTPROCESS_COMMAND += "echo Build Host: `cat /etc/hostname` >> ${PANDORA_VERSION_FILE};"
+ROOTFS_POSTPROCESS_COMMAND += "echo Time Stamp: `date -R` >> ${PANDORA_VERSION_FILE};"
+
diff --git a/packages/linux/linux-omap.inc b/packages/linux/linux-omap.inc
new file mode 100755 (executable)
index 0000000..c706a3f
--- /dev/null
@@ -0,0 +1,7 @@
+require linux.inc
+
+DESCRIPTION = "Linux kernel for OMAP processors"
+KERNEL_IMAGETYPE = "uImage"
+
+module_autoload_ohci-hcd_omap5912osk = "ohci-hcd"
+
diff --git a/packages/linux/linux.inc b/packages/linux/linux.inc
new file mode 100755 (executable)
index 0000000..bd7c07b
--- /dev/null
@@ -0,0 +1,168 @@
+DESCRIPTION = "Linux Kernel"
+SECTION = "kernel"
+LICENSE = "GPL"
+
+inherit kernel
+
+RPSRC = "http://www.rpsys.net/openzaurus/patches/archive"
+
+# Enable OABI compat for people stuck with obsolete userspace
+ARM_KEEP_OABI ?= "1"
+
+# Specify the commandline for your device
+
+# Boot from mmc
+CMDLINE_at91sam9263ek = "mem=64M console=ttyS0,115200 root=/dev/mmcblk0p1 rootfstype=ext2 rootdelay=5"
+# Boot from nfs
+#CMDLINE_at91sam9263ek = "mem=64M console=ttyS0,115200 root=301 root=/dev/nfs nfsroot=172.20.3.1:/data/at91 ip=172.20.0.5:::255.255.0.0"
+
+# Set the verbosity of kernel messages during runtime
+# You can define CMDLINE_DEBUG in your local.conf or distro.conf to override this behaviour  
+CMDLINE_DEBUG ?= '${@base_conditional("DISTRO_TYPE", "release", "quiet", "debug",d)}'
+CMDLINE_append = " ${CMDLINE_DEBUG} "
+
+# Support for binary device tree generation
+
+FILES_kernel-devicetree = "/boot/devicetree*"
+
+KERNEL_DEVICETREE_boc01 = "${WORKDIR}/boc01.dts"
+KERNEL_DEVICETREE_mpc8313e-rdb = "arch/${ARCH}/boot/dts/mpc8313erdb.dts"
+KERNEL_DEVICETREE_mpc8315e-rdb = "arch/${ARCH}/boot/dts/mpc8315erdb.dts"
+KERNEL_DEVICETREE_mpc8323e-rdb = "arch/${ARCH}/boot/dts/mpc832x_rdb.dts"
+KERNEL_DEVICETREE_kilauea = "arch/${ARCH}/boot/dts/kilauea.dts"
+KERNEL_DEVICETREE_sequoia = "arch/${ARCH}/boot/dts/sequoia.dts"
+KERNEL_DEVICETREE_canyonlands = "arch/${ARCH}/boot/dts/canyonlands.dts"
+
+KERNEL_DEVICETREE_FLAGS = "-R 8 -S 0x3000"
+
+python __anonymous () {
+
+    import bb
+    
+    devicetree = bb.data.getVar('KERNEL_DEVICETREE', d, 1) or ''
+    if devicetree:
+       depends = bb.data.getVar("DEPENDS", d, 1)
+       bb.data.setVar("DEPENDS", "%s dtc-native" % depends, d)
+       packages = bb.data.getVar("PACKAGES", d, 1)
+       bb.data.setVar("PACKAGES", "%s kernel-devicetree" % packages, d)
+}
+
+do_configure_prepend() {
+        echo "" > ${S}/.config
+
+        #
+        # logo support, if you supply logo_linux_clut224.ppm in SRC_URI, then it's going to be used
+        #
+        if [ -e ${WORKDIR}/logo_linux_clut224.ppm ]; then
+                install -m 0644 ${WORKDIR}/logo_linux_clut224.ppm drivers/video/logo/logo_linux_clut224.ppm
+                echo "CONFIG_LOGO=y"                    >> ${S}/.config
+                echo "CONFIG_LOGO_LINUX_CLUT224=y"      >> ${S}/.config
+        fi
+
+        #
+        # oabi / eabi support
+        #
+        if [ "${TARGET_OS}" = "linux-gnueabi" -o  "${TARGET_OS}" = "linux-uclibcgnueabi" ]; then
+                echo "CONFIG_AEABI=y"                   >> ${S}/.config
+                if [ "${ARM_KEEP_OABI}" = "1" ] ; then
+                        echo "CONFIG_OABI_COMPAT=y"             >> ${S}/.config
+                else
+                        echo "# CONFIG_OABI_COMPAT is not set"  >> ${S}/.config
+                fi
+        else
+                echo "# CONFIG_AEABI is not set"        >> ${S}/.config
+                echo "# CONFIG_OABI_COMPAT is not set"  >> ${S}/.config
+        fi
+
+        # When enabling thumb for userspace we also need thumb support in the kernel
+        if [ "${ARM_INSTRUCTION_SET}" = "thumb" ] ; then
+            sed -i -e /CONFIG_ARM_THUMB/d ${WORKDIR}/defconfig 
+            echo "CONFIG_ARM_THUMB=y" >> ${S}/.config
+        fi
+
+       #
+       # endian support
+       #
+        if [ "${SITEINFO_ENDIANESS}" = "be" ]; then
+                echo "CONFIG_CPU_BIG_ENDIAN=y"          >> ${S}/.config
+        fi
+
+        echo "CONFIG_CMDLINE=\"${CMDLINE}\"" >> ${S}/.config
+
+        sed -e '/CONFIG_AEABI/d' \
+            -e '/CONFIG_OABI_COMPAT=/d' \
+            -e '/CONFIG_CMDLINE=/d' \
+            -e '/CONFIG_CPU_BIG_ENDIAN/d' \
+            -e '/CONFIG_LOGO=/d' \
+            -e '/CONFIG_LOGO_LINUX_CLUT224=/d' \
+            -e '/CONFIG_LOCALVERSION/d' \
+           < '${WORKDIR}/defconfig' >>'${S}/.config'
+
+        #
+        # root-over-nfs-over-usb-eth support. Limited, but should cover some cases.
+        # Enable this by setting a proper CMDLINE_NFSROOT_USB.
+        #
+        if [ ! -z "${CMDLINE_NFSROOT_USB}" ]; then
+                oenote "Configuring the kernel for root-over-nfs-over-usb-eth with CMDLINE ${CMDLINE_NFSROOT_USB}"
+                sed -e '/CONFIG_INET/d' \
+                    -e '/CONFIG_IP_PNP=/d' \
+                    -e '/CONFIG_USB_GADGET=/d' \
+                    -e '/CONFIG_USB_GADGET_SELECTED=/d' \
+                    -e '/CONFIG_USB_ETH=/d' \
+                    -e '/CONFIG_NFS_FS=/d' \
+                    -e '/CONFIG_ROOT_NFS=/d' \
+                    -e '/CONFIG_CMDLINE=/d' \
+                    -i ${S}/.config
+                echo "CONFIG_INET=y"                     >> ${S}/.config
+                echo "CONFIG_IP_PNP=y"                   >> ${S}/.config
+                echo "CONFIG_USB_GADGET=y"               >> ${S}/.config
+                echo "CONFIG_USB_GADGET_SELECTED=y"      >> ${S}/.config
+                echo "CONFIG_USB_ETH=y"                  >> ${S}/.config
+                echo "CONFIG_NFS_FS=y"                   >> ${S}/.config
+                echo "CONFIG_ROOT_NFS=y"                 >> ${S}/.config
+                echo "CONFIG_CMDLINE=\"${CMDLINE_NFSROOT_USB}\"" >> ${S}/.config
+        fi
+        yes '' | oe_runmake oldconfig
+}
+
+do_configure_append_avr32() {
+        sed -i -e s:-mno-pic::g arch/avr32/Makefile
+}
+
+do_compile_append() {
+    if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then 
+        if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
+            ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
+            uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C none -a ${UBOOT_LOADADDRESS} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
+            rm -f linux.bin
+        else
+            ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
+            rm -f linux.bin.gz
+            gzip -9 linux.bin
+            uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C gzip -a ${UBOOT_LOADADDRESS} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz arch/${ARCH}/boot/uImage
+            rm -f linux.bin.gz
+        fi
+    fi
+}
+
+do_devicetree_image() {
+    if test -n "${KERNEL_DEVICETREE}" ; then
+        dtc -I dts -O dtb ${KERNEL_DEVICETREE_FLAGS} -o devicetree ${KERNEL_DEVICETREE}
+        install -m 0644 devicetree ${D}/boot/devicetree-${KERNEL_VERSION}
+        install -m 0644 devicetree ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGE_BASE_NAME}.dtb
+        cd ${DEPLOY_DIR_IMAGE}
+        rm -f ${KERNEL_IMAGE_SYMLINK_NAME}.dtb
+        ln -sf ${KERNEL_IMAGE_BASE_NAME}.dtb ${KERNEL_IMAGE_SYMLINK_NAME}.dtb
+    fi
+}
+
+addtask devicetree_image after do_deploy before do_package
+
+pkg_postinst_kernel-devicetree () {
+       cd /${KERNEL_IMAGEDEST}; update-alternatives --install /${KERNEL_IMAGEDEST}/devicetree devicetree devicetree-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
+}
+
+pkg_postrm_kernel-devicetree () {
+       cd /${KERNEL_IMAGEDEST}; update-alternatives --remove devicetree devicetree-${KERNEL_VERSION} || true
+}
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/0001-Implement-downsampling-with-debugs.patch b/packages/linux/omap3-pandora-kernel-wifi/0001-Implement-downsampling-with-debugs.patch
new file mode 100755 (executable)
index 0000000..d3608df
--- /dev/null
@@ -0,0 +1,138 @@
+From 1ef94095e9399a9a387b7b457b48f6c5de7013d8 Mon Sep 17 00:00:00 2001
+From: Tuomas Kulve <tuomas.kulve@movial.com>
+Date: Fri, 31 Oct 2008 14:23:57 +0200
+Subject: [PATCH] Implement downsampling (with debugs).
+
+---
+ drivers/video/omap/dispc.c |   75 +++++++++++++++++++++++++++++++++++++-------
+ 1 files changed, 63 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 68bc887..3640dbe 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -18,6 +18,8 @@
+  * with this program; if not, write to the Free Software Foundation, Inc.,
+  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  */
++#define DEBUG
++#define VERBOSE_DEBUG
+ #include <linux/kernel.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/mm.h>
+@@ -545,6 +547,17 @@ static void write_firhv_reg(int plane, int reg, u32 value)
+       dispc_write_reg(base + reg * 8, value);
+ }
++static void write_firv_reg(int plane, int reg, u32 value)
++{
++      u32 base;
++
++      if (plane == 1)
++              base = 0x1E0;
++      else
++              base = 0x1E0 + 0x20;
++      dispc_write_reg(base + reg * 4, value);
++}
++
+ static void set_upsampling_coef_table(int plane)
+ {
+       const u32 coef[][2] = {
+@@ -565,6 +578,27 @@ static void set_upsampling_coef_table(int plane)
+       }
+ }
++static void set_downsampling_coef_table(int plane)
++{
++      const u32 coef[][3] = {
++                { 0x24382400, 0x24382400, 0x00000000 },
++                { 0x28371FFE, 0x28391F04, 0x000004FE },
++                { 0x2C361BFB, 0x2D381B08, 0x000008FB },
++                { 0x303516F9, 0x3237170C, 0x00000CF9 },
++                { 0x11343311, 0x123737F7, 0x0000F711 },
++                { 0x1635300C, 0x173732F9, 0x0000F90C },
++                { 0x1B362C08, 0x1B382DFB, 0x0000FB08 },
++                { 0x1F372804, 0x1F3928FE, 0x0000FE04 },
++      };
++      int i;
++
++      for (i = 0; i < 8; i++) {
++              write_firh_reg(plane, i, coef[i][0]);
++              write_firhv_reg(plane, i, coef[i][1]);
++              write_firv_reg(plane, i, coef[i][2]);
++      }
++}
++
+ static int omap_dispc_set_scale(int plane,
+                               int orig_width, int orig_height,
+                               int out_width, int out_height)
+@@ -592,25 +626,47 @@ static int omap_dispc_set_scale(int plane,
+               if (orig_height > out_height ||
+                   orig_width * 8 < out_width ||
+                   orig_height * 8 < out_height) {
++                        dev_dbg(dispc.fbdev->dev, 
++                                "Max upsampling is 8x, "
++                                "tried: %dx%d -> %dx%d\n",
++                                orig_width, orig_height,
++                                out_width, out_height);
+                       enable_lcd_clocks(0);
+                       return -EINVAL;
+               }
+               set_upsampling_coef_table(plane);
+       } else if (orig_width > out_width) {
+-              /* Downsampling not yet supported
+-              */
+-
+-              enable_lcd_clocks(0);
+-              return -EINVAL;
++              /*
++               * Downsampling.
++               * Currently you can only scale both dimensions in one way.
++               */
++              if (orig_height < out_height ||
++                  orig_width > out_width * 4||
++                  orig_height > out_height * 4) {
++                        dev_dbg(dispc.fbdev->dev, 
++                                "Max downsampling is 4x, "
++                                "tried: %dx%d -> %dx%d\n",
++                                orig_width, orig_height,
++                                out_width, out_height);
++                      enable_lcd_clocks(0);
++                      return -EINVAL;
++              }
++              set_downsampling_coef_table(plane);
+       }
+       if (!orig_width || orig_width == out_width)
+               fir_hinc = 0;
+       else
+-              fir_hinc = 1024 * orig_width / out_width;
++              fir_hinc = 1024 * (orig_width -1)/ (out_width -1);
+       if (!orig_height || orig_height == out_height)
+               fir_vinc = 0;
+       else
+-              fir_vinc = 1024 * orig_height / out_height;
++              fir_vinc = 1024 * (orig_height-1) / (out_height -1 );
++
++      dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
++              "orig_height %d fir_hinc  %d fir_vinc %d\n",
++              out_width, out_height, orig_width, orig_height,
++              fir_hinc, fir_vinc);
++
+       dispc.fir_hinc[plane] = fir_hinc;
+       dispc.fir_vinc[plane] = fir_vinc;
+@@ -619,11 +675,6 @@ static int omap_dispc_set_scale(int plane,
+                   ((fir_vinc & 4095) << 16) |
+                   (fir_hinc & 4095));
+-      dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
+-              "orig_height %d fir_hinc  %d fir_vinc %d\n",
+-              out_width, out_height, orig_width, orig_height,
+-              fir_hinc, fir_vinc);
+-
+       MOD_REG_FLD(vs_reg[plane],
+                   FLD_MASK(16, 11) | FLD_MASK(0, 11),
+                   ((out_height - 1) << 16) | (out_width - 1));
+-- 
+1.5.6.5
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/0001-Removed-resolution-check-that-prevents-scaling-when.patch b/packages/linux/omap3-pandora-kernel-wifi/0001-Removed-resolution-check-that-prevents-scaling-when.patch
new file mode 100755 (executable)
index 0000000..636203e
--- /dev/null
@@ -0,0 +1,26 @@
+From 3227bd5c412e7eb0d4370b2834e71723f6b4be48 Mon Sep 17 00:00:00 2001
+From: Tuomas Kulve <tuomas.kulve@movial.fi>
+Date: Mon, 27 Oct 2008 18:55:59 +0200
+Subject: [PATCH] Removed resolution check that prevents scaling when output resolution doesn't match the original resolution.
+
+---
+ drivers/video/omap/dispc.c |    3 ---
+ 1 files changed, 0 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 0f0b2e5..1df0c1e 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -579,9 +579,6 @@ static int omap_dispc_set_scale(int plane,
+       if ((unsigned)plane > OMAPFB_PLANE_NUM)
+               return -ENODEV;
+-      if (out_width != orig_width || out_height != orig_height)
+-              return -EINVAL;
+-
+       enable_lcd_clocks(1);
+       if (orig_width < out_width) {
+               /*
+-- 
+1.5.6.5
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/cache-display-fix.patch b/packages/linux/omap3-pandora-kernel-wifi/cache-display-fix.patch
new file mode 100755 (executable)
index 0000000..019fd5a
--- /dev/null
@@ -0,0 +1,238 @@
+On Tue, 2008-07-01 at 06:23 +0100, Dirk Behme wrote:
+> Catalin Marinas wrote:
+> > But, anyway, if you want a patch, Harry is updating it to a recent
+> > kernel.
+> 
+> Any news on this? I think there are some people wanting a patch ;)
+
+See below for a preliminary patch updated to 2.6.26-rc8. Note that I
+don't plan to submit it in its current form but clean it up a bit first.
+
+
+Show the cache type of ARMv7 CPUs
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+---
+
+ arch/arm/kernel/setup.c  |  137 +++++++++++++++++++++++++++++++++++++++++++++-
+ include/asm-arm/system.h |   18 ++++++
+ 2 files changed, 153 insertions(+), 2 deletions(-)
+
+
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 5ae0eb2..0cd238d 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -256,6 +256,24 @@ static const char *proc_arch[] = {
+       "?(17)",
+ };
++static const char *v7_cache_policy[4] = {
++      "reserved",
++      "AVIVT",
++      "VIPT",
++      "PIPT",
++};
++
++static const char *v7_cache_type[8] = {
++      "none",
++      "instruction only",
++      "data only",
++      "separate instruction and data",
++      "unified",
++      "unknown type",
++      "unknown type",
++      "unknown type",
++};
++
+ #define CACHE_TYPE(x) (((x) >> 25) & 15)
+ #define CACHE_S(x)    ((x) & (1 << 24))
+ #define CACHE_DSIZE(x)        (((x) >> 12) & 4095)    /* only if S=1 */
+@@ -266,6 +284,22 @@ static const char *proc_arch[] = {
+ #define CACHE_M(y)    ((y) & (1 << 2))
+ #define CACHE_LINE(y) ((y) & 3)
++#define CACHE_TYPE_V7(x)      (((x) >> 14) & 3)
++#define CACHE_UNIFIED(x)      ((((x) >> 27) & 7)+1)
++#define CACHE_COHERENT(x)     ((((x) >> 24) & 7)+1)
++
++#define CACHE_ID_LEVEL_MASK   7
++#define CACHE_ID_LEVEL_BITS   3
++
++#define CACHE_LINE_V7(v)      ((1 << (((v) & 7)+4)))
++#define CACHE_ASSOC_V7(v)     ((((v) >> 3) & ((1<<10)-1))+1)
++#define CACHE_SETS_V7(v)      ((((v) >> 13) & ((1<<15)-1))+1)
++#define CACHE_SIZE_V7(v)      (CACHE_LINE_V7(v)*CACHE_ASSOC_V7(v)*CACHE_SETS_V7(v))
++#define CACHE_WA_V7(v)                (((v) & (1<<28)) != 0)
++#define CACHE_RA_V7(v)                (((v) & (1<<29)) != 0)
++#define CACHE_WB_V7(v)                (((v) & (1<<30)) != 0)
++#define CACHE_WT_V7(v)                (((v) & (1<<31)) != 0)
++
+ static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
+ {
+       unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
+@@ -279,11 +313,57 @@ static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
+                       CACHE_LINE(cache)));
+ }
++static void dump_v7_cache(const char *type, int cpu, unsigned int level)
++{
++      unsigned int cachesize;
++                    
++      write_extended_cpuid(2,0,0,0,level);  /* Set the cache size selection register */
++      write_extended_cpuid(0,7,5,4,0);      /* Prefetch flush to wait for above */
++      cachesize = read_extended_cpuid(1,0,0,0);
++
++      printk("CPU%u: %s cache: %d bytes, associativity %d, %d byte lines, %d sets,\n      supports%s%s%s%s\n",
++             cpu, type,
++             CACHE_SIZE_V7(cachesize),CACHE_ASSOC_V7(cachesize),
++             CACHE_LINE_V7(cachesize),CACHE_SETS_V7(cachesize),
++             CACHE_WA_V7(cachesize) ? " WA" : "",
++             CACHE_RA_V7(cachesize) ? " RA" : "",
++             CACHE_WB_V7(cachesize) ? " WB" : "",
++             CACHE_WT_V7(cachesize) ? " WT" : "");
++}
++
+ static void __init dump_cpu_info(int cpu)
+ {
+       unsigned int info = read_cpuid(CPUID_CACHETYPE);
+-      if (info != processor_id) {
++      if (info != processor_id && (info & (1 << 31))) {
++              /* ARMv7 style of cache info register */
++              unsigned int id = read_extended_cpuid(1,0,0,1);
++              unsigned int level = 0;
++              printk("CPU%u: L1 I %s cache. Caches unified at level %u, coherent at level %u\n",
++                     cpu,
++                     v7_cache_policy[CACHE_TYPE_V7(info)],
++                     CACHE_UNIFIED(id),
++                     CACHE_COHERENT(id));
++
++              while (id & CACHE_ID_LEVEL_MASK) {
++                      printk("CPU%u: Level %u cache is %s\n",
++                             cpu, (level >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);
++
++                      if (id & 1) {
++                              /* Dump I at this level */
++                              dump_v7_cache("I", cpu, level | 1);
++                      }
++
++                      if (id & (4 | 2)) {
++                              /* Dump D or unified at this level */
++                              dump_v7_cache((id & 4) ? "unified" : "D", cpu, level);
++                      }
++
++                      /* Next level out */
++                      level += 2;
++                      id >>= CACHE_ID_LEVEL_BITS;
++              }
++      } else if (info != processor_id) {
+               printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
+                      cache_types[CACHE_TYPE(info)]);
+               if (CACHE_S(info)) {
+@@ -916,6 +996,30 @@ c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
+                           CACHE_LINE(cache)));
+ }
++static void c_show_v7_cache(struct seq_file *m, const char *type, unsigned int levelselect)
++{
++      unsigned int cachesize;
++      unsigned int level = (levelselect >> 1) + 1;
++                    
++      write_extended_cpuid(2,0,0,0,levelselect);  /* Set the cache size selection register */
++      write_extended_cpuid(0,7,5,4,0);      /* Prefetch flush to wait for above */
++      cachesize = read_extended_cpuid(1,0,0,0);
++
++      seq_printf(m, "L%u %s size\t\t: %d bytes\n"
++                 "L%u %s assoc\t\t: %d\n"
++                 "L%u %s line length\t: %d\n"
++                 "L%u %s sets\t\t: %d\n"
++                 "L%u %s supports\t\t:%s%s%s%s\n",
++                 level, type, CACHE_SIZE_V7(cachesize),
++                 level, type, CACHE_ASSOC_V7(cachesize),
++                 level, type, CACHE_LINE_V7(cachesize),
++                 level, type, CACHE_SETS_V7(cachesize),
++                 level, type, CACHE_WA_V7(cachesize) ? " WA" : "",
++                 CACHE_RA_V7(cachesize) ? " RA" : "",
++                 CACHE_WB_V7(cachesize) ? " WB" : "",
++                 CACHE_WT_V7(cachesize) ? " WT" : "");
++}
++
+ static int c_show(struct seq_file *m, void *v)
+ {
+       int i;
+@@ -971,7 +1075,36 @@ static int c_show(struct seq_file *m, void *v)
+       {
+               unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
+-              if (cache_info != processor_id) {
++              if (cache_info != processor_id && (cache_info & (1<<31))) {
++                      /* V7 style of cache info register */
++                      unsigned int id = read_extended_cpuid(1,0,0,1);
++                      unsigned int levelselect = 0;
++                      seq_printf(m, "L1 I cache\t:%s\n"
++                                 "Cache unification level\t: %u\n"
++                                 "Cache coherency level\t: %u\n",
++                                 v7_cache_policy[CACHE_TYPE_V7(cache_info)],
++                                 CACHE_UNIFIED(id),
++                                 CACHE_COHERENT(id));
++
++                      while (id & CACHE_ID_LEVEL_MASK) {
++                              seq_printf(m, "Level %u cache\t\t: %s\n",
++                                         (levelselect >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);
++
++                              if (id & 1) {
++                                      /* Dump I at this level */
++                                      c_show_v7_cache(m, "I", levelselect | 1);
++                              }
++
++                              if (id & (4 | 2)) {
++                                      /* Dump D or unified at this level */
++                                      c_show_v7_cache(m, (id & 4) ? "cache" : "D", levelselect);
++                              }
++
++                              /* Next level out */
++                              levelselect += 2;
++                              id >>= CACHE_ID_LEVEL_BITS;
++                      }
++              } else if (cache_info != processor_id) {
+                       seq_printf(m, "Cache type\t: %s\n"
+                                     "Cache clean\t: %s\n"
+                                     "Cache lockdown\t: %s\n"
+diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
+index 514af79..704738e 100644
+--- a/arch/arm/include/asm/system.h
++++ b/arch/arm/include/asm/system.h
+@@ -74,6 +74,24 @@
+                   : "cc");                                            \
+               __val;                                                  \
+       })
++#define read_extended_cpuid(op1,op2,op3,op4)          \
++      ({                                                              \
++              unsigned int __val;                                     \
++              asm("mrc p15," __stringify(op1) ",%0,c" __stringify(op2)",c" __stringify(op3)"," __stringify(op4)       \
++                  : "=r" (__val)                                      \
++                  :                                                   \
++                  : "cc");                                            \
++              __val;                                                  \
++      })
++
++#define write_extended_cpuid(op1,op2,op3,op4,v)               \
++      ({                                                              \
++              unsigned int __val = v;                                 \
++              asm("mcr p15," __stringify(op1) ",%0,c" __stringify(op2)",c" __stringify(op3)"," __stringify(op4)       \
++                  :                                                   \
++                  : "r" (__val)                                       \
++                  : "cc");                                            \
++      })
+ #else
+ extern unsigned int processor_id;
+ #define read_cpuid(reg) (processor_id)
+
+
+-- 
+Catalin
+
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/defconfig b/packages/linux/omap3-pandora-kernel-wifi/defconfig
new file mode 100755 (executable)
index 0000000..16815a9
--- /dev/null
@@ -0,0 +1,1965 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-omap1
+# Mon Jan 12 18:36:10 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV7=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+CONFIG_HAVE_CLK=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_LSF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_MSM7X00A is not set
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set
+# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set
+CONFIG_OMAP_SMARTREFLEX=y
+# CONFIG_OMAP_SMARTREFLEX_TESTING is not set
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_BOOT_TAG=y
+CONFIG_OMAP_BOOT_REASON=y
+# CONFIG_OMAP_COMPONENT_VERSION is not set
+# CONFIG_OMAP_GPIO_SWITCH is not set
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MMU_FWK is not set
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_LL_DEBUG_UART1 is not set
+# CONFIG_OMAP_LL_DEBUG_UART2 is not set
+CONFIG_OMAP_LL_DEBUG_UART3=y
+CONFIG_ARCH_OMAP34XX=y
+CONFIG_ARCH_OMAP3430=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_OVERO is not set
+CONFIG_MACH_OMAP3_PANDORA=y
+CONFIG_OMAP_TICK_GPTIMER=12
+
+#
+# Boot options
+#
+
+#
+# Power management
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=" debug "
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=y
+CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+CONFIG_BT_HCIBTSDIO=y
+# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIBRF6150 is not set
+CONFIG_BT_HCIH4P=y
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+CONFIG_CFG80211=y
+CONFIG_NL80211=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_MAC80211=y
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_IEEE80211=y
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=y
+CONFIG_IEEE80211_CRYPT_CCMP=y
+CONFIG_IEEE80211_CRYPT_TKIP=y
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_EEPROM_93CX6=y
+# CONFIG_OMAP_STI is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_PANDORA_GAME_CONSOLE=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+# CONFIG_LIBERTAS_SDIO is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_USB_ZD1201=y
+CONFIG_USB_NET_RNDIS_WLAN=y
+CONFIG_RTL8187=y
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_P54_COMMON=y
+CONFIG_P54_USB=y
+# CONFIG_IWLWIFI_LEDS is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=y
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_RT2X00=y
+CONFIG_RT2X00_LIB=y
+CONFIG_RT2X00_LIB_USB=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2500USB=y
+CONFIG_RT2500USB_LEDS=y
+CONFIG_RT73USB=y
+CONFIG_RT73USB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_CDCETHER=y
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=y
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2102 is not set
+# CONFIG_TOUCHSCREEN_TSC210X is not set
+# CONFIG_TOUCHSCREEN_UCB1400 is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_UINPUT is not set
+CONFIG_INPUT_VSENSE=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_SENSORS_TLV320AIC23 is not set
+CONFIG_TWL4030_MADC=m
+CONFIG_TWL4030_USB=y
+CONFIG_TWL4030_PWRBUTTON=y
+CONFIG_TWL4030_POWEROFF=y
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_LP5521 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+CONFIG_SPI_OMAP24XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_TSC2101 is not set
+# CONFIG_SPI_TSC2102 is not set
+# CONFIG_SPI_TSC210X is not set
+# CONFIG_SPI_TSC2301 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_POWER=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_CX25840=m
+CONFIG_VIDEO_CX2341X=m
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_TUNER_3036 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_EM28XX is not set
+CONFIG_VIDEO_USBVISION=m
+CONFIG_VIDEO_USBVIDEO=m
+CONFIG_USB_VICAM=m
+CONFIG_USB_IBMCAM=m
+CONFIG_USB_KONICAWC=m
+CONFIG_USB_QUICKCAM_MESSENGER=m
+# CONFIG_USB_ET61X251 is not set
+CONFIG_VIDEO_OVCAMCHIP=m
+CONFIG_USB_W9968CF=m
+CONFIG_USB_OV511=m
+CONFIG_USB_SE401=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_STV680=m
+# CONFIG_USB_ZC0301 is not set
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_ZR364XX=m
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_RADIO_TEA5761 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_ANYSEE is not set
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_DVB_CINERGYT2=m
+# CONFIG_DVB_CINERGYT2_TUNING is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+# CONFIG_DVB_DRX397XD is not set
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_NXT200X=m
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_S5H1411=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_LNBP21=m
+# CONFIG_DVB_ISL6405 is not set
+CONFIG_DVB_ISL6421=m
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=4
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GPIO=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PANDORA_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+# CONFIG_SND_OMAP_AIC23 is not set
+# CONFIG_SND_OMAP_TSC2101 is not set
+# CONFIG_SND_SX1 is not set
+# CONFIG_SND_OMAP_TSC2102 is not set
+# CONFIG_SND_OMAP24XX_EAC is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_OMAP_EHCI_PHY_MODE=y
+# CONFIG_OMAP_EHCI_TLL_MODE is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_LED=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+
+#
+# MMC/SD Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_OMAP_DEBUG is not set
+# CONFIG_LEDS_OMAP is not set
+# CONFIG_LEDS_OMAP_PWM is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+
+#
+# Voltage and Current regulators
+#
+# CONFIG_REGULATOR is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_UIO is not set
+
+#
+# CBUS support
+#
+# CONFIG_CBUS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+# CONFIG_CRYPTO_AUTHENC is not set
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/packages/linux/omap3-pandora-kernel-wifi/defconfig.bak b/packages/linux/omap3-pandora-kernel-wifi/defconfig.bak
new file mode 100755 (executable)
index 0000000..92f3c08
--- /dev/null
@@ -0,0 +1,1970 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-omap1
+# Mon Jan 12 18:36:10 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV7=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+CONFIG_HAVE_CLK=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_LSF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_MSM7X00A is not set
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set
+# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set
+CONFIG_OMAP_SMARTREFLEX=y
+# CONFIG_OMAP_SMARTREFLEX_TESTING is not set
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_BOOT_TAG=y
+CONFIG_OMAP_BOOT_REASON=y
+# CONFIG_OMAP_COMPONENT_VERSION is not set
+# CONFIG_OMAP_GPIO_SWITCH is not set
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MMU_FWK is not set
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_LL_DEBUG_UART1 is not set
+# CONFIG_OMAP_LL_DEBUG_UART2 is not set
+CONFIG_OMAP_LL_DEBUG_UART3=y
+CONFIG_ARCH_OMAP34XX=y
+CONFIG_ARCH_OMAP3430=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_OVERO is not set
+CONFIG_MACH_OMAP3_PANDORA=y
+CONFIG_OMAP_TICK_GPTIMER=12
+
+#
+# Boot options
+#
+
+#
+# Power management
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=" debug "
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=y
+CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+CONFIG_BT_HCIBTSDIO=y
+# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIBRF6150 is not set
+CONFIG_BT_HCIH4P=y
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+CONFIG_CFG80211=y
+CONFIG_NL80211=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_MAC80211=y
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_IEEE80211=y
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=y
+CONFIG_IEEE80211_CRYPT_CCMP=y
+CONFIG_IEEE80211_CRYPT_TKIP=y
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_EEPROM_93CX6=y
+# CONFIG_OMAP_STI is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_PANDORA_GAME_CONSOLE=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+# CONFIG_LIBERTAS_SDIO is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_USB_ZD1201=y
+CONFIG_USB_NET_RNDIS_WLAN=y
+CONFIG_RTL8187=y
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_P54_COMMON=y
+CONFIG_P54_USB=y
+# CONFIG_IWLWIFI_LEDS is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=y
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_RT2X00=y
+CONFIG_RT2X00_LIB=y
+CONFIG_RT2X00_LIB_USB=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2500USB=y
+CONFIG_RT2500USB_LEDS=y
+CONFIG_RT73USB=y
+CONFIG_RT73USB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_CDCETHER=y
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=y
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2102 is not set
+# CONFIG_TOUCHSCREEN_TSC210X is not set
+# CONFIG_TOUCHSCREEN_UCB1400 is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_UINPUT is not set
+CONFIG_INPUT_VSENSE=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_SENSORS_TLV320AIC23 is not set
+CONFIG_TWL4030_MADC=m
+CONFIG_TWL4030_USB=y
+CONFIG_TWL4030_PWRBUTTON=y
+CONFIG_TWL4030_POWEROFF=y
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_LP5521 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+CONFIG_SPI_OMAP24XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_TSC2101 is not set
+# CONFIG_SPI_TSC2102 is not set
+# CONFIG_SPI_TSC210X is not set
+# CONFIG_SPI_TSC2301 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_POWER=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_CX25840=m
+CONFIG_VIDEO_CX2341X=m
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_TUNER_3036 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_EM28XX is not set
+CONFIG_VIDEO_USBVISION=m
+CONFIG_VIDEO_USBVIDEO=m
+CONFIG_USB_VICAM=m
+CONFIG_USB_IBMCAM=m
+CONFIG_USB_KONICAWC=m
+CONFIG_USB_QUICKCAM_MESSENGER=m
+# CONFIG_USB_ET61X251 is not set
+CONFIG_VIDEO_OVCAMCHIP=m
+CONFIG_USB_W9968CF=m
+CONFIG_USB_OV511=m
+CONFIG_USB_SE401=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_STV680=m
+# CONFIG_USB_ZC0301 is not set
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_ZR364XX=m
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_RADIO_TEA5761 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_ANYSEE is not set
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_DVB_CINERGYT2=m
+# CONFIG_DVB_CINERGYT2_TUNING is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+# CONFIG_DVB_DRX397XD is not set
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_NXT200X=m
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_S5H1411=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_LNBP21=m
+# CONFIG_DVB_ISL6405 is not set
+CONFIG_DVB_ISL6421=m
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=4
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GPIO=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PANDORA_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+# CONFIG_SND_OMAP_AIC23 is not set
+# CONFIG_SND_OMAP_TSC2101 is not set
+# CONFIG_SND_SX1 is not set
+# CONFIG_SND_OMAP_TSC2102 is not set
+# CONFIG_SND_OMAP24XX_EAC is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_OMAP_EHCI_PHY_MODE=y
+# CONFIG_OMAP_EHCI_TLL_MODE is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_LED=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+
+#
+# MMC/SD Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_OMAP_HS=y
+CONFIG_MMC_SPI=m
+CONFIG_OMAP_HS_MMC1=y
+CONFIG_OMAP_HS_MMC2=y
+CONFIG_OMAP_HS_MMC3=y
+CONFIG_TIWLAN_SDIO=y
+CONFIG_TIWLAN_MMC_CONTROLLER=3
+# (duplicate CONFIG_MMC_SPI entry neutralized; CONFIG_MMC_SPI=m is set above)
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_OMAP_DEBUG is not set
+# CONFIG_LEDS_OMAP is not set
+# CONFIG_LEDS_OMAP_PWM is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+
+#
+# Voltage and Current regulators
+#
+# CONFIG_REGULATOR is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_UIO is not set
+
+#
+# CBUS support
+#
+# CONFIG_CBUS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+# CONFIG_CRYPTO_AUTHENC is not set
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/packages/linux/omap3-pandora-kernel-wifi/dvb-fix-dma.diff b/packages/linux/omap3-pandora-kernel-wifi/dvb-fix-dma.diff
new file mode 100755 (executable)
index 0000000..e05473f
--- /dev/null
@@ -0,0 +1,60 @@
+Hi,
+I post this patch that fixes a kernel crash that happens when using a dvb
+usb stick on a mips platform and I think even on other platforms on which
+the dma access is not cache-coherent.
+
+The problem's origin is that, inside the method usb_bulk_urb_init of file
+drivers/media/dvb/dvb-usb/usb-urb.c, stream->urb_list[i]->transfer_buffer
+points to a memory area that has been allocated to be dma-coherent but
+stream->urb_list[i]->transfer_flags doesn't include the
+URB_NO_TRANSFER_DMA_MAP flag and stream->urb_list[i]->transfer_dma is not
+set.
+When later on the stream->urb_list[i]->transfer_buffer pointer is used
+inside function usb_hcd_submit_urb of file drivers/usb/core/hcd.c since the
+flag URB_NO_TRANSFER_DMA_MAP is not set the urb->transfer_buffer pointer is
+passed to the dma_map_single function that since the address is dma-coherent
+returns a wrong transfer_dma address that later on leads to the kernel crash.
+
+The problem is solved by setting the URB_NO_TRANSFER_DMA_MAP flag and the
+stream->urb_list[i]->transfer_dma address.
+
+Perhaps to be more safe the URB_NO_TRANSFER_DMA_MAP flag can be set only
+if stream->urb_list[i]->transfer_dma != 0.
+
+I don't know if half of the fault can be of the dma_map_single function that
+should anyway return a valid address both for a not dma-coherent and a
+dma-coherent address.
+
+Just to be clear:
+I've done this patch to solve my problem and I tested it only on a mips 
+platform
+but I think it should not cause any problems on other platforms.
+I posted it here to help someone else that can have my same problem and to 
+point it out
+to the maintainer of this part of code.
+You can use it at your own risk and I'm not responsible in any way for any 
+problem or
+damage that it can cause.
+I'm available to discuss about it
+
+Bye
+
+Michele Scorcia
+
+--------------------
+
+
+
+
+--- /tmp/usb-urb.c     2008-10-08 09:53:23.000000000 +0200
++++ git/drivers/media/dvb/dvb-usb/usb-urb.c    2008-10-08 09:54:16.000000000 +0200
+@@ -152,7 +152,8 @@
+                               stream->props.u.bulk.buffersize,
+                               usb_urb_complete, stream);
+-              stream->urb_list[i]->transfer_flags = 0;
++              stream->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
++              stream->urb_list[i]->transfer_dma = stream->dma_addr[i];        
+               stream->urbs_initialized++;
+       }
+       return 0;
diff --git a/packages/linux/omap3-pandora-kernel-wifi/evm-mcspi-ts.diff b/packages/linux/omap3-pandora-kernel-wifi/evm-mcspi-ts.diff
new file mode 100755 (executable)
index 0000000..64d797c
--- /dev/null
@@ -0,0 +1,132 @@
+From linux-omap-owner@vger.kernel.org Sun Nov 02 21:08:25 2008
+Received: from localhost
+       ([127.0.0.1] helo=dominion ident=koen)
+       by dominion.dominion.void with esmtp (Exim 4.69)
+       (envelope-from <linux-omap-owner@vger.kernel.org>)
+       id 1KwjFJ-0008Hg-0T
+       for koen@localhost; Sun, 02 Nov 2008 21:08:25 +0100
+Received: from xs.service.utwente.nl [130.89.5.250]
+       by dominion with POP3 (fetchmail-6.3.9-rc2)
+       for <koen@localhost> (single-drop); Sun, 02 Nov 2008 21:08:25 +0100 (CET)
+Received: from mail.service.utwente.nl ([130.89.5.253]) by exchange.service.utwente.nl with Microsoft SMTPSVC(6.0.3790.3959);
+        Sun, 2 Nov 2008 20:57:16 +0100
+Received: from mx.utwente.nl ([130.89.2.13]) by mail.service.utwente.nl with Microsoft SMTPSVC(6.0.3790.3959);
+        Sun, 2 Nov 2008 20:57:16 +0100
+Received: from vger.kernel.org (vger.kernel.org [209.132.176.167])
+          by mx.utwente.nl (8.12.10/SuSE Linux 0.7) with ESMTP id mA2JudEK010968
+          for <k.kooi@student.utwente.nl>; Sun, 2 Nov 2008 20:56:40 +0100
+Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
+       id S1752819AbYKBT4i (ORCPT <rfc822;k.kooi@student.utwente.nl>);
+       Sun, 2 Nov 2008 14:56:38 -0500
+Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1752829AbYKBT4i
+       (ORCPT <rfc822;linux-omap-outgoing>); Sun, 2 Nov 2008 14:56:38 -0500
+Received: from fg-out-1718.google.com ([72.14.220.153]:32481 "EHLO
+       fg-out-1718.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
+       with ESMTP id S1752819AbYKBT4h (ORCPT
+       <rfc822;linux-omap@vger.kernel.org>); Sun, 2 Nov 2008 14:56:37 -0500
+Received: by fg-out-1718.google.com with SMTP id 19so1869080fgg.17
+        for <linux-omap@vger.kernel.org>; Sun, 02 Nov 2008 11:56:33 -0800 (PST)
+DKIM-Signature:        v=1; a=rsa-sha256; c=relaxed/relaxed;
+        d=gmail.com; s=gamma;
+        h=domainkey-signature:received:received:from:to:cc:subject:date
+         :message-id:x-mailer:in-reply-to:references;
+        bh=Ftvoq8kE3ciPRy7pNy5VLkNnZD8o0HYWIrO1LMS/lAY=;
+        b=HpEcngDUbAObGNJuQmBIG3SoNHesUL57GluZGlYO7kxFxfH6N8zeHjKuRSk86+mT5s
+         gMhyCC07wjVp75HnqCtKbOJzNw/8F4ZGbL2lY1LC99+zxHW1JBQv5c3ZaoCVqTw6TuH0
+         bQ8Ew2BwHknT3wGA+QcGoMJJs5aw62AhPiyHY=
+DomainKey-Signature: a=rsa-sha1; c=nofws;
+        d=gmail.com; s=gamma;
+        h=from:to:cc:subject:date:message-id:x-mailer:in-reply-to:references;
+        b=aio1APZhCIcYIrMY844QkdaQzKw0/yiuaVjqfv52fnft1kafGT2qAS3KfXAc61a9If
+         sXHbi2fr/r1a7YZJJVGqkJX0WmWTY0OqdhS1lmugP/dXEMHeqaArKATbvxrq9/svb1bV
+         Vzpkm6sOzLrr54uo+BcZNoxHWqb8W2UrRxuTk=
+Received: by 10.103.131.18 with SMTP id i18mr6668205mun.126.1225655793072;
+        Sun, 02 Nov 2008 11:56:33 -0800 (PST)
+Received: from localhost.localdomain ([78.59.134.74])
+        by mx.google.com with ESMTPS id g1sm23199635muf.8.2008.11.02.11.56.31
+        (version=TLSv1/SSLv3 cipher=RC4-MD5);
+        Sun, 02 Nov 2008 11:56:31 -0800 (PST)
+From: Grazvydas Ignotas <notasas@gmail.com>
+To: linux-omap@vger.kernel.org
+Cc: Grazvydas Ignotas <notasas@gmail.com>
+Subject: Re: omap3evm LCD red-tint workaround
+Date:  Sun,  2 Nov 2008 21:56:19 +0200
+Message-Id: <1225655779-18934-1-git-send-email-notasas@gmail.com>
+X-Mailer: git-send-email 1.5.4.3
+In-Reply-To: <57322719-1A5A-45DC-9846-5C0A3B6EF346@student.utwente.nl>
+References: <57322719-1A5A-45DC-9846-5C0A3B6EF346@student.utwente.nl>
+Sender: linux-omap-owner@vger.kernel.org
+Precedence: bulk
+List-ID: <linux-omap.vger.kernel.org>
+X-Mailing-List:        linux-omap@vger.kernel.org
+X-UTwente-MailScanner-Information: Scanned by MailScanner. Contact servicedesk@icts.utwente.nl for more information.
+X-UTwente-MailScanner: Found to be clean
+X-UTwente-MailScanner-From: linux-omap-owner@vger.kernel.org
+X-Spam-Status: No
+X-OriginalArrivalTime: 02 Nov 2008 19:57:16.0876 (UTC) FILETIME=[34FBA0C0:01C93D25]
+
+> PS: TS is still unusable with the 16x16 pixel resolution
+This is also the case for Pandora. The patch below fixes the problem,
+but as I have no other boards to test this on, I haven't sent it.
+See if it helps you.
+
+
+From 91f3af26bbf751b846e6265d86387e81be7c1364 Mon Sep 17 00:00:00 2001
+From: Grazvydas Ignotas <notasas@gmail.com>
+Date: Tue, 28 Oct 2008 22:01:42 +0200
+Subject: [PATCH] OMAP3: fix McSPI transfers
+
+Currently on OMAP3 if both write and read is set up for a transfer,
+the first byte returned on read is corrupted. Work around this by
+disabling channel between reads and writes, instead of transfers.
+---
+ drivers/spi/omap2_mcspi.c |    7 ++++---
+ 1 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
+index 454a271..4890b6c 100644
+--- a/drivers/spi/omap2_mcspi.c
++++ b/drivers/spi/omap2_mcspi.c
+@@ -710,7 +710,6 @@ static void omap2_mcspi_work(struct work_struct *work)
+               spi = m->spi;
+               cs = spi->controller_state;
+-              omap2_mcspi_set_enable(spi, 1);
+               list_for_each_entry(t, &m->transfers, transfer_list) {
+                       if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+                               status = -EINVAL;
+@@ -741,6 +740,8 @@ static void omap2_mcspi_work(struct work_struct *work)
+                       if (t->len) {
+                               unsigned        count;
++                              omap2_mcspi_set_enable(spi, 1);
++
+                               /* RX_ONLY mode needs dummy data in TX reg */
+                               if (t->tx_buf == NULL)
+                                       __raw_writel(0, cs->base
+@@ -752,6 +753,8 @@ static void omap2_mcspi_work(struct work_struct *work)
+                                       count = omap2_mcspi_txrx_pio(spi, t);
+                               m->actual_length += count;
++                              omap2_mcspi_set_enable(spi, 0);
++
+                               if (count != t->len) {
+                                       status = -EIO;
+                                       break;
+@@ -777,8 +780,6 @@ static void omap2_mcspi_work(struct work_struct *work)
+               if (cs_active)
+                       omap2_mcspi_force_cs(spi, 0);
+-              omap2_mcspi_set_enable(spi, 0);
+-
+               m->status = status;
+               m->complete(m->context);
+-- 
+1.5.4.3
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/fix-install.patch b/packages/linux/omap3-pandora-kernel-wifi/fix-install.patch
new file mode 100755 (executable)
index 0000000..46bc25a
--- /dev/null
@@ -0,0 +1,23 @@
+From: Steve Sakoman <steve@sakoman.com>
+Date: Mon, 18 Aug 2008 16:07:31 +0000 (-0700)
+Subject: scripts/Makefile.fwinst: add missing space when setting mode in cmd_install
+X-Git-Url: http://www.sakoman.net/cgi-bin/gitweb.cgi?p=linux-omap-2.6.git;a=commitdiff_plain;h=f039944bdd491cde7327133e9976881d3133ae70
+
+scripts/Makefile.fwinst: add missing space when setting mode in cmd_install
+
+This was causing build failures on some machines
+---
+
+diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
+index 6bf8e87..fb20532 100644
+--- a/scripts/Makefile.fwinst
++++ b/scripts/Makefile.fwinst
+@@ -37,7 +37,7 @@ install-all-dirs: $(installed-fw-dirs)
+       @true
+ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
+-      cmd_install = $(INSTALL) -m0644 $< $@
++      cmd_install = $(INSTALL) -m 0644 $< $@
+ $(installed-fw-dirs):
+       $(call cmd,mkdir)
diff --git a/packages/linux/omap3-pandora-kernel-wifi/fix-irq33.diff b/packages/linux/omap3-pandora-kernel-wifi/fix-irq33.diff
new file mode 100755 (executable)
index 0000000..709f042
--- /dev/null
@@ -0,0 +1,111 @@
+From: "Nathan Monson" <nmonson@gmail.com>
+To: "linux-omap@vger.kernel.org List" <linux-omap@vger.kernel.org>
+Subject: Re: omapfb: help from userspace
+Cc: "TK, Pratheesh Gangadhar" <pratheesh@ti.com>
+
+On Wed, Oct 8, 2008 at 11:36 AM, Nathan Monson <nmonson@gmail.com> wrote:
+> "Felipe Contreras" <felipe.contreras@gmail.com> writes:
+>> irq -33, desc: c0335cf8, depth: 0, count: 0, unhandled: 0
+>
+> On the BeagleBoard list, Pratheesh Gangadhar said that mapping I/O
+> regions as Strongly Ordered suppresses this problem:
+> http://groups.google.com/group/beagleboard/browse_thread/thread/23e1c95b4bfb09b5/70d12dca569ca503?show_docid=70d12dca569ca503
+
+Pratheesh helped me make a patch against the latest linux-omap git to
+try this.
+
+With this patch, my IRQ -33 problems with the DSP have disappeared.
+Before, I would end up in IRQ -33 loop after 10 invocations of the DSP
+Bridge 'ping.out' utility.  I just finished running it 50,000 times
+without error.
+
+As stated before, this patch is just a workaround for testing
+purposes, not a fix.  Who knows what performance side effects it
+has...
+
+---
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9eb936e..5cb4f5f 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -25,6 +25,7 @@ struct map_desc {
+ #define MT_HIGH_VECTORS               8
+ #define MT_MEMORY             9
+ #define MT_ROM                        10
++#define MT_MEMORY_SO          11
+
+ #define MT_NONSHARED_DEVICE   MT_DEVICE_NONSHARED
+ #define MT_IXP2000_DEVICE     MT_DEVICE_IXP2000
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index adbe21f..c11c0e8 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -119,13 +119,13 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+               .virtual        = L3_34XX_VIRT,
+               .pfn            = __phys_to_pfn(L3_34XX_PHYS),
+               .length         = L3_34XX_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = L4_34XX_VIRT,
+               .pfn            = __phys_to_pfn(L4_34XX_PHYS),
+               .length         = L4_34XX_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = L4_WK_34XX_VIRT,
+@@ -137,19 +137,19 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+               .virtual        = OMAP34XX_GPMC_VIRT,
+               .pfn            = __phys_to_pfn(OMAP34XX_GPMC_PHYS),
+               .length         = OMAP34XX_GPMC_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = OMAP343X_SMS_VIRT,
+               .pfn            = __phys_to_pfn(OMAP343X_SMS_PHYS),
+               .length         = OMAP343X_SMS_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = OMAP343X_SDRC_VIRT,
+               .pfn            = __phys_to_pfn(OMAP343X_SDRC_PHYS),
+               .length         = OMAP343X_SDRC_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = L4_PER_34XX_VIRT,
+@@ -161,7 +161,7 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+               .virtual        = L4_EMU_34XX_VIRT,
+               .pfn            = __phys_to_pfn(L4_EMU_34XX_PHYS),
+               .length         = L4_EMU_34XX_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+ };
+ #endif
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index a713e40..d5f25ad 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -245,6 +245,10 @@ static struct mem_type mem_types[] = {
+               .prot_sect = PMD_TYPE_SECT,
+               .domain    = DOMAIN_KERNEL,
+       },
++      [MT_MEMORY_SO] = {
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_UNCACHED,
++              .domain    = DOMAIN_KERNEL,
++      },
+ };
+
+ const struct mem_type *get_mem_type(unsigned int type)
+--
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-256MB.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-256MB.diff
new file mode 100755 (executable)
index 0000000..0492ca2
--- /dev/null
@@ -0,0 +1,24 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Thu, 2 Oct 2008 00:05:33 +0000 (+0100)
+Subject: OMAP: Increase VMALLOC_END to allow 256MB RAM
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=355a0ce968e4a7b0c8d8203f4517296e932e373d
+
+OMAP: Increase VMALLOC_END to allow 256MB RAM
+
+This increases VMALLOC_END to 0x18000000, making room for 256MB
+RAM with the default 128MB vmalloc region.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/plat-omap/include/mach/vmalloc.h b/arch/arm/plat-omap/include/mach/vmalloc.h
+index d8515cb..b97dfaf 100644
+--- a/arch/arm/plat-omap/include/mach/vmalloc.h
++++ b/arch/arm/plat-omap/include/mach/vmalloc.h
+@@ -17,5 +17,5 @@
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+  */
+-#define VMALLOC_END     (PAGE_OFFSET + 0x17000000)
++#define VMALLOC_END     (PAGE_OFFSET + 0x18000000)
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-add-clk-get-parent.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-add-clk-get-parent.diff
new file mode 100755 (executable)
index 0000000..64944a5
--- /dev/null
@@ -0,0 +1,64 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:51:36 +0000 (+0100)
+Subject: OMAP: Add clk_get_parent() for OMAP2/3
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=08d1f1947a5a970b2fe6e4dfeeb70286b9379056
+
+OMAP: Add clk_get_parent() for OMAP2/3
+
+This makes clk_get_parent() work on OMAP2/3.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
+index 5f48e14..aae0d2e 100644
+--- a/arch/arm/mach-omap2/clock.c
++++ b/arch/arm/mach-omap2/clock.c
+@@ -831,6 +831,11 @@ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
+       return 0;
+ }
++struct clk *omap2_clk_get_parent(struct clk *clk)
++{
++      return clk->parent;
++}
++
+ /* DPLL rate rounding code */
+ /**
+diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
+index 3fa2e26..2916879 100644
+--- a/arch/arm/mach-omap2/clock.h
++++ b/arch/arm/mach-omap2/clock.h
+@@ -29,6 +29,7 @@ int omap2_clk_set_rate(struct clk *clk, unsigned long rate);
+ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent);
+ int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance);
+ long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate);
++struct clk *omap2_clk_get_parent(struct clk *clk);
+ #ifdef CONFIG_OMAP_RESET_CLOCKS
+ void omap2_clk_disable_unused(struct clk *clk);
+diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
+index c26d9d8..f91bd57 100644
+--- a/arch/arm/mach-omap2/clock24xx.c
++++ b/arch/arm/mach-omap2/clock24xx.c
+@@ -423,6 +423,7 @@ static struct clk_functions omap2_clk_functions = {
+       .clk_round_rate         = omap2_clk_round_rate,
+       .clk_set_rate           = omap2_clk_set_rate,
+       .clk_set_parent         = omap2_clk_set_parent,
++      .clk_get_parent         = omap2_clk_get_parent,
+       .clk_disable_unused     = omap2_clk_disable_unused,
+ #ifdef        CONFIG_CPU_FREQ
+       .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
+diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
+index 152d095..2196edd 100644
+--- a/arch/arm/mach-omap2/clock34xx.c
++++ b/arch/arm/mach-omap2/clock34xx.c
+@@ -606,6 +606,7 @@ static struct clk_functions omap2_clk_functions = {
+       .clk_round_rate         = omap2_clk_round_rate,
+       .clk_set_rate           = omap2_clk_set_rate,
+       .clk_set_parent         = omap2_clk_set_parent,
++      .clk_get_parent         = omap2_clk_get_parent,
+       .clk_disable_unused     = omap2_clk_disable_unused,
+ };
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-enable-overlay-optimalization.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-enable-overlay-optimalization.diff
new file mode 100755 (executable)
index 0000000..d027c53
--- /dev/null
@@ -0,0 +1,117 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:45:26 +0000 (+0100)
+Subject: OMAP: Enable overlay optimisation
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=7e052af7e4c73dc450412486ad37eb529e725dc7
+
+OMAP: Enable overlay optimisation
+
+This enables the overlay optimisation feature when the video
+overlay is active. This reduces memory bandwidth used by the
+display subsystem, improving overall performance.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 888d2c2..0f0b2e5 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -315,6 +315,60 @@ void omap_dispc_enable_digit_out(int enable)
+ }
+ EXPORT_SYMBOL(omap_dispc_enable_digit_out);
++#define MIN(a, b) ((a)<(b)?(a):(b))
++#define MAX(a, b) ((a)>(b)?(a):(b))
++
++static void setup_overlay_opt(void)
++{
++      struct fb_info **fbi = dispc.fbdev->fb_info;
++      struct omapfb_plane_struct *gfx, *vid;
++      struct fb_var_screeninfo *gvar;
++      unsigned gx, gx2, gy, gy2, gw, gh;
++      unsigned vx, vx2, vy, vy2, vw, vh;
++      unsigned bpp, skip;
++      static unsigned last_skip;
++
++      if (!fbi[0] || !fbi[1])
++              return;
++
++      gfx = fbi[0]->par;
++      vid = fbi[1]->par;
++      gvar = &fbi[0]->var;
++
++      gx = gfx->info.pos_x;
++      gy = gfx->info.pos_y;
++      gw = gfx->info.out_width;
++      gh = gfx->info.out_height;
++      vx = vid->info.pos_x;
++      vy = vid->info.pos_y;
++      vw = vid->info.out_width;
++      vh = vid->info.out_height;
++      gx2 = gx + gw;
++      gy2 = gy + gh;
++      vx2 = vx + vw;
++      vy2 = vy + vh;
++      bpp = gvar->bits_per_pixel / 8;
++
++      if (!gfx->info.enabled || !vid->info.enabled ||
++          dispc.color_key.key_type != OMAPFB_COLOR_KEY_DISABLED) {
++              skip = 0;
++      } else if (vx <= gx && vx2 >= gx2) {
++              unsigned y = MIN(gy2, vy2) - MAX(gy, vy);
++              skip = y * gvar->xres_virtual * bpp;
++      } else if (vx <= gx || vx2 >= gx2) {
++              unsigned x = MIN(gx2, vx2) - MAX(gx, vx);
++              skip = x * bpp;
++      } else {
++              skip = vw * bpp + 1;
++      }
++
++      if (skip != last_skip) {
++              last_skip = skip;
++              dispc_write_reg(DISPC_GFX_WINDOW_SKIP, skip);
++              MOD_REG_FLD(DISPC_CONTROL, 1<<12, !!skip<<12);
++      }
++}
++
+ static inline int _setup_plane(int plane, int channel_out,
+                                 u32 paddr, int screen_width,
+                                 int pos_x, int pos_y, int width, int height,
+@@ -437,6 +491,9 @@ static inline int _setup_plane(int plane, int channel_out,
+       dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
++      if (plane < 2)
++              setup_overlay_opt();
++
+       MOD_REG_FLD(DISPC_CONTROL, 1<<5, 1<<5);
+       return height * screen_width * bpp / 8;
+@@ -585,11 +642,19 @@ static int omap_dispc_enable_plane(int plane, int enable)
+       const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+                               DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+                               DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
++      struct omapfb_plane_struct *pi;
++
+       if ((unsigned int)plane > dispc.mem_desc.region_cnt)
+               return -EINVAL;
++      pi = dispc.fbdev->fb_info[plane]->par;
++      pi->info.enabled = enable;
++
+       enable_lcd_clocks(1);
+       MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
++      if (plane < 2)
++              setup_overlay_opt();
++      MOD_REG_FLD(DISPC_CONTROL, 1<<5, 1<<5);
+       enable_lcd_clocks(0);
+       return 0;
+@@ -633,6 +698,7 @@ static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
+       if (val != 0)
+               dispc_write_reg(tr_reg, ck->trans_key);
+       dispc_write_reg(df_reg, ck->background);
++      setup_overlay_opt();
+       enable_lcd_clocks(0);
+       dispc.color_key = *ck;
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-fix-display-panning.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-fix-display-panning.diff
new file mode 100755 (executable)
index 0000000..a4ba3d2
--- /dev/null
@@ -0,0 +1,49 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:18:48 +0000 (+0100)
+Subject: OMAP: Fix omapfb display panning
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=2ea46e9f28ff57a32d87bc380457a584c913fe78
+
+OMAP: Fix omapfb display panning
+
+This makes the FBIOPAN_DISPLAY ioctl work with omapfb.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index ce4c4de..64bf333 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -436,6 +436,8 @@ static inline int _setup_plane(int plane, int channel_out,
+       dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
++      MOD_REG_FLD(DISPC_CONTROL, 1<<5, 1<<5);
++
+       return height * screen_width * bpp / 8;
+ }
+diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
+index e7f3462..e9ffb92 100644
+--- a/drivers/video/omap/omapfb_main.c
++++ b/drivers/video/omap/omapfb_main.c
+@@ -207,8 +207,8 @@ static int ctrl_change_mode(struct fb_info *fbi)
+       struct omapfb_device *fbdev = plane->fbdev;
+       struct fb_var_screeninfo *var = &fbi->var;
+-      offset = var->yoffset * fbi->fix.line_length +
+-               var->xoffset * var->bits_per_pixel / 8;
++      offset = (var->yoffset * var->xres_virtual + var->xoffset) *
++              var->bits_per_pixel / 8;
+       if (fbdev->ctrl->sync)
+               fbdev->ctrl->sync();
+@@ -426,6 +426,8 @@ static void set_fb_fix(struct fb_info *fbi)
+       }
+       fix->accel              = FB_ACCEL_OMAP1610;
+       fix->line_length        = var->xres_virtual * bpp / 8;
++      fix->xpanstep           = 1;
++      fix->ypanstep           = 1;
+ }
+ static int set_color_mode(struct omapfb_plane_struct *plane,
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-fix-timings.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-fix-timings.diff
new file mode 100755 (executable)
index 0000000..37ca7d3
--- /dev/null
@@ -0,0 +1,26 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:16:14 +0000 (+0100)
+Subject: OMAP: Fix video timings info message
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=3a8bdf0967ae2c4eb3cebb97118ef0392f709c1c
+
+OMAP: Fix video timings info message
+
+This fixes the hsync frequency value printed on startup.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
+index d176a2c..e7f3462 100644
+--- a/drivers/video/omap/omapfb_main.c
++++ b/drivers/video/omap/omapfb_main.c
+@@ -1792,7 +1792,8 @@ static int omapfb_do_probe(struct platform_device *pdev,
+                       vram, fbdev->mem_desc.region_cnt);
+       pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
+                       "vfreq %lu.%lu Hz\n",
+-                      phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);
++                      phz / 1000, hhz / 10000, hhz % 10000,
++                      vhz / 10, vhz % 10);
+       return 0;
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-improve-pixclock-config.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-improve-pixclock-config.diff
new file mode 100755 (executable)
index 0000000..5a70212
--- /dev/null
@@ -0,0 +1,93 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:34:39 +0000 (+0100)
+Subject: OMAP: Improve pixel clock configuration
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=01c2d720e59c291de9eb21eb65225f2f215fef84
+
+OMAP: Improve pixel clock configuration
+
+This sets the DSS1_ALWON_FCLK clock as close as possible to a
+multiple of the requested pixel clock, while keeping it below
+the 173MHz limit.
+
+Due to of the structure of the clock tree, dss1_alwon_fck cannot
+be set directly, and we must use dpll4_m4_ck instead.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 64bf333..888d2c2 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -177,6 +177,7 @@ static struct {
+       struct clk      *dss_ick, *dss1_fck;
+       struct clk      *dss_54m_fck;
++      struct clk      *dpll4_m4_ck;
+       enum omapfb_update_mode update_mode;
+       struct omapfb_device    *fbdev;
+@@ -736,19 +737,34 @@ static void setup_color_conv_coef(void)
+       MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
+ }
++#define MAX_FCK 173000000
++
+ static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
+ {
++      unsigned long prate = clk_get_rate(clk_get_parent(dispc.dpll4_m4_ck));
++      unsigned long pcd_min = is_tft? 2: 3;
++      unsigned long fck_div;
+       unsigned long fck, lck;
+-      *lck_div = 1;
+       pck = max(1, pck);
++
++      if (pck * pcd_min > MAX_FCK) {
++              dev_warn(dispc.fbdev->dev, "pixclock %d kHz too high.\n",
++                       pck / 1000);
++              pck = MAX_FCK / pcd_min;
++      }
++
++      fck = pck * 2;
++      fck_div = (prate + pck) / fck;
++      if (fck_div > 16)
++              fck_div /= (fck_div + 15) / 16;
++      if (fck_div < 1)
++              fck_div = 1;
++      clk_set_rate(dispc.dpll4_m4_ck, prate / fck_div);
+       fck = clk_get_rate(dispc.dss1_fck);
+-      lck = fck;
+-      *pck_div = (lck + pck - 1) / pck;
+-      if (is_tft)
+-              *pck_div = max(2, *pck_div);
+-      else
+-              *pck_div = max(3, *pck_div);
++
++      *lck_div = 1;
++      *pck_div = (fck + pck - 1) / pck;
+       if (*pck_div > 255) {
+               *pck_div = 255;
+               lck = pck * *pck_div;
+@@ -909,11 +925,21 @@ static int get_dss_clocks(void)
+               return PTR_ERR(dispc.dss_54m_fck);
+       }
++      if (IS_ERR((dispc.dpll4_m4_ck =
++                              clk_get(dispc.fbdev->dev, "dpll4_m4_ck")))) {
++              dev_err(dispc.fbdev->dev, "can't get dpll4_m4_ck");
++              clk_put(dispc.dss_ick);
++              clk_put(dispc.dss1_fck);
++              clk_put(dispc.dss_54m_fck);
++              return PTR_ERR(dispc.dss_54m_fck);
++      }
++
+       return 0;
+ }
+ static void put_dss_clocks(void)
+ {
++      clk_put(dispc.dpll4_m4_ck);
+       clk_put(dispc.dss_54m_fck);
+       clk_put(dispc.dss1_fck);
+       clk_put(dispc.dss_ick);
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-make-dpll4-m4-ck-programmable.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-make-dpll4-m4-ck-programmable.diff
new file mode 100755 (executable)
index 0000000..0a535c5
--- /dev/null
@@ -0,0 +1,27 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:52:42 +0000 (+0100)
+Subject: OMAP: Make dpll4_m4_ck programmable with clk_set_rate()
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=feab5b628c06619196044c15d9d2a113df173eee
+
+OMAP: Make dpll4_m4_ck programmable with clk_set_rate()
+
+Filling the set_rate and round_rate fields of dpll4_m4_ck makes
+this clock programmable through clk_set_rate().  This is needed
+to give omapfb control over the dss1_alwon_fck rate.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h
+index 41f91f8..9c8e0c8 100644
+--- a/arch/arm/mach-omap2/clock34xx.h
++++ b/arch/arm/mach-omap2/clock34xx.h
+@@ -877,6 +877,8 @@ static struct clk dpll4_m4_ck = {
+                               PARENT_CONTROLS_CLOCK,
+       .clkdm          = { .name = "dpll4_clkdm" },
+       .recalc         = &omap2_clksel_recalc,
++      .set_rate       = &omap2_clksel_set_rate,
++      .round_rate     = &omap2_clksel_round_rate,
+ };
+ /* The PWRDN bit is apparently only available on 3430ES2 and above */
diff --git a/packages/linux/omap3-pandora-kernel-wifi/mru-make-video-timings-selectable.diff b/packages/linux/omap3-pandora-kernel-wifi/mru-make-video-timings-selectable.diff
new file mode 100755 (executable)
index 0000000..bba3ef7
--- /dev/null
@@ -0,0 +1,312 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:42:12 +0000 (+0100)
+Subject: OMAP: Make video mode selectable from pre-defined list
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=7a9e55d7156dae6bc930c77620a88a669d2ed1c9
+
+OMAP: Make video mode selectable from pre-defined list
+
+This adds a list of common video modes and allows one to be
+selected with video=omapfb:mode:name on the command line,
+overriding the defaults from lcd_*.c. A default named mode
+can also be specified in the kernel configuration.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
+index 5ebd591..9977e80 100644
+--- a/drivers/video/omap/Kconfig
++++ b/drivers/video/omap/Kconfig
+@@ -7,26 +7,13 @@ config FB_OMAP
+       help
+           Frame buffer driver for OMAP based boards.
+-choice
+-      depends on FB_OMAP && MACH_OVERO
+-      prompt "Screen resolution"
+-      default FB_OMAP_079M3R
++config FB_OMAP_VIDEO_MODE
++      string "Default video mode"
++      depends on FB_OMAP
+       help
+-        Selected desired screen resolution
+-
+-config FB_OMAP_031M3R
+-      boolean "640 x 480 @ 60 Hz Reduced blanking"
+-
+-config FB_OMAP_048M3R
+-      boolean "800 x 600 @ 60 Hz Reduced blanking"
+-
+-config FB_OMAP_079M3R
+-      boolean "1024 x 768 @ 60 Hz Reduced blanking"
+-
+-config FB_OMAP_092M9R
+-      boolean "1280 x 720 @ 60 Hz Reduced blanking"
+-
+-endchoice
++        Enter video mode name to use if none is specified on the kernel
++        command line. If left blank, board-specific default timings
++        will be used. See omapfb_main.c for a list of valid mode names.
+ config FB_OMAP_LCDC_EXTERNAL
+       bool "External LCD controller support"
+diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
+index e9ffb92..c4c4049 100644
+--- a/drivers/video/omap/omapfb_main.c
++++ b/drivers/video/omap/omapfb_main.c
+@@ -36,6 +36,20 @@
+ #define MODULE_NAME   "omapfb"
++struct video_mode {
++      const char      *name;
++      int             x_res, y_res;
++      int             pixel_clock;    /* In kHz */
++      int             hsw;            /* Horizontal synchronization
++                                         pulse width */
++      int             hfp;            /* Horizontal front porch */
++      int             hbp;            /* Horizontal back porch */
++      int             vsw;            /* Vertical synchronization
++                                         pulse width */
++      int             vfp;            /* Vertical front porch */
++      int             vbp;            /* Vertical back porch */
++};
++
+ static unsigned int   def_accel;
+ static unsigned long  def_vram[OMAPFB_PLANE_NUM];
+ static unsigned int   def_vram_cnt;
+@@ -43,6 +57,7 @@ static unsigned long def_vxres;
+ static unsigned long  def_vyres;
+ static unsigned int   def_rotate;
+ static unsigned int   def_mirror;
++static char           def_mode[16] = CONFIG_FB_OMAP_VIDEO_MODE;
+ #ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
+ static int            manual_update = 1;
+@@ -53,6 +68,7 @@ static int           manual_update;
+ static struct platform_device *fbdev_pdev;
+ static struct lcd_panel               *fbdev_panel;
+ static struct omapfb_device   *omapfb_dev;
++static struct video_mode      video_mode;
+ struct caps_table_struct {
+       unsigned long flag;
+@@ -83,6 +99,152 @@ static struct caps_table_struct color_caps[] = {
+       { 1 << OMAPFB_COLOR_YUY422,     "YUY422", },
+ };
++static struct video_mode video_modes[] __initdata = {
++      {
++              /* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
++              .name           = "640x480@60",
++              .x_res          = 640,
++              .y_res          = 480,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 4,
++              .vbp            = 7,
++              .pixel_clock    = 23500,
++      },
++      {
++              /* 800 x 600 @ 60 Hz  Reduced blanking VESA CVT 0.48M3-R */
++              .name           = "800x600@60",
++              .x_res          = 800,
++              .y_res          = 600,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 4,
++              .vbp            = 11,
++              .pixel_clock    = 35500,
++      },
++      {
++              /* 1024 x 768 @ 60 Hz  Reduced blanking VESA CVT 0.79M3-R */
++              .name           = "1024x768@60",
++              .x_res          = 1024,
++              .y_res          = 768,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 4,
++              .vbp            = 15,
++              .pixel_clock    = 56000,
++      },
++      {
++              /* 1280 x 720 @ 60 Hz  Reduced blanking VESA CVT 0.92M9-R */
++              .name           = "1280x720@60",
++              .x_res          = 1280,
++              .y_res          = 720,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 5,
++              .vbp            = 13,
++              .pixel_clock    = 64000,
++      },
++      {
++              /* 720 x 480 @ 60 Hz  CEA-861 Format 3 */
++              .name           = "480p60",
++              .x_res          = 720,
++              .y_res          = 480,
++              .hfp            = 16,
++              .hsw            = 62,
++              .hbp            = 60,
++              .vfp            = 9,
++              .vsw            = 6,
++              .vbp            = 30,
++              .pixel_clock    = 27027,
++      },
++      {
++              /* 720 x 576 @ 60 Hz  CEA-861 Format 18 */
++              .name           = "576p50",
++              .x_res          = 720,
++              .y_res          = 576,
++              .hfp            = 12,
++              .hsw            = 64,
++              .hbp            = 68,
++              .vfp            = 5,
++              .vsw            = 5,
++              .vbp            = 39,
++              .pixel_clock    = 27000,
++      },
++      {
++              /* 1280 x 720 @ 50 Hz  CEA-861B Format 19 */
++              .name           = "720p50",
++              .x_res          = 1280,
++              .y_res          = 720,
++              .hfp            = 440,
++              .hsw            = 40,
++              .hbp            = 220,
++              .vfp            = 20,
++              .vsw            = 5,
++              .vbp            = 5,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1280 x 720 @ 60 Hz  CEA-861B Format 4 */
++              .name           = "720p60",
++              .x_res          = 1280,
++              .y_res          = 720,
++              .hfp            = 110,
++              .hsw            = 40,
++              .hbp            = 220,
++              .vfp            = 20,
++              .vsw            = 5,
++              .vbp            = 5,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1920 x 1080 @ 24 Hz  CEA-861B Format 32 */
++              .name           = "1080p24",
++              .x_res          = 1920,
++              .y_res          = 1080,
++              .hfp            = 148,
++              .hsw            = 44,
++              .hbp            = 638,
++              .vfp            = 36,
++              .vsw            = 5,
++              .vbp            = 4,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1920 x 1080 @ 25 Hz  CEA-861B Format 33 */
++              .name           = "1080p25",
++              .x_res          = 1920,
++              .y_res          = 1080,
++              .hfp            = 148,
++              .hsw            = 44,
++              .hbp            = 528,
++              .vfp            = 36,
++              .vsw            = 5,
++              .vbp            = 4,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1920 x 1080 @ 30 Hz  CEA-861B Format 34 */
++              .name           = "1080p30",
++              .x_res          = 1920,
++              .y_res          = 1080,
++              .hfp            = 148,
++              .hsw            = 44,
++              .hbp            = 88,
++              .vfp            = 36,
++              .vsw            = 5,
++              .vbp            = 4,
++              .pixel_clock    = 74250,
++      },
++};
++
+ /*
+  * ---------------------------------------------------------------------------
+  * LCD panel
+@@ -1714,6 +1876,20 @@ static int omapfb_do_probe(struct platform_device *pdev,
+               goto cleanup;
+       }
++      if (video_mode.name) {
++              pr_info("omapfb: using mode %s\n", video_mode.name);
++
++              fbdev->panel->x_res     = video_mode.x_res;
++              fbdev->panel->y_res     = video_mode.y_res;
++              fbdev->panel->pixel_clock = video_mode.pixel_clock;
++              fbdev->panel->hsw       = video_mode.hsw;
++              fbdev->panel->hfp       = video_mode.hfp;
++              fbdev->panel->hbp       = video_mode.hbp;
++              fbdev->panel->vsw       = video_mode.vsw;
++              fbdev->panel->vfp       = video_mode.vfp;
++              fbdev->panel->vbp       = video_mode.vbp;
++      }
++
+       r = fbdev->panel->init(fbdev->panel, fbdev);
+       if (r)
+               goto cleanup;
+@@ -1870,6 +2046,17 @@ static struct platform_driver omapfb_driver = {
+       },
+ };
++static void __init omapfb_find_mode(char *name, struct video_mode *vmode)
++{
++      int i;
++
++      for (i = 0; i < sizeof(video_modes)/sizeof(video_modes[0]); i++)
++              if (!strcmp(name, video_modes[i].name)) {
++                      *vmode = video_modes[i];
++                      break;
++              }
++}
++
+ #ifndef MODULE
+ /* Process kernel command line parameters */
+@@ -1918,6 +2105,8 @@ static int __init omapfb_setup(char *options)
+                       def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
+               else if (!strncmp(this_opt, "manual_update", 13))
+                       manual_update = 1;
++              else if (!strncmp(this_opt, "mode:", 5))
++                      strncpy(def_mode, this_opt + 5, sizeof(def_mode));
+               else {
+                       pr_debug("omapfb: invalid option\n");
+                       r = -1;
+@@ -1939,6 +2128,9 @@ static int __init omapfb_init(void)
+               return -ENODEV;
+       omapfb_setup(option);
+ #endif
++
++      omapfb_find_mode(def_mode, &video_mode);
++
+       /* Register the driver with LDM */
+       if (platform_driver_register(&omapfb_driver)) {
+               pr_debug("failed to register omapfb driver\n");
+@@ -1960,6 +2152,7 @@ module_param_named(vyres, def_vyres, long, 0664);
+ module_param_named(rotate, def_rotate, uint, 0664);
+ module_param_named(mirror, def_mirror, uint, 0664);
+ module_param_named(manual_update, manual_update, bool, 0664);
++module_param_string(video_mode, def_mode, sizeof(def_mode), 0664);
+ module_init(omapfb_init);
+ module_exit(omapfb_cleanup);
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-dma-iso-in.eml b/packages/linux/omap3-pandora-kernel-wifi/musb-dma-iso-in.eml
new file mode 100755 (executable)
index 0000000..56fc827
--- /dev/null
@@ -0,0 +1,138 @@
+Fixes blurred capture images in dma mode. Isochronous error field in
+urb and source data buffer pointer were not updated properly in dma
+mode.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 08e421f..a481d54 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -1505,10 +1505,29 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+               musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+ #ifdef CONFIG_USB_INVENTRA_DMA
++              if (usb_pipeisoc(pipe)) {
++                      struct usb_iso_packet_descriptor *d;
++
++                      d = urb->iso_frame_desc + qh->iso_idx;
++                      d->actual_length = xfer_len;
++
++                      /* even if there was an error, we did the dma
++                       * for iso_frame_desc->length
++                       */
++                      if (d->status != EILSEQ && d->status != -EOVERFLOW)
++                              d->status = 0;
++
++                      if (++qh->iso_idx >= urb->number_of_packets)
++                              done = true;
++                      else
++                              done = false;
++
++              } else  {
+               /* done if urb buffer is full or short packet is recd */
+               done = (urb->actual_length + xfer_len >=
+                               urb->transfer_buffer_length
+                       || dma->actual_len < qh->maxpacket);
++              }
+               /* send IN token for next packet, without AUTOREQ */
+               if (!done) {
+@@ -1545,7 +1564,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+               if (dma) {
+                       struct dma_controller   *c;
+                       u16                     rx_count;
+-                      int                     ret;
++                      int                     ret, length;
++                      dma_addr_t              buf;
+                       rx_count = musb_readw(epio, MUSB_RXCOUNT);
+@@ -1558,6 +1578,35 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                       c = musb->dma_controller;
++                      if (usb_pipeisoc(pipe)) {
++                              int status = 0;
++                              struct usb_iso_packet_descriptor *d;
++
++                              d = urb->iso_frame_desc + qh->iso_idx;
++
++                              if (iso_err) {
++                                      status = -EILSEQ;
++                                      urb->error_count++;
++                              }
++                              if (rx_count > d->length) {
++                                      if (status == 0) {
++                                              status = -EOVERFLOW;
++                                              urb->error_count++;
++                                      }
++                                      DBG(2, "** OVERFLOW %d into %d\n",\
++                                          rx_count, d->length);
++
++                                      length = d->length;
++                              } else
++                                      length = rx_count;
++                              d->status = status;
++                              buf = urb->transfer_dma + d->offset;
++                      } else {
++                              length = rx_count;
++                              buf = urb->transfer_dma +
++                                              urb->actual_length;
++                      }
++
+                       dma->desired_mode = 0;
+ #ifdef USE_MODE1
+                       /* because of the issue below, mode 1 will
+@@ -1569,6 +1618,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                                               urb->actual_length)
+                                       > qh->maxpacket)
+                               dma->desired_mode = 1;
++                      if (rx_count < hw_ep->max_packet_sz_rx) {
++                              length = rx_count;
++                              dma->bDesiredMode = 0;
++                      } else {
++                              length = urb->transfer_buffer_length;
++                      }
+ #endif
+ /* Disadvantage of using mode 1:
+@@ -1606,12 +1661,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                        */
+                       ret = c->channel_program(
+                               dma, qh->maxpacket,
+-                              dma->desired_mode,
+-                              urb->transfer_dma
+-                                      + urb->actual_length,
+-                              (dma->desired_mode == 0)
+-                                      ? rx_count
+-                                      : urb->transfer_buffer_length);
++                              dma->desired_mode, buf, length);
+                       if (!ret) {
+                               c->channel_release(dma);
+@@ -1628,19 +1678,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+               }
+       }
+-      if (dma && usb_pipeisoc(pipe)) {
+-              struct usb_iso_packet_descriptor        *d;
+-              int                                     iso_stat = status;
+-
+-              d = urb->iso_frame_desc + qh->iso_idx;
+-              d->actual_length += xfer_len;
+-              if (iso_err) {
+-                      iso_stat = -EILSEQ;
+-                      urb->error_count++;
+-              }
+-              d->status = iso_stat;
+-      }
+-
+ finish:
+       urb->actual_length += xfer_len;
+       qh->offset += xfer_len;
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-fix-ISO-in-unlink.diff b/packages/linux/omap3-pandora-kernel-wifi/musb-fix-ISO-in-unlink.diff
new file mode 100755 (executable)
index 0000000..c93a5b0
--- /dev/null
@@ -0,0 +1,69 @@
+From: Ajay Kumar Gupta <ajay.gupta@ti.com>
+To: linux-omap@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, felipe.balbi@nokia.com,
+        stern@rowland.harvard.edu, Ajay Kumar Gupta <ajay.gupta@ti.com>
+Subject: [PATCH v3] OMAP:MUSB: Corrects urb unlink function path
+Date:  Mon, 25 Aug 2008 10:52:16 +0530
+
+Fixes kernel panic while ISO IN transfer is aborted. Replaced
+usb_hcd_unlink_urb_from_ep() from musb_giveback() to __musb_giveback()
+to make sure urb is unlinked before giveback when __musb_giveback() is
+called from musb_urb_dequeue().
+
+Acquired musb->lock() before usb_hcd_unlink_urb_from_ep() within the
+enqueue path.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+ drivers/usb/musb/musb_host.c |    7 +++++--
+ 1 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 08e421f..4279311 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -291,6 +291,7 @@ __acquires(musb->lock)
+                       urb->actual_length, urb->transfer_buffer_length
+                       );
++      usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+       spin_unlock(&musb->lock);
+       usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+       spin_lock(&musb->lock);
+@@ -353,8 +354,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+               break;
+       }
+-      usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+-
+       qh->is_ready = 0;
+       __musb_giveback(musb, urb, status);
+       qh->is_ready = ready;
+@@ -1787,7 +1786,9 @@ static int musb_urb_enqueue(
+        */
+       qh = kzalloc(sizeof *qh, mem_flags);
+       if (!qh) {
++              spin_lock_irqsave(&musb->lock, flags);
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
++              spin_unlock_irqrestore(&musb->lock, flags);
+               return -ENOMEM;
+       }
+@@ -1899,7 +1900,9 @@ static int musb_urb_enqueue(
+ done:
+       if (ret != 0) {
++              spin_lock_irqsave(&musb->lock, flags);
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
++              spin_unlock_irqrestore(&musb->lock, flags);
+               kfree(qh);
+       }
+       return ret;
+-- 
+1.5.6
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-fix-dbrownell.patch b/packages/linux/omap3-pandora-kernel-wifi/musb-fix-dbrownell.patch
new file mode 100755 (executable)
index 0000000..3526cd3
--- /dev/null
@@ -0,0 +1,71 @@
+From: David Brownell <dbrownell@users.sourceforge.net>
+
+Minor cleanups to omap 2430/34xx/35x musb_hdrc init:
+
+ - num_eps is 16; here, each one is bidirectional
+ - use DMA_32BIT_MASK to prevent confusion/errors
+ - initialize root port power to reflect 100 mA limit
+
+This still hard-wires some board-specific data, since there
+are no hooks through which different boards can provide the
+right data to the init code.
+
+Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
+
+--- a/arch/arm/mach-omap2/usb-musb.c
++++ b/arch/arm/mach-omap2/usb-musb.c
+@@ -21,12 +21,15 @@
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
++#include <linux/dma-mapping.h>
++
+ #include <asm/io.h>
+-#include <mach/mux.h>
++
+ #include <linux/usb/musb.h>
+ #include <mach/hardware.h>
+ #include <mach/pm.h>
++#include <mach/mux.h>
+ #include <mach/usb.h>
+ #ifdef CONFIG_USB_MUSB_SOC
+@@ -109,7 +112,7 @@ static struct musb_hdrc_config musb_config = {
+       .dyn_fifo       = 1,
+       .soft_con       = 1,
+       .dma            = 1,
+-      .num_eps        = 32,
++      .num_eps        = 16,
+       .dma_channels   = 7,
+       .dma_req_chan   = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3),
+       .ram_bits       = 12,
+@@ -129,16 +132,22 @@ static struct musb_hdrc_platform_data musb_plat = {
+                       : "usbhs_ick",
+       .set_clock      = musb_set_clock,
+       .config         = &musb_config,
++
++      /* REVISIT charge pump on TWL4030 can supply up to
++       * 100 mA ... but this value is board-specific, like
++       * "mode", and should be passed to usb_musb_init().
++       */
++      .power          = 50,                   /* up to 100 mA */
+ };
+-static u64 musb_dmamask = ~(u32)0;
++static u64 musb_dmamask = DMA_32BIT_MASK;
+ static struct platform_device musb_device = {
+       .name           = "musb_hdrc",
+       .id             = -1,
+       .dev = {
+               .dma_mask               = &musb_dmamask,
+-              .coherent_dma_mask      = 0xffffffff,
++              .coherent_dma_mask      = DMA_32BIT_MASK,
+               .platform_data          = &musb_plat,
+       },
+       .num_resources  = ARRAY_SIZE(musb_resources),
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-fix-endpoints.diff b/packages/linux/omap3-pandora-kernel-wifi/musb-fix-endpoints.diff
new file mode 100755 (executable)
index 0000000..5d1201f
--- /dev/null
@@ -0,0 +1,197 @@
+From: Ajay Kumar Gupta <ajay.gupta@ti.com>
+To: linux-usb@vger.kernel.org
+Cc: linux-omap@vger.kernel.org, david-b@pacbell.net, me@felipebalbi.com,
+        Ajay Kumar Gupta <ajay.gupta@ti.com>
+Subject: [PATCH] MUSB: BULK request on different available endpoints
+Date:  Tue,  7 Oct 2008 11:12:24 +0530
+
+Fixes co-working issue of usb serial device with usb/net devices while
+other endpoints are free and can be used. This patch implements the policy
+that if endpoint resources are available then different BULK request goes
+to different endpoint otherwise they are multiplexed to one reserved
+endpoint as currently done.
+
+NAK limit scheme has to be added for multiplexed BULK request scenario
+to avoid endpoint starvation due to usb/net devices.
+
+musb->periodic[] flag setting is also updated. It used to set this flag for
+an endpoint even when only rx or tx is used. Now flag setting is done on an
+rx/tx basis for an endpoint.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+ drivers/usb/musb/musb_host.c |   94 ++++++++++++++++++++++++------------------
+ drivers/usb/musb/musb_host.h |    1 +
+ 2 files changed, 55 insertions(+), 40 deletions(-)
+--- /tmp/musb_host.c   2008-10-07 10:10:49.000000000 +0200
++++ git/drivers/usb/musb/musb_host.c   2008-10-07 10:13:59.000000000 +0200
+@@ -378,27 +378,32 @@
+               switch (qh->type) {
++              case USB_ENDPOINT_XFER_CONTROL:
++              case USB_ENDPOINT_XFER_BULK:
++                      /* fifo policy for these lists, except that NAKing
++                       * should rotate a qh to the end (for fairness).
++                       */
++                      if (qh->mux == 1) {
++                              head = qh->ring.prev;
++                              list_del(&qh->ring);
++                              kfree(qh);
++                              qh = first_qh(head);
++                              break;
++                      }
+               case USB_ENDPOINT_XFER_ISOC:
+               case USB_ENDPOINT_XFER_INT:
+                       /* this is where periodic bandwidth should be
+                        * de-allocated if it's tracked and allocated;
+                        * and where we'd update the schedule tree...
+                        */
+-                      musb->periodic[ep->epnum] = NULL;
++                      if (is_in)
++                              musb->periodic[2 * ep->epnum - 2] = NULL;
++                      else
++                              musb->periodic[2 * ep->epnum - 1] = NULL;
+                       kfree(qh);
+                       qh = NULL;
+                       break;
+-              case USB_ENDPOINT_XFER_CONTROL:
+-              case USB_ENDPOINT_XFER_BULK:
+-                      /* fifo policy for these lists, except that NAKing
+-                       * should rotate a qh to the end (for fairness).
+-                       */
+-                      head = qh->ring.prev;
+-                      list_del(&qh->ring);
+-                      kfree(qh);
+-                      qh = first_qh(head);
+-                      break;
+               }
+       }
+       return qh;
+@@ -1728,22 +1733,9 @@
+       u16                     maxpacket;
+       /* use fixed hardware for control and bulk */
+-      switch (qh->type) {
+-      case USB_ENDPOINT_XFER_CONTROL:
++      if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+               head = &musb->control;
+               hw_ep = musb->control_ep;
+-              break;
+-      case USB_ENDPOINT_XFER_BULK:
+-              hw_ep = musb->bulk_ep;
+-              if (is_in)
+-                      head = &musb->in_bulk;
+-              else
+-                      head = &musb->out_bulk;
+-              break;
+-      }
+-      if (head) {
+-              idle = list_empty(head);
+-              list_add_tail(&qh->ring, head);
+               goto success;
+       }
+@@ -1778,7 +1770,8 @@
+       for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+               int     diff;
+-              if (musb->periodic[epnum])
++              if ((is_in && musb->periodic[2 * epnum - 2]) ||
++                      (!is_in && musb->periodic[2 * epnum - 1]))
+                       continue;
+               hw_ep = &musb->endpoints[epnum];
+               if (hw_ep == musb->bulk_ep)
+@@ -1789,19 +1782,36 @@
+               else
+                       diff = hw_ep->max_packet_sz_tx - maxpacket;
+-              if (diff > 0 && best_diff > diff) {
++              if (diff >= 0 && best_diff > diff) {
+                       best_diff = diff;
+                       best_end = epnum;
+               }
+       }
+-      if (best_end < 0)
++      /* use bulk reserved ep1 if no other ep is free*/
++      if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
++              hw_ep = musb->bulk_ep;
++              if (is_in)
++                      head = &musb->in_bulk;
++              else
++                      head = &musb->out_bulk;
++              goto success;
++      } else if (best_end < 0)
+               return -ENOSPC;
+       idle = 1;
++      qh->mux = 0;
+       hw_ep = musb->endpoints + best_end;
+-      musb->periodic[best_end] = qh;
+-      DBG(4, "qh %p periodic slot %d\n", qh, best_end);
++      if (is_in)
++              musb->periodic[2 * best_end - 2] = qh;
++      else
++              musb->periodic[2 * best_end - 1] = qh;
++      DBG(4, "qh %p periodic slot %d%s\n", qh, best_end, is_in ? "Rx" : "Tx");
+ success:
++      if (head) {
++              idle = list_empty(head);
++              list_add_tail(&qh->ring, head);
++              qh->mux = 1;
++      }
+       qh->hw_ep = hw_ep;
+       qh->hep->hcpriv = qh;
+       if (idle)
+@@ -2065,11 +2075,13 @@
+                       sched = &musb->control;
+                       break;
+               case USB_ENDPOINT_XFER_BULK:
+-                      if (usb_pipein(urb->pipe))
+-                              sched = &musb->in_bulk;
+-                      else
+-                              sched = &musb->out_bulk;
+-                      break;
++                      if (qh->mux == 1) {
++                              if (usb_pipein(urb->pipe))
++                                      sched = &musb->in_bulk;
++                              else
++                                      sched = &musb->out_bulk;
++                              break;
++                      }
+               default:
+                       /* REVISIT when we get a schedule tree, periodic
+                        * transfers won't always be at the head of a
+@@ -2131,11 +2143,13 @@
+               sched = &musb->control;
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+-              if (is_in)
+-                      sched = &musb->in_bulk;
+-              else
+-                      sched = &musb->out_bulk;
+-              break;
++              if (qh->mux == 1) {
++                      if (is_in)
++                              sched = &musb->in_bulk;
++                      else
++                              sched = &musb->out_bulk;
++                      break;
++              }
+       case USB_ENDPOINT_XFER_ISOC:
+       case USB_ENDPOINT_XFER_INT:
+               for (i = 0; i < musb->nr_endpoints; i++) {
+--- /tmp/musb_host.h   2008-10-07 08:59:38.000000000 +0200
++++ git/drivers/usb/musb/musb_host.h   2008-10-07 10:10:54.000000000 +0200
+@@ -53,7 +53,8 @@
+       struct list_head        ring;           /* of musb_qh */
+       /* struct musb_qh               *next; */       /* for periodic tree */
+-
++      u8          mux;        /* qh multiplexed to hw_ep */
++      
+       unsigned                offset;         /* in urb->transfer_buffer */
+       unsigned                segsize;        /* current xfer fragment */
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-fix-multiple-bulk-transfers.diff b/packages/linux/omap3-pandora-kernel-wifi/musb-fix-multiple-bulk-transfers.diff
new file mode 100755 (executable)
index 0000000..7435a2e
--- /dev/null
@@ -0,0 +1,194 @@
+From: Ajay Kumar Gupta <ajay.gupta@ti.com>
+To: linux-usb@vger.kernel.org
+Cc: linux-omap@vger.kernel.org, felipe.balbi@nokia.com,
+Subject: [PATCH] MUSB: Fix for kernel panic with multiple bulk transfer
+Date:  Wed,  1 Oct 2008 13:08:56 +0530
+
+Fixes kernel panic when multiple copy is performed among more than two mass
+storage media and transfer is aborted. musb_advance_schedule(),
+musb_urb_dequeue(),musb_cleanup_urb() and musb_h_disable() functions have
+been modified to correct urb handling associated with bulk and control
+endpoints which are multiplexed on one hardware endpoint.
+
+musb_advance_schedule() has been removed from musb_cleanup_urb() and added
+to musb_urb_dequeue(). musb_h_disable() has been modified to take care of
+multiple qh on same hw_ep scenario.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+CC: Romit Dasgupta <romit@ti.com> 
+---
+Suggestions welcome to move while loop doing kfree(qh) from 
+musb_advance_schedule() and musb_h_disable() to musb_giveback().
+
+ drivers/usb/musb/musb_host.c |  105 ++++++++++++++++++++++++++++++-----------
+ 1 files changed, 77 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 8b4be01..c2474de 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -427,8 +427,17 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
+               qh = musb_giveback(qh, urb, 0);
+       else
+               qh = musb_giveback(qh, urb, urb->status);
++      while (qh && qh->is_ready && list_empty(&qh->hep->urb_list)) {
++              struct list_head *head;
++              head = qh->ring.prev;
++              list_del(&qh->ring);
++              qh->hep->hcpriv = NULL;
++              kfree(qh);
++              qh = first_qh(head);
++      }
+-      if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
++
++      if (qh && qh->is_ready) {
+               DBG(4, "... next ep%d %cX urb %p\n",
+                               hw_ep->epnum, is_in ? 'R' : 'T',
+                               next_urb(qh));
+@@ -1964,8 +1973,6 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+               /* flush cpu writebuffer */
+               csr = musb_readw(epio, MUSB_TXCSR);
+       }
+-      if (status == 0)
+-              musb_advance_schedule(ep->musb, urb, ep, is_in);
+       return status;
+ }
+@@ -2026,13 +2033,24 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+       /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+       if (ret < 0 || (sched && qh != first_qh(sched))) {
+               int     ready = qh->is_ready;
+-
++              int     type = urb->pipe;
+               ret = 0;
+               qh->is_ready = 0;
+               __musb_giveback(musb, urb, 0);
+-              qh->is_ready = ready;
+-      } else
++
++              if (list_empty(&qh->hep->urb_list) && list_empty(&qh->ring))
++                      list_del(&qh->ring);
++              else
++                      qh->is_ready = ready;
++              if (usb_pipeisoc(type))
++                      musb->periodic[qh->hw_ep->epnum] = NULL;
++      } else {
+               ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
++              if (!ret) {
++                      musb_advance_schedule(qh->hw_ep->musb, urb, qh->hw_ep,
++                                      urb->pipe & USB_DIR_IN);
++              }
++      }
+ done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return ret;
+@@ -2046,14 +2064,17 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+       unsigned long           flags;
+       struct musb             *musb = hcd_to_musb(hcd);
+       u8                      is_in = epnum & USB_DIR_IN;
+-      struct musb_qh          *qh = hep->hcpriv;
++      struct musb_qh          *qh, *qh_for_curr_urb;
+       struct urb              *urb, *tmp;
+       struct list_head        *sched;
+-
+-      if (!qh)
+-              return;
++      int                     i;
+       spin_lock_irqsave(&musb->lock, flags);
++      qh = hep->hcpriv;
++      if (!qh) {
++              spin_unlock_irqrestore(&musb->lock, flags);
++              return;
++      }
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+@@ -2065,6 +2086,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+               else
+                       sched = &musb->out_bulk;
+               break;
++      case USB_ENDPOINT_XFER_ISOC:
++      case USB_ENDPOINT_XFER_INT:
++              for (i = 0; i < musb->nr_endpoints; i++) {
++                      if (musb->periodic[i] == qh)
++                              sched = &qh->ring;
++                      break;
++              }
+       default:
+               /* REVISIT when we get a schedule tree, periodic transfers
+                * won't always be at the head of a singleton queue...
+@@ -2073,26 +2101,47 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+               break;
+       }
+-      /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+-
+       /* kick first urb off the hardware, if needed */
+-      qh->is_ready = 0;
+-      if (!sched || qh == first_qh(sched)) {
++      if (sched) {
++              qh_for_curr_urb = qh;
+               urb = next_urb(qh);
+-
+-              /* make software (then hardware) stop ASAP */
+-              if (!urb->unlinked)
+-                      urb->status = -ESHUTDOWN;
+-
+-              /* cleanup */
+-              musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+-      } else
+-              urb = NULL;
+-
+-      /* then just nuke all the others */
+-      list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+-              musb_giveback(qh, urb, -ESHUTDOWN);
+-
++              if (urb) {
++                      /* make software (then hardware) stop ASAP */
++                      if (!urb->unlinked)
++                              urb->status = -ESHUTDOWN;
++                      /* cleanup first urb of first qh; */
++                      if (qh == first_qh(sched)) {
++                              musb_cleanup_urb(urb, qh,
++                                      urb->pipe & USB_DIR_IN);
++                      }
++                      qh = musb_giveback(qh, urb, -ESHUTDOWN);
++                      if (qh == qh_for_curr_urb) {
++                              list_for_each_entry_safe_from(urb, tmp,
++                                      &hep->urb_list, urb_list) {
++                                      qh = musb_giveback(qh, tmp, -ESHUTDOWN);
++                                      if (qh != qh_for_curr_urb)
++                                              break;
++                              }
++                      }
++              }
++              /* pick the next candidate and go */
++              if (qh && qh->is_ready) {
++                      while (qh && qh->is_ready &&
++                              list_empty(&qh->hep->urb_list)) {
++                                      struct list_head *head;
++                                      head = qh->ring.prev;
++                                      list_del(&qh->ring);
++                                      qh->hep->hcpriv = NULL;
++                                      kfree(qh);
++                                      qh = first_qh(head);
++                      }
++                      if (qh && qh->is_ready) {
++                              epnum = qh->hep->desc.bEndpointAddress;
++                              is_in = epnum & USB_DIR_IN;
++                              musb_start_urb(musb, is_in, qh);
++                      }
++              }
++      }
+       spin_unlock_irqrestore(&musb->lock, flags);
+ }
+-- 
+1.5.6
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-mru-otgfix.diff b/packages/linux/omap3-pandora-kernel-wifi/musb-mru-otgfix.diff
new file mode 100755 (executable)
index 0000000..767858b
--- /dev/null
@@ -0,0 +1,43 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Sat, 6 Sep 2008 15:11:00 +0000 (+0100)
+Subject: usb: musb: fix something
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=1e5bc41773bb981b3a89bd762becf98c72be5e4c
+
+usb: musb: fix something
+
+This makes USB work on the Beagleboard.  I don't know why.
+---
+
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index e07cad8..4d6ff26 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1819,6 +1819,9 @@ allocate_instance(struct device *dev, void __iomem *mbase)
+               ep->epnum = epnum;
+       }
++#ifdef CONFIG_USB_MUSB_OTG
++      otg_set_transceiver(&musb->xceiv);
++#endif
+       musb->controller = dev;
+       return musb;
+ }
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index 9d2dcb1..51af80b 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -215,12 +215,14 @@ void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+ int __init musb_platform_init(struct musb *musb)
+ {
++      struct otg_transceiver *xceiv = otg_get_transceiver();
+       u32 l;
+ #if defined(CONFIG_ARCH_OMAP2430)
+       omap_cfg_reg(AE5_2430_USB0HS_STP);
+ #endif
++      musb->xceiv = *xceiv;
+       musb_platform_resume(musb);
+       l = omap_readl(OTG_SYSCONFIG);
diff --git a/packages/linux/omap3-pandora-kernel-wifi/musb-support-high-bandwidth.patch.eml b/packages/linux/omap3-pandora-kernel-wifi/musb-support-high-bandwidth.patch.eml
new file mode 100755 (executable)
index 0000000..0264a97
--- /dev/null
@@ -0,0 +1,134 @@
+Enables support for camera (as creative) requiring high bandwidth
+isochronous transfer.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+ drivers/usb/musb/musb_core.c |   18 +++++++++---------
+ drivers/usb/musb/musb_host.c |   32 +++++++++++++++++++++-----------
+ 2 files changed, 30 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index c939f81..9914f70 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1063,17 +1063,17 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
+ { .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
+ { .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
+ { .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
++{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket =  64, },
+ { .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
++{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket =  64, },
+ { .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 512, },
+-{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 512, },
+-{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 512, },
+-{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 512, },
++{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket =  64, },
++{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
++{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 256, },
++{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
++{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 256, },
++{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 256, },
++{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 4096, },
+ { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+ { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+ };
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 08e421f..84173df 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -1443,6 +1443,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                       /* packet error reported later */
+                       iso_err = true;
+               }
++      } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
++              DBG(3, "end %d Highbandwidth  incomplete ISO packet received\n"
++                                      , epnum);
++              status = -EPROTO;
+       }
+       /* faults abort the transfer */
+@@ -1595,7 +1599,13 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                               val &= ~MUSB_RXCSR_H_AUTOREQ;
+                       else
+                               val |= MUSB_RXCSR_H_AUTOREQ;
+-                      val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
++
++                      if (qh->maxpacket & ~0x7ff)
++                              /* Autoclear doesn't work in high bandwidth iso */
++                              val |= MUSB_RXCSR_DMAENAB;
++                      else
++                              val |= MUSB_RXCSR_AUTOCLEAR
++                                      | MUSB_RXCSR_DMAENAB;
+                       musb_writew(epio, MUSB_RXCSR,
+                               MUSB_RXCSR_H_WZC_BITS | val);
+@@ -1666,6 +1676,7 @@ static int musb_schedule(
+       int                     best_end, epnum;
+       struct musb_hw_ep       *hw_ep = NULL;
+       struct list_head        *head = NULL;
++      u16                     maxpacket;
+       /* use fixed hardware for control and bulk */
+       switch (qh->type) {
+@@ -1708,6 +1719,13 @@ static int musb_schedule(
+       best_diff = 4096;
+       best_end = -1;
++      if (qh->maxpacket & (1<<11))
++              maxpacket = 2 * (qh->maxpacket & 0x7ff);
++      else if (qh->maxpacket & (1<<12))
++              maxpacket = 3 * (qh->maxpacket & 0x7ff);
++      else
++              maxpacket = (qh->maxpacket & 0x7ff);
++
+       for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+               int     diff;
+@@ -1718,9 +1736,9 @@ static int musb_schedule(
+                       continue;
+               if (is_in)
+-                      diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
++                      diff = hw_ep->max_packet_sz_rx - maxpacket;
+               else
+-                      diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
++                      diff = hw_ep->max_packet_sz_tx - maxpacket;
+               if (diff > 0 && best_diff > diff) {
+                       best_diff = diff;
+@@ -1797,13 +1815,6 @@ static int musb_urb_enqueue(
+       qh->is_ready = 1;
+       qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+-
+-      /* no high bandwidth support yet */
+-      if (qh->maxpacket & ~0x7ff) {
+-              ret = -EMSGSIZE;
+-              goto done;
+-      }
+-
+       qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+       qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+@@ -1897,7 +1908,6 @@ static int musb_urb_enqueue(
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+-done:
+       if (ret != 0) {
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
+               kfree(qh);
+-- 
+1.5.6
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/nand.patch b/packages/linux/omap3-pandora-kernel-wifi/nand.patch
new file mode 100755 (executable)
index 0000000..4a6d8e6
--- /dev/null
@@ -0,0 +1,11 @@
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 2ede116..d18a8c9 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -196,7 +196,7 @@ static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)  {
+       struct nand_chip *nand = mtd->priv;
+
+-      __raw_readsl(nand->IO_ADDR_R, buf, len / 2);
++      readsw(nand->IO_ADDR_R, buf, len / 2);
+ }
+
+ /*
\ No newline at end of file
diff --git a/packages/linux/omap3-pandora-kernel-wifi/no-cortex-deadlock.patch b/packages/linux/omap3-pandora-kernel-wifi/no-cortex-deadlock.patch
new file mode 100755 (executable)
index 0000000..78547c8
--- /dev/null
@@ -0,0 +1,77 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Sat, 16 Aug 2008 23:03:06 +0000 (+0100)
+Subject: ARM: Workaround for erratum 451034
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=b84fa87873ffb68ad23930cf6cddeea8bec43ede
+
+ARM: Workaround for erratum 451034
+
+On Cortex-A8 r1p0 and r1p1, executing a NEON store with an integer
+store in the store buffer, can cause a processor deadlock under
+certain conditions.
+
+Executing a DMB instruction before saving NEON/VFP registers and before
+return to userspace makes it safe to run code which includes similar
+counter-measures.  Userspace code can still trigger the deadlock, so
+a different workaround is required to safely run untrusted code.
+
+See ARM Cortex-A8 Errata Notice (PR120-PRDC-008070) for full details.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index aa475d9..41d536e 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1117,6 +1117,22 @@ config NEON
+         Say Y to include support code for NEON, the ARMv7 Advanced SIMD
+         Extension.
++config ARM_ERRATUM_451034
++      bool "Enable workaround for ARM erratum 451034"
++      depends on VFPv3
++      help
++        On Cortex-A8 r1p0 and r1p1, executing a NEON store with an integer
++        store in the store buffer, can cause a processor deadlock under
++        certain conditions.
++
++        See ARM Cortex-A8 Errata Notice (PR120-PRDC-008070) for full details.
++
++        Say Y to include a partial workaround.
++
++        WARNING: Even with this option enabled, userspace code can trigger
++        the deadlock.  To safely run untrusted code, a different fix is
++        required.
++
+ endmenu
+ menu "Userspace binary formats"
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 422f3cc..934798b 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -32,6 +32,9 @@
+       @ write all the working registers out of the VFP
+       .macro  VFPFSTMIA, base, tmp
++#ifdef CONFIG_ARM_ERRATUM_451034
++      dmb
++#endif
+ #if __LINUX_ARM_ARCH__ < 6
+       STC     p11, cr0, [\base],#33*4             @ FSTMIAX \base!, {d0-d15}
+ #else
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 060d7e2..9799a35 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -69,6 +69,10 @@ no_work_pending:
+       /* perform architecture specific actions before user return */
+       arch_ret_to_user r1, lr
++#ifdef CONFIG_ARM_ERRATUM_451034
++      dmb
++#endif
++
+       @ slow_restore_user_regs
+       ldr     r1, [sp, #S_PSR]                @ get calling cpsr
+       ldr     lr, [sp, #S_PC]!                @ get pc
diff --git a/packages/linux/omap3-pandora-kernel-wifi/no-empty-flash-warnings.patch b/packages/linux/omap3-pandora-kernel-wifi/no-empty-flash-warnings.patch
new file mode 100755 (executable)
index 0000000..ab344b0
--- /dev/null
@@ -0,0 +1,15 @@
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index 1d437de..33b3feb 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -647,8 +647,8 @@ scan_more:
+                       inbuf_ofs = ofs - buf_ofs;
+                       while (inbuf_ofs < scan_end) {
+                               if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
+-                                      printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
+-                                             empty_start, ofs);
++//                                    printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
++//                                           empty_start, ofs);
+                                       if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
+                                               return err;
+                                       goto scan_more;
diff --git a/packages/linux/omap3-pandora-kernel-wifi/no-harry-potter.diff b/packages/linux/omap3-pandora-kernel-wifi/no-harry-potter.diff
new file mode 100755 (executable)
index 0000000..2bb20ab
--- /dev/null
@@ -0,0 +1,11 @@
+--- /tmp/Makefile      2008-04-24 14:36:20.509598016 +0200
++++ git/arch/arm/Makefile      2008-04-24 14:36:31.949546584 +0200
+@@ -47,7 +47,7 @@
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+ # testing for a specific architecture or later rather impossible.
+-arch-$(CONFIG_CPU_32v7)               :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7a,-march=armv5t -Wa$(comma)-march=armv7a)
++arch-$(CONFIG_CPU_32v7)               :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
+ arch-$(CONFIG_CPU_32v6)               :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
+ # Only override the compiler option if ARMv6. The ARMv6K extensions are
+ # always available in ARMv7
diff --git a/packages/linux/omap3-pandora-kernel-wifi/omap-2430-lcd.patch b/packages/linux/omap3-pandora-kernel-wifi/omap-2430-lcd.patch
new file mode 100755 (executable)
index 0000000..8f8a687
--- /dev/null
@@ -0,0 +1,11 @@
+--- git/drivers/video/omap/lcd_2430sdp.c.orig  2007-08-13 14:35:17.000000000 -0700
++++ git/drivers/video/omap/lcd_2430sdp.c       2007-08-13 14:35:55.000000000 -0700
+@@ -32,7 +32,7 @@
+ #define LCD_PANEL_BACKLIGHT_GPIO      91
+ #define LCD_PANEL_ENABLE_GPIO         154
+ #define LCD_PIXCLOCK_MAX              5400 /* freq 5.4 MHz */
+-#define PM_RECEIVER             TWL4030_MODULE_PM_RECIEVER
++#define PM_RECEIVER             TWL4030_MODULE_PM_RECEIVER
+ #define ENABLE_VAUX2_DEDICATED  0x09
+ #define ENABLE_VAUX2_DEV_GRP    0x20
diff --git a/packages/linux/omap3-pandora-kernel-wifi/oprofile-0.9.3.armv7.diff b/packages/linux/omap3-pandora-kernel-wifi/oprofile-0.9.3.armv7.diff
new file mode 100755 (executable)
index 0000000..1eedbb5
--- /dev/null
@@ -0,0 +1,599 @@
+Hi,
+
+This patch adds Oprofile support on ARMv7, using the PMNC unit.
+Tested on OMAP3430 SDP.
+
+Feedback and comments are welcome.
+
+The patch to user space components is attached for reference. It applies 
+against version 0.9.3 of oprofile source 
+(http://prdownloads.sourceforge.net/oprofile/oprofile-0.9.3.tar.gz).
+
+Regards,
+Jean.
+
+---
+
+From: Jean Pihet <jpihet@mvista.com>
+Date: Tue, 6 May 2008 17:21:44 +0200
+Subject: [PATCH] ARM: Add ARMv7 oprofile support
+
+Add ARMv7 Oprofile support to kernel
+
+Signed-off-by: Jean Pihet <jpihet@mvista.com>
+---
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index c60a27d..60b50a0 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -161,6 +161,11 @@ config OPROFILE_MPCORE
+ config OPROFILE_ARM11_CORE
+       bool
++config OPROFILE_ARMV7
++      def_bool y
++      depends on CPU_V7 && !SMP
++      bool
++
+ endif
+ config VECTORS_BASE
+diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
+index e61d0cc..88e31f5 100644
+--- a/arch/arm/oprofile/Makefile
++++ b/arch/arm/oprofile/Makefile
+@@ -11,3 +11,4 @@ oprofile-$(CONFIG_CPU_XSCALE)                += op_model_xscale.o
+ oprofile-$(CONFIG_OPROFILE_ARM11_CORE)        += op_model_arm11_core.o
+ oprofile-$(CONFIG_OPROFILE_ARMV6)     += op_model_v6.o
+ oprofile-$(CONFIG_OPROFILE_MPCORE)    += op_model_mpcore.o
++oprofile-$(CONFIG_OPROFILE_ARMV7)     += op_model_v7.o
+diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
+index 0a5cf3a..3fcd752 100644
+--- a/arch/arm/oprofile/common.c
++++ b/arch/arm/oprofile/common.c
+@@ -145,6 +145,10 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) 
+       spec = &op_mpcore_spec;
+ #endif
++#ifdef CONFIG_OPROFILE_ARMV7
++      spec = &op_armv7_spec;
++#endif
++
+       if (spec) {
+               ret = spec->init();
+               if (ret < 0)
+diff --git a/arch/arm/oprofile/op_arm_model.h 
+b/arch/arm/oprofile/op_arm_model.h
+index 4899c62..8c4e4f6 100644
+--- a/arch/arm/oprofile/op_arm_model.h
++++ b/arch/arm/oprofile/op_arm_model.h
+@@ -26,6 +26,7 @@ extern struct op_arm_model_spec op_xscale_spec;
+ extern struct op_arm_model_spec op_armv6_spec;
+ extern struct op_arm_model_spec op_mpcore_spec;
++extern struct op_arm_model_spec op_armv7_spec;
+ extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
+diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
+new file mode 100644
+index 0000000..a159bc1
+--- /dev/null
++++ b/arch/arm/oprofile/op_model_v7.c
+@@ -0,0 +1,407 @@
++/**
++ * @file op_model_v7.c
++ * ARM V7 (Cortex A8) Event Monitor Driver
++ *
++ * @remark Copyright 2008 Jean Pihet <jpihet@mvista.com>
++ * @remark Copyright 2004 ARM SMP Development Team
++ */
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/oprofile.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/smp.h>
++
++#include "op_counter.h"
++#include "op_arm_model.h"
++#include "op_model_v7.h"
++
++/* #define DEBUG */
++
++
++/*
++ * ARM V7 PMNC support
++ */
++
++static u32 cnt_en[CNTMAX];
++
++static inline void armv7_pmnc_write(u32 val)
++{
++      val &= PMNC_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
++}
++
++static inline u32 armv7_pmnc_read(void)
++{
++      u32 val;
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
++      return val;
++}
++
++static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
++{
++      u32 val;
++
++      if (cnt >= CNTMAX) {
++              printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
++                      " %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      if (cnt == CCNT)
++              val = CNTENS_C;
++      else
++              val = (1 << (cnt - CNT0));
++
++      val &= CNTENS_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
++
++      return cnt;
++}
++
++static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
++{
++      u32 val;
++
++      if (cnt >= CNTMAX) {
++              printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
++                      " %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      if (cnt == CCNT)
++              val = CNTENC_C;
++      else
++              val = (1 << (cnt - CNT0));
++
++      val &= CNTENC_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
++
++      return cnt;
++}
++
++static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
++{
++      u32 val;
++
++      if (cnt >= CNTMAX) {
++              printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
++                      " interrupt enable %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      if (cnt == CCNT)
++              val = INTENS_C;
++      else
++              val = (1 << (cnt - CNT0));
++
++      val &= INTENS_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
++
++      return cnt;
++}
++
++static inline u32 armv7_pmnc_getreset_flags(void)
++{
++      u32 val;
++
++      /* Read */
++      asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
++
++      /* Write to clear flags */
++      val &= FLAG_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
++
++      return val;
++}
++
++static inline int armv7_pmnc_select_counter(unsigned int cnt)
++{
++      u32 val;
++
++      if ((cnt == CCNT) || (cnt >= CNTMAX)) {
++              printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counteri"
++                      " %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      val = (cnt - CNT0) & SELECT_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
++
++      return cnt;
++}
++
++static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
++{
++      if (armv7_pmnc_select_counter(cnt) == cnt) {
++              val &= EVTSEL_MASK;
++              asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
++      }
++}
++
++static void armv7_pmnc_reset_counter(unsigned int cnt)
++{
++      u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
++      u32 val = -(u32)counter_config[cpu_cnt].count;
++
++      switch (cnt) {
++      case CCNT:
++              armv7_pmnc_disable_counter(cnt);
++
++              asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
++
++              if (cnt_en[cnt] != 0)
++                  armv7_pmnc_enable_counter(cnt);
++
++              break;
++
++      case CNT0:
++      case CNT1:
++      case CNT2:
++      case CNT3:
++              armv7_pmnc_disable_counter(cnt);
++
++              if (armv7_pmnc_select_counter(cnt) == cnt)
++                  asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
++
++              if (cnt_en[cnt] != 0)
++                  armv7_pmnc_enable_counter(cnt);
++
++              break;
++
++      default:
++              printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
++                      " %d\n", smp_processor_id(), cnt);
++              break;
++      }
++}
++
++int armv7_setup_pmnc(void)
++{
++      unsigned int cnt;
++
++      if (armv7_pmnc_read() & PMNC_E) {
++              printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setup"
++                      " new event counter.\n", smp_processor_id());
++              return -EBUSY;
++      }
++
++      /*
++       * Initialize & Reset PMNC: C bit, D bit and P bit.
++       *  Note: Using a slower count for CCNT (D bit: divide by 64) results
++       *   in a more stable system
++       */
++      armv7_pmnc_write(PMNC_P | PMNC_C | PMNC_D);
++
++
++      for (cnt = CCNT; cnt < CNTMAX; cnt++) {
++              unsigned long event;
++              u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
++
++              /*
++               * Disable counter
++               */
++              armv7_pmnc_disable_counter(cnt);
++              cnt_en[cnt] = 0;
++
++              if (!counter_config[cpu_cnt].enabled)
++                      continue;
++
++              event = counter_config[cpu_cnt].event & 255;
++
++              /*
++               * Set event (if destined for PMNx counters)
++               * We don't need to set the event if it's a cycle count
++               */
++              if (cnt != CCNT)
++                      armv7_pmnc_write_evtsel(cnt, event);
++
++              /*
++               * Enable interrupt for this counter
++               */
++              armv7_pmnc_enable_intens(cnt);
++
++              /*
++               * Reset counter
++               */
++              armv7_pmnc_reset_counter(cnt);
++
++              /*
++               * Enable counter
++               */
++              armv7_pmnc_enable_counter(cnt);
++              cnt_en[cnt] = 1;
++      }
++
++      return 0;
++}
++
++static inline void armv7_start_pmnc(void)
++{
++      armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
++}
++
++static inline void armv7_stop_pmnc(void)
++{
++      armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
++}
++
++/*
++ * CPU counters' IRQ handler (one IRQ per CPU)
++ */
++static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
++{
++      struct pt_regs *regs = get_irq_regs();
++      unsigned int cnt;
++      u32 flags;
++
++
++      /*
++       * Stop IRQ generation
++       */
++      armv7_stop_pmnc();
++
++      /*
++       * Get and reset overflow status flags
++       */
++      flags = armv7_pmnc_getreset_flags();
++
++      /*
++       * Cycle counter
++       */
++      if (flags & FLAG_C) {
++              u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
++              armv7_pmnc_reset_counter(CCNT);
++              oprofile_add_sample(regs, cpu_cnt);
++      }
++
++      /*
++       * PMNC counters 0:3
++       */
++      for (cnt = CNT0; cnt < CNTMAX; cnt++) {
++              if (flags & (1 << (cnt - CNT0))) {
++                      u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
++                      armv7_pmnc_reset_counter(cnt);
++                      oprofile_add_sample(regs, cpu_cnt);
++              }
++      }
++
++      /*
++       * Allow IRQ generation
++       */
++      armv7_start_pmnc();
++
++      return IRQ_HANDLED;
++}
++
++int armv7_request_interrupts(int *irqs, int nr)
++{
++      unsigned int i;
++      int ret = 0;
++
++      for (i = 0; i < nr; i++) {
++              ret = request_irq(irqs[i], armv7_pmnc_interrupt,
++                              IRQF_DISABLED, "CP15 PMNC", NULL);
++              if (ret != 0) {
++                      printk(KERN_ERR "oprofile: unable to request IRQ%u"
++                              " for ARMv7\n",
++                             irqs[i]);
++                      break;
++              }
++      }
++
++      if (i != nr)
++              while (i-- != 0)
++                      free_irq(irqs[i], NULL);
++
++      return ret;
++}
++
++void armv7_release_interrupts(int *irqs, int nr)
++{
++      unsigned int i;
++
++      for (i = 0; i < nr; i++)
++              free_irq(irqs[i], NULL);
++}
++
++#ifdef DEBUG
++static void armv7_pmnc_dump_regs(void)
++{
++      u32 val;
++      unsigned int cnt;
++
++      printk(KERN_INFO "PMNC registers dump:\n");
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
++      printk(KERN_INFO "PMNC  =0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
++      printk(KERN_INFO "CNTENS=0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
++      printk(KERN_INFO "INTENS=0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
++      printk(KERN_INFO "FLAGS =0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
++      printk(KERN_INFO "SELECT=0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
++      printk(KERN_INFO "CCNT  =0x%08x\n", val);
++
++      for (cnt = CNT0; cnt < CNTMAX; cnt++) {
++              armv7_pmnc_select_counter(cnt);
++              asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
++              printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
++              asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
++              printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
++      }
++}
++#endif
++
++
++static int irqs[] = {
++#ifdef CONFIG_ARCH_OMAP3
++      INT_34XX_BENCH_MPU_EMUL,
++#endif
++};
++
++static void armv7_pmnc_stop(void)
++{
++#ifdef DEBUG
++      armv7_pmnc_dump_regs();
++#endif
++      armv7_stop_pmnc();
++      armv7_release_interrupts(irqs, ARRAY_SIZE(irqs));
++}
++
++static int armv7_pmnc_start(void)
++{
++      int ret;
++
++#ifdef DEBUG
++      armv7_pmnc_dump_regs();
++#endif
++      ret = armv7_request_interrupts(irqs, ARRAY_SIZE(irqs));
++      if (ret >= 0)
++              armv7_start_pmnc();
++
++      return ret;
++}
++
++static int armv7_detect_pmnc(void)
++{
++      return 0;
++}
++
++struct op_arm_model_spec op_armv7_spec = {
++      .init           = armv7_detect_pmnc,
++      .num_counters   = 5,
++      .setup_ctrs     = armv7_setup_pmnc,
++      .start          = armv7_pmnc_start,
++      .stop           = armv7_pmnc_stop,
++      .name           = "arm/armv7",
++};
+diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
+new file mode 100644
+index 0000000..08f40ea
+--- /dev/null
++++ b/arch/arm/oprofile/op_model_v7.h
+@@ -0,0 +1,101 @@
++/**
++ * @file op_model_v7.h
++ * ARM v7 (Cortex A8) Event Monitor Driver
++ *
++ * @remark Copyright 2008 Jean Pihet <jpihet@mvista.com>
++ * @remark Copyright 2004 ARM SMP Development Team
++ * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
++ * @remark Copyright 2000-2004 MontaVista Software Inc
++ * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
++ * @remark Copyright 2004 Intel Corporation
++ * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
++ * @remark Copyright 2004 Oprofile Authors
++ *
++ * @remark Read the file COPYING
++ *
++ * @author Zwane Mwaikambo
++ */
++#ifndef OP_MODEL_V7_H
++#define OP_MODEL_V7_H
++
++/*
++ * Per-CPU PMNC: config reg
++ */
++#define PMNC_E                (1 << 0)        /* Enable all counters */
++#define PMNC_P                (1 << 1)        /* Reset all counters */
++#define PMNC_C                (1 << 2)        /* Cycle counter reset */
++#define PMNC_D                (1 << 3)        /* CCNT counts every 64th cpu cycle */
++#define PMNC_X                (1 << 4)        /* Export to ETM */
++#define PMNC_DP               (1 << 5)        /* Disable CCNT if non-invasive debug*/
++#define       PMNC_MASK       0x3f            /* Mask for writable bits */
++
++/*
++ * Available counters
++ */
++#define CCNT          0
++#define CNT0          1
++#define CNT1          2
++#define CNT2          3
++#define CNT3          4
++#define CNTMAX                5
++
++#define CPU_COUNTER(cpu, counter)     ((cpu) * CNTMAX + (counter))
++
++/*
++ * CNTENS: counters enable reg
++ */
++#define CNTENS_P0     (1 << 0)
++#define CNTENS_P1     (1 << 1)
++#define CNTENS_P2     (1 << 2)
++#define CNTENS_P3     (1 << 3)
++#define CNTENS_C      (1 << 31)
++#define       CNTENS_MASK     0x8000000f      /* Mask for writable bits */
++
++/*
++ * CNTENC: counters disable reg
++ */
++#define CNTENC_P0     (1 << 0)
++#define CNTENC_P1     (1 << 1)
++#define CNTENC_P2     (1 << 2)
++#define CNTENC_P3     (1 << 3)
++#define CNTENC_C      (1 << 31)
++#define       CNTENC_MASK     0x8000000f      /* Mask for writable bits */
++
++/*
++ * INTENS: counters overflow interrupt enable reg
++ */
++#define INTENS_P0     (1 << 0)
++#define INTENS_P1     (1 << 1)
++#define INTENS_P2     (1 << 2)
++#define INTENS_P3     (1 << 3)
++#define INTENS_C      (1 << 31)
++#define       INTENS_MASK     0x8000000f      /* Mask for writable bits */
++
++/*
++ * EVTSEL: Event selection reg
++ */
++#define       EVTSEL_MASK     0x7f            /* Mask for writable bits */
++
++/*
++ * SELECT: Counter selection reg
++ */
++#define       SELECT_MASK     0x1f            /* Mask for writable bits */
++
++/*
++ * FLAG: counters overflow flag status reg
++ */
++#define FLAG_P0               (1 << 0)
++#define FLAG_P1               (1 << 1)
++#define FLAG_P2               (1 << 2)
++#define FLAG_P3               (1 << 3)
++#define FLAG_C                (1 << 31)
++#define       FLAG_MASK       0x8000000f      /* Mask for writable bits */
++
++
++int armv7_setup_pmu(void);
++int armv7_start_pmu(void);
++int armv7_stop_pmu(void);
++int armv7_request_interrupts(int *, int);
++void armv7_release_interrupts(int *, int);
++
++#endif
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi/pvr/dispc.patch b/packages/linux/omap3-pandora-kernel-wifi/pvr/dispc.patch
new file mode 100755 (executable)
index 0000000..1697448
--- /dev/null
@@ -0,0 +1,46 @@
+--- kernel-2.6.27.orig/drivers/video/omap/dispc.c
++++ kernel-2.6.27/drivers/video/omap/dispc.c
+@@ -314,6 +319,32 @@
+ }
+ EXPORT_SYMBOL(omap_dispc_enable_digit_out);
++extern void omap_dispc_set_plane_base(int plane, u32 paddr)
++{
++      u32 reg;
++      u32 val;
++
++      switch (plane) {
++      case 0:
++              reg = DISPC_GFX_BA0;
++              break;
++      case 1:
++              reg = DISPC_VID1_BASE + DISPC_VID_BA0;
++              break;
++      case 2:
++              reg = DISPC_VID2_BASE + DISPC_VID_BA0;
++              break;
++      default:
++              BUG();
++              return;
++      }
++
++      dispc_write_reg(reg, paddr);
++      val = dispc_read_reg(DISPC_CONTROL) | (1 << 5); /* GOLCD */
++      dispc_write_reg(DISPC_CONTROL, val);
++}
++EXPORT_SYMBOL(omap_dispc_set_plane_base);
++
+ static inline int _setup_plane(int plane, int channel_out,
+                                 u32 paddr, int screen_width,
+                                 int pos_x, int pos_y, int width, int height,
+--- /tmp/dispc.h       2008-12-09 15:13:12.000000000 +0100
++++ git/drivers/video/omap/dispc.h     2008-12-09 15:13:36.000000000 +0100
+@@ -32,6 +32,8 @@
+ #define DISPC_TFT_DATA_LINES_18               2
+ #define DISPC_TFT_DATA_LINES_24               3
++extern void omap_dispc_set_plane_base(int plane, u32 paddr);
++
+ extern void omap_dispc_set_lcd_size(int width, int height);
+ extern void omap_dispc_enable_lcd_out(int enable);
diff --git a/packages/linux/omap3-pandora-kernel-wifi/pvr/nokia-TI.diff b/packages/linux/omap3-pandora-kernel-wifi/pvr/nokia-TI.diff
new file mode 100755 (executable)
index 0000000..a4aca1e
--- /dev/null
@@ -0,0 +1,8798 @@
+ include4/img_types.h                                                 |    5 
+ include4/pdumpdefs.h                                                 |    1 
+ include4/pvrmodule.h                                                 |   31 
+ include4/pvrversion.h                                                |    8 
+ include4/services.h                                                  |   46 
+ include4/servicesext.h                                               |    6 
+ include4/sgxapi_km.h                                                 |   65 
+ services4/3rdparty/bufferclass_example/bufferclass_example.c         |   32 
+ services4/3rdparty/bufferclass_example/bufferclass_example.h         |   25 
+ services4/3rdparty/bufferclass_example/bufferclass_example_linux.c   |   20 
+ services4/3rdparty/bufferclass_example/bufferclass_example_private.c |   76 -
+ services4/3rdparty/bufferclass_example/kbuild/Makefile               |   40 
+ services4/3rdparty/dc_omap3430_linux/kbuild/Makefile                 |   39 
+ services4/3rdparty/dc_omap3430_linux/omaplfb.h                       |    7 
+ services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c          |   60 
+ services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c                 |   52 
+ services4/include/pvr_bridge.h                                       |   26 
+ services4/include/servicesint.h                                      |   17 
+ services4/include/sgx_bridge.h                                       |   95 +
+ services4/include/sgx_bridge_km.h                                    |  139 -
+ services4/include/sgxinfo.h                                          |  347 ++--
+ services4/srvkm/Makefile                                             |   68 
+ services4/srvkm/bridged/bridged_pvr_bridge.c                         |  732 ++++++++-
+ services4/srvkm/common/deviceclass.c                                 |    6 
+ services4/srvkm/common/devicemem.c                                   |    3 
+ services4/srvkm/common/handle.c                                      |   58 
+ services4/srvkm/common/power.c                                       |   15 
+ services4/srvkm/common/pvrsrv.c                                      |  151 +-
+ services4/srvkm/common/queue.c                                       |    4 
+ services4/srvkm/common/resman.c                                      |   13 
+ services4/srvkm/devices/sgx/mmu.c                                    |    2 
+ services4/srvkm/devices/sgx/mmu.h                                    |    2 
+ services4/srvkm/devices/sgx/pb.c                                     |   37 
+ services4/srvkm/devices/sgx/sgx2dcore.c                              |   21 
+ services4/srvkm/devices/sgx/sgx_bridge_km.h                          |  158 ++
+ services4/srvkm/devices/sgx/sgxinfokm.h                              |  146 +
+ services4/srvkm/devices/sgx/sgxinit.c                                |  734 ++--------
+ services4/srvkm/devices/sgx/sgxkick.c                                |  327 +++-
+ services4/srvkm/devices/sgx/sgxreset.c                               |  330 ++++
+ services4/srvkm/devices/sgx/sgxtransfer.c                            |  312 ++++
+ services4/srvkm/devices/sgx/sgxutils.c                               |  459 +++---
+ services4/srvkm/devices/sgx/sgxutils.h                               |   28 
+ services4/srvkm/env/linux/env_data.h                                 |    8 
+ services4/srvkm/env/linux/event.c                                    |  221 +++
+ services4/srvkm/env/linux/event.h                                    |   32 
+ services4/srvkm/env/linux/kbuild/Makefile                            |   81 +
+ services4/srvkm/env/linux/mm.c                                       |    8 
+ services4/srvkm/env/linux/module.c                                   |  342 +++-
+ services4/srvkm/env/linux/osfunc.c                                   |  347 +++-
+ services4/srvkm/env/linux/pdump.c                                    |   13 
+ services4/srvkm/env/linux/proc.c                                     |   17 
+ services4/srvkm/env/linux/pvr_debug.c                                |    2 
+ services4/srvkm/hwdefs/sgxdefs.h                                     |    4 
+ services4/srvkm/hwdefs/sgxerrata.h                                   |    9 
+ services4/srvkm/hwdefs/sgxfeaturedefs.h                              |   11 
+ services4/srvkm/include/device.h                                     |   35 
+ services4/srvkm/include/handle.h                                     |   10 
+ services4/srvkm/include/osfunc.h                                     |   32 
+ services4/srvkm/include/pdump_km.h                                   |    2 
+ services4/srvkm/include/resman.h                                     |    5 
+ services4/srvkm/include/srvkm.h                                      |    4 
+ services4/system/include/syscommon.h                                 |    2 
+ services4/system/omap3430/sysconfig.c                                |   24 
+ services4/system/omap3430/sysconfig.h                                |    7 
+ services4/system/omap3430/sysutils.c                                 |    2 
+ 65 files changed, 4286 insertions(+), 1675 deletions(-)
+
+
+diff -Nurd git/drivers/gpu/pvr/include4/img_types.h git/drivers/gpu/pvr/include4/img_types.h
+--- git/drivers/gpu/pvr/include4/img_types.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/img_types.h   2008-12-18 15:47:29.000000000 +0100
+@@ -43,7 +43,10 @@
+ typedef signed long           IMG_INT32,      *IMG_PINT32;
+       #if defined(LINUX)
+-
++#if !defined(USE_CODE)
++              typedef unsigned long long              IMG_UINT64,     *IMG_PUINT64;
++              typedef long long                               IMG_INT64,      *IMG_PINT64;
++#endif
+       #else
+               #error("define an OS")
+diff -Nurd git/drivers/gpu/pvr/include4/pdumpdefs.h git/drivers/gpu/pvr/include4/pdumpdefs.h
+--- git/drivers/gpu/pvr/include4/pdumpdefs.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/pdumpdefs.h   2008-12-18 15:47:29.000000000 +0100
+@@ -73,6 +73,7 @@
+       PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
+       PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
+       PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++      PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
+       
+       PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
+ } PDUMP_MEM_FORMAT;
+diff -Nurd git/drivers/gpu/pvr/include4/pvrmodule.h git/drivers/gpu/pvr/include4/pvrmodule.h
+--- git/drivers/gpu/pvr/include4/pvrmodule.h   1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/include4/pvrmodule.h   2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef       _PVRMODULE_H_
++#define       _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif        
+diff -Nurd git/drivers/gpu/pvr/include4/pvrversion.h git/drivers/gpu/pvr/include4/pvrversion.h
+--- git/drivers/gpu/pvr/include4/pvrversion.h  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/pvrversion.h  2008-12-18 15:47:29.000000000 +0100
+@@ -28,10 +28,10 @@
+ #define _PVRVERSION_H_
+ #define PVRVERSION_MAJ 1
+-#define PVRVERSION_MIN 1
+-#define PVRVERSION_BRANCH 11
+-#define PVRVERSION_BUILD 970
+-#define PVRVERSION_STRING "1.1.11.970"
++#define PVRVERSION_MIN 2
++#define PVRVERSION_BRANCH 12
++#define PVRVERSION_BUILD 838
++#define PVRVERSION_STRING "1.2.12.838"
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/include4/servicesext.h git/drivers/gpu/pvr/include4/servicesext.h
+--- git/drivers/gpu/pvr/include4/servicesext.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/servicesext.h 2008-12-18 15:47:29.000000000 +0100
+@@ -150,6 +150,8 @@
+       PVRSRV_PIXEL_FORMAT_V8U8,
+       PVRSRV_PIXEL_FORMAT_V16U16,
+       PVRSRV_PIXEL_FORMAT_QWVU8888,
++      PVRSRV_PIXEL_FORMAT_XLVU8888,
++      PVRSRV_PIXEL_FORMAT_QWVU16,
+       PVRSRV_PIXEL_FORMAT_D16,
+       PVRSRV_PIXEL_FORMAT_D24S8,
+       PVRSRV_PIXEL_FORMAT_D24X8,
+@@ -159,7 +161,9 @@
+       PVRSRV_PIXEL_FORMAT_YUY2,
+       PVRSRV_PIXEL_FORMAT_DXT23,
+       PVRSRV_PIXEL_FORMAT_DXT45,      
+-      PVRSRV_PIXEL_FORMAT_G32R32F,    
++      PVRSRV_PIXEL_FORMAT_G32R32F,
++      PVRSRV_PIXEL_FORMAT_NV11,
++      PVRSRV_PIXEL_FORMAT_NV12,
+       PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
+ } PVRSRV_PIXEL_FORMAT;
+diff -Nurd git/drivers/gpu/pvr/include4/services.h git/drivers/gpu/pvr/include4/services.h
+--- git/drivers/gpu/pvr/include4/services.h    2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/services.h    2008-12-18 15:47:29.000000000 +0100
+@@ -36,16 +36,14 @@
+ #include "pdumpdefs.h"
+-#if defined(SERVICES4)
+ #define IMG_CONST const
+-#else
+-#define IMG_CONST
+-#endif
+ #define PVRSRV_MAX_CMD_SIZE           1024
+ #define PVRSRV_MAX_DEVICES            16      
++#define EVENTOBJNAME_MAXLENGTH (50)
++
+ #define PVRSRV_MEM_READ                                               (1<<0)
+ #define PVRSRV_MEM_WRITE                                      (1<<1)
+ #define PVRSRV_MEM_CACHE_CONSISTENT                   (1<<2)
+@@ -90,6 +88,7 @@
+ #define PVRSRV_MISC_INFO_TIMER_PRESENT                        (1<<0)
+ #define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT            (1<<1)
+ #define PVRSRV_MISC_INFO_MEMSTATS_PRESENT             (1<<2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT    (1<<3)
+ #define PVRSRV_PDUMP_MAX_FILENAME_SIZE                        20
+ #define PVRSRV_PDUMP_MAX_COMMENT_SIZE                 200
+@@ -133,7 +132,8 @@
+       IMG_OPENGLES2  = 0x00000003,
+       IMG_D3DM           = 0x00000004,
+       IMG_SRV_UM         = 0x00000005,
+-      IMG_OPENVG         = 0x00000006
++      IMG_OPENVG         = 0x00000006,
++      IMG_SRVCLIENT  = 0x00000007,
+ } IMG_MODULE_ID;
+@@ -202,10 +202,8 @@
+       
+       IMG_PVOID                               pvLinAddr;      
+-#if defined(SERVICES4)
+     
+       IMG_PVOID                               pvLinAddrKM;
+-#endif
+       
+       
+       IMG_DEV_VIRTADDR                sDevVAddr;
+@@ -294,6 +292,14 @@
+ } PVRSRV_DEVICE_IDENTIFIER;
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++      
++      IMG_CHAR        szName[EVENTOBJNAME_MAXLENGTH];
++      
++      IMG_HANDLE      hOSEventKM;
++
++} PVRSRV_EVENTOBJECT;
+ typedef struct _PVRSRV_MISC_INFO_
+ {
+@@ -313,9 +319,14 @@
+       IMG_UINT32      ui32MemoryStrLen;
+       
+       
++      PVRSRV_EVENTOBJECT      sGlobalEventObject;
++      IMG_HANDLE                      hOSGlobalEvent;
++      
++      
+       
+ } PVRSRV_MISC_INFO;
++
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
+@@ -335,7 +346,7 @@
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
+ IMG_IMPORT
+-PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (PVRSRV_MISC_INFO *psMiscInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
+ #if 1
+ IMG_IMPORT
+@@ -348,7 +359,9 @@
+ #endif
+ IMG_IMPORT
+-PVRSRV_ERROR PollForValue (volatile IMG_UINT32 *pui32LinMemAddr,
++PVRSRV_ERROR PollForValue ( PVRSRV_CONNECTION *psConnection,
++                                                      IMG_HANDLE hOSEvent,
++                                                      volatile IMG_UINT32 *pui32LinMemAddr,
+                                                                       IMG_UINT32 ui32Value,
+                                                                       IMG_UINT32 ui32Mask,
+                                                                       IMG_UINT32 ui32Waitus,
+@@ -631,21 +644,18 @@
+                                                                                       IMG_UINT32 ui32RegValue,
+                                                                                       IMG_UINT32 ui32Flags);
+-#ifdef SERVICES4
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                                        IMG_UINT32 ui32RegAddr,
+                                                                                                        IMG_UINT32 ui32RegValue,
+                                                                                                        IMG_UINT32 ui32Mask,
+                                                                                                        IMG_UINT32 ui32Flags);
+-#endif
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                       IMG_UINT32 ui32RegAddr,
+                                                                                       IMG_UINT32 ui32RegValue,
+                                                                                       IMG_UINT32 ui32Mask);
+-#ifdef SERVICES4
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                       IMG_UINT32 ui32RegAddr,
+@@ -655,7 +665,6 @@
+                                                                                               PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+                                                                                               IMG_UINT32 ui32Offset,
+                                                                                               IMG_DEV_PHYADDR sPDDevPAddr);
+-#endif
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
+@@ -676,7 +685,6 @@
+                                                                                        IMG_CONST IMG_CHAR *pszComment,
+                                                                                        IMG_BOOL bContinuous);
+-#if defined(SERVICES4)
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                         IMG_BOOL bContinuous,
+@@ -686,7 +694,6 @@
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                                          IMG_UINT32 ui32Flags,
+                                                                                                          IMG_CONST IMG_CHAR *pszFormat, ...);
+-#endif
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
+@@ -718,7 +725,7 @@
+                                                                                       IMG_UINT32 ui32Size,
+                                                                                       IMG_UINT32 ui32PDumpFlags);
+-#ifdef SERVICES4
++
+ IMG_IMPORT
+ IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
+@@ -726,7 +733,6 @@
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                               IMG_UINT32 ui32RegOffset,
+                                                                                               IMG_BOOL bLastFrame);
+-#endif
+ IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(IMG_CHAR *pszLibraryName);
+ IMG_IMPORT PVRSRV_ERROR       PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
+@@ -777,9 +783,9 @@
+ IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_UINT32 ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
+ #endif 
+-PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION *psConnection, 
+-                                                                      IMG_HANDLE hOSEvent, 
+-                                                                      IMG_UINT32 ui32MSTimeout);
++IMG_IMPORT 
++PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION * psConnection, 
++                                                                      IMG_HANDLE hOSEvent);
+ #define TIME_NOT_PASSED_UINT32(a,b,c)         ((a - b) < c)
+diff -Nurd git/drivers/gpu/pvr/include4/sgxapi_km.h git/drivers/gpu/pvr/include4/sgxapi_km.h
+--- git/drivers/gpu/pvr/include4/sgxapi_km.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/sgxapi_km.h   2008-12-18 15:47:29.000000000 +0100
+@@ -32,6 +32,7 @@
+ #endif
+ #include "sgxdefs.h"
++
+ #if defined(__linux__) && !defined(USE_CODE)
+       #if defined(__KERNEL__)
+               #include <asm/unistd.h>
+@@ -64,6 +65,8 @@
+ #define SGX_MAX_TA_STATUS_VALS        32
+ #define SGX_MAX_3D_STATUS_VALS        2
++#define SGX_MAX_SRC_SYNCS                     4
++
+ #define PFLAGS_POWERDOWN                      0x00000001
+ #define PFLAGS_POWERUP                                0x00000002
+  
+@@ -75,11 +78,60 @@
+       IMG_SYS_PHYADDR                 sPhysBase;                              
+ }SGX_SLAVE_PORT;
++#ifdef SUPPORT_SGX_HWPERF
++
++#define PVRSRV_SGX_HWPERF_CBSIZE                                      0x100   
++
++#define PVRSRV_SGX_HWPERF_INVALID                                     1
++#define PVRSRV_SGX_HWPERF_TRANSFER                                    2
++#define PVRSRV_SGX_HWPERF_TA                                          3
++#define PVRSRV_SGX_HWPERF_3D                                          4
++
++#define PVRSRV_SGX_HWPERF_ON                                          0x40
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CBDATA_
++{
++      IMG_UINT32      ui32FrameNo;
++      IMG_UINT32      ui32Type;
++      IMG_UINT32      ui32StartTimeWraps;
++      IMG_UINT32      ui32StartTime;
++      IMG_UINT32      ui32EndTimeWraps;
++      IMG_UINT32      ui32EndTime;
++      IMG_UINT32      ui32ClockSpeed;
++      IMG_UINT32      ui32TimeMax;
++} PVRSRV_SGX_HWPERF_CBDATA;
++
++typedef struct _PVRSRV_SGX_HWPERF_CB_
++{
++      IMG_UINT32      ui32Woff;
++      IMG_UINT32      ui32Roff;
++      PVRSRV_SGX_HWPERF_CBDATA psHWPerfCBData[PVRSRV_SGX_HWPERF_CBSIZE];
++} PVRSRV_SGX_HWPERF_CB;
++
++
++typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB
++{
++      PVRSRV_SGX_HWPERF_CBDATA*       psHWPerfData;   
++      IMG_UINT32                                      ui32ArraySize;  
++      IMG_UINT32                                      ui32DataCount;  
++      IMG_UINT32                                      ui32Time;               
++} SGX_MISC_INFO_HWPERF_RETRIEVE_CB;
++#endif 
++
++
+ typedef enum _SGX_MISC_INFO_REQUEST_
+ {
++      SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++#ifdef SUPPORT_SGX_HWPERF
++      SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++      SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++      SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++#endif 
+       SGX_MISC_INFO_REQUEST_FORCE_I16                                 =  0x7fff
+ } SGX_MISC_INFO_REQUEST;
++
+ typedef struct _SGX_MISC_INFO_
+ {
+       SGX_MISC_INFO_REQUEST   eRequest;       
+@@ -87,6 +139,10 @@
+       union
+       {
+               IMG_UINT32      reserved;       
++              IMG_UINT32                                                                                      ui32SGXClockSpeed;
++#ifdef SUPPORT_SGX_HWPERF
++              SGX_MISC_INFO_HWPERF_RETRIEVE_CB                                        sRetrieveCB;
++#endif 
+       } uData;
+ } SGX_MISC_INFO;
+@@ -162,6 +218,15 @@
+ } PVR3DIF4_KICKTA_PDUMP, *PPVR3DIF4_KICKTA_PDUMP;
+ #endif        
++#if defined(TRANSFER_QUEUE)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_MAX_2D_BLIT_CMD_SIZE              26
++#define SGX_MAX_2D_SRC_SYNC_OPS                       3
++#endif
++#define SGX_MAX_TRANSFER_STATUS_VALS  64
++#define SGX_MAX_TRANSFER_SYNC_OPS     5
++#endif
++
+ #if defined (__cplusplus)
+ }
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c   2008-12-18 15:47:29.000000000 +0100
+@@ -197,11 +197,27 @@
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
++              
++
++              psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
++              psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
++              psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
++              psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;              
++              psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
++              psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
++
+               for(i=0; i < BC_EXAMPLE_NUM_BUFFERS; i++)
+               {
++                      IMG_UINT32 ui32Size = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++
++                      if(psDevInfo->sBufferInfo.pixelformat == PVRSRV_PIXEL_FORMAT_YUV420)
++                      {
++                              
++                              ui32Size += ((BC_EXAMPLE_STRIDE >> 1) * (BC_EXAMPLE_HEIGHT >> 1) << 1);
++                      }
+                       
+-                      if (AllocContigMemory(BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE,
++                      if (AllocContigMemory(ui32Size,
+                                                                 &psDevInfo->psSystemBuffer[i].hMemHandle,
+                                                                 &psDevInfo->psSystemBuffer[i].sCPUVAddr,
+                                                                 &sSystemBufferCPUPAddr) != PVRSRV_OK)
+@@ -211,12 +227,14 @@
+                       psDevInfo->ui32NumBuffers++;
+-                      psDevInfo->psSystemBuffer[i].ui32Size = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++                      psDevInfo->psSystemBuffer[i].ui32Size = ui32Size;
+                       psDevInfo->psSystemBuffer[i].sSysAddr = CpuPAddrToSysPAddr(sSystemBufferCPUPAddr);
+                       psDevInfo->psSystemBuffer[i].sPageAlignSysAddr.uiAddr = (psDevInfo->psSystemBuffer[i].sSysAddr.uiAddr & 0xFFFFF000);
+                       psDevInfo->psSystemBuffer[i].psSyncData = IMG_NULL;
+               }
++              psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ui32NumBuffers;
++
+               
+               psDevInfo->sBCJTable.ui32TableSize = sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE);
+@@ -234,16 +252,6 @@
+               {
+                       return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+               }
+-
+-              
+-
+-              psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
+-              psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
+-              psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
+-              psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;              
+-              psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
+-              psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
+-              psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ui32NumBuffers;
+       }
+       
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h   2008-12-18 15:47:29.000000000 +0100
+@@ -39,11 +39,32 @@
+ #define BC_EXAMPLE_NUM_BUFFERS        3
+-#define BC_EXAMPLE_WIDTH              (160)
++#define YUV420 1
++#ifdef YUV420
++
++#define BC_EXAMPLE_WIDTH              (320)
+ #define BC_EXAMPLE_HEIGHT             (160)
+-#define BC_EXAMPLE_STRIDE             (160*2)
++#define BC_EXAMPLE_STRIDE             (320)
++#define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_YUV420)
++
++#else
++#ifdef YUV422
++
++#define BC_EXAMPLE_WIDTH              (320)
++#define BC_EXAMPLE_HEIGHT             (160)
++#define BC_EXAMPLE_STRIDE             (320*2)
+ #define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_YVYU)
++#else
++
++#define BC_EXAMPLE_WIDTH              (320)
++#define BC_EXAMPLE_HEIGHT             (160)
++#define BC_EXAMPLE_STRIDE             (320*2)
++#define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_RGB565)
++
++#endif
++#endif
++
+ #define BC_EXAMPLE_DEVICEID            0
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c     2008-12-18 15:47:29.000000000 +0100
+@@ -38,11 +38,10 @@
+ #include "bufferclass_example.h"
+ #include "bufferclass_example_linux.h"
++#include "pvrmodule.h"
+ #define DEVNAME       "bc_example"
+-MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+-MODULE_LICENSE("GPL");
+ MODULE_SUPPORTED_DEVICE(DEVNAME);
+ int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+@@ -259,22 +258,11 @@
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+-      else
+-      {
+-              IMG_VOID *pvPage;
+-              IMG_VOID *pvEnd = pvLinAddr + ui32Size;
+-
+-              for(pvPage = pvLinAddr; pvPage < pvEnd;  pvPage += PAGE_SIZE)
+-              {
+-                      SetPageReserved(virt_to_page(pvPage));
+-              }
+-              pPhysAddr->uiAddr = dma;
+-              *pLinAddr = pvLinAddr;
++      pPhysAddr->uiAddr = dma;
++      *pLinAddr = pvLinAddr;
+-              return PVRSRV_OK;
+-      }
+-      return PVRSRV_ERROR_OUT_OF_MEMORY;
++      return PVRSRV_OK;
+ #endif
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c   2008-12-18 15:47:29.000000000 +0100
+@@ -26,6 +26,43 @@
+ #include "bufferclass_example.h"
++void FillYUV420Image(void *pvDest, int width, int height, int bytestride)
++{
++      static int iPhase = 0;
++      int i, j;
++      unsigned char u,v,y;
++      unsigned char *pui8y = (unsigned char *)pvDest;
++      unsigned short *pui16uv;
++      unsigned int count = 0;
++
++      for(j=0;j<height;j++)
++      {
++              for(i=0;i<width;i++)
++              {
++                      y = (((i+iPhase)>>6)%(2)==0)? 0x7f:0x00;
++
++                      pui8y[count++] = y;
++              }
++      }
++
++      pui16uv = (unsigned short *)((unsigned char *)pvDest + (width * height));
++      count = 0;
++
++      for(j=0;j<height;j+=2)
++      {
++              for(i=0;i<width;i+=2)
++              {
++                      u = (j<(height/2))? ((i<(width/2))? 0xFF:0x33) : ((i<(width/2))? 0x33:0xAA);
++                      v = (j<(height/2))? ((i<(width/2))? 0xAC:0x0) : ((i<(width/2))? 0x03:0xEE);
++
++                      
++                      pui16uv[count++] = (v << 8) | u;
++
++              }
++      }
++
++      iPhase++;
++}
+ void FillYUV422Image(void *pvDest, int width, int height, int bytestride)
+ {
+@@ -37,12 +74,12 @@
+       for(y=0;y<height;y++)
+       {
+-              for(x=0;x<width >> 1;x++)
++              for(x=0;x<width;x+=2)
+               {
+-                      u = (y<(height/2))? ((x<(width/4))? 0xFF:0x33) : ((x<(width/4))? 0x33:0xAA);
+-                      v = (y<(height/2))? ((x<(width/4))? 0xAA:0x0) : ((x<(width/4))? 0x03:0xEE);
++                      u = (y<(height/2))? ((x<(width/2))? 0xFF:0x33) : ((x<(width/2))? 0x33:0xAA);
++                      v = (y<(height/2))? ((x<(width/2))? 0xAA:0x0) : ((x<(width/2))? 0x03:0xEE);
+-                      y0 = y1 = (((x+iPhase)>>4)%(2)==0)? 0x7f:0x00;
++                      y0 = y1 = (((x+iPhase)>>6)%(2)==0)? 0x7f:0x00;
+                       
+                       pui32yuv[count++] = (y1 << 24) | (v << 16) | (y0 << 8) | u;
+@@ -115,19 +152,36 @@
+       
+       psSyncData = psBuffer->psSyncData;
+-      
+       if(psSyncData)
+       {
++              
++              if(psSyncData->ui32ReadOpsPending != psSyncData->ui32ReadOpsComplete)
++              {
++                      return -1;
++              }
++
++              
+               psSyncData->ui32WriteOpsPending++;
+       }
+-      if(psBufferInfo->pixelformat == PVRSRV_PIXEL_FORMAT_RGB565)
+-      {
+-              FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
+-      }
+-      else
++      switch(psBufferInfo->pixelformat)
+       {
+-              FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++              case PVRSRV_PIXEL_FORMAT_RGB565:
++              default:
++              {
++                      FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++                      break;
++              }
++              case PVRSRV_PIXEL_FORMAT_YVYU:
++              {
++                      FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++                      break;
++              }
++              case PVRSRV_PIXEL_FORMAT_YUV420:
++              {
++                      FillYUV420Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++                      break;
++              }
+       }
+       
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile 2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,40 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++# 
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++# 
++# This program is distributed in the hope it will be useful but, except 
++# as otherwise stated in writing, without any warranty; without even the 
++# implied warranty of merchantability or fitness for a particular purpose. 
++# See the GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++# 
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++# 
++#
++#
++
++MODULE                = bc_example
++
++INCLUDES =    -I$(EURASIAROOT)/include4 \
++              -I$(EURASIAROOT)/services4/include \
++              -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++              -I$(EURASIAROOT)/services4/system/include \
++
++SOURCES =     ../bufferclass_example.c \
++                      ../bufferclass_example_linux.c \
++                      ../bufferclass_example_private.c
++
++
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile   1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile   2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,39 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++# 
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++# 
++# This program is distributed in the hope it will be useful but, except 
++# as otherwise stated in writing, without any warranty; without even the 
++# implied warranty of merchantability or fitness for a particular purpose. 
++# See the GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++# 
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++# 
++#
++#
++
++MODULE                = omaplfb
++
++INCLUDES =    -I$(EURASIAROOT)/include4 \
++              -I$(EURASIAROOT)/services4/include \
++              -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++              -I$(EURASIAROOT)/services4/system/include \
++
++SOURCES       =       ../omaplfb_displayclass.c \
++                      ../omaplfb_linux.c
++
++
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c    2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c    2008-12-18 15:47:29.000000000 +0100
+@@ -41,6 +41,7 @@
+ #define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
+ #define       DRIVER_PREFIX   "omaplfb"
++//extern int omap2_disp_get_output_dev(int);
+ static IMG_VOID *gpvAnchor;
+@@ -57,8 +58,6 @@
+                                                  PVR_POWER_STATE      eCurrentPowerState);
+ #endif
+-extern void omap_dispc_set_plane_base(int plane, IMG_UINT32 phys_addr);
+-
+ static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL;
+ static OMAPLFB_DEVINFO * GetAnchorPtr(IMG_VOID)
+@@ -124,28 +123,53 @@
+ static PVRSRV_ERROR Flip(OMAPLFB_SWAPCHAIN *psSwapChain,
+                                                 IMG_UINT32 aPhyAddr)
+ {
+-      if (1 /* omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD */)
++      IMG_UINT32 control;
++      OMAPLFB_DEVINFO *psDevInfo;
++
++      psDevInfo = GetAnchorPtr();     
++
++      if (1) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD)
+       {
+-                omap_dispc_set_plane_base(0, aPhyAddr);
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA0, aPhyAddr);
++
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA1, aPhyAddr);
++      
++              control = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_CONTROL);
++              control |= OMAP_CONTROL_GOLCD;
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_CONTROL, control);
++              
+               return PVRSRV_OK;
+       }
+       else
+-      if (0 /*omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV*/)
++      if (0) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV)
+       {
+-                omap_dispc_set_plane_base(0, aPhyAddr);
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA0, aPhyAddr);
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA1, aPhyAddr + psDevInfo->sFBInfo.ui32ByteStride);
++      
++              control = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_CONTROL);
++              control |= OMAP_CONTROL_GODIGITAL;
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_CONTROL, control);
++              
+               return PVRSRV_OK;
+       }
+-
++      
+       return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ static IMG_VOID EnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+-
++      
++      IMG_UINT32 ui32InterruptEnable  = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
++      ui32InterruptEnable |= OMAPLCD_INTMASK_VSYNC;
++      OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ui32InterruptEnable );
+ }
+ static IMG_VOID DisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
++      
++      IMG_UINT32 ui32InterruptEnable = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
++      ui32InterruptEnable &= ~(OMAPLCD_INTMASK_VSYNC);
++      OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ui32InterruptEnable);
+ }
+ static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
+@@ -169,6 +193,7 @@
+ #endif
+       );
++      
+       memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
+       psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
+@@ -363,6 +388,7 @@
+       PVR_UNREFERENCED_PARAMETER(ui32OEMFlags);       
+       PVR_UNREFERENCED_PARAMETER(pui32SwapChainID);
+       
++      
+       if(!hDevice 
+       || !psDstSurfAttrib 
+       || !psSrcSurfAttrib 
+@@ -399,6 +425,7 @@
+       || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
+       || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
+       {
++              
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }               
+@@ -407,6 +434,7 @@
+       || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
+       || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
+       {
++              
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }               
+@@ -467,12 +495,21 @@
+       }
+       
++      psSwapChain->pvRegs = ioremap(psDevInfo->psLINFBInfo->fix.mmio_start, psDevInfo->psLINFBInfo->fix.mmio_len);
++
++      if (psSwapChain->pvRegs == IMG_NULL)
++      {
++              printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
++              goto ErrorFreeVSyncItems;
++      }
++
++      
+       unblank_display(psDevInfo);
+       if (OMAPLFBInstallVSyncISR(psSwapChain) != PVRSRV_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": ISR handler failed to register\n");
+-              goto ErrorFreeVSyncItems;
++              goto ErrorUnmapRegisters;
+       }
+               
+       EnableVSyncInterrupt(psSwapChain);
+@@ -485,6 +522,8 @@
+       return PVRSRV_OK;
++ErrorUnmapRegisters:
++      iounmap(psSwapChain->pvRegs);
+ ErrorFreeVSyncItems:
+       OMAPLFBFreeKernelMem(psVSyncFlips);
+ ErrorFreeBuffers:
+@@ -590,6 +629,9 @@
+       }
+       
++      iounmap(psSwapChain->pvRegs);
++
++      
+       OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips);
+       OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
+       OMAPLFBFreeKernelMem(psSwapChain);
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h 2008-12-18 15:47:29.000000000 +0100
+@@ -121,6 +121,9 @@
+       IMG_UINT32 ui32RemoveIndex;
+       
++      IMG_VOID *pvRegs;
++
++      
+       PVRSRV_DC_DISP2SRV_KMJTABLE     *psPVRJTable;
+ } OMAPLFB_SWAPCHAIN;
+@@ -194,8 +197,8 @@
+ IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size);
+ IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID *pvMem);
+-IMG_VOID OMAPLFBWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+-IMG_UINT32 OMAPLFBReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset);
++IMG_VOID OMAPLFBVSyncWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++IMG_UINT32 OMAPLFBVSyncReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset);
+ PVRSRV_ERROR OMAPLFBGetLibFuncAddr(IMG_CHAR *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
+ PVRSRV_ERROR OMAPLFBInstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain);
+ PVRSRV_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain);
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   2008-12-18 15:47:29.000000000 +0100
+@@ -101,28 +100,57 @@
+ }
+ static void
+-OMAPLFBVSyncISR(void *arg)
++OMAPLFBVSyncISR(void *arg, struct pt_regs *regs)
+ {
+-      (void) OMAPLFBVSyncIHandler((OMAPLFB_SWAPCHAIN *)arg);
++      OMAPLFB_SWAPCHAIN *psSwapChain= (OMAPLFB_SWAPCHAIN *)arg;
++      
++      (void) OMAPLFBVSyncIHandler(psSwapChain);
+ }
+-#define DISPC_IRQ_VSYNC 0x0002
+-
+ PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+-        if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain) != 0)
+-            return PVRSRV_ERROR_OUT_OF_MEMORY; /* not worth a proper mapping */
+-
++      if (1) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD)
++       {
++              if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR,
++                                      psSwapChain) != 0)
++              {
++                      printk("request OMAPLCD IRQ failed");
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++      }
++      else
++      if (0) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV)
++      {
++              if (omap_dispc_request_irq(DISPC_IRQSTATUS_EVSYNC_EVEN|DISPC_IRQSTATUS_EVSYNC_ODD, OMAPLFBVSyncISR, psSwapChain) != 0)
++              {
++                      printk("request OMAPLCD IRQ failed");
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++      }
++              
+       return PVRSRV_OK;
+ }
+ PVRSRV_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+-        omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
++      omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
++              
++      return PVRSRV_OK;               
++}
+-      return PVRSRV_OK;
++IMG_VOID OMAPLFBVSyncWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++      IMG_VOID *pvRegAddr = (IMG_VOID *)((IMG_UINT8 *)psSwapChain->pvRegs + ui32Offset);
++
++      
++      writel(ui32Value, pvRegAddr);
++}
++
++IMG_UINT32 OMAPLFBVSyncReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset)
++{
++      return readl((IMG_UINT8 *)psSwapChain->pvRegs + ui32Offset);
+ }
+ module_init(OMAPLFB_Init);
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvr_bridge.h git/drivers/gpu/pvr/services4/include/pvr_bridge.h
+--- git/drivers/gpu/pvr/services4/include/pvr_bridge.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/pvr_bridge.h 2008-12-18 15:47:29.000000000 +0100
+@@ -202,14 +202,14 @@
+ #define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST  (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)      
+ #define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT                       PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
+-#define PVRSRV_BRIDGE_EVENT_OBJECT_CONNECT            PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
+-#define PVRSRV_BRIDGE_EVENT_OBJECT_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN                       PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE              PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+ #define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST           (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+       
+ #define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD             (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
+-#define PVRSRV_KERNAL_MODE_CLIENT                             1
++#define PVRSRV_KERNEL_MODE_CLIENT                             1
+ typedef struct PVRSRV_BRIDGE_RETURN_TAG
+ {
+@@ -716,7 +716,7 @@
+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+-      IMG_HANDLE *hKernelMemInfo;
++      IMG_HANDLE hKernelMemInfo;
+       IMG_UINT32 ui32Offset;
+       IMG_DEV_PHYADDR sPDDevPAddr;
+ }PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
+@@ -1302,9 +1302,25 @@
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+       IMG_HANDLE      hOSEventKM;
+-      IMG_UINT32  ui32MSTimeout;
+ } PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
++{
++      PVRSRV_EVENTOBJECT sEventObject;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
++
++typedef struct        PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
++{
++      IMG_HANDLE hOSEvent;
++      PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
++{
++      PVRSRV_EVENTOBJECT sEventObject;
++      IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
++
+ #if defined (__cplusplus)
+ }
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/include/servicesint.h git/drivers/gpu/pvr/services4/include/servicesint.h
+--- git/drivers/gpu/pvr/services4/include/servicesint.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/servicesint.h        2008-12-18 15:47:29.000000000 +0100
+@@ -38,16 +38,6 @@
+ #define DRIVERNAME_MAXLENGTH  (100)
+-#define EVENTOBJNAME_MAXLENGTH (50)
+-
+-
+-typedef struct _PVRSRV_EVENTOBJECT_
+-{
+-      
+-      IMG_CHAR        szName[EVENTOBJNAME_MAXLENGTH];
+-      
+-      IMG_HANDLE      hOSEventKM;
+-} PVRSRV_EVENTOBJECT;
+ typedef struct _PVRSRV_KERNEL_MEM_INFO_
+@@ -93,6 +83,13 @@
+ } PVRSRV_KERNEL_SYNC_INFO;
++typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
++{
++      IMG_UINT32                      ui32ReadOpPendingVal;
++      IMG_DEV_VIRTADDR        sReadOpsCompleteDevVAddr;
++      IMG_UINT32                      ui32WriteOpPendingVal;
++      IMG_DEV_VIRTADDR        sWriteOpsCompleteDevVAddr;
++} PVRSRV_DEVICE_SYNC_OBJECT;
+ typedef struct _PVRSRV_SYNC_OBJECT
+ {
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge.h git/drivers/gpu/pvr/services4/include/sgx_bridge.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/sgx_bridge.h 2008-12-18 15:47:29.000000000 +0100
+@@ -70,8 +70,16 @@
+ #define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT  PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
+ #define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
+ #define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT        PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_SUBMIT2D                                    PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT    PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
++#endif
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT        PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_COUNTERS                PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
+-#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+28)
+  
+ typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
+@@ -161,8 +169,18 @@
+ {
+       IMG_UINT32                              ui32BridgeFlags; 
+       IMG_HANDLE                              hDevCookie;
+-      IMG_DEV_VIRTADDR                sHWRenderContextDevVAddr;
++      PVRSRV_TRANSFER_SGX_KICK                        sKick;
+ }PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ 
++typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVRSRV_2D_SGX_KICK                              sKick;
++} PVRSRV_BRIDGE_IN_SUBMIT2D;
++#endif
+ #endif
+  
+@@ -330,6 +348,33 @@
+       IMG_HANDLE hHWRenderContext;
+ }PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
++
+ typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+@@ -337,18 +382,54 @@
+       IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
+ }PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
+-typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+       IMG_HANDLE hDevCookie;
+-      IMG_HANDLE hHWRenderContext;
+-}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++      IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
+- 
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+ #define       SGX2D_MAX_BLT_CMD_SIZ           256     
+ #endif 
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_COUNTERS_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      IMG_UINT32              ui32PerfReg;
++      IMG_BOOL                bNewPerf;
++      IMG_UINT32              ui32NewPerf;
++      IMG_UINT32              ui32NewPerfReset;
++      IMG_UINT32              ui32PerfCountersReg;
++} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_COUNTERS;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_COUNTERS_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32OldPerf;
++      IMG_UINT32              aui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++      IMG_UINT32              ui32KickTACounter;
++      IMG_UINT32              ui32KickTARenderCounter;
++      IMG_UINT32              ui32CPUTime;
++      IMG_UINT32              ui32SGXTime;
++} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_COUNTERS;
++
+ #if defined (__cplusplus)
+ }
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h      1970-01-01 01:00:00.000000000 +0100
+@@ -1,139 +0,0 @@
+-/**********************************************************************
+- *
+- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+- * 
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- * 
+- * This program is distributed in the hope it will be useful but, except 
+- * as otherwise stated in writing, without any warranty; without even the 
+- * implied warranty of merchantability or fitness for a particular purpose. 
+- * See the GNU General Public License for more details.
+- * 
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- * 
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+- *
+- ******************************************************************************/
+-
+-#if !defined(__SGX_BRIDGE_KM_H__)
+-#define __SGX_BRIDGE_KM_H__
+-
+-#include "sgxapi_km.h"
+-#include "sgxinfo.h"
+-#include "sgxinfokm.h"
+-#include "sgx_bridge.h"
+-#include "pvr_bridge.h"
+-#include "perproc.h"
+-
+-#if defined (__cplusplus)
+-extern "C" {
+-#endif
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
+-                                                               IMG_DEV_VIRTADDR sHWRenderContextDevVAddr);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
+-                                               PVR3DIF4_CCB_KICK *psCCBKick);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
+-                                                                IMG_DEV_VIRTADDR sDevVAddr,
+-                                                                IMG_DEV_PHYADDR *pDevPAddr,
+-                                                                IMG_CPU_PHYADDR *pCpuPAddr);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE                hDevCookie,
+-                                                                                      IMG_HANDLE              hDevMemContext,
+-                                                                                      IMG_DEV_PHYADDR *psPDDevPAddr);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                            hDevCookie,
+-                                                              PVR3DIF4_CLIENT_INFO*   psClientInfo);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
+-                                                        SGX_MISC_INFO                 *psMiscInfo);
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-IMG_IMPORT
+-PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
+-                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
+-                                                        IMG_UINT32            ui32NumSrcSyncs,
+-                                                        PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
+-                                                        IMG_UINT32            ui32DataByteSize,
+-                                                        IMG_UINT32            *pui32BltData);
+-
+-#if defined(SGX2D_DIRECT_BLITS)
+-IMG_IMPORT
+-PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
+-                                                         IMG_UINT32                   ui32DataByteSize,
+-                                                         IMG_UINT32                   *pui32BltData);
+-#endif 
+-#endif 
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
+-IMG_IMPORT
+-PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO             *psDevInfo,
+-                                                                         PVRSRV_KERNEL_SYNC_INFO      *psSyncInfo,
+-                                                                         IMG_BOOL bWaitForComplete);
+-#endif 
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
+-                                                                      SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+-                                                         IMG_HANDLE hDevHandle,
+-                                                         SGX_BRIDGE_INIT_INFO *psInitInfo);
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
+-                                        IMG_UINT32 ui32TotalPBSize,
+-                                        IMG_HANDLE *phSharedPBDesc,
+-                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
+-                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
+-                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
+-                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
+-                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
+-                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
+-                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
+-                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
+-                                       IMG_UINT32 ui32TotalPBSize,
+-                                       IMG_HANDLE *phSharedPBDesc,
+-                                       PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
+-                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
+-
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
+-                                              PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo);
+-
+- 
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-#define       SGX2D_MAX_BLT_CMD_SIZ           256     
+-#endif 
+-
+-#if defined (__cplusplus)
+-}
+-#endif
+-
+-#endif 
+-
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgxinfo.h git/drivers/gpu/pvr/services4/include/sgxinfo.h
+--- git/drivers/gpu/pvr/services4/include/sgxinfo.h    2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/sgxinfo.h    2008-12-18 15:47:29.000000000 +0100
+@@ -59,11 +59,16 @@
+ #if defined(SGX_SUPPORT_HWPROFILING)
+       IMG_HANDLE      hKernelHWProfilingMemInfo;
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      IMG_HANDLE      hKernelHWPerfCBMemInfo;
++#endif
+       IMG_UINT32 ui32EDMTaskReg0;
+       IMG_UINT32 ui32EDMTaskReg1;
+-      IMG_UINT32 ui32ClockGateMask;
++      IMG_UINT32 ui32ClkGateCtl;
++      IMG_UINT32 ui32ClkGateCtl2;
++      IMG_UINT32 ui32ClkGateStatusMask;
+       IMG_UINT32 ui32CacheControl;
+@@ -111,11 +116,13 @@
+ #define PVRSRV_CCBFLAGS_RASTERCMD                     0x1
+ #define PVRSRV_CCBFLAGS_TRANSFERCMD                   0x2
+ #define PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD     0x3
++#if defined(SGX_FEATURE_2D_HARDWARE) 
++#define PVRSRV_CCBFLAGS_2DCMD                         0x4 
++#endif
+ #define PVRSRV_KICKFLAG_RENDER                                0x1
+ #define PVRSRV_KICKFLAG_PIXEL                         0x2
+-
+ #define       SGX_BIF_INVALIDATE_PTCACHE      0x1
+ #define       SGX_BIF_INVALIDATE_PDCACHE      0x2
+@@ -125,25 +132,40 @@
+       PVRSRV_SGX_COMMAND_TYPE         eCommand;
+       PVRSRV_SGX_COMMAND              sCommand;
+       IMG_HANDLE                      hCCBKernelMemInfo;
+-      IMG_HANDLE                      hDstKernelSyncInfo;
+-      IMG_UINT32                      ui32DstReadOpsPendingOffset;
+-      IMG_UINT32                      ui32DstWriteOpsPendingOffset;
++      IMG_HANDLE      hRenderSurfSyncInfo;
++
+       IMG_UINT32      ui32NumTAStatusVals;
+-      IMG_UINT32      aui32TAStatusValueOffset[SGX_MAX_TA_STATUS_VALS];
+       IMG_HANDLE      ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
+       IMG_UINT32      ui32Num3DStatusVals;
+-      IMG_UINT32      aui323DStatusValueOffset[SGX_MAX_3D_STATUS_VALS];
+       IMG_HANDLE      ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
+-#ifdef        NO_HARDWARE
+-      IMG_BOOL        bTerminate;
+-      IMG_HANDLE      hUpdateDstKernelSyncInfo;
++
++      IMG_BOOL        bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++      IMG_BOOL        bTerminateOrAbort;
++#endif
++      IMG_UINT32      ui32KickFlags;
++
++      
++      IMG_UINT32      ui32CCBOffset;
++
++      
++      IMG_UINT32      ui32NumSrcSyncs;
++      IMG_HANDLE      ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++
++      
++      IMG_BOOL        bTADependency;
++      IMG_HANDLE      hTA3DSyncInfo;
++
++      IMG_HANDLE      hTASyncInfo;
++      IMG_HANDLE      h3DSyncInfo;
++#if defined(NO_HARDWARE)
+       IMG_UINT32      ui32WriteOpsPendingVal;
+ #endif
+-      IMG_UINT32                                      ui32KickFlags;
+ } PVR3DIF4_CCB_KICK;
++
+ typedef struct _PVRSRV_SGX_HOST_CTL_
+ {     
+@@ -158,163 +180,25 @@
+       IMG_UINT32                              ui32ResManFlags;                
+       IMG_DEV_VIRTADDR                sResManCleanupData;             
++      
+       IMG_DEV_VIRTADDR                sTAHWPBDesc;            
+       IMG_DEV_VIRTADDR                s3DHWPBDesc;
++      IMG_DEV_VIRTADDR                sHostHWPBDesc;          
+-} PVRSRV_SGX_HOST_CTL;
+-
+-
+-#if defined(SUPPORT_HW_RECOVERY)
+-typedef struct _SGX_INIT_SCRIPT_DATA
+-{
+-      IMG_UINT32 asHWRecoveryData[SGX_MAX_DEV_DATA];
+-} SGX_INIT_SCRIPT_DATA;
+-#endif
+-
+-typedef struct _PVRSRV_SGXDEV_INFO_
+-{
+-      PVRSRV_DEVICE_TYPE              eDeviceType;
+-      PVRSRV_DEVICE_CLASS             eDeviceClass;
+-
+-      IMG_UINT8                               ui8VersionMajor;
+-      IMG_UINT8                               ui8VersionMinor;
+-      IMG_UINT32                              ui32CoreConfig;
+-      IMG_UINT32                              ui32CoreFlags;
+-
+-      
+-      IMG_PVOID                               pvRegsBaseKM;
+-      
+-
+-      
+-      IMG_HANDLE                              hRegMapping;
+-
+-      
+-      IMG_SYS_PHYADDR                 sRegsPhysBase;
+-      
+-      IMG_UINT32                              ui32RegSize;
+-
+-      
+-      IMG_UINT32                              ui32CoreClockSpeed;
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      
+-      SGX_SLAVE_PORT                  s2DSlavePortKM;
+-
+-      
+-      PVRSRV_RESOURCE                 s2DSlaveportResource;
+-
+-      
+-      IMG_UINT32                      ui322DFifoSize;
+-      IMG_UINT32                      ui322DFifoOffset;
+-      
+-      IMG_HANDLE                      h2DCmdCookie;
+-      
+-      IMG_HANDLE                      h2DQueue;
+-      IMG_BOOL                        b2DHWRecoveryInProgress;
+-      IMG_BOOL                        b2DHWRecoveryEndPending;
+-      IMG_UINT32                      ui322DCompletedBlits;
+-      IMG_BOOL                        b2DLockupSuspected;
+-#endif
+-      
+-    
+-      IMG_VOID                        *psStubPBDescListKM;
+-
+-
+-      
+-      IMG_DEV_PHYADDR                 sKernelPDDevPAddr;
+-
+-      IMG_VOID                                *pvDeviceMemoryHeap;
+-      PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;                     
+-      PVRSRV_SGX_KERNEL_CCB   *psKernelCCB;                   
+-      PPVRSRV_SGX_CCB_INFO    psKernelCCBInfo;                
+-      PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;  
+-      PVRSRV_SGX_CCB_CTL              *psKernelCCBCtl;                
+-      PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; 
+-      IMG_UINT32                              *pui32KernelCCBEventKicker; 
+-      IMG_UINT32                              ui32TAKickAddress;              
+-      IMG_UINT32                              ui32TexLoadKickAddress; 
+-      IMG_UINT32                              ui32VideoHandlerAddress;
+-#if defined(SGX_SUPPORT_HWPROFILING)
+-      PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
+-#endif
+-
+-      
+-      IMG_UINT32                              ui32ClientRefCount;
+-
+-      
+-      IMG_UINT32                              ui32CacheControl;
+-
+-      
+-
+-
+-      IMG_VOID                                *pvMMUContextList;
+-
+-      
+-      IMG_BOOL                                bForcePTOff;
+-
+-      IMG_UINT32                              ui32EDMTaskReg0;
+-      IMG_UINT32                              ui32EDMTaskReg1;
+-
+-      IMG_UINT32                              ui32ClockGateMask;
+-      SGX_INIT_SCRIPTS                sScripts;
+-#if defined(SUPPORT_HW_RECOVERY)
+-      SGX_INIT_SCRIPT_DATA    sScriptData;
+-#endif
+-              
+-      IMG_HANDLE                              hBIFResetPDOSMemHandle;
+-      IMG_DEV_PHYADDR                 sBIFResetPDDevPAddr;
+-      IMG_DEV_PHYADDR                 sBIFResetPTDevPAddr;
+-      IMG_DEV_PHYADDR                 sBIFResetPageDevPAddr;
+-      IMG_UINT32                              *pui32BIFResetPD;
+-      IMG_UINT32                              *pui32BIFResetPT;
+-
+-
+-
+-#if defined(SUPPORT_HW_RECOVERY)
+-      
+-      IMG_HANDLE                              hTimer;
+-      
+-      IMG_UINT32                              ui32TimeStamp;
+-#endif
+-
+-      
+-      IMG_UINT32                              ui32NumResets;
+-
+-      PVRSRV_KERNEL_MEM_INFO                  *psKernelSGXHostCtlMemInfo;
+-      PVRSRV_SGX_HOST_CTL                             *psSGXHostCtl; 
+-
+-      IMG_UINT32                              ui32Flags;
+-
+-      
+-      IMG_UINT32                              ui32RegFlags;
+-
+-      #if defined(PDUMP)
+-      PVRSRV_SGX_PDUMP_CONTEXT        sPDContext;
+-      #endif
++      IMG_UINT32                              ui32NumActivePowerEvents;        
+-#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+-      
+-      IMG_VOID                                *pvDummyPTPageCpuVAddr;
+-      IMG_DEV_PHYADDR                 sDummyPTDevPAddr;
+-      IMG_HANDLE                              hDummyPTPageOSMemHandle;
+-      IMG_VOID                                *pvDummyDataPageCpuVAddr;
+-      IMG_DEV_PHYADDR                 sDummyDataDevPAddr;
+-      IMG_HANDLE                              hDummyDataPageOSMemHandle;
++#if defined(SUPPORT_SGX_HWPERF)
++      IMG_UINT32                      ui32HWPerfFlags;                
+ #endif
+-      IMG_UINT32                              asSGXDevData[SGX_MAX_DEV_DATA]; 
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      PVRSRV_EVENTOBJECT      *psSGXEventObject;
+-#endif
++       
++      IMG_UINT32                      ui32TimeWraps;
++} PVRSRV_SGX_HOST_CTL;
+-} PVRSRV_SGXDEV_INFO;
+ typedef struct _PVR3DIF4_CLIENT_INFO_
+ {
+-      IMG_VOID                                        *pvRegsBase;                    
+-      IMG_HANDLE                                      hBlockMapping;                  
+-      SGX_SLAVE_PORT                          s2DSlavePort;                   
+       IMG_UINT32                                      ui32ProcessID;                  
+       IMG_VOID                                        *pvProcess;                             
+       PVRSRV_MISC_INFO                        sMiscInfo;                              
+@@ -330,13 +214,9 @@
+ typedef struct _PVR3DIF4_INTERNAL_DEVINFO_
+ {
+       IMG_UINT32                      ui32Flags;
+-      IMG_BOOL                        bTimerEnable;
+       IMG_HANDLE                      hCtlKernelMemInfoHandle;
+       IMG_BOOL                        bForcePTOff;
+       IMG_UINT32                      ui32RegFlags;
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      IMG_HANDLE                      hOSEvent;               
+-#endif
+ } PVR3DIF4_INTERNAL_DEVINFO;
+ typedef struct _PVRSRV_SGX_SHARED_CCB_
+@@ -371,5 +251,150 @@
+       #endif
+ }PVRSRV_SGX_CCB;
++typedef struct _CTL_STATUS_
++{
++      IMG_DEV_VIRTADDR        sStatusDevAddr;
++      IMG_UINT32              ui32StatusValue;
++} CTL_STATUS, *PCTL_STATUS;
++
++#if defined(TRANSFER_QUEUE)
++#define SGXTQ_MAX_STATUS 5
++typedef struct _PVR3DIF4_CMDTA_SHARED_
++{
++      IMG_UINT32                      ui32NumTAStatusVals;
++      IMG_UINT32                      ui32Num3DStatusVals;
++      
++      
++      IMG_UINT32                      ui32WriteOpsPendingVal;
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++      IMG_UINT32                      ui32ReadOpsPendingVal;
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      IMG_UINT32                      ui32TQSyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR                sTQSyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32                      ui32TQSyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR                sTQSyncReadOpsCompleteDevVAddr;
++
++      
++      IMG_UINT32                      ui323DTQSyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR                s3DTQSyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32                      ui323DTQSyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR                s3DTQSyncReadOpsCompleteDevVAddr;
++      
++      
++      IMG_UINT32                      ui32NumSrcSyncs;
++      PVRSRV_DEVICE_SYNC_OBJECT       asSrcSyncs[SGX_MAX_SRC_SYNCS];
++
++      CTL_STATUS                      sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++      CTL_STATUS                      sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       sTA3DDependancy;        
++      
++} PVR3DIF4_CMDTA_SHARED;
++
++typedef struct _PVR3DIF4_TRANSFERCMD_SHARED_
++{
++      
++      
++      IMG_UINT32              ui32SrcReadOpPendingVal;
++      IMG_DEV_VIRTADDR        sSrcReadOpsCompleteDevAddr;
++      
++      IMG_UINT32              ui32SrcWriteOpPendingVal;
++      IMG_DEV_VIRTADDR        sSrcWriteOpsCompleteDevAddr;
++
++      
++      
++      IMG_UINT32              ui32DstReadOpPendingVal;
++      IMG_DEV_VIRTADDR        sDstReadOpsCompleteDevAddr;
++      
++      IMG_UINT32              ui32DstWriteOpPendingVal;
++      IMG_DEV_VIRTADDR        sDstWriteOpsCompleteDevAddr;
++
++      
++      IMG_UINT32              ui32TASyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR        sTASyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32              ui32TASyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR        sTASyncReadOpsCompleteDevVAddr;
++
++      
++      IMG_UINT32              ui323DSyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR        s3DSyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32              ui323DSyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR        s3DSyncReadOpsCompleteDevVAddr;
++
++      IMG_UINT32              ui32NumStatusVals;
++      CTL_STATUS              sCtlStatusInfo[SGXTQ_MAX_STATUS];
++
++      IMG_UINT32              ui32NumSrcSync;
++      IMG_UINT32              ui32NumDstSync;
++
++      IMG_DEV_VIRTADDR        sSrcWriteOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++      IMG_DEV_VIRTADDR        sSrcReadOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++
++      IMG_DEV_VIRTADDR        sDstWriteOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++      IMG_DEV_VIRTADDR        sDstReadOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++} PVR3DIF4_TRANSFERCMD_SHARED, *PPVR3DIF4_TRANSFERCMD_SHARED;
++
++typedef struct _PVRSRV_TRANSFER_SGX_KICK_
++{
++      IMG_HANDLE              hCCBMemInfo;
++      IMG_UINT32              ui32SharedCmdCCBOffset;
++
++      IMG_DEV_VIRTADDR        sHWTransferContextDevVAddr;
++
++      IMG_HANDLE              hTASyncInfo;
++      IMG_HANDLE              h3DSyncInfo;
++
++      IMG_UINT32              ui32NumSrcSync;
++      IMG_HANDLE              ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++      IMG_UINT32              ui32NumDstSync;
++      IMG_HANDLE              ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++      IMG_UINT32              ui32StatusFirstSync;
++} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _PVR3DIF4_2DCMD_SHARED_ {
++      
++      IMG_UINT32                      ui32NumSrcSync;
++      PVRSRV_DEVICE_SYNC_OBJECT       sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
++      
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       sDstSyncData;
++      
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       sTASyncData;
++      
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       s3DSyncData;
++} PVR3DIF4_2DCMD_SHARED, *PPVR3DIF4_2DCMD_SHARED;
++
++typedef struct _PVRSRV_2D_SGX_KICK_
++{
++      IMG_HANDLE              hCCBMemInfo;
++      IMG_UINT32              ui32SharedCmdCCBOffset;
++
++      IMG_DEV_VIRTADDR        sHW2DContextDevVAddr;
++
++      IMG_UINT32              ui32NumSrcSync;
++      IMG_HANDLE              ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
++      
++      
++      IMG_HANDLE              hDstSyncInfo;
++      
++      
++      IMG_HANDLE              hTASyncInfo;
++      
++      
++      IMG_HANDLE              h3DSyncInfo;
++      
++} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
++#endif        
++#endif        
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS        9
++
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+--- git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c   2008-12-18 15:47:29.000000000 +0100
+@@ -44,7 +44,6 @@
+ #include "bridged_pvr_bridge.h"
+ #include "env_data.h"
+-
+ #if defined (__linux__)
+ #include "mmap.h"
+ #else
+@@ -66,7 +65,7 @@
+ static IMG_BOOL gbInitServerRunning = IMG_FALSE;
+ static IMG_BOOL gbInitServerRan = IMG_FALSE;
+-static IMG_BOOL gbInitServerSuccessful = IMG_FALSE;
++static IMG_BOOL gbInitSuccessful = IMG_FALSE;
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+@@ -446,7 +445,13 @@
+ }
+-
++#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
++int
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++                                         PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
+ static int
+ PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
+                                          PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
+@@ -512,7 +517,7 @@
+               psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+               psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+               psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
+-              psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++              psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+               psAllocDeviceMemOUT->eError =
+                       PVRSRVAllocHandle(psPerProc->psHandleBase,
+@@ -568,6 +573,7 @@
+       return 0;
+ }
++#endif 
+ static int
+ PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
+@@ -1547,12 +1553,12 @@
+               return 0;
+       }
+-      if(psDoKickIN->sCCBKick.hDstKernelSyncInfo != IMG_NULL)
++      if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL)
+       {
+               psRetOUT->eError =
+                       PVRSRVLookupHandle(psPerProc->psHandleBase,
+-                                                         &psDoKickIN->sCCBKick.hDstKernelSyncInfo,
+-                                                         psDoKickIN->sCCBKick.hDstKernelSyncInfo,
++                                                         &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++                                                         psDoKickIN->sCCBKick.hTA3DSyncInfo,
+                                                          PVRSRV_HANDLE_TYPE_SYNC_INFO); 
+               if(psRetOUT->eError != PVRSRV_OK)
+@@ -1561,13 +1567,12 @@
+               }
+       }
+-#if defined (NO_HARDWARE)
+-      if(psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo != IMG_NULL)
++      if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL)
+       {
+               psRetOUT->eError =
+                       PVRSRVLookupHandle(psPerProc->psHandleBase,
+-                                                         &psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
+-                                                         psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
++                                                         &psDoKickIN->sCCBKick.hTASyncInfo,
++                                                         psDoKickIN->sCCBKick.hTASyncInfo,
+                                                          PVRSRV_HANDLE_TYPE_SYNC_INFO); 
+               if(psRetOUT->eError != PVRSRV_OK)
+@@ -1575,7 +1580,46 @@
+                       return 0;
+               }
+       }
+-#endif
++
++      if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.h3DSyncInfo,
++                                                         psDoKickIN->sCCBKick.h3DSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      
++      if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++                                                         psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
+       for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
+       {
+               psRetOUT->eError =
+@@ -1590,6 +1634,11 @@
+               }
+       }
++      if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
+       for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
+       {
+               psRetOUT->eError =
+@@ -1604,6 +1653,20 @@
+               }
+       }
++      if(psDoKickIN->sCCBKick.hRenderSurfSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.hRenderSurfSyncInfo,
++                                                         psDoKickIN->sCCBKick.hRenderSurfSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
+       psRetOUT->eError =
+               SGXDoKickKM(hDevCookieInt, 
+                                       &psDoKickIN->sCCBKick);
+@@ -1620,51 +1683,119 @@
+                       PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+       IMG_HANDLE hDevCookieInt;
++      PVRSRV_TRANSFER_SGX_KICK *psKick;
++      IMG_UINT32 i;
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
+       PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++      psKick = &psSubmitTransferIN->sKick;
++
+       psRetOUT->eError =
+               PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDevCookieInt,
+                                                  psSubmitTransferIN->hDevCookie,
+                                                  PVRSRV_HANDLE_TYPE_DEV_NODE);
+-
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+       }
+       psRetOUT->eError =
+-              SGXSubmitTransferKM(hDevCookieInt,
+-                                                      psSubmitTransferIN->sHWRenderContextDevVAddr);
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &psKick->hCCBMemInfo,
++                                                 psKick->hCCBMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->hTASyncInfo,
++                                                         psKick->hTASyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->h3DSyncInfo,
++                                                         psKick->h3DSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for (i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->ahSrcSyncInfo[i],
++                                                         psKick->ahSrcSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for (i = 0; i < psKick->ui32NumDstSync; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->ahDstSyncInfo[i],
++                                                         psKick->ahDstSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
+       return 0;
+ }
+-#endif
++#if defined(SGX_FEATURE_2D_HARDWARE)
+ static int
+-SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
+-                               PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
+-                               PVRSRV_BRIDGE_RETURN *psRetOUT,
+-                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++SGXSubmit2DBW(IMG_UINT32 ui32BridgeID,
++                      PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
++                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                      PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+       IMG_HANDLE hDevCookieInt;
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+-      SGX_MISC_INFO *psMiscInfo;
+-
+-
+-      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETMISCINFO);
++      PVRSRV_2D_SGX_KICK *psKick;
++      IMG_UINT32 i;
+-      
+-      psMiscInfo =
+-              (SGX_MISC_INFO *)((IMG_UINT8 *)psSGXGetMiscInfoIN
+-                                                + sizeof(PVRSRV_BRIDGE_IN_SGXGETMISCINFO));
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+       psRetOUT->eError =
+-              PVRSRVLookupHandle(psPerProc->psHandleBase, 
+-                                                 &hDevCookieInt, 
+-                                                 psSGXGetMiscInfoIN->hDevCookie, 
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psSubmit2DIN->hDevCookie,
+                                                  PVRSRV_HANDLE_TYPE_DEV_NODE);
+       if(psRetOUT->eError != PVRSRV_OK)
+@@ -1672,45 +1803,156 @@
+               return 0;
+       }
+-      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++      psKick = &psSubmit2DIN->sKick;
+-      if(CopyFromUserWrapper(psPerProc, 
+-                                     ui32BridgeID,
+-                                                 psMiscInfo,
+-                                                 psSGXGetMiscInfoIN->psMiscInfo,
+-                                                 sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &psKick->hCCBMemInfo,
++                                                 psKick->hCCBMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
+       {
+-              return -EFAULT;
++              return 0;
+       }
+-      switch(psMiscInfo->eRequest)
++      if (psKick->hTASyncInfo != IMG_NULL)
+       {
+-              default:
+-                      break;
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->hTASyncInfo,
++                                                         psKick->hTASyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
+       }
+-      
+-      psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, psMiscInfo);
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->h3DSyncInfo,
++                                                         psKick->h3DSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
+-      
+-      switch(psMiscInfo->eRequest)
++      if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
+       {
+-              default:
+-                      break;
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for (i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->ahSrcSyncInfo[i],
++                                                         psKick->ahSrcSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
+       }
+-      if(CopyToUserWrapper(psPerProc,
+-                                   ui32BridgeID,
+-                                               psSGXGetMiscInfoIN->psMiscInfo,
+-                                               psMiscInfo,
+-                                               sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      if (psKick->hDstSyncInfo != IMG_NULL)
+       {
+-              return -EFAULT;
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->hDstSyncInfo,
++                                                         psKick->hDstSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
+       }
++      psRetOUT->eError =
++              SGXSubmit2DKM(hDevCookieInt, psKick);
++
++      return 0;
++}
++#endif
++
++#endif
++
++static int
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++                               PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      SGX_MISC_INFO *psMiscInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++                                                      PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++      psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                      &hDevCookieInt,
++                                                      psSGXGetMiscInfoIN->hDevCookie,
++                                                      PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE*)hDevCookieInt)->pvDevice;
++
++      psMiscInfo = psSGXGetMiscInfoIN->psMiscInfo;
++      psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, psMiscInfo);
++
+       return 0;
+ }
++#if defined(SUPPORT_SGX_HWPERF)
++static int
++SGXReadHWPerfCountersBW(IMG_UINT32                                                                    ui32BridgeID,
++                                              PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_COUNTERS       *psSGXReadHWPerfCountersIN,
++                                              PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_COUNTERS      *psSGXReadHWPerfCountersOUT,
++                                              PVRSRV_PER_PROCESS_DATA                                         *psPerProc)
++{
++      IMG_HANDLE                      hDevCookieInt;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_COUNTERS);
++
++      psSGXReadHWPerfCountersOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                      &hDevCookieInt,
++                                                      psSGXReadHWPerfCountersIN->hDevCookie,
++                                                      PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psSGXReadHWPerfCountersOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = ((PVRSRV_DEVICE_NODE*)hDevCookieInt)->pvDevice;
++
++      psSGXReadHWPerfCountersOUT->eError = SGXReadHWPerfCountersKM(psDevInfo,
++                                                      psSGXReadHWPerfCountersIN->ui32PerfReg,
++                                                      &psSGXReadHWPerfCountersOUT->ui32OldPerf,
++                                                      psSGXReadHWPerfCountersIN->bNewPerf,
++                                                      psSGXReadHWPerfCountersIN->ui32NewPerf,
++                                                      psSGXReadHWPerfCountersIN->ui32NewPerfReset,
++                                                      psSGXReadHWPerfCountersIN->ui32PerfCountersReg,
++                                                      &psSGXReadHWPerfCountersOUT->aui32Counters[0],
++                                                      &psSGXReadHWPerfCountersOUT->ui32KickTACounter,
++                                                      &psSGXReadHWPerfCountersOUT->ui32KickTARenderCounter,
++                                                      &psSGXReadHWPerfCountersOUT->ui32CPUTime,
++                                                      &psSGXReadHWPerfCountersOUT->ui32SGXTime);
++
++      return 0;
++}
++#endif 
++
+ static int
+ PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
+                                          IMG_VOID *psBridgeIn,
+@@ -1752,15 +1994,13 @@
+               return 0;
+       }
+-      PDUMPENDINITPHASE();
+-
+-      gbInitServerSuccessful = psInitSrvDisconnectIN->bInitSuccesful;
+-
+       psPerProc->bInitProcess = IMG_FALSE;
+       gbInitServerRunning = IMG_FALSE;
+       gbInitServerRan = IMG_TRUE;
+-      psRetOUT->eError = PVRSRV_OK;
++      psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++      gbInitSuccessful = (IMG_BOOL)(((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)));
+       return 0;
+ }
+@@ -1772,15 +2012,99 @@
+                                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
+                                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
++      IMG_HANDLE hOSEventKM;
++
+       PVR_UNREFERENCED_PARAMETER(psPerProc);
+       
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
+-      psRetOUT->eError = OSEventObjectWait(psEventObjectWaitIN->hOSEventKM, psEventObjectWaitIN->ui32MSTimeout);
++      psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hOSEventKM, 
++                                                 psEventObjectWaitIN->hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++      
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      
++      psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++      return 0;
++}
++
++static int
++PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
++                                                PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++      psEventObjectOpenOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &psEventObjectOpenIN->sEventObject.hOSEventKM, 
++                                                 psEventObjectOpenIN->sEventObject.hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++      if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      psEventObjectOpenOUT->eError = OSEventObjectOpen(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
++
++      if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      psEventObjectOpenOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psEventObjectOpenOUT->hOSEvent,
++                                                psEventObjectOpenOUT->hOSEvent,
++                                                PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);               
+       return 0;
+ }
++static int
++PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hOSEventKM;
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++      
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &psEventObjectCloseIN->sEventObject.hOSEventKM, 
++                                                 psEventObjectCloseIN->sEventObject.hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &hOSEventKM, 
++                                                 psEventObjectCloseIN->hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++      return 0;
++}
+ static int
+ SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
+@@ -1847,6 +2171,13 @@
+       bLookupFailed |= (eError != PVRSRV_OK);
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++#endif
+       for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+@@ -1907,6 +2238,13 @@
+       bReleaseFailed |= (eError != PVRSRV_OK);
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++#endif
+       for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+@@ -1950,6 +2288,10 @@
+       bDissociateFailed |= (eError != PVRSRV_OK);
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++#endif
+       for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+@@ -2005,7 +2347,6 @@
+                                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+       IMG_HANDLE hDevCookieInt;
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+       IMG_HANDLE hHWRenderContextInt;
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
+@@ -2020,10 +2361,8 @@
+               return 0;
+       }
+-      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
+-
+       hHWRenderContextInt =
+-              SGXRegisterHWRenderContextKM(psDevInfo,
++              SGXRegisterHWRenderContextKM(hDevCookieInt,
+                                                                        &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr);
+       if (hHWRenderContextInt == IMG_NULL)
+@@ -2043,54 +2382,180 @@
+ }
+ static int
+-SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
+-                                                PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
+-                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
+-                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+-      IMG_HANDLE hDevCookieInt;
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+-      
+-      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++      IMG_HANDLE hHWRenderContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
+       psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hHWRenderContextInt,
++                                                 psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      
++      return 0;
++}
++
++static int
++SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
++                                                       PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hHWTransferContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++      psSGXRegHWTransferContextOUT->eError =
+               PVRSRVLookupHandle(psPerProc->psHandleBase, 
+                                                  &hDevCookieInt,
+-                                                 psSGXFlushHWRenderTargetIN->hDevCookie,
++                                                 psSGXRegHWTransferContextIN->hDevCookie,
+                                                  PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      hHWTransferContextInt =
++              SGXRegisterHWTransferContextKM(hDevCookieInt,
++                                                                       &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr);
++
++      if (hHWTransferContextInt == IMG_NULL)
++      {
++              psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXRegHWTransferContextOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXRegHWTransferContextOUT->hHWTransferContext,
++                                                hHWTransferContextInt,
++                                                PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hHWTransferContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hHWTransferContextInt,
++                                                 psSGXUnregHWTransferContextIN->hHWTransferContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+       }
++      psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psSGXUnregHWTransferContextIN->hHWTransferContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++      
++      return 0;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static int
++SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
++                                                       PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_HANDLE hHW2DContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
++
++      psSGXRegHW2DContextOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXRegHW2DContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
+       psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
+-      SGXFlushHWRenderTargetKM(psDevInfo, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++      hHW2DContextInt =
++              SGXRegisterHW2DContextKM(hDevCookieInt,
++                                                                       &psSGXRegHW2DContextIN->sHW2DContextDevVAddr);
++
++      if (hHW2DContextInt == IMG_NULL)
++      {
++              psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXRegHW2DContextOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXRegHW2DContextOUT->hHW2DContext,
++                                                hHW2DContextInt,
++                                                PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+       return 0;
+ }
+ static int
+-SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
+-                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
+                                                          PVRSRV_BRIDGE_RETURN *psRetOUT,
+                                                          PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+-      IMG_HANDLE hHWRenderContextInt;
++      IMG_HANDLE hHW2DContextInt;
+-      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
+       psRetOUT->eError =
+               PVRSRVLookupHandle(psPerProc->psHandleBase,
+-                                                 &hHWRenderContextInt,
+-                                                 psSGXUnregHWRenderContextIN->hHWRenderContext,
+-                                                 PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++                                                 &hHW2DContextInt,
++                                                 psSGXUnregHW2DContextIN->hHW2DContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+       }
+-      psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++      psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt);
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+@@ -2098,11 +2563,37 @@
+       psRetOUT->eError =
+               PVRSRVReleaseHandle(psPerProc->psHandleBase,
+-                                                      psSGXUnregHWRenderContextIN->hHWRenderContext,
+-                                                      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++                                                      psSGXUnregHW2DContextIN->hHW2DContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
+       
+       return 0;
+ }
++#endif
++
++static int
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXFlushHWRenderTargetIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++      return 0;
++}
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+@@ -2679,16 +3170,63 @@
+                                       PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
+                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
++      PVRSRV_ERROR eError;
++      
+       PVR_UNREFERENCED_PARAMETER(psPerProc);
+-
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
+       
+       OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
+                         &psGetMiscInfoIN->sMiscInfo,
+                         sizeof(PVRSRV_MISC_INFO));
+-      psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoIN->sMiscInfo);
+-      psGetMiscInfoOUT->sMiscInfo = psGetMiscInfoIN->sMiscInfo;
++      if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT)
++      {
++                      
++              eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                            psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++                                          (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++          if(eError != PVRSRV_OK)
++          {
++                  PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Out of memory"));
++                  return -EFAULT;
++          }
++
++          psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++      
++              
++              eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++                                   psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
++                                   psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++                                   psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
++              
++          
++          OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                            psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++                            (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++      
++          
++          psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr; 
++
++          if(eError != PVRSRV_OK)
++          {
++              
++                  PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
++                  return -EFAULT;
++          }
++      }
++      else
++      {
++              psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++      }
++
++      if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++      {
++              psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                                                                      &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++                                                                                                      psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++                                                                                                      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++                                                                                                      PVRSRV_HANDLE_ALLOC_FLAG_SHARED);        
++      }
+       return 0;
+ }
+@@ -3526,6 +4064,7 @@
+               psKernelMemInfo->ui32Flags;
+       psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
+               psKernelMemInfo->ui32AllocSize; 
++      psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+       psAllocSharedSysMemOUT->eError =
+               PVRSRVAllocHandle(psPerProc->psHandleBase,
+                                                 &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
+@@ -3641,7 +4180,7 @@
+               psKernelMemInfo->ui32Flags;
+       psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
+               psKernelMemInfo->ui32AllocSize; 
+-      psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++      psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+       psMapMemInfoMemOUT->eError =
+               PVRSRVAllocSubHandle(psPerProc->psHandleBase,
+                                                 &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
+@@ -3972,6 +4511,8 @@
+               
+       SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, PVRSRVEventObjectOpenBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, PVRSRVEventObjectCloseBW);
+ #if defined(SUPPORT_SGX1)
+@@ -4009,7 +4550,18 @@
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
+-
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#if defined(TRANSFER_QUEUE)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
++#endif
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
++#endif
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
++#endif 
++#if defined(SUPPORT_SGX_HWPERF)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_COUNTERS, SGXReadHWPerfCountersBW);
+ #endif 
+@@ -4059,7 +4611,7 @@
+       {
+               if(gbInitServerRan)
+               {
+-                      if(!gbInitServerSuccessful)
++                      if(!gbInitSuccessful)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed.  Driver unusable.",
+                                                __FUNCTION__));
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c   2008-12-18 15:47:29.000000000 +0100
+@@ -24,7 +24,6 @@
+  *
+  ******************************************************************************/
+-#include <linux/module.h>
+ #include "services_headers.h"
+ #include "buffer_manager.h"
+ #include "kernelbuffer.h"
+@@ -1128,7 +1127,8 @@
+       
+       apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+-      if(psBuffer->psSwapChain->psLastFlipBuffer)
++      if(psBuffer->psSwapChain->psLastFlipBuffer &&
++              psBuffer != psBuffer->psSwapChain->psLastFlipBuffer)
+       {
+               apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+               ui32NumSrcSyncs++;
+@@ -1389,7 +1389,7 @@
+ }
+-IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State)
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State)
+ {
+       PVRSRV_DISPLAYCLASS_INFO        *psDCInfo;
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c     2008-12-18 15:47:29.000000000 +0100
+@@ -422,7 +422,8 @@
+       BM_HEAP                                 *psBMHeap;
+       IMG_HANDLE                              hDevMemContext;
+-      if (!hDevMemHeap)
++      if (!hDevMemHeap ||
++              (ui32Size == 0))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/handle.c git/drivers/gpu/pvr/services4/srvkm/common/handle.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/handle.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/handle.c        2008-12-18 15:47:29.000000000 +0100
+@@ -25,6 +25,10 @@
+  ******************************************************************************/
+ #ifdef        PVR_SECURE_HANDLES
++#ifdef        __linux__
++#include <linux/vmalloc.h>
++#endif
++
+ #include <stddef.h>
+ #include "services_headers.h"
+@@ -36,6 +40,8 @@
+ #define       HANDLE_BLOCK_SIZE       256
+ #endif
++#define       HANDLE_LARGE_BLOCK_SIZE 1024
++
+ #define       HANDLE_HASH_TAB_INIT_SIZE       32
+ #define       INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
+@@ -100,13 +106,13 @@
+ {
+       IMG_BOOL bIsEmpty;
+-      bIsEmpty = (psList->ui32Next == ui32Index);
++      bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index);
+ #ifdef        DEBUG
+       {
+               IMG_BOOL bIsEmpty2;
+-              bIsEmpty2 = (psList->ui32Prev == ui32Index);
++              bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index);
+               PVR_ASSERT(bIsEmpty == bIsEmpty2);
+       }
+ #endif
+@@ -114,6 +120,7 @@
+       return bIsEmpty;
+ }
++#ifdef DEBUG
+ #ifdef INLINE_IS_PRAGMA
+ #pragma inline(NoChildren)
+ #endif
+@@ -143,6 +150,7 @@
+       }
+       return IMG_FALSE;
+ }
++#endif 
+ #ifdef INLINE_IS_PRAGMA
+ #pragma inline(ParentHandle)
+@@ -328,6 +336,14 @@
+       if (psBase->psHandleArray != IMG_NULL)
+       {
++#ifdef        __linux__
++              if (psBase->bVmallocUsed)
++              {
++                      vfree(psBase->psHandleArray);
++                      psBase->psHandleArray = IMG_NULL;
++                      return PVRSRV_OK;
++              }
++#endif        
+               eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+                       psBase->ui32TotalHandCount * sizeof(struct sHandle),
+                       psBase->psHandleArray,
+@@ -363,6 +379,7 @@
+               PVR_ASSERT(hHandle != IMG_NULL);
+               PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++              PVR_UNREFERENCED_PARAMETER(hHandle);
+       }
+       
+@@ -495,22 +512,46 @@
+       return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+ }
++#define       NEW_HANDLE_ARRAY_SIZE(psBase, handleNumberIncrement)    \
++      (((psBase)->ui32TotalHandCount +  (handleNumberIncrement)) * \
++      sizeof(struct sHandle))
++
+ static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase)
+ {
+       struct sHandle *psNewHandleArray;
+       IMG_HANDLE hNewHandBlockAlloc;
+       PVRSRV_ERROR eError;
+       struct sHandle *psHandle;
++      IMG_UINT32 ui32HandleNumberIncrement =  HANDLE_BLOCK_SIZE;
++      IMG_UINT32 ui32NewHandleArraySize = NEW_HANDLE_ARRAY_SIZE(psBase, ui32HandleNumberIncrement);
++#ifdef        __linux__
++      IMG_BOOL bVmallocUsed = IMG_FALSE;
++#endif
+       
+       eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+-              (psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE) * sizeof(struct sHandle),
++              ui32NewHandleArraySize,
+               (IMG_PVOID *)&psNewHandleArray,
+               &hNewHandBlockAlloc);
+       if (eError != PVRSRV_OK)
+       {
++#ifdef        __linux__
++              PVR_TRACE(("IncreaseHandleArraySize:  OSAllocMem failed (%d), trying vmalloc", eError));
++              
++              ui32HandleNumberIncrement =  HANDLE_LARGE_BLOCK_SIZE;
++              ui32NewHandleArraySize = NEW_HANDLE_ARRAY_SIZE(psBase, ui32HandleNumberIncrement);
++
++              psNewHandleArray = vmalloc(ui32NewHandleArraySize);
++              if (psNewHandleArray == IMG_NULL)
++              {
++                      PVR_TRACE(("IncreaseHandleArraySize:  vmalloc failed"));
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++              bVmallocUsed = IMG_TRUE;
++#else 
+               PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Couldn't allocate new handle array (%d)", eError));
+               return eError;
++#endif        
+       }
+       
+@@ -521,7 +562,7 @@
+       
+       for(psHandle = psNewHandleArray + psBase->ui32TotalHandCount;
+-              psHandle < psNewHandleArray + psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE;
++              psHandle < psNewHandleArray + psBase->ui32TotalHandCount + ui32HandleNumberIncrement;
+               psHandle++)
+       {
+               psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
+@@ -538,15 +579,18 @@
+       
+       psBase->psHandleArray = psNewHandleArray;
+       psBase->hHandBlockAlloc = hNewHandBlockAlloc;
++#ifdef        __linux__
++      psBase->bVmallocUsed = bVmallocUsed;
++#endif
+       
+       PVR_ASSERT(psBase->ui32FreeHandCount == 0);
+-      psBase->ui32FreeHandCount = HANDLE_BLOCK_SIZE;
++      psBase->ui32FreeHandCount = ui32HandleNumberIncrement;
+       PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
+       psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
+-      psBase->ui32TotalHandCount += HANDLE_BLOCK_SIZE;
++      psBase->ui32TotalHandCount += ui32HandleNumberIncrement;
+       PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
+       psBase->ui32LastFreeIndexPlusOne = psBase->ui32TotalHandCount;
+@@ -564,7 +608,7 @@
+       
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+-      PVR_ASSERT(psBase->psHashTab != NULL);
++      PVR_ASSERT(psBase->psHashTab != IMG_NULL);
+       if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+       {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/power.c git/drivers/gpu/pvr/services4/srvkm/common/power.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/power.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/power.c 2008-12-18 15:47:29.000000000 +0100
+@@ -207,6 +207,21 @@
+ }
++PVRSRV_ERROR PVRSRVSetDevicePowerStateCoreKM(IMG_UINT32                       ui32DeviceIndex,
++                                             PVR_POWER_STATE  eNewPowerState)
++{
++      PVRSRV_ERROR    eError;
++      eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++      return eError;
++}
++
++
+ IMG_EXPORT
+ PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32                   ui32DeviceIndex,
+                                                                                PVR_POWER_STATE        eNewPowerState,
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c        2008-12-18 15:47:29.000000000 +0100
+@@ -28,6 +28,7 @@
+ #include "buffer_manager.h"
+ #include "handle.h"
+ #include "perproc.h"
++#include "pdump_km.h"
+ #include "ra.h"
+@@ -180,7 +181,7 @@
+ }
+-PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData)
+ {
+       PVRSRV_ERROR    eError;
+@@ -215,6 +216,20 @@
+       gpsSysData->eCurrentPowerState = PVRSRV_POWER_STATE_D0;
+       gpsSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       sizeof(PVRSRV_EVENTOBJECT) , 
++                                       (IMG_VOID **)&psSysData->psGlobalEventObject, 0) != PVRSRV_OK) 
++      {
++              
++              goto Error;
++      }
++
++      if(OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK)
++      {
++              goto Error;     
++      }       
++
+       return eError;
+       
+ Error:
+@@ -224,12 +239,21 @@
+-IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData)
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData)
+ {
+       PVRSRV_ERROR    eError;
+       
+       PVR_UNREFERENCED_PARAMETER(psSysData);
++      
++      if(psSysData->psGlobalEventObject)
++      {
++              OSEventObjectDestroy(psSysData->psGlobalEventObject);
++              OSFreeMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                               sizeof(PVRSRV_EVENTOBJECT) , 
++                                               psSysData->psGlobalEventObject, 0);
++      }
++
+       eError = PVRSRVHandleDeInit();
+       if (eError != PVRSRV_OK)
+       {
+@@ -246,10 +270,10 @@
+ }
+-PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,  
+-                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
+-                                                                IMG_UINT32 ui32SOCInterruptBit,
+-                                                                IMG_UINT32 *pui32DeviceIndex)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,  
++                                                                                        PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                                        IMG_UINT32 ui32SOCInterruptBit,
++                                                                                        IMG_UINT32 *pui32DeviceIndex)
+ {
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+@@ -342,6 +366,61 @@
+               }
+       }
++      
++
++
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                *psSysData;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed to get SysData"));
++              return(eError);
++      }
++
++      if (bInitSuccessful)
++      {
++              eError = SysFinalise();
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError));
++                      return eError;
++              }
++
++              
++              psDeviceNode = psSysData->psDeviceNodeList;
++              while (psDeviceNode)
++              {
++                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                                                                       PVRSRV_POWER_Unspecified,
++                                                                                                                       KERNEL_ID, IMG_FALSE);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++      }
++
++      PDUMPENDINITPHASE();
++
+       return PVRSRV_OK;
+ }
+@@ -408,7 +487,7 @@
+ }
+-PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
+ {
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       PVRSRV_DEVICE_NODE      **ppsDevNode;
+@@ -441,10 +520,6 @@
+       
+-
+-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+-      
+-
+       eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
+                                                                                PVRSRV_POWER_STATE_D3,
+                                                                                KERNEL_ID,
+@@ -454,7 +529,16 @@
+               PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
+               return eError;
+       }
+-#endif 
++
++      
++
++
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVResManConnect call"));
++              return eError;
++      }
+       
+@@ -481,11 +565,11 @@
+ IMG_EXPORT
+-PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
+-                                                                        IMG_UINT32 ui32Value,
+-                                                                        IMG_UINT32 ui32Mask,
+-                                                                        IMG_UINT32 ui32Waitus,
+-                                                                        IMG_UINT32 ui32Tries)
++PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++                                                                                IMG_UINT32 ui32Value,
++                                                                                IMG_UINT32 ui32Mask,
++                                                                                IMG_UINT32 ui32Waitus,
++                                                                                IMG_UINT32 ui32Tries)
+ {
+       IMG_BOOL        bStart = IMG_FALSE;
+       IMG_UINT32      uiStart = 0, uiCurrent=0, uiMaxTime;
+@@ -585,7 +669,8 @@
+       
+       if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
+                                                                               |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
+-                                                                              |PVRSRV_MISC_INFO_MEMSTATS_PRESENT))
++                                                                              |PVRSRV_MISC_INFO_MEMSTATS_PRESENT
++                                                                              |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT))
+       {
+               PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
+               return PVRSRV_ERROR_INVALID_PARAMS;                     
+@@ -719,13 +804,20 @@
+               i32Count = OSSNPrintf(pszStr, 100, "\n\0");
+               UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+       }
++
++      if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++      && psSysData->psGlobalEventObject)
++      {
++              psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++              psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
++      }
+       
+       return PVRSRV_OK;
+ }
+-PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32            *pui32Total, 
+-                                                              IMG_UINT32              *pui32Available)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFBStatsKM(IMG_UINT32               *pui32Total, 
++                                                                                       IMG_UINT32             *pui32Available)
+ {
+       IMG_UINT32 ui32Total = 0, i = 0;
+       IMG_UINT32 ui32Available = 0;
+@@ -746,7 +838,7 @@
+ }
+-IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
+ {
+       SYS_DATA                        *psSysData;
+       IMG_BOOL                        bStatus = IMG_FALSE;
+@@ -776,7 +868,7 @@
+ }
+-IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData)
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
+ {
+       SYS_DATA                        *psSysData = pvSysData;
+       IMG_BOOL                        bStatus = IMG_FALSE;
+@@ -826,7 +918,7 @@
+ }
+-IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData)
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData)
+ {
+       SYS_DATA                        *psSysData = pvSysData;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+@@ -853,10 +945,21 @@
+       {
+               PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
+       }
++      
++      
++      if (psSysData->psGlobalEventObject)
++      {
++              IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
++              if(hOSEventKM)
++              {
++                      OSEventObjectSignal(hOSEventKM);
++              }
++      }       
+ }
+-PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, 
++                                                                                                              IMG_UINT32 *puiBufSize, IMG_BOOL bSave)
+ {
+       IMG_UINT32         uiBytesSaved = 0;
+       IMG_PVOID          pvLocalMemCPUVAddr;
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/queue.c git/drivers/gpu/pvr/services4/srvkm/common/queue.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/queue.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/queue.c 2008-12-18 15:47:29.000000000 +0100
+@@ -760,14 +760,10 @@
+       
+       PVRSRVCommandCompleteCallbacks();
+       
+-#if defined(SYS_USING_INTERRUPTS)
+       if(bScheduleMISR)
+       {
+               OSScheduleMISR(psSysData);
+       }
+-#else
+-      PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
+-#endif 
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/resman.c git/drivers/gpu/pvr/services4/srvkm/common/resman.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/resman.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/resman.c        2008-12-18 15:47:29.000000000 +0100
+@@ -145,6 +141,10 @@
+               
+               case RESMAN_TYPE_HW_RENDER_CONTEXT:
+                       return "HW Render Context Resource";
++              case RESMAN_TYPE_HW_TRANSFER_CONTEXT:
++                      return "HW Transfer Context Resource";
++              case RESMAN_TYPE_HW_2D_CONTEXT:
++                      return "HW 2D Context Resource";
+               case RESMAN_TYPE_SHARED_PB_DESC:
+                       return "Shared Parameter Buffer Description Resource";
+               
+@@ -378,7 +378,12 @@
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
+               
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE);
++              
++              
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE);
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);                       
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c      2008-12-18 15:47:29.000000000 +0100
+@@ -1966,6 +1966,8 @@
+ }
++
++
+ #if PAGE_TEST
+ static void PageTest(void* pMem, IMG_DEV_PHYADDR sDevPAddr)
+ {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h      2008-12-18 15:47:29.000000000 +0100
+@@ -27,6 +27,8 @@
+ #ifndef _MMU_H_
+ #define _MMU_H_
++#include "sgxinfokm.h"
++
+ PVRSRV_ERROR
+ MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c       2008-12-18 15:47:29.000000000 +0100
+@@ -56,11 +56,26 @@
+       psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++      
++
++
++#if defined(FIXME)
+       for(psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
+               psStubPBDesc != IMG_NULL;
+               psStubPBDesc = psStubPBDesc->psNext)
+       {
+               if(psStubPBDesc->ui32TotalPBSize == ui32TotalPBSize)
++#else
++      psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++      if (psStubPBDesc != IMG_NULL)
++      {
++              if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,
++                                      "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++                                      ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++              }
++#endif
+               {
+                       IMG_UINT32 i;
+                       PRESMAN_ITEM psResItem;
+@@ -125,20 +140,6 @@
+       return eError;
+ }
+-IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO* psSGXDevInfo) 
+-{
+-      PVRSRV_STUB_PBDESC **ppsStubPBDesc;
+-      
+-      for(ppsStubPBDesc = (PVRSRV_STUB_PBDESC **)&psSGXDevInfo->psStubPBDescListKM;
+-              *ppsStubPBDesc != IMG_NULL;
+-              ppsStubPBDesc = &(*ppsStubPBDesc)->psNext)
+-      {
+-              PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
+-              IMG_UINT32* pui32Flags = (IMG_UINT32*)psStubPBDesc->psHWPBDescKernelMemInfo->pvLinAddrKM;
+-              *pui32Flags |= 1;
+-      }
+-}
+-
+ static PVRSRV_ERROR
+ SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
+@@ -266,7 +267,7 @@
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "SGXAddSharedPBDescKM: "
+-                                      "Failed to register exisitng shared "
++                                      "Failed to register existing shared "
+                                       "PBDesc with the resource manager"));
+                               goto NoAddKeepPB;
+                       }
+@@ -301,7 +302,7 @@
+       }
+-      psStubPBDesc->ppsSubKernelMemInfos=IMG_NULL;
++      psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
+       if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+                                 sizeof(PVRSRV_KERNEL_MEM_INFO *)
+@@ -395,8 +396,10 @@
+       }
+ NoAddKeepPB:
+-      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++      for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
++      {
+               PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i], IMG_FALSE);
++      }
+       PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
+       PVRSRVFreeDeviceMemKM(hDevCookie, psStubPBDesc->psHWPBDescKernelMemInfo, IMG_FALSE);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c        2008-12-18 15:47:29.000000000 +0100
+@@ -27,12 +27,15 @@
+ #include "sgxdefs.h"
+ #include "services_headers.h"
+ #include "sgxinfo.h"
++#include "sgxinfokm.h"
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+ #include "sgx2dcore.h"
+-#define SGX2D_FLUSH_BH                                                        (0xF0000000) 
++#define SGX2D_FLUSH_BH        0xF0000000 
++#define       SGX2D_FENCE_BH  0x70000000 
++
+ #define SGX2D_QUEUED_BLIT_PAD 4
+ #define SGX2D_COMMAND_QUEUE_SIZE 1024
+@@ -521,7 +524,7 @@
+       
+       if (hCmdCookie != IMG_NULL)
+       {
+-              PVRSRVCommandCompleteKM(hCmdCookie, IMG_FALSE);
++              PVRSRVCommandCompleteKM(hCmdCookie, IMG_TRUE);
+       }
+       PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DHandle2DComplete: Exit"));
+@@ -723,7 +726,7 @@
+                       SGX2DWriteSlavePortBatch(psDevInfo, pui32BltData, ui32DataByteSize);
+-                      SGX2DWriteSlavePort(psDevInfo, EURASIA2D_FENCE_BH);
++                      SGX2DWriteSlavePort(psDevInfo, SGX2D_FENCE_BH);
+               }
+       }
+@@ -817,6 +820,18 @@
+       
+       PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++#if defined(DEBUG)
++      {
++              PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++              PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData));
++
++              PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
++              PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
++
++      }
++#endif
++
+       return PVRSRV_ERROR_TIMEOUT;
+ }
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h    1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h    2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,158 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++                                               PVR3DIF4_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++                                                                IMG_DEV_VIRTADDR sDevVAddr,
++                                                                IMG_DEV_PHYADDR *pDevPAddr,
++                                                                IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE                hDevCookie,
++                                                                                      IMG_HANDLE              hDevMemContext,
++                                                                                      IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                            hDevCookie,
++                                                              PVR3DIF4_CLIENT_INFO*   psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        SGX_MISC_INFO                 *psMiscInfo);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_IMPORT
++PVRSRV_ERROR SGXReadHWPerfCountersKM(PVRSRV_SGXDEV_INFO       *psDevInfo,
++                                                                       IMG_UINT32                     ui32PerfReg,
++                                                                       IMG_UINT32                     *pui32OldPerf,
++                                                                       IMG_BOOL                       bNewPerf,
++                                                                       IMG_UINT32                     ui32NewPerf,
++                                                                       IMG_UINT32                     ui32NewPerfReset,
++                                                                       IMG_UINT32                     ui32PerfCountersReg,
++                                                                       IMG_UINT32                     *pui32Counters,
++                                                                       IMG_UINT32                     *pui32KickTACounter,
++                                                                       IMG_UINT32                     *pui32KickTARenderCounter,
++                                                                       IMG_UINT32                     *pui32CPUTime,
++                                                                       IMG_UINT32                     *pui32SGXTime);
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
++                                                        IMG_UINT32            ui32NumSrcSyncs,
++                                                        PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                        IMG_UINT32            ui32DataByteSize,
++                                                        IMG_UINT32            *pui32BltData);
++
++#if defined(SGX2D_DIRECT_BLITS)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                         IMG_UINT32                   ui32DataByteSize,
++                                                         IMG_UINT32                   *pui32BltData);
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO             *psDevInfo,
++                                                                         PVRSRV_KERNEL_SYNC_INFO      *psSyncInfo,
++                                                                         IMG_BOOL bWaitForComplete);
++#endif 
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++                                                                      SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                                         IMG_HANDLE hDevHandle,
++                                                         SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                        IMG_UINT32 ui32TotalPBSize,
++                                        IMG_HANDLE *phSharedPBDesc,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++                                       IMG_UINT32 ui32TotalPBSize,
++                                       IMG_HANDLE *phSharedPBDesc,
++                                       PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++                                              PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define       SGX2D_MAX_BLT_CMD_SIZ           256     
++#endif 
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h        2008-12-18 15:47:29.000000000 +0100
+@@ -45,14 +45,152 @@
+ #define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST     0x01    
+ #define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST     0x02    
+-#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE       0x04    
+-#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD                0x10    
+-#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT                0x20    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST     0x04    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_2DC_REQUEST    0x08    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE       0x10    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD                0x20    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT                0x40    
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++      PVRSRV_DEVICE_TYPE              eDeviceType;
++      PVRSRV_DEVICE_CLASS             eDeviceClass;
++
++      IMG_UINT8                               ui8VersionMajor;
++      IMG_UINT8                               ui8VersionMinor;
++      IMG_UINT32                              ui32CoreConfig;
++      IMG_UINT32                              ui32CoreFlags;
++
++      
++      IMG_PVOID                               pvRegsBaseKM;
++      
++
++      
++      IMG_HANDLE                              hRegMapping;
++
++      
++      IMG_SYS_PHYADDR                 sRegsPhysBase;
++      
++      IMG_UINT32                              ui32RegSize;
++
++      
++      IMG_UINT32                              ui32CoreClockSpeed;
++      IMG_UINT32                              ui32uKernelTimerClock;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      
++      SGX_SLAVE_PORT                  s2DSlavePortKM;
++
++      
++      PVRSRV_RESOURCE                 s2DSlaveportResource;
++
++      
++      IMG_UINT32                      ui322DFifoSize;
++      IMG_UINT32                      ui322DFifoOffset;
++      
++      IMG_HANDLE                      h2DCmdCookie;
++      
++      IMG_HANDLE                      h2DQueue;
++      IMG_BOOL                        b2DHWRecoveryInProgress;
++      IMG_BOOL                        b2DHWRecoveryEndPending;
++      IMG_UINT32                      ui322DCompletedBlits;
++      IMG_BOOL                        b2DLockupSuspected;
++#endif
++      
++    
++      IMG_VOID                        *psStubPBDescListKM;
++
++
++      
++      IMG_DEV_PHYADDR                 sKernelPDDevPAddr;
++
++      IMG_VOID                                *pvDeviceMemoryHeap;
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;                     
++      PVRSRV_SGX_KERNEL_CCB   *psKernelCCB;                   
++      PPVRSRV_SGX_CCB_INFO    psKernelCCBInfo;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;  
++      PVRSRV_SGX_CCB_CTL              *psKernelCCBCtl;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; 
++      IMG_UINT32                              *pui32KernelCCBEventKicker; 
++      IMG_UINT32                              ui32TAKickAddress;              
++      IMG_UINT32                              ui32TexLoadKickAddress; 
++      IMG_UINT32                              ui32VideoHandlerAddress;
++#if defined(SGX_SUPPORT_HWPROFILING)
++      PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++      IMG_UINT32                              ui32KickTACounter;
++      IMG_UINT32                              ui32KickTARenderCounter;
++#if defined(SUPPORT_SGX_HWPERF)
++      PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
++#endif
++      
++      IMG_UINT32                              ui32ClientRefCount;
++      
++      IMG_UINT32                              ui32CacheControl;
++      
++      IMG_VOID                                *pvMMUContextList;
++
++      
++      IMG_BOOL                                bForcePTOff;
++
++      IMG_UINT32                              ui32EDMTaskReg0;
++      IMG_UINT32                              ui32EDMTaskReg1;
++
++      IMG_UINT32                              ui32ClkGateCtl;
++      IMG_UINT32                              ui32ClkGateCtl2;
++      IMG_UINT32                              ui32ClkGateStatusMask;
++      SGX_INIT_SCRIPTS                sScripts;
++
++              
++      IMG_HANDLE                              hBIFResetPDOSMemHandle;
++      IMG_DEV_PHYADDR                 sBIFResetPDDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPTDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPageDevPAddr;
++      IMG_UINT32                              *pui32BIFResetPD;
++      IMG_UINT32                              *pui32BIFResetPT;
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++      
++      IMG_HANDLE                              hTimer;
++      
++      IMG_UINT32                              ui32TimeStamp;
++#endif
++
++      
++      IMG_UINT32                              ui32NumResets;
++
++      PVRSRV_KERNEL_MEM_INFO                  *psKernelSGXHostCtlMemInfo;
++      PVRSRV_SGX_HOST_CTL                             *psSGXHostCtl; 
++
++      IMG_UINT32                              ui32Flags;
++
++      
++      IMG_UINT32                              ui32RegFlags;
++
++      #if defined(PDUMP)
++      PVRSRV_SGX_PDUMP_CONTEXT        sPDContext;
++      #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      IMG_VOID                                *pvDummyPTPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyPTDevPAddr;
++      IMG_HANDLE                              hDummyPTPageOSMemHandle;
++      IMG_VOID                                *pvDummyDataPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyDataDevPAddr;
++      IMG_HANDLE                              hDummyDataPageOSMemHandle;
++#endif
++
++      IMG_UINT32                              asSGXDevData[SGX_MAX_DEV_DATA]; 
++
++} PVRSRV_SGXDEV_INFO;
++
+ typedef struct _SGX_TIMING_INFORMATION_
+ {
+@@ -122,10 +260,8 @@
+ PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
+-
+ IMG_VOID SGXOSTimer(IMG_VOID *pvData);
+-IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO  *psDevInfo);
+ #if defined(NO_HARDWARE)
+ static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO             *psDevInfo,
+                                                                                               IMG_UINT32 ui32StatusRegister,
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c  2008-12-18 15:47:29.000000000 +0100
+@@ -54,23 +54,16 @@
+ #endif
+ IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
+-IMG_VOID SGXScheduleProcessQueues(IMG_VOID *pvData);
+ IMG_UINT32 gui32EventStatusServicesByISR = 0;
+-static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
+-                                               IMG_UINT32                      ui32PDUMPFlags);
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                IMG_UINT32                     ui32PDUMPFlags);
+-PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+-                                                 IMG_BOOL                             bHardwareRecovery);
++static PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                                                IMG_BOOL                              bHardwareRecovery);
+ PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-#define SGX_BIF_DIR_LIST_INDEX_EDM    15
+-#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE15
+-#else
+-#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE0
+-#endif
+ static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
+ {
+@@ -116,6 +109,9 @@
+ #if defined(SGX_SUPPORT_HWPROFILING)
+       psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
++#endif
+       
+@@ -124,7 +120,7 @@
+                                               (IMG_VOID **)&psKernelCCBInfo, 0);
+       if (eError != PVRSRV_OK)        
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to alloc memory"));
++              PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
+               goto failed_allockernelccb;
+       }
+@@ -151,7 +147,9 @@
+       psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
+       psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
+-      psDevInfo->ui32ClockGateMask = psInitInfo->ui32ClockGateMask;   
++      psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl;
++      psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2;
++      psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
+       
+@@ -183,10 +181,20 @@
+               if (eNewPowerState == PVRSRV_POWER_STATE_D3)
+               {
+                       PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+-                      #if defined (SGX_FEATURE_AUTOCLOCKGATING) && (!defined(NO_HARDWARE) || defined(PDUMP))
+-                      IMG_UINT32 ui32ClockMask = psDevInfo->ui32ClockGateMask;
++
++            #if defined (SGX_FEATURE_AUTOCLOCKGATING) && (!defined(NO_HARDWARE) || defined(PDUMP))
++                      IMG_UINT32 ui32ClockMask = psDevInfo->ui32ClkGateStatusMask;
+                       #endif
++#if defined(SUPPORT_HW_RECOVERY)
++                      
++                      if (OSDisableTimer(psDevInfo->hTimer) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
++                              return  PVRSRV_ERROR_GENERIC;
++                      }
++#endif 
++
+                       
+                       psSGXHostCtl->ui32PowManFlags |= PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST;
+@@ -202,7 +210,7 @@
+                                                               MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                                               WAIT_TRY_COUNT) != PVRSRV_OK)
+                       {
+-                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip power off failed."));
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for chip power off failed."));
+                       }
+                       #endif
+@@ -229,7 +237,7 @@
+                                                               MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                                               WAIT_TRY_COUNT) != PVRSRV_OK)
+                       {
+-                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip idle failed."));
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for chip idle failed."));
+                       }
+                       #endif
+                       PDUMPREGPOL(EUR_CR_CLKGATESTATUS, 0, ui32ClockMask);
+@@ -278,6 +286,14 @@
+                               PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
+                               return eError;
+                       }
++#if defined(SUPPORT_HW_RECOVERY)
++                      eError = OSEnableTimer(psDevInfo->hTimer);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState : Failed to enable host timer"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++#endif
+               }
+               PVR_DPF((PVR_DBG_WARNING,
+@@ -288,8 +304,6 @@
+       return PVRSRV_OK;
+ }
+-#define       SCRIPT_DATA(pData, offset, type) (*((type *)(((char *)pData) + offset)))
+-#define       SCRIPT_DATA_UI32(pData, offset) SCRIPT_DATA(pData, offset, IMG_UINT32)
+ static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
+ {
+@@ -333,14 +347,18 @@
+       return PVRSRV_ERROR_GENERIC;;
+ }
+-PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+-                                                 IMG_BOOL                             bHardwareRecovery)
++static PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                                                IMG_BOOL                              bHardwareRecovery)
+ {
+       PVRSRV_ERROR            eError;
+       IMG_UINT32                      ui32ReadOffset, ui32WriteOffset;
+       
+-      ResetSGX(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, psDevInfo->ui32ClkGateCtl);
++      PDUMPREGWITHFLAGS(EUR_CR_CLKGATECTL, psDevInfo->ui32ClkGateCtl, PDUMP_FLAGS_CONTINUOUS);
++
++      
++      SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
+       
+       *psDevInfo->pui32KernelCCBEventKicker = 0;
+@@ -381,12 +399,14 @@
+                                                  0,
+                                                  PVRSRV_USSE_EDM_INTERRUPT_HWR,
+                                                  MAX_HW_TIME_US/WAIT_TRY_COUNT,
+-                                                 WAIT_TRY_COUNT) != PVRSRV_OK)
++                                                 1000) != PVRSRV_OK)
+               {
+-                      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGXEDM: Wait for uKernel HW Recovery failed"));
++                      PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel HW Recovery failed"));
++                      return PVRSRV_ERROR_RETRY;
+               }
+       }
++
+       
+@@ -426,259 +446,6 @@
+ }
+-static IMG_VOID ResetSGXSleep(PVRSRV_SGXDEV_INFO      *psDevInfo,
+-                                                        IMG_UINT32                    ui32PDUMPFlags,
+-                                                        IMG_BOOL                              bPDump)
+-{
+-#if !defined(PDUMP)
+-      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+-#endif 
+-
+-      
+-      OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
+-      if (bPDump)
+-      {
+-              PDUMPIDLWITHFLAGS(1000, ui32PDUMPFlags);
+-      }
+-}
+-
+-
+-static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
+-                                               IMG_UINT32                      ui32PDUMPFlags)
+-{
+-      IMG_UINT32 ui32RegVal;
+-
+-      const IMG_UINT32 ui32SoftResetRegVal =
+-                                      #ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
+-                                      EUR_CR_SOFT_RESET_TWOD_RESET_MASK       |
+-                                      #endif
+-                                      EUR_CR_SOFT_RESET_DPM_RESET_MASK        |
+-                                      EUR_CR_SOFT_RESET_TA_RESET_MASK         |
+-                                      EUR_CR_SOFT_RESET_USE_RESET_MASK        |
+-                                      EUR_CR_SOFT_RESET_ISP_RESET_MASK        |
+-                                      EUR_CR_SOFT_RESET_TSP_RESET_MASK;
+-
+-      const IMG_UINT32 ui32BifInvalDCVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
+-
+-      const IMG_UINT32 ui32BifFaultMask =
+-                                              EUR_CR_BIF_INT_STAT_FAULT_MASK;
+-
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      IMG_UINT32                      ui32BIFCtrl;
+-#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
+-      IMG_UINT32                      ui32BIFMemArb;
+-#endif 
+-#endif 
+-
+-#ifndef PDUMP
+-      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+-#endif 
+-
+-      psDevInfo->ui32NumResets++;
+-
+-      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
+-
+-#if defined(FIX_HW_BRN_23944)
+-      
+-      ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-      
+-      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+-      if (ui32RegVal & ui32BifFaultMask)
+-      {
+-              
+-              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-      }
+-#endif 
+-
+-      
+-      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-      
+-      
+-
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
+-
+-#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
+-      
+-
+-      ui32BIFMemArb   = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
+-                                        (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
+-                                        (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb, ui32PDUMPFlags);
+-#endif 
+-#endif 
+-
+-
+-      
+-
+-
+-
+-
+-      ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-      
+-      ui32RegVal = ui32SoftResetRegVal;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-
+-      
+-
+-      for (;;)
+-      {
+-              IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+-              IMG_DEV_VIRTADDR sBifFault;
+-              IMG_UINT32 ui32PDIndex, ui32PTIndex;
+-
+-              if ((ui32BifIntStat & ui32BifFaultMask) == 0)
+-              {
+-                      break;
+-              }
+-              
+-              
+-
+-
+-              
+-
+-
+-              sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
+-              PVR_DPF((PVR_DBG_WARNING, "ResetSGX: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
+-              ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+-              ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+-
+-              
+-              ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-
+-              
+-              psDevInfo->pui32BIFResetPD[ui32PDIndex] = psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
+-              psDevInfo->pui32BIFResetPT[ui32PTIndex] = psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
+-
+-              
+-              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
+-              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
+-
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-              
+-              ui32RegVal = ui32SoftResetRegVal;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-              
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-              ui32RegVal = 0;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-              
+-              psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
+-              psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
+-      }
+-
+-
+-      
+-
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      
+-      ui32BIFCtrl = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
+-#ifdef SGX_FEATURE_2D_HARDWARE
+-      
+-      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
+-#endif
+-#if defined(FIX_HW_BRN_23410)
+-      
+-      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
+-#endif
+-
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32BIFCtrl);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32BIFCtrl, ui32PDUMPFlags);
+-#endif 
+-
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr);
+-      PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
+-
+-#ifdef SGX_FEATURE_2D_HARDWARE
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
+-#endif
+-      
+-#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      
+-      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      ui32RegVal = ui32SoftResetRegVal;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-#endif 
+-
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32BifInvalDCVal, ui32PDUMPFlags);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-      
+-      PVR_DPF((PVR_DBG_WARNING,"Soft Reset of SGX"));
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-
+-      
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
+-}
+-
+ static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
+ {
+       PVRSRV_SGXDEV_INFO      *psDevInfo;     
+@@ -730,6 +497,7 @@
+       psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
+       
+       for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
+       {
+@@ -759,25 +527,6 @@
+               return PVRSRV_ERROR_GENERIC;
+       }
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      
+-      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
+-                                       sizeof(PVRSRV_EVENTOBJECT) , 
+-                                       (IMG_VOID **)&psDevInfo->psSGXEventObject, 0) != PVRSRV_OK)    
+-      {
+-              
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for event object"));
+-              return (PVRSRV_ERROR_OUT_OF_MEMORY);
+-      }
+-
+-      if(OSEventObjectCreate("PVRSRV_EVENTOBJECT_SGX", psDevInfo->psSGXEventObject) != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to create event object"));
+-              return (PVRSRV_ERROR_OUT_OF_MEMORY);
+-      
+-      }
+-#endif 
+-
+       return PVRSRV_OK;
+ }
+@@ -816,9 +565,10 @@
+       
+       
+       psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++      psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
+       
+       
+-      psInitInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++      psInitInfo->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
+ #if defined(SUPPORT_HW_RECOVERY)
+       psInitInfo->ui32HWRecoverySampleRate = psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
+ #endif 
+@@ -970,7 +720,6 @@
+ #endif
+-
+       
+       OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
+@@ -983,27 +732,16 @@
+       PDUMPCOMMENT("Kernel CCB Event Kicker");
+       PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+-
++#if defined(SUPPORT_HW_RECOVERY)
+       
+-      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+-                                                                               PVRSRV_POWER_Unspecified,
+-                                                                               KERNEL_ID, IMG_FALSE);
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed PVRSRVSetDevicePowerStateKM call"));
+-              return eError;
+-      }
+-#if defined(SUPPORT_HW_RECOVERY)
++
++      psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
++                                                                 1000 * 50 / psSGXDeviceMap->sTimingInfo.ui32uKernelFreq);
++      if(psDevInfo->hTimer == IMG_NULL)
+       {
+-              SGX_TIMING_INFORMATION* psSGXTimingInfo = & psSGXDeviceMap->sTimingInfo;
+-              
+-              psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode, 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
+-              if(psDevInfo->hTimer == IMG_NULL)
+-              {
+-                      PVR_DPF((PVR_DBG_ERROR,"OSAddTimer : Failed to register timer callback function"));
+-                      return PVRSRV_ERROR_GENERIC;
+-              }
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM : Failed to register timer callback function"));
++              return PVRSRV_ERROR_GENERIC;
+       }
+ #endif
+@@ -1030,38 +768,17 @@
+       }
+ #if defined(SUPPORT_HW_RECOVERY)
+-      
+-      if(psDevInfo->hTimer)
+-      {
+-              eError = OSRemoveTimer (psDevInfo->hTimer);
+-              if (eError != PVRSRV_OK)
+-              {
+-                      PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
+-                      return  eError;
+-              }
+-      }
+-#endif
+-
+-      MMU_BIFResetPDFree(psDevInfo);
+-
+-      
+-
+-
+-
+-
+-
+-
+-#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+-      
+-      eError = SGXDeinitialise((IMG_HANDLE)psDevInfo);
++      eError = OSRemoveTimer(psDevInfo->hTimer);
+       if (eError != PVRSRV_OK)
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: SGXDeinitialise failed"));
+-              return eError;
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++              return  eError;
+       }
++      psDevInfo->hTimer = IMG_NULL;
+ #endif 
++      MMU_BIFResetPDFree(psDevInfo);
+       
+@@ -1146,23 +863,14 @@
+ #endif 
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      
+-      if(psDevInfo->psSGXEventObject)
+-      {
+-              OSEventObjectDestroy(psDevInfo->psSGXEventObject);
+-              OSFreeMem( PVRSRV_OS_PAGEABLE_HEAP, 
+-                                               sizeof(PVRSRV_EVENTOBJECT) , 
+-                                               psDevInfo->psSGXEventObject, 0);
+-      }
+-#endif 
+       
+       
+       OSFreePages(PVRSRV_OS_PAGEABLE_HEAP|PVRSRV_HAP_MULTI_PROCESS,
+                               sizeof(PVRSRV_SGXDEV_INFO),
+                               psDevInfo,
+                               hDevInfoOSMemHandle);
+-
++      psDeviceNode->pvDevice = IMG_NULL;
++      
+       if (psDeviceMemoryHeap != IMG_NULL)
+       {
+       
+@@ -1178,47 +886,17 @@
+-IMG_VOID HWRecoveryResetSGX (PVRSRV_SGXDEV_INFO *psDevInfo,
+-                                                       IMG_UINT32             ui32Component,
+-                                                       IMG_UINT32                     ui32CallerID)
+-{
+-      PVRSRV_ERROR eError;
+-
+-      PVR_UNREFERENCED_PARAMETER(ui32Component);
+-      PVR_UNREFERENCED_PARAMETER(ui32CallerID);
+-      
+-      
+-      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
+-      
+-      
+-      PDUMPSUSPEND();
+-
+-      
+-      ResetPBs(psDevInfo);
+-
+-      
+-      eError = SGXInitialise(psDevInfo, IMG_TRUE);
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
+-      }
+-
+-      
+-      PDUMPRESUME();
+-}
+-
+-
+-IMG_VOID HWRecoveryResetSGXEDM (PVRSRV_DEVICE_NODE *psDeviceNode,
+-                                                                      IMG_UINT32                      ui32Component,
++#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
++static IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                                      IMG_UINT32                      ui32Component,
+                                                                       IMG_UINT32                      ui32CallerID)
+ {
+       PVRSRV_ERROR            eError;
+       PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+       PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      SGX2DHWRecoveryStart(psDevInfo);
+-#endif
++      PVR_UNREFERENCED_PARAMETER(ui32Component);
++
+       
+       eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+@@ -1227,15 +905,32 @@
+               
+-              PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGXEDM: Power transition in progress"));
++              PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
+               return;
+       }
+       psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
+       
+-      HWRecoveryResetSGX(psDevInfo, ui32Component, ui32CallerID);
++      
++      
++      PDUMPSUSPEND();
++      
++      do
++      {
++              eError = SGXInitialise(psDevInfo, IMG_TRUE);
++      }
++      while (eError == PVRSRV_ERROR_RETRY);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++      }
++
++      
++      PDUMPRESUME();
++      
+       PVRSRVPowerUnlock(ui32CallerID);
+       
+       
+@@ -1244,11 +939,9 @@
+       
+       
+       PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      SGX2DHWRecoveryEnd(psDevInfo);
+-#endif
+ }
++#endif 
++
+ #if defined(SUPPORT_HW_RECOVERY)
+ IMG_VOID SGXOSTimer(IMG_VOID *pvData)
+@@ -1261,10 +954,6 @@
+       IMG_UINT32              ui32CurrentEDMTasks;
+       IMG_BOOL                bLockup = IMG_FALSE;
+       IMG_BOOL                bPoweredDown;
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      IMG_UINT32              ui322DCompletedBlits = 0;
+-      IMG_BOOL                b2DCoreIsBusy;
+-#endif
+       
+       psDevInfo->ui32TimeStamp++;
+@@ -1305,42 +994,6 @@
+               }
+       }
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      if (!bPoweredDown)
+-      {
+-              ui322DCompletedBlits = psDevInfo->ui322DCompletedBlits;
+-              psDevInfo->ui322DCompletedBlits = SGX2DCompletedBlits(psDevInfo);
+-      }
+-
+-      if (!bLockup && !bPoweredDown)
+-      {
+-              b2DCoreIsBusy = SGX2DIsBusy(psDevInfo);
+-
+-              if (b2DCoreIsBusy && ui322DCompletedBlits == psDevInfo->ui322DCompletedBlits)
+-              {
+-                      if (psDevInfo->b2DLockupSuspected)
+-                      {
+-                              PVR_DPF((PVR_DBG_ERROR, "SGXTimer() detects 2D lockup (%d blits completed)", psDevInfo->ui322DCompletedBlits));
+-                              bLockup = IMG_TRUE;
+-                              psDevInfo->b2DLockupSuspected = IMG_FALSE;
+-                      }
+-                      else
+-                      {
+-                              
+-                              psDevInfo->b2DLockupSuspected = IMG_TRUE;
+-                      }
+-              }
+-              else
+-              {
+-                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
+-              }
+-      }
+-      else
+-      {
+-                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
+-      }
+-#endif 
+-
+       if (bLockup)
+       {
+               PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
+@@ -1349,7 +1002,7 @@
+               psSGXHostCtl->ui32HostDetectedLockups ++;
+               
+-              HWRecoveryResetSGXEDM(psDeviceNode, 0, KERNEL_ID);
++              HWRecoveryResetSGX(psDeviceNode, 0, KERNEL_ID);
+       }
+ }
+ #endif 
+@@ -1394,14 +1047,6 @@
+                       ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
+               }
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-              if (ui32EventStatus & EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK)
+-              {
+-                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK;
+-                      SGX2DHandle2DComplete(psDevInfo);
+-              }
+-#endif
+-
+               if (ui32EventClear)
+               {
+                       bInterruptProcessed = IMG_TRUE;
+@@ -1420,7 +1065,6 @@
+ IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
+ {
+-      PVRSRV_ERROR            eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE      *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+       PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+       PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
+@@ -1428,64 +1072,12 @@
+       if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) &&
+               !(psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR))
+       {
+-              HWRecoveryResetSGXEDM(psDeviceNode, 0, ISR_ID);
++              HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
+       }
+-      if ((eError == PVRSRV_OK) &&
+-              (psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) &&
+-              !(psSGXHostCtl->ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST))
+-      {
+-              
+-
+ #if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+-              {
+-
+-                      
+-                      PDUMPSUSPEND();
+-              
+-                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+-                                                                                               PVRSRV_POWER_STATE_D3,
+-                                                                                               ISR_ID, IMG_FALSE);
+-                      if (eError == PVRSRV_OK)
+-                      {
+-                              if ((*(volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags)
+-                                      & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
+-                              {
+-                                      
+-
+-
+-                                      psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+-                              }
+-                      }
+-                      else if (eError == PVRSRV_ERROR_RETRY)
+-                      {
+-                              
+-
+-                              eError = PVRSRV_OK;
+-                      }
+-                      
+-                      
+-                      PDUMPRESUME();
+-              }
++      SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
+ #endif 
+-      }
+-
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      if (psDevInfo->psSGXEventObject)
+-      {
+-              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
+-              if(psEventObject->hOSEventKM)
+-              {
+-                      OSEventObjectSignal(psEventObject->hOSEventKM);
+-              }
+-      }
+-
+-#endif
+-
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR, "SGX_MISRHandler error:%lu", eError));
+-      }
+ }
+ #endif 
+@@ -1494,7 +1086,6 @@
+ {
+       DEVICE_MEMORY_INFO *psDevMemoryInfo;
+       DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+-      IMG_BOOL bSharedPB = IMG_TRUE;
+       
+       psDeviceNode->sDevId.eDeviceType        = DEV_DEVICE_TYPE;
+@@ -1684,13 +1275,8 @@
+                                                                                                               | PVRSRV_HAP_MULTI_PROCESS;
+       psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszName = "CacheCoherent";
+       psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszBSName = "CacheCoherent BS";
+-#if defined(SGX535)
+       
+       psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+-#else
+-      
+-      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+-#endif
+       
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapID = HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
+@@ -1698,32 +1284,23 @@
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszName = "3DParameters";
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszBSName = "3DParameters BS";
+-
+-
+-      if(bSharedPB)
+-      {
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+-                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+-#if 0
+-                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++#if defined(SUPPORT_PERCONTEXT_PB)
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                      | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                      | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ #else
+-                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
+-#endif
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+-      }
+-      else
+-      {
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+-                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+-                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+-      }
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                      | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                      | PVRSRV_HAP_MULTI_PROCESS;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif                
+       
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_GENERAL_MAPPING_HEAP_ID);
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
+-      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS;
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszName = "GeneralMapping";
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszBSName = "GeneralMapping BS";
+@@ -1767,23 +1344,7 @@
+       
+       psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      psClientInfo->s2DSlavePort = psDevInfo->s2DSlavePortKM;
+-#endif
+-      psClientInfo->pvRegsBase = psDevInfo->pvRegsBaseKM;
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      if (psDevInfo->psSGXEventObject)
+-      {
+-              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
+-              psClientInfo->hOSEventKM = psEventObject->hOSEventKM;
+-      }
+-      else
+-      {
+-              psClientInfo->hOSEventKM = IMG_NULL;
+-      }
+-#endif
+-      
+       
+       OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
+@@ -1792,13 +1353,48 @@
+       return PVRSRV_OK;
+ }
++
+ IMG_EXPORT
+-PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_MISC_INFO *psMiscInfo)
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        SGX_MISC_INFO                 *psMiscInfo)
+ {
+-      PVR_UNREFERENCED_PARAMETER(psDevInfo);
+-
+       switch(psMiscInfo->eRequest)
+       {
++              case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++              {
++                      psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
++                      return PVRSRV_OK;
++              }
++#ifdef SUPPORT_SGX_HWPERF
++              case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++              {
++                      psDevInfo->psSGXHostCtl->ui32HWPerfFlags |= PVRSRV_SGX_HWPERF_ON;
++                      return PVRSRV_OK;
++              }
++              case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++              {
++                      psDevInfo->psSGXHostCtl->ui32HWPerfFlags &= ~PVRSRV_SGX_HWPERF_ON;
++                      return PVRSRV_OK;
++              }
++              case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++              {
++                      SGX_MISC_INFO_HWPERF_RETRIEVE_CB* psRetrieve = &psMiscInfo->uData.sRetrieveCB;
++                      PVRSRV_SGX_HWPERF_CB* psHWPerfCB = (PVRSRV_SGX_HWPERF_CB*)psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++                      IMG_UINT i = 0;
++
++                      for (; psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < psRetrieve->ui32ArraySize; i++)
++                      {
++                              PVRSRV_SGX_HWPERF_CBDATA* psData = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++                              OSMemCopy(&psRetrieve->psHWPerfData[i], psData, sizeof(PVRSRV_SGX_HWPERF_CBDATA));
++                              psRetrieve->psHWPerfData[i].ui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++                              psRetrieve->psHWPerfData[i].ui32TimeMax = psDevInfo->ui32uKernelTimerClock;
++                              psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (PVRSRV_SGX_HWPERF_CBSIZE - 1);
++                      }
++                      psRetrieve->ui32DataCount = i;
++                      psRetrieve->ui32Time = OSClockus();
++                      return PVRSRV_OK;
++              }
++#endif 
+               default:
+               {
+                       
+@@ -1807,3 +1403,55 @@
+       }
+ }
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_EXPORT
++PVRSRV_ERROR SGXReadHWPerfCountersKM(PVRSRV_SGXDEV_INFO       *psDevInfo,
++                                                                       IMG_UINT32                     ui32PerfReg,
++                                                                       IMG_UINT32                     *pui32OldPerf,
++                                                                       IMG_BOOL                       bNewPerf,
++                                                                       IMG_UINT32                     ui32NewPerf,
++                                                                       IMG_UINT32                     ui32NewPerfReset,
++                                                                       IMG_UINT32                     ui32PerfCountersReg,
++                                                                       IMG_UINT32                     *pui32Counters,
++                                                                       IMG_UINT32                     *pui32KickTACounter,
++                                                                       IMG_UINT32                     *pui32KickTARenderCounter,
++                                                                       IMG_UINT32                     *pui32CPUTime,
++                                                                       IMG_UINT32                     *pui32SGXTime)
++{
++      IMG_UINT32      i;
++
++      
++
++      {
++              *pui32OldPerf = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32PerfReg);
++
++              for (i = 0; i < 9; ++i)
++              {
++                      pui32Counters[i] = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32PerfCountersReg + (i * 4));
++              }
++
++              *pui32KickTACounter = psDevInfo->ui32KickTACounter;
++              *pui32KickTARenderCounter = psDevInfo->ui32KickTARenderCounter;
++
++              *pui32CPUTime = OSClockus();
++              *pui32SGXTime = psDevInfo->psSGXHostCtl->ui32TimeWraps;
++      }
++
++      
++
++      if (bNewPerf)
++      {
++              if(ui32NewPerfReset != 0)
++              {
++                      OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32PerfReg, ui32NewPerf | ui32NewPerfReset);
++              }
++
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32PerfReg, ui32NewPerf);
++      }
++
++      return PVRSRV_OK;
++}
++#endif 
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c  2008-12-18 15:47:29.000000000 +0100
+@@ -24,11 +24,13 @@
+  *
+  ******************************************************************************/
++#include <stddef.h> 
+ #include "services_headers.h"
+ #include "sgxinfo.h"
+ #include "sgxinfokm.h"
+ #if defined (PDUMP)
+ #include "sgxapi_km.h"
++#include "pdump_km.h"
+ #endif
+ #include "sgx_bridge_km.h"
+ #include "osfunc.h"
+@@ -36,92 +38,241 @@
+ #include "sgxutils.h"
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++      ((psCCBKick)->offset + sizeof(type) < (psCCBMemInfo)->ui32AllocSize)
++
+ #define       CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
+       ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
+               (psCCBKick)->offset))
+-#define       CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, offset) \
+-              ((psCCBKick)->offset < (psCCBMemInfo)->ui32AllocSize)
+-
+ IMG_EXPORT
+ PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, PVR3DIF4_CCB_KICK *psCCBKick)
+ {
+       PVRSRV_ERROR eError;
+       PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+       PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
+-      IMG_UINT32 *pui32DstReadOpsPendingVal;
+-      IMG_UINT32 *pui32DstWriteOpsPendingVal;
++      PVR3DIF4_CMDTA_SHARED *psTACmd;
+       IMG_UINT32 i;
++#if defined(SUPPORT_SGX_HWPERF)
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++#endif
+-#if defined(NO_HARDWARE)
+-      pui32DstReadOpsPendingVal = IMG_NULL;
+-      pui32DstWriteOpsPendingVal = IMG_NULL;
++#if defined(SUPPORT_SGX_HWPERF)
++      if (psCCBKick->bKickRender)
++      {
++              ++psDevInfo->ui32KickTARenderCounter;
++      }
++      ++psDevInfo->ui32KickTACounter;
+ #endif
+-      if (psCCBKick->hDstKernelSyncInfo != IMG_NULL)
++      if (!CCB_OFFSET_IS_VALID(PVR3DIF4_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
+       {
+-              
+-              if (!CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset) || !CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset))
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      psTACmd = CCB_DATA_FROM_OFFSET(PVR3DIF4_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
++
++      
++      if (psCCBKick->hTA3DSyncInfo)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++              psTACmd->sTA3DDependancy.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++              psTACmd->sTA3DDependancy.ui32WriteOpPendingVal   = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++              if (psCCBKick->bTADependency)
+               {
+-                      PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: ui32DstReadOpsPendingOffset or ui32DstWriteOpsPendingOffset out of range"));
++                      psSyncInfo->psSyncData->ui32WriteOpsPending++;
+               }
+-              else
+-              {
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
+-                              pui32DstReadOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset);
+-                              pui32DstWriteOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset);
++      }
+-                              *pui32DstReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+-                              *pui32DstWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+-              }
++      if (psCCBKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++              psTACmd->sTQSyncReadOpsCompleteDevVAddr  = psSyncInfo->sReadOpsCompleteDevVAddr;
++              psTACmd->sTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++              psTACmd->ui32TQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              psTACmd->ui32TQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+       }
++      if (psCCBKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++              psTACmd->s3DTQSyncReadOpsCompleteDevVAddr  = psSyncInfo->sReadOpsCompleteDevVAddr;
++              psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++              psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              psTACmd->ui323DTQSyncWriteOpsPendingVal  = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++
++      psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
+       if (psCCBKick->ui32NumTAStatusVals != 0)
+       {
+               
+               for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+               {
+-                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
+-                      {
+-                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+-                              *pui32TAStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+-                      }
+-                      else
+-                      {
+-                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui32TAStatusValueOffset[%d] out of range", i));
+-                      }
++                      psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++                      psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+               }
+       }
++      psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
+       if (psCCBKick->ui32Num3DStatusVals != 0)
+       {
+               
+               for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+               {
+-                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
+-                      {
+-                              IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+-                              *pui323DStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+-                      }
+-                      else
++                      psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++                      psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++              }
++      }
++
++      
++      psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++      for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++              psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++              
++              psTACmd->asSrcSyncs[i].ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              
++              psTACmd->asSrcSyncs[i].ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;  
++
++      }
++
++      if (psCCBKick->bFirstKickOrResume && psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hRenderSurfSyncInfo;
++              psTACmd->sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTACmd->sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++              psTACmd->ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++              psTACmd->ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++
++#if defined(PDUMP)
++              if (PDumpIsCaptureFrameKM())
++              {
++                      if (psSyncInfo->psSyncData->ui32LastOpDumpVal == 0)
+                       {
+-                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui323DStatusValueOffset[%d] out of range", i));
++                      
++                      PDUMPCOMMENT("Init render surface last op\r\n");
++
++                      PDUMPMEM(IMG_NULL,
++                              psSyncInfo->psSyncDataMemInfoKM,
++                              0,
++                              sizeof(PVRSRV_SYNC_DATA),
++                              0,
++                              MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++                      PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++                              psSyncInfo->psSyncDataMemInfoKM,
++                              offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++                              sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++                              0,
++                              MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+                       }
++
++                      psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+               }
++#endif        
+       }
++#if defined(PDUMP)
++      if (PDumpIsCaptureFrameKM())
++      {
++              PDUMPCOMMENT("Shared part of TA command\r\n");
++
++              PDUMPMEM(IMG_NULL, psCCBMemInfo, psCCBKick->ui32CCBOffset, sizeof(PVR3DIF4_CMDTA_SHARED), 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++              if (psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++              {
++                      IMG_UINT32 ui32HackValue;
++
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hRenderSurfSyncInfo;
++                      ui32HackValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++                      PDUMPCOMMENT("Hack render surface last op in TA cmd\r\n");
++
++                      PDUMPMEM(&ui32HackValue,
++                              psCCBMemInfo,
++                              psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, ui32WriteOpsPendingVal),
++                              sizeof(IMG_UINT32),
++                              0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++
++                              ui32HackValue = 0;
++                              PDUMPCOMMENT("Hack render surface read op in TA cmd\r\n");
++
++                      PDUMPMEM(&ui32HackValue,
++                               psCCBMemInfo,
++                               psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, sReadOpsCompleteDevVAddr),
++                               sizeof(IMG_UINT32),
++                               0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++              }
++
++              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++
++                      PDUMPCOMMENT("Hack TA status value in TA cmd\r\n");
++
++                      PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++                               psCCBMemInfo,
++                               psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++                               sizeof(IMG_UINT32),
++                               0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++              }
++
++              for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                      PDUMPCOMMENT("Hack 3D status value in TA cmd\r\n");
++
++                      PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++                               psCCBMemInfo,
++                               psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++                               sizeof(IMG_UINT32),
++                               0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++              }
++      }
++#endif        
++
+       eError = SGXScheduleCCBCommandKM(hDevHandle, psCCBKick->eCommand, &psCCBKick->sCommand, KERNEL_ID);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+-              
+-              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
+-              psSyncInfo->psSyncData->ui32WriteOpsPending--;
++              if (psCCBKick->bFirstKickOrResume && psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++              {
++                      
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hRenderSurfSyncInfo;
++                      psSyncInfo->psSyncData->ui32WriteOpsPending--;
++              }
++
++              for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++                      psSyncInfo->psSyncData->ui32ReadOpsPending--;
++              }
++
+               return eError;
+       }
+       else if (PVRSRV_OK != eError)
+@@ -132,70 +283,66 @@
+ #if defined(NO_HARDWARE)
+-      if (psCCBKick->ui32NumTAStatusVals != 0)
+-      {
+-              
+-              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+-              {
+-                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
+-                      {
+-                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+-
+-                              psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui32TAStatusValue;
+-                      }
+-              }
+-      }
+       
+-      if (psCCBKick->bTerminate)
++      if (psCCBKick->hTA3DSyncInfo)
+       {
+-              if (psCCBKick->hUpdateDstKernelSyncInfo != IMG_NULL)
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++
++              if (psCCBKick->bTADependency)
+               {
+-                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hUpdateDstKernelSyncInfo;
+-                      psSyncInfo->psSyncData->ui32WriteOpsComplete = ((pui32DstWriteOpsPendingVal != IMG_NULL) ? *pui32DstWriteOpsPendingVal : psCCBKick->ui32WriteOpsPendingVal) + 1;
++                      psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+               }
++      }
+-              if (psCCBKick->ui32Num3DStatusVals != 0)
+-              {
+-                      
+-                      for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+-                      {
+-                              if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
+-                              {
+-                                      IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
+-                                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++      if (psCCBKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+-                                      psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui323DStatusValue;
+-                              }
+-                      }
+-              }
++              psSyncInfo->psSyncData->ui32ReadOpsComplete =  psSyncInfo->psSyncData->ui32ReadOpsPending;
+       }
+-#endif
+-      return eError;
+-}
++      if (psCCBKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++              psSyncInfo->psSyncData->ui32ReadOpsComplete =  psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
+-IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode)
+-{
+-      PVRSRV_ERROR                    eError;
+-      PVRSRV_SGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+-      PVRSRV_SGX_HOST_CTL             *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
+-      IMG_UINT32                              ui32PowManFlags;
+-      PVRSRV_SGX_COMMAND              sCommand = {0};
++      
++      for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+-      ui32PowManFlags = psHostCtl->ui32PowManFlags;
+-      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++              psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++      }
++      
++      
++      for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+       {
+-              
+-              return;
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++              psSyncInfo->psSyncData->ui32ReadOpsComplete =  psSyncInfo->psSyncData->ui32ReadOpsPending;
++
+       }
+-      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
+-      eError = SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, ISR_ID);
+-      if (eError != PVRSRV_OK)
++      if (psCCBKick->bTerminateOrAbort)
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueues failed to schedule CCB command: %lu", eError));
++              if (psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hRenderSurfSyncInfo;
++                      psSyncInfo->psSyncData->ui32WriteOpsComplete = psCCBKick->bFirstKickOrResume ? psSyncInfo->psSyncData->ui32WriteOpsPending : (psCCBKick->ui32WriteOpsPendingVal + 1);
++              }
++
++              
++              for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                      psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++              }
+       }
++#endif
++
++      return eError;
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c 1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c 2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,330 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM    15
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE15
++#else
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE0
++#endif
++
++
++static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                                                IMG_BOOL                              bResetBIF,
++                                                                IMG_UINT32                    ui32PDUMPFlags,
++                                                                IMG_BOOL                              bPDump)
++{
++      IMG_UINT32 ui32SoftResetRegVal =
++                                      #ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++                                      EUR_CR_SOFT_RESET_TWOD_RESET_MASK       |
++                                      #endif
++                                      EUR_CR_SOFT_RESET_DPM_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TA_RESET_MASK         |
++                                      EUR_CR_SOFT_RESET_USE_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_ISP_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#if !defined(PDUMP)
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      if (bResetBIF)
++      {
++              ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++      }
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
++      if (bPDump)
++      {
++              PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++      }
++}
++
++
++static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        IMG_UINT32                    ui32PDUMPFlags,
++                                                        IMG_BOOL                              bPDump)
++{
++#if !defined(PDUMP)
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      
++      OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++      if (bPDump)
++      {
++              PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++              PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++      }
++      
++      
++
++}
++
++
++static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO    *psDevInfo,
++                                                          IMG_UINT32                  ui32PDUMPFlags,
++                                                              IMG_BOOL                        bPDump)
++{
++      IMG_UINT32 ui32RegVal;
++
++      
++      ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      if (bPDump)
++      {
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++      }
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      if (bPDump)
++      {
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++      }
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      {
++              
++
++
++              if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
++                                                      0,
++                                                      EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++                                                      MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                      WAIT_TRY_COUNT) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
++              }
++              
++              if (bPDump)
++              {
++                      PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags);
++              }
++      }
++#endif        
++}
++
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                IMG_UINT32                     ui32PDUMPFlags)
++{
++      IMG_UINT32 ui32RegVal;
++
++      const IMG_UINT32 ui32BifFaultMask =
++                                              EUR_CR_BIF_INT_STAT_FAULT_MASK;
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      IMG_UINT32                      ui32BIFCtrl;
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      IMG_UINT32                      ui32BIFMemArb;
++#endif 
++#endif 
++
++#ifndef PDUMP
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      psDevInfo->ui32NumResets++;
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++      
++      ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++      if (ui32RegVal & ui32BifFaultMask)
++      {
++              
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      }
++#endif 
++
++      
++      SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
++
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      
++
++      ui32BIFMemArb   = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++                                        (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++                                        (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb, ui32PDUMPFlags);
++#endif 
++#endif 
++
++
++      
++
++
++
++
++      ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      
++      SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      
++
++      for (;;)
++      {
++              IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++              IMG_DEV_VIRTADDR sBifFault;
++              IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++              if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++              {
++                      break;
++              }
++              
++              
++
++
++              sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++              PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++              ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++              ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++              
++              SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++
++              
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++      }
++
++
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      ui32BIFCtrl = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++#endif
++#if defined(FIX_HW_BRN_23410)
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++#endif
++
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32BIFCtrl);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32BIFCtrl, ui32PDUMPFlags);
++#endif 
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr);
++      PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++      
++      
++      SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      PVR_DPF((PVR_DBG_WARNING,"Soft Reset of SGX"));
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c      2008-12-18 15:47:29.000000000 +0100
+@@ -43,16 +43,314 @@
+ #include "pvr_debug.h"
+ #include "sgxutils.h"
+-IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
+-                                                                                      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr)
+-                                          
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psKick, offset) \
++      ((psKick)->offset + sizeof(type) < (psCCBMemInfo)->ui32AllocSize)
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psKick, offset) \
++      ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
++      (psKick)->offset))
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
+ {
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
+       PVRSRV_SGX_COMMAND sCommand = {0};
++      PVR3DIF4_TRANSFERCMD_SHARED *psTransferCmd;
++      PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError;
++
++      if (!CCB_OFFSET_IS_VALID(PVR3DIF4_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      psTransferCmd =  CCB_DATA_FROM_OFFSET(PVR3DIF4_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++      if (psTransferCmd->ui32NumStatusVals > SGXTQ_MAX_STATUS)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      if (psKick->ui32StatusFirstSync +
++              (psKick->ui32NumSrcSync ? (psKick->ui32NumSrcSync - 1) : 0) +
++              (psKick->ui32NumDstSync ? (psKick->ui32NumDstSync - 1) : 0) >
++                      psTransferCmd->ui32NumStatusVals)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              psTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              psTransferCmd->ui32TASyncReadOpsPendingVal  = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++      else
++      {
++              psTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++              psTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              psTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              psTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++      else
++      {
++              psTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++              psTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++      }
+-    sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
+-    sCommand.ui32Data[1] = sHWRenderContextDevVAddr.uiAddr;
+       
+-      return SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);  
++      psTransferCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++      psTransferCmd->ui32NumDstSync = psKick->ui32NumDstSync;
++
++      
++      if(psKick->ui32NumSrcSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++
++              psTransferCmd->ui32SrcWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++              psTransferCmd->ui32SrcReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->sSrcWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; 
++              psTransferCmd->sSrcReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++      if(psKick->ui32NumDstSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++
++              psTransferCmd->ui32DstWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++              psTransferCmd->ui32DstReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->sDstWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTransferCmd->sDstReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      
++      if (psKick->ui32NumSrcSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++              psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++      }
++      if (psKick->ui32NumDstSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++              psSyncInfo->psSyncData->ui32WriteOpsPending++;
++      }
++
++      
++      if (psKick->ui32NumSrcSync > 1)
++      {
++              for(i = 1; i < psKick->ui32NumSrcSync; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++                      psKick->ui32StatusFirstSync++;
++              }
++      }
++
++      if (psKick->ui32NumDstSync > 1)
++      {
++              for(i = 1; i < psKick->ui32NumDstSync; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].ui32StatusValue = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].sStatusDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++                      psKick->ui32StatusFirstSync++;
++              }
++      }
++
++#if defined(PDUMP)
++      PDUMPCOMMENT("Shared part of transfer command\r\n");
++      PDUMPMEM(IMG_NULL,
++                      psCCBMemInfo,
++                      psKick->ui32SharedCmdCCBOffset,
++                      sizeof(PVR3DIF4_TRANSFERCMD_SHARED),
++                      0,
++                      MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
++      sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++      
++      eError = SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);        
++
++#if defined(NO_HARDWARE)
++      
++      for(i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++              psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
++
++      for(i = 0; i < psKick->ui32NumDstSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++      }
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++#endif
++
++      return eError;
+ }
+-#endif 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
++                                          
++{
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++      PVRSRV_SGX_COMMAND sCommand = {0};
++      PVR3DIF4_2DCMD_SHARED *ps2DCmd;
++      PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++      IMG_BOOL bDstSyncDone = IMG_FALSE;
++      PVRSRV_ERROR eError;
++      IMG_UINT32 i;
++
++      if (!CCB_OFFSET_IS_VALID(PVR3DIF4_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      ps2DCmd =  CCB_DATA_FROM_OFFSET(PVR3DIF4_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++      OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
++
++      
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              ps2DCmd->sTASyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              ps2DCmd->sTASyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr  = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr   = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              ps2DCmd->s3DSyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              ps2DCmd->s3DSyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++      for (i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++              if (psSyncInfo == (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo)
++              {
++                      ps2DCmd->sSrcSyncData[i].ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++                      ps2DCmd->sSrcSyncData[i].ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++                      ps2DCmd->sDstSyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++                      ps2DCmd->sDstSyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++                      bDstSyncDone = IMG_TRUE;
++              }
++              else
++              {
++                      ps2DCmd->sSrcSyncData[i].ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++                      ps2DCmd->sSrcSyncData[i].ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              }
++
++              ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      if (psKick->hDstSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++
++              if (!bDstSyncDone)
++              {
++                      ps2DCmd->sDstSyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++                      ps2DCmd->sDstSyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++              }
++
++              ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++#if defined(PDUMP)
++      
++      PDUMPCOMMENT("Shared part of 2D command\r\n");
++      PDUMPMEM(IMG_NULL,
++                      psCCBMemInfo,
++                      psKick->ui32SharedCmdCCBOffset,
++                      sizeof(PVR3DIF4_2DCMD_SHARED),
++                      0,
++                      MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_2DCMD;
++      sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
++      
++      eError = SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);        
++
++#if defined(NO_HARDWARE)
++      
++      for(i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++              psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
++
++      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++      psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++#endif
++
++      return eError;
++}
++#endif        
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c 2008-12-18 15:47:29.000000000 +0100
+@@ -46,6 +46,79 @@
+ #include <stdio.h>
+ #endif
++#if defined(SYS_CUSTOM_POWERDOWN)
++PVRSRV_ERROR SysPowerDownMISR(IMG_UINT32 ui32DeviceIndex, IMG_UINT32 ui32CallerID);
++#endif
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE  *psDeviceNode,
++                                                                IMG_UINT32                    ui32CallerID)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      PVRSRV_SGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++      if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) &&
++              !(psSGXHostCtl->ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST))
++      {
++              
++
++              {
++                      
++                      PDUMPSUSPEND();
++              
++#if defined(SYS_CUSTOM_POWERDOWN)
++                      
++
++
++                      eError = SysPowerDownMISR(psDeviceNode->sDevId.ui32DeviceIndex, ui32CallerID);
++#else                 
++                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                                               PVRSRV_POWER_STATE_D3,
++                                                                                               ui32CallerID, IMG_FALSE);
++                      if (eError == PVRSRV_OK)
++                      {
++                              
++                              psSGXHostCtl->ui32NumActivePowerEvents++;
++                              
++                              if ((*(volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags)
++                                      & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++                              {
++                                      
++
++
++                                      if (ui32CallerID == ISR_ID)
++                                      {
++                                              psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++                                      }
++                                      else
++                                      {
++                                              SGXScheduleProcessQueues(psDeviceNode);
++                                      }
++                              }
++                      }
++#endif
++                      if (eError == PVRSRV_ERROR_RETRY)
++                      {
++                              
++
++                              eError = PVRSRV_OK;
++                      }
++                      
++                      
++                      PDUMPRESUME();
++              }
++      }
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu", eError));
++      }
++}
++#endif 
++
++
+ #ifdef INLINE_IS_PRAGMA
+ #pragma inline(SGXAcquireKernelCCBSlot)
+ #endif
+@@ -255,147 +328,43 @@
+ Exit:
+       PVRSRVPowerUnlock(ui32CallerID);
+-      return eError;
+-}
+-
+-
+-#if 0 
+-PVRSRV_ERROR CreateCCB(PVRSRV_SGXDEV_INFO     *psSGXDevInfo,
+-                                         IMG_UINT32                   ui32CCBSize,
+-                                         IMG_UINT32                   ui32AllocGran,
+-                                         IMG_UINT32                   ui32OverrunSize,
+-                                         IMG_HANDLE                   hDevMemHeap,
+-                                         PVRSRV_SGX_CCB               **ppsCCB)
+-{
+-      PVRSRV_SGX_CCB  *psCCB;
+-
+-      PVR_UNREFERENCED_PARAMETER(psSGXDevInfo);
+-
+-      psCCB = IMG_NULL;
+-
+-      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+-                                 sizeof(PVRSRV_SGX_CCB),
+-                                 (IMG_VOID **)&psCCB,
+-                                 IMG_NULL) != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: psCCB alloc failed"));
+-
+-              return PVRSRV_ERROR_OUT_OF_MEMORY;
+-      }
+-
+-      
+-      psCCB->psCCBMemInfo = IMG_NULL;
+-      psCCB->psCCBCtlMemInfo = IMG_NULL;
+-      psCCB->pui32CCBLinAddr = IMG_NULL;
+-      psCCB->pui32WriteOffset = IMG_NULL;
+-      psCCB->pui32ReadOffset = IMG_NULL;
+-
+-      #ifdef PDUMP
+-      psCCB->ui32CCBDumpWOff = 0;
+-      #endif
+-
+-      
+-      if ( ui32CCBSize < 0x1000 )
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (ui32CallerID != ISR_ID)
+       {
+-              IMG_UINT32      i, ui32PowOfTwo;
++              
+-              ui32PowOfTwo = 0x1000;
+-              for (i = 12; i > 0; i--)
+-              {
+-                      if (ui32CCBSize & ui32PowOfTwo)
+-                      {
+-                              break;
+-                      }
+-      
+-                      ui32PowOfTwo >>= 1;
+-              }
+-      
+-              if (ui32CCBSize & (ui32PowOfTwo - 1))
+-              {
+-                      ui32PowOfTwo <<= 1;
+-              }
+-
+-              ui32AllocGran = ui32PowOfTwo;
+-      }
+-      else
+-      {
+-              ui32AllocGran = 0x1000;
++              SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
+       }
++#endif 
+-      
+-      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
+-                                                         hDevMemHeap,
+-                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
+-                                                         ui32CCBSize + ui32OverrunSize,
+-                                                         ui32AllocGran,
+-                                                         &psCCB->psCCBMemInfo) != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBMemInfo alloc failed"));
+-
+-              goto ErrorExit;
+-      }
++      return eError;
++}
+-      psCCB->pui32CCBLinAddr = psCCB->psCCBMemInfo->pvLinAddrKM;
+-      psCCB->sCCBDevAddr = psCCB->psCCBMemInfo->sDevVAddr;
+-      psCCB->ui32Size = ui32CCBSize;
+-      psCCB->ui32AllocGran = ui32AllocGran;
++IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_SGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL             *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++      IMG_UINT32                              ui32PowManFlags;
++      PVRSRV_SGX_COMMAND              sCommand = {0};
+-      
+-      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
+-                                                         hDevMemHeap,
+-                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
+-                                                         sizeof(PVRSRV_SGX_CCB_CTL),
+-                                                         32,
+-                                                         &psCCB->psCCBCtlMemInfo) != PVRSRV_OK)
++      ui32PowManFlags = psHostCtl->ui32PowManFlags;
++      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBCtlMemInfo alloc failed"));
+-
+-              goto ErrorExit;
++              
++              return;
+       }
+-      
+-      psCCB->pui32WriteOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32WriteOffset;
+-      psCCB->pui32ReadOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32ReadOffset;
+-
+-      
+-      *psCCB->pui32WriteOffset = 0;
+-      *psCCB->pui32ReadOffset = 0;
+-
+-      
+-      *ppsCCB = psCCB;
+-
+-      return PVRSRV_OK;
+-
+-ErrorExit:
+-
+-      
+-      if (psCCB->psCCBMemInfo)
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
++      eError = SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, ISR_ID);
++      if (eError != PVRSRV_OK)
+       {
+-              PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
++              PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueues failed to schedule CCB command: %lu", eError));
+       }
+-
+-      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
+-
+-      return PVRSRV_ERROR_OUT_OF_MEMORY;
+-;
+ }
+-IMG_VOID DestroyCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32PFlags)
+-{
+-      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
+-
+-      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBCtlMemInfo, IMG_FALSE);
+-      if (!(ui32PFlags & PFLAGS_POWERDOWN))
+-      {
+-              if (psCCB)
+-              {
+-                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
+-              }
+-      }
+-}
+-#endif 
+ #if defined (PDUMP)
+ IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray,
+                                                IMG_UINT32                                             ui32BufferArrayLength,
+@@ -513,18 +482,6 @@
+       psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
+       psSGXInternalDevInfo->ui32RegFlags = (IMG_BOOL)psDevInfo->ui32RegFlags;
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      if (psDevInfo->psSGXEventObject)
+-      {
+-              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
+-              psSGXInternalDevInfo->hOSEvent = psEventObject->hOSEventKM;
+-      }
+-      else
+-      {
+-              psSGXInternalDevInfo->hOSEvent = IMG_NULL;
+-      }
+-#endif
+-
+       
+       psSGXInternalDevInfo->hCtlKernelMemInfoHandle =
+               (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
+@@ -532,11 +489,11 @@
+       return PVRSRV_OK;
+ }
+-static IMG_VOID SGXCleanupRequest(PVRSRV_SGXDEV_INFO  *psSGXDevInfo,
++static IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE  *psDeviceNode,
+                                                                 IMG_DEV_VIRTADDR              *psHWDataDevVAddr,
+-                                                                IMG_BOOL                              bContextCleanup)
++                                                                IMG_UINT32                    ui32ResManRequestFlag)
+ {
+-      IMG_UINT32                              ui32ResManRequestFlag = 0;
++      PVRSRV_SGXDEV_INFO              *psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_KERNEL_MEM_INFO  *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
+       PVRSRV_SGX_HOST_CTL             *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psSGXHostCtlMemInfo->pvLinAddrKM;
+       IMG_UINT32                              ui32PowManFlags;
+@@ -554,25 +511,18 @@
+               
+               if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PDCACHE)
+               {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
++                      psSGXHostCtl->ui32ResManFlags |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
+                       psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PDCACHE;
+               }
+               if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PTCACHE)
+               {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
++                      psSGXHostCtl->ui32ResManFlags |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
+                       psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PTCACHE;
+               }
+-              if (bContextCleanup)
+-              {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST;
+-              }
+-              else
+-              {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST;
+-              }
+-              
++
+               
+               psSGXHostCtl->sResManCleanupData.uiAddr = psHWDataDevVAddr->uiAddr;
++              
+               psSGXHostCtl->ui32ResManFlags |= ui32ResManRequestFlag;
+               
+@@ -581,6 +531,9 @@
+               PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
+               
++              SGXScheduleProcessQueues(psDeviceNode);
++
++              
+               #if !defined(NO_HARDWARE)
+               if(PollForValueKM ((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32ResManFlags),
+                                       PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
+@@ -612,8 +565,8 @@
+ typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
+ {
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+-      IMG_DEV_VIRTADDR sHWDataDevVAddr;
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
+       IMG_HANDLE hBlockAlloc;
+       PRESMAN_ITEM psResItem;
+ } SGX_HW_RENDER_CONTEXT_CLEANUP;
+@@ -625,8 +578,8 @@
+       PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
+       PVR_UNREFERENCED_PARAMETER(ui32Param);
+-      SGXCleanupRequest(psCleanup->psDevInfo,
+-                                                      &psCleanup->sHWDataDevVAddr, IMG_TRUE);
++      SGXCleanupRequest(psCleanup->psDeviceNode,
++                                                      &psCleanup->sHWRenderContextDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST);
+       OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+                         sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+@@ -636,8 +589,34 @@
+       return PVRSRV_OK;
+ }
++typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResItem;
++} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      SGXCleanupRequest(psCleanup->psDeviceNode,
++                                                      &psCleanup->sHWTransferContextDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++                        psCleanup,
++                        psCleanup->hBlockAlloc);
++
++      return PVRSRV_OK;
++}
++
+ IMG_EXPORT
+-IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr)
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr)
+ {
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hBlockAlloc;
+@@ -656,8 +635,8 @@
+       }
+       psCleanup->hBlockAlloc = hBlockAlloc;
+-      psCleanup->psDevInfo = psSGXDevInfo;
+-      psCleanup->sHWDataDevVAddr = *psHWRenderContextDevVAddr;
++      psCleanup->psDeviceNode = (PVRSRV_DEVICE_NODE *)psDeviceNode;
++      psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
+       psResItem = ResManRegisterRes(RESMAN_TYPE_HW_RENDER_CONTEXT,
+                                                                 (IMG_VOID *)psCleanup,
+@@ -682,25 +661,173 @@
+ }
+ IMG_EXPORT
+-IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
+ {
+-      PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++      PVRSRV_ERROR eError;
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+-      SGXCleanupRequest(psDevInfo, &sHWRTDataSetDevVAddr, IMG_FALSE);
++      PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++      eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
++
++      return eError;
+ }
+ IMG_EXPORT
+-PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr)
+ {
+       PVRSRV_ERROR eError;
+-      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++      IMG_HANDLE hBlockAlloc;
++      SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++      PRESMAN_ITEM psResItem;
+-      PVR_ASSERT(hHWRenderContext != IMG_NULL);
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++                                              (IMG_VOID **)&psCleanup,
++                                              &hBlockAlloc);
+-      psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
++              return IMG_NULL;
++      }
++
++      psCleanup->hBlockAlloc = hBlockAlloc;
++      psCleanup->psDeviceNode = (PVRSRV_DEVICE_NODE *)psDeviceNode;
++      psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++                                                                (IMG_VOID *)psCleanup,
++                                                                0,
++                                                                &SGXCleanupHWTransferContextCallback,
++                                                                0);
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++                                psCleanup,
++                                psCleanup->hBlockAlloc);
++
++              return IMG_NULL;
++      }
++
++      psCleanup->psResItem = psResItem;
++
++      return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext)
++{
++      PVRSRV_ERROR eError;
++      SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++      PVR_ASSERT(hHWTransferContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
++
++      eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
++
++      return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResItem;
++} SGX_HW_2D_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      SGXCleanupRequest(psCleanup->psDeviceNode,
++                                                      &psCleanup->sHW2DContextDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_2DC_REQUEST);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++                        psCleanup,
++                        psCleanup->hBlockAlloc);
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR *psHW2DContextDevVAddr)
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hBlockAlloc;
++      SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++      PRESMAN_ITEM psResItem;
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++                                              (IMG_VOID **)&psCleanup,
++                                              &hBlockAlloc);
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
++              return IMG_NULL;
++      }
++
++      psCleanup->hBlockAlloc = hBlockAlloc;
++      psCleanup->psDeviceNode = (PVRSRV_DEVICE_NODE *)psDeviceNode;
++      psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_HW_2D_CONTEXT,
++                                                                (IMG_VOID *)psCleanup,
++                                                                0,
++                                                                &SGXCleanupHW2DContextCallback,
++                                                                0);
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++                                psCleanup,
++                                psCleanup->hBlockAlloc);
++
++              return IMG_NULL;
++      }
++
++      psCleanup->psResItem = psResItem;
++
++      return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext)
++{
++      PVRSRV_ERROR eError;
++      SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++
++      PVR_ASSERT(hHW2DContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
+       eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
+       return eError;
+ }
++#endif
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++      PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++
++      SGXCleanupRequest((PVRSRV_DEVICE_NODE *)psDeviceNode, &sHWRTDataSetDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST);
++}
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h 2008-12-18 15:47:29.000000000 +0100
+@@ -73,6 +73,13 @@
+                                                IMG_BOOL                                               bDumpPolls);
+ #endif
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++IMG_IMPORT
++IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE   *psDeviceNode,
++                                                               IMG_UINT32                     ui32CallerID);
++#endif 
++
+ IMG_IMPORT
+ PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                       *psDeviceNode,
+                                                                        PVRSRV_SGX_COMMAND_TYPE        eCommandType,
+@@ -80,14 +87,31 @@
+                                                                        IMG_UINT32                                     ui32CallerID);
+ IMG_IMPORT
++IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
+ IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
+ IMG_IMPORT
+-IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr);
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr);
+ IMG_IMPORT
+-IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
+ IMG_IMPORT
+ PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR *psHW2DContextDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext);
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h   2008-12-18 15:47:29.000000000 +0100
+@@ -33,6 +33,12 @@
+ #define PVRSRV_MAX_BRIDGE_IN_SIZE     0x1000
+ #define PVRSRV_MAX_BRIDGE_OUT_SIZE    0x1000
++typedef       struct _PVR_PCI_DEV_TAG
++{
++      struct pci_dev          *psPCIDev;
++      HOST_PCI_INIT_FLAGS     ePCIFlags;
++      IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
+ typedef struct _ENV_DATA_TAG
+ {
+@@ -43,8 +49,6 @@
+       IMG_UINT32              ui32IRQ;
+       IMG_VOID                *pvISRCookie;
+       struct tasklet_struct   sMISRTasklet;
+-      struct pci_dev          *psPCIDev;
+-      IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+ } ENV_DATA;
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c      1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c      2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,221 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++   rwlock_t                      sLock;
++   struct list_head        sList;
++   
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++      struct completion sCompletion;
++      struct list_head        sList;
++      IMG_HANDLE                      hResItem;                               
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
++{
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), 
++              (IMG_VOID **)&psEvenObjectList, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));           
++              return PVRSRV_ERROR_OUT_OF_MEMORY;      
++      }
++
++    INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++      rwlock_init(&psEvenObjectList->sLock);
++      
++      *phEventObjectList = (IMG_HANDLE *) psEvenObjectList;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
++{
++
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
++
++      if(psEvenObjectList)    
++      {
++              if (!list_empty(&psEvenObjectList->sList)) 
++              {
++                       PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
++                       return PVRSRV_ERROR_GENERIC;
++              }
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEvenObjectList, IMG_NULL);
++      }
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject, IMG_BOOL bResManCallback)
++{
++      if(hOSEventObjectList)
++      {
++              PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
++              if(hOSEventObject)
++              {
++                      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; 
++                      write_lock_bh(&psLinuxEventObjectList->sLock);
++                      list_del(&psLinuxEventObject->sList);
++                      write_unlock_bh(&psLinuxEventObjectList->sLock);
++              
++                      
++                      if(!bResManCallback && psLinuxEventObject->hResItem)
++                      {
++                              if(ResManFreeResByPtr(psLinuxEventObject->hResItem, IMG_FALSE) != PVRSRV_OK)
++                              {
++                                      return PVRSRV_ERROR_GENERIC;
++                              }
++                      }
++                      
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
++                      
++                      return PVRSRV_OK;
++              }
++      }
++      return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      if(pvParam)             
++      {       
++              PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)pvParam; 
++              if(psLinuxEventObject->psLinuxEventObjectList)
++              {
++                      IMG_HANDLE hOSEventObjectList = (IMG_HANDLE)psLinuxEventObject->psLinuxEventObjectList; 
++                      return LinuxEventObjectDelete(hOSEventObjectList,(IMG_HANDLE) psLinuxEventObject, IMG_TRUE);
++              }
++      }       
++      return PVRSRV_ERROR_GENERIC;
++}
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
++ {
++      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; 
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
++
++      
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), 
++              (IMG_VOID **)&psLinuxEventObject, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));            
++              return PVRSRV_ERROR_OUT_OF_MEMORY;      
++      }
++      
++      INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++      init_completion(&psLinuxEventObject->sCompletion);      
++    
++
++      psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++      psLinuxEventObject->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_EVENT_OBJECT,
++                                                                                                                              psLinuxEventObject,
++                                                                                                                              0,
++                                                                                                                              &LinuxEventObjectDeleteCallback,
++                                                                                                                              0);     
++
++      write_lock_bh(&psLinuxEventObjectList->sLock);
++      list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++    write_unlock_bh(&psLinuxEventObjectList->sLock);
++      
++      *phOSEventObject = psLinuxEventObject;
++
++      return PVRSRV_OK;        
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
++{
++      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
++      struct list_head *psListEntry, *psListEntryTemp, *psList;
++      psList = &psLinuxEventObjectList->sList;
++
++      list_for_each_safe(psListEntry, psListEntryTemp, psList) 
++      {
++                              psLinuxEventObject = list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); 
++                              complete(&psLinuxEventObject->sCompletion);                             
++      }
++      return  PVRSRV_OK;
++      
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
++{
++      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))             
++      if(wait_for_completion_timeout(&psLinuxEventObject->sCompletion, msecs_to_jiffies(ui32MSTimeout)) == 0)
++      {
++              return PVRSRV_ERROR_TIMEOUT;
++      }
++#else
++      wait_for_completion(&psLinuxEventObject->sCompletion);
++#endif        
++      return  PVRSRV_OK;
++}
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h      1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h      2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject, IMG_BOOL bResManCallback);
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile      1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile      2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,81 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++# 
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++# 
++# This program is distributed in the hope it will be useful but, except 
++# as otherwise stated in writing, without any warranty; without even the 
++# implied warranty of merchantability or fitness for a particular purpose. 
++# See the GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++# 
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++# 
++#
++
++#
++MODULE                = pvrsrvkm
++
++KBUILDROOT    = ../../../..
++
++INCLUDES =    -I$(EURASIAROOT)/include4 \
++                      -I$(EURASIAROOT)/services4/include \
++                      -I$(EURASIAROOT)/services4/srvkm/env/linux \
++                      -I$(EURASIAROOT)/services4/srvkm/include \
++                      -I$(EURASIAROOT)/services4/srvkm/bridged \
++                      -I$(EURASIAROOT)/services4/srvkm/devices/sgx \
++                      -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++                      -I$(EURASIAROOT)/services4/system/include 
++
++
++SOURCES             = $(KBUILDROOT)/srvkm/env/linux/osfunc.c \
++                              $(KBUILDROOT)/srvkm/env/linux/mmap.c \
++                              $(KBUILDROOT)/srvkm/env/linux/module.c \
++                              $(KBUILDROOT)/srvkm/env/linux/pdump.c \
++                              $(KBUILDROOT)/srvkm/env/linux/proc.c \
++                              $(KBUILDROOT)/srvkm/env/linux/pvr_bridge_k.c \
++                              $(KBUILDROOT)/srvkm/env/linux/pvr_debug.c \
++                              $(KBUILDROOT)/srvkm/env/linux/mm.c \
++                              $(KBUILDROOT)/srvkm/env/linux/mutex.c \
++                              $(KBUILDROOT)/srvkm/env/linux/event.c
++
++SOURCES            += $(KBUILDROOT)/srvkm/common/buffer_manager.c \
++                              $(KBUILDROOT)/srvkm/common/devicemem.c \
++                              $(KBUILDROOT)/srvkm/common/deviceclass.c \
++                              $(KBUILDROOT)/srvkm/common/handle.c \
++                              $(KBUILDROOT)/srvkm/common/hash.c \
++                              $(KBUILDROOT)/srvkm/common/metrics.c \
++                              $(KBUILDROOT)/srvkm/common/pvrsrv.c \
++                              $(KBUILDROOT)/srvkm/common/queue.c \
++                              $(KBUILDROOT)/srvkm/common/ra.c \
++                              $(KBUILDROOT)/srvkm/common/resman.c \
++                              $(KBUILDROOT)/srvkm/common/power.c \
++                              $(KBUILDROOT)/srvkm/common/mem.c \
++                              $(KBUILDROOT)/srvkm/bridged/bridged_pvr_bridge.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxinit.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxreset.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxutils.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxkick.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxtransfer.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/mmu.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/pb.c \
++                              $(KBUILDROOT)/srvkm/common/perproc.c \
++                              $(KBUILDROOT)/../services4/system/$(PVR_SYSTEM)/sysconfig.c \
++                              $(KBUILDROOT)/../services4/system/$(PVR_SYSTEM)/sysutils.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgx2dcore.c
++
++
++INCLUDES += -I$(EURASIAROOT)/services4/srvkm/hwdefs 
++
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c 2008-12-18 15:47:29.000000000 +0100
+@@ -37,6 +37,7 @@
+ #endif
+ #include <linux/slab.h>
+ #include <linux/highmem.h>
++#include <linux/sched.h>
+ #include "img_defs.h"
+ #include "services.h"
+@@ -1078,7 +1079,11 @@
+ #if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
+     ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
+ #endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+     return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL);
++#else
++    return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL, NULL);
++#endif
+ }
+@@ -1445,9 +1450,6 @@
+ const IMG_CHAR *
+ LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
+ {
+-    PVR_ASSERT(LINUX_MEM_AREA_TYPE_COUNT == 5);
+-    PVR_ASSERT(eMemAreaType < LINUX_MEM_AREA_TYPE_COUNT);
+-    
+     
+     switch(eMemAreaType)
+     {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c     2008-12-18 15:47:29.000000000 +0100
+@@ -25,7 +25,7 @@
+  ******************************************************************************/
+ #ifndef AUTOCONF_INCLUDED
+-// #include <linux/config.h>
++ #include <linux/config.h>
+ #endif
+ #include <linux/init.h>
+@@ -34,9 +34,19 @@
+ #include <linux/version.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
++
+ #if defined(LDM_PLATFORM)
+ #include <linux/platform_device.h>
+ #endif 
++
++#if defined(LDM_PCI)
++#include <linux/pci.h>
++#endif 
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
+ #include "img_defs.h"
+ #include "services.h"
+ #include "kerneldisplay.h"
+@@ -51,15 +61,13 @@
+ #include "handle.h"
+ #include "pvr_bridge_km.h"
+ #include "proc.h"
+-
++#include "pvrmodule.h"
+ #define CLASSNAME     "powervr"
+ #define DRVNAME               "pvrsrvkm"
+ #define DEVNAME               "pvrsrvkm"
+-MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+-MODULE_LICENSE("GPL");
+ MODULE_SUPPORTED_DEVICE(DEVNAME);
+ #ifdef DEBUG
+ static int debug = DBGPRIV_WARNING;
+@@ -99,24 +107,75 @@
+ };
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
+ #if defined(LDM_PLATFORM)
+-static int PVRSRVDriverRemove(struct platform_device *device);
+-static int PVRSRVDriverProbe(struct platform_device *device);
+-static int PVRSRVDriverSuspend(struct platform_device *device, pm_message_t state);
+-static void PVRSRVDriverShutdown(struct platform_device *device);
+-static int PVRSRVDriverResume(struct platform_device *device);
++#define       LDM_DEV struct platform_device
++#define       LDM_DRV struct platform_driver
++#if defined(LDM_PCI)
++#undef        LDM_PCI
++#endif 
++#endif 
+-static struct platform_driver powervr_driver = {
++#if defined(LDM_PCI)
++#define       LDM_DEV struct pci_dev
++#define       LDM_DRV struct pci_driver
++#endif 
++
++//static void PVRSRVClassDeviceRelease(struct class_device *class_device);
++
++/*static struct class powervr_class = {
++      .name                   = CLASSNAME,
++      .release                = PVRSRVClassDeviceRelease
++};*/
++
++#if defined(LDM_PLATFORM)
++static int PVRSRVDriverRemove(LDM_DEV *device);
++static int PVRSRVDriverProbe(LDM_DEV *device);
++#endif
++#if defined(LDM_PCI)
++static void PVRSRVDriverRemove(LDM_DEV *device);
++static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
++#endif
++static int PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
++static void PVRSRVDriverShutdown(LDM_DEV *device);
++static int PVRSRVDriverResume(LDM_DEV *device);
++
++#if defined(LDM_PCI)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++      { PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID2) },
++      { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(LDM_PLATFORM)
+       .driver = {
+-              .name           = DEVNAME,
++              .name           = DRVNAME,
+       },
++#endif
++#if defined(LDM_PCI)
++      .name           = DRVNAME,
++      .id_table = powervr_id_table,
++#endif
+       .probe          = PVRSRVDriverProbe,
++#if defined(LDM_PLATFORM)
+       .remove         = PVRSRVDriverRemove,
++#endif
++#if defined(LDM_PCI)
++      .remove         = __devexit_p(PVRSRVDriverRemove),
++#endif
+       .suspend        = PVRSRVDriverSuspend,
+       .resume         = PVRSRVDriverResume,
+       .shutdown       = PVRSRVDriverShutdown,
+ };
++LDM_DEV *gpsPVRLDMDev;
++
++ 
++#if defined(LDM_PLATFORM)
+ static void PVRSRVDeviceRelease(struct device *device);
+ static struct platform_device powervr_device = {
+@@ -126,18 +185,79 @@
+               .release                = PVRSRVDeviceRelease
+       }
+ };
++#endif 
++static ssize_t PVRSRVShowDev(struct class_device *pClassDevice, char *buf)
++{
++      PVR_TRACE(("PVRSRVShowDev(pClassDevice=%p)", pClassDevice));
+-static int PVRSRVDriverProbe(struct platform_device *pDevice)
++      return snprintf(buf, PAGE_SIZE, "%d:0\n", AssignedMajorNumber);
++}
++
++//static CLASS_DEVICE_ATTR(dev,  S_IRUGO, PVRSRVShowDev, NULL);
++
++/*static void PVRSRVClassDeviceRelease(struct class_device *pClassDevice)
++{
++      PVR_TRACE(("PVRSRVClassDeviceRelease(pClassDevice=%p)", pClassDevice));
++
++      kfree(pClassDevice);
++}*/
++
++#if defined(LDM_PLATFORM)
++static int PVRSRVDriverProbe(LDM_DEV *pDevice)
++#endif
++#if defined(LDM_PCI)
++static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
++#endif
+ {
+       SYS_DATA *psSysData;
+       PVRSRV_ERROR eError;
++      //struct class_device *pClassDevice;
+       int error;
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverProbe(pDevice=%p)", pDevice));
++      PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
+-      pDevice->dev.driver_data = NULL;
++      pDevice->dev.driver_data = NULL;        
++      /*pClassDevice = kmalloc(sizeof(*pClassDevice), GFP_KERNEL);
++
++      if (pClassDevice == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVDriverProbe(pDevice=%p): no memory for class device instance.",
++                              pDevice));
++
++              return -ENOMEM;
++      }
++
++      memset(pClassDevice, 0, sizeof(*pClassDevice));
++
++      pDevice->dev.driver_data = (void *)pClassDevice;
++
++      
++      strncpy(pClassDevice->class_id, DEVNAME, BUS_ID_SIZE);
++
++      pClassDevice->class = &powervr_class;
++      pClassDevice->dev = &pDevice->dev;
++
++      
++      if ((error = class_device_register(pClassDevice)) != 0)
++      {
++              kfree(pClassDevice);
++
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVDriverProbe(pDevice=%p): class_device_register failed (%d)",
++                              pDevice, error));
++              return error;
++      }
++
++      if ((error = class_device_create_file(pClassDevice, &class_device_attr_dev)) != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVDriverProbe(pDevice=%p): class_device_create_file failed (%d)",
++                              pDevice, error));
++              return error;
++      }*/
+ #if 0
+       
+@@ -149,37 +269,34 @@
+       
+       if (SysAcquireData(&psSysData) != PVRSRV_OK)
+       {
++              gpsPVRLDMDev = pDevice;
++
+               if (SysInitialise() != PVRSRV_OK)
+               {
+                       return -ENODEV;
+               }
+-
+-              eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
+-              if(eError != PVRSRV_OK)
+-              {
+-                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVDriverProbe: Failed to connect to resource manager"));
+-                      error = -ENODEV;
+-              }
+       }
+       return 0;
+ }
+-static int PVRSRVDriverRemove(struct platform_device *pDevice)
++#if defined (LDM_PLATFORM)
++static int PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++#if defined(LDM_PCI)
++static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
+ {
+       SYS_DATA *psSysData;
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverRemove(pDevice=%p)", pDevice));
++      PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
+-      if(PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE) != PVRSRV_OK)
+-      {
+-              return -EINVAL;
+-      }
+-      
+       if (SysAcquireData(&psSysData) == PVRSRV_OK)
+       {
+               SysDeinitialise(psSysData);
++
++              gpsPVRLDMDev = IMG_NULL;
+       }
+ #if 0
+@@ -189,68 +306,131 @@
+       }
+ #endif
++      //class_device_unregister((struct class_device *)pDevice->dev.driver_data);
++
++
++      pDevice->dev.driver_data = 0;
++
++#if defined (LDM_PLATFORM)
+       return 0;
++#endif
++#if defined (LDM_PCI)
++      return;
++#endif
+ }
+-static void PVRSRVDriverShutdown(struct platform_device *pDevice)
++static void PVRSRVDriverShutdown(LDM_DEV *pDevice)
+ {
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++      PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
+       (void) PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3);
+ }
+-static int PVRSRVDriverSuspend(struct platform_device *pDevice, pm_message_t state)
++static int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
+ {
+-
+-      PVR_DPF((PVR_DBG_WARNING,
+-                      "PVRSRVDriverSuspend(pDevice=%p)",
+-                      pDevice));
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL))
++      PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
+       if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
+       {
+               return -EINVAL;
+       }
+-
++#endif
+       return 0;
+ }
+-static int PVRSRVDriverResume(struct platform_device *pDevice)
++static int PVRSRVDriverResume(LDM_DEV *pDevice)
+ {
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverResume(pDevice=%p)", pDevice));
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL))
++      PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
+       if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
+       {
+               return -EINVAL;
+       }
+-
++#endif
+       return 0;
+ }
++#if defined(LDM_PLATFORM)
+ static void PVRSRVDeviceRelease(struct device *pDevice)
+ {
+       PVR_DPF((PVR_DBG_WARNING, "PVRSRVDeviceRelease(pDevice=%p)", pDevice));
+ }
+ #endif 
++#endif 
++
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static IMG_UINT32 gPVRPowerLevel;
++
++int PVRProcSetPowerLevel(struct file *file, const char *buffer, unsigned long count, void *data)
++{
++      char data_buffer[2];
++      IMG_UINT32 PVRPowerLevel;
++
++      if (count != sizeof(data_buffer))
++      {
++              return -EINVAL;
++      }
++      else
++      {
++              if (copy_from_user(data_buffer, buffer, count))
++                      return -EINVAL;
++              if (data_buffer[count - 1] != '\n')
++                      return -EINVAL;
++              PVRPowerLevel = data_buffer[0] - '0';
++              if (PVRPowerLevel != gPVRPowerLevel)
++              {
++                      if (PVRPowerLevel != 0)
++                      {
++                              if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
++                              {
++                                      return -EINVAL;
++                              }
++                      }
++                      else
++                      {
++                              if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++                              {
++                                      return -EINVAL;
++                              }
++                      }
++
++                      gPVRPowerLevel = PVRPowerLevel;
++              }
++      }
++      return (count);
++}
++
++int PVRProcGetPowerLevel(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      if (off == 0) {
++              *start = (char *)1;
++              return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++      }
++      *eof = 1;
++      return 0;
++}
++#endif
+ static int PVRSRVOpen(struct inode unref__ * pInode, struct file unref__ * pFile)
+ {
+       int Ret = 0;
+-      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVOpen"));
+-
+-    LinuxLockMutex(&gPVRSRVLock);
++      LinuxLockMutex(&gPVRSRVLock);
+       if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_TRUE) != PVRSRV_OK)
+       {
+               Ret = -ENOMEM;
+       }
+       
+-    LinuxUnLockMutex(&gPVRSRVLock);
++      LinuxUnLockMutex(&gPVRSRVLock);
+       return Ret;
+ }
+@@ -260,8 +440,6 @@
+ {
+       int Ret = 0;
+-      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRelease"));
+-
+       if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_FALSE) != PVRSRV_OK)
+       {
+               Ret = -ENOMEM;
+@@ -274,9 +452,12 @@
+ static int __init PVRCore_Init(void)
+ {
+       int error;
+-#if !defined(LDM_PLATFORM)
++#if !(defined(LDM_PLATFORM) || defined(LDM_PCI))
+       PVRSRV_ERROR eError;
+-#endif 
++#endif
++
++      PVR_TRACE(("PVRCore_Init"));
++
+       
+       AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
+@@ -287,7 +468,7 @@
+               return -EBUSY;
+       }
+-      PVR_DPF((PVR_DBG_WARNING, "PVRCore_Init: major device %d", AssignedMajorNumber));
++      PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
+       
+       if (CreateProcEntries ())
+@@ -313,9 +494,19 @@
+       PVRMMapInit();
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++      /*if ((error = class_register(&powervr_class)) != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register class (%d)", error));
++
++              goto init_failed;
++      }*/
++
+ #if defined(LDM_PLATFORM)
+       if ((error = platform_driver_register(&powervr_driver)) != 0)
+       {
++              //class_unregister(&powervr_class);
++
+               PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
+               goto init_failed;
+@@ -324,11 +515,25 @@
+       if ((error = platform_device_register(&powervr_device)) != 0)
+       {
+               platform_driver_unregister(&powervr_driver);
++              //class_unregister(&powervr_class);
+               PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
+               goto init_failed;
+       }
++#endif 
++
++#if defined(LDM_PCI)
++      if ((error = pci_register_driver(&powervr_driver)) != 0)
++      {
++              //class_unregister(&powervr_class);
++
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
++
++              goto init_failed;
++      }
++#endif 
++
+ #else 
+       
+       if ((eError = SysInitialise()) != PVRSRV_OK)
+@@ -343,20 +548,12 @@
+ #endif
+               goto init_failed;
+       }
+-
+-      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
+-      if(eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"PVRCore_Init: Failed to connect to resource manager"));
+-              error = -ENODEV;
+-              goto init_failed;
+-      }
+ #endif 
++
+       return 0;
+ init_failed:
+-      (void) PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
+       PVRMMapCleanup();
+       LinuxMMCleanup();
+       RemoveProcEntries();
+@@ -370,23 +567,34 @@
+ static void __exit PVRCore_Cleanup(void)
+ {
+       SYS_DATA *psSysData;
+-#if !defined(LDM_PLATFORM)
++#if !(defined(LDM_PLATFORM) || defined (LDM_PCI))
+       PVRSRV_ERROR eError;
+-#endif 
++#endif
++
++      PVR_TRACE(("PVRCore_Cleanup"));
+       SysAcquireData(&psSysData);
+-      unregister_chrdev(AssignedMajorNumber, DRVNAME);
+       
++      /*if (unregister_chrdev(AssignedMajorNumber, DRVNAME))
++      {
++              PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
++      }*/
++      unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
++#if defined(LDM_PCI)
++      pci_unregister_driver(&powervr_driver);
++#endif
++
+ #if defined (LDM_PLATFORM)
+       platform_device_unregister(&powervr_device);
+       platform_driver_unregister(&powervr_driver);
+-#else 
+-      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"KernelResManDisconnect: Failed to disconnect"));
+-      }
++#endif
++      //class_unregister(&powervr_class);
++
++#else 
+       
+       SysDeinitialise(psSysData);
+ #endif 
+@@ -399,7 +607,7 @@
+       RemoveProcEntries();
+-      PVR_DPF((PVR_DBG_WARNING,"unloading"));
++      PVR_TRACE(("PVRCore_Cleanup: unloading"));
+ }
+ module_init(PVRCore_Init);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c     2008-12-18 15:47:29.000000000 +0100
+@@ -56,6 +56,9 @@
+ #include "env_data.h"
+ #include "proc.h"
+ #include "mutex.h"
++#include "event.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS               (100)
+ extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
+@@ -411,9 +414,6 @@
+       psEnvData->bLISRInstalled = IMG_FALSE;
+       
+-      psEnvData->psPCIDev = NULL;
+-
+-      
+       *ppvEnvSpecificData = psEnvData;
+       return PVRSRV_OK;
+@@ -426,7 +426,6 @@
+       PVR_ASSERT(!psEnvData->bMISRInstalled);
+       PVR_ASSERT(!psEnvData->bLISRInstalled);
+-      PVR_ASSERT(psEnvData->psPCIDev == NULL);
+       OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0x1000, psEnvData->pvBridgeData, IMG_NULL);
+@@ -1189,57 +1188,62 @@
+ }
+ #if defined(CONFIG_PCI) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+-PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++
++IMG_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+       int err;
+       IMG_UINT32 i;
++      PVR_PCI_DEV *psPVRPCI;
+-      if (psEnvData->psPCIDev != NULL)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: A device has already been acquired"));
+-              return PVRSRV_ERROR_GENERIC;
+-      }
++      PVR_TRACE(("OSPCISetDev"));
+-      psEnvData->psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, psEnvData->psPCIDev);
+-      if (psEnvData->psPCIDev == NULL)
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)&psPVRPCI, IMG_NULL) != PVRSRV_OK)
+       {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
+-              return PVRSRV_ERROR_GENERIC;
++              PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++              return IMG_NULL;
+       }
+-      err = pci_enable_device(psEnvData->psPCIDev);
++      psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++      psPVRPCI->ePCIFlags = eFlags;
++
++      err = pci_enable_device(psPVRPCI->psPCIDev);
+       if (err != 0)
+       {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't enable device (%d)", err));
+-              return PVRSRV_ERROR_GENERIC;
++              PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
++              return IMG_NULL;
+       }
+-      if (eFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
+-              pci_set_master(psEnvData->psPCIDev);
++      if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++              pci_set_master(psPVRPCI->psPCIDev);
+       
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+-              psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++              psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+       }
+-      return PVRSRV_OK;
++      return (IMG_HANDLE)psPVRPCI;
+ }
+-PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ)
++IMG_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      struct pci_dev *psPCIDev;
+-      if (psEnvData->psPCIDev == NULL)
++      psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++      if (psPCIDev == NULL)
+       {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIIRQ: Device hasn't been acquired"));
+-              return PVRSRV_ERROR_GENERIC;
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++              return IMG_NULL;
+       }
+-      *pui32IRQ = psEnvData->psPCIDev->irq;
++      return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(IMG_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++      *pui32IRQ = psPVRPCI->psPCIDev->irq;
+       return PVRSRV_OK;
+ }
+@@ -1254,19 +1258,12 @@
+ };
+ static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+-                                                                       IMG_VOID *pvSysData,
++                                                                       IMG_HANDLE hPVRPCI,
+                                                                        IMG_UINT32 ui32Index
+                                                                        
+ )
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+-
+-      if (psEnvData->psPCIDev == NULL)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Device hasn't been acquired"));
+-              return 0;
+-      }
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       if (ui32Index >= DEVICE_COUNT_RESOURCE)
+       {
+@@ -1278,32 +1275,32 @@
+       switch (eFunc)
+       {
+               case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+-                      return pci_resource_len(psEnvData->psPCIDev, ui32Index);
++                      return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+               case HOST_PCI_ADDR_RANGE_FUNC_START:
+-                      return pci_resource_start(psEnvData->psPCIDev, ui32Index);
++                      return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+               case HOST_PCI_ADDR_RANGE_FUNC_END:
+-                      return pci_resource_end(psEnvData->psPCIDev, ui32Index);
++                      return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+               case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+               {
+                       
+ #ifdef FIXME
+                       int err;
+-                      err = pci_request_region(psEnvData->psPCIDev, ui32Index, "PowerVR");
++                      err = pci_request_region(psPVRPCI->psPCIDev, ui32Index, "PowerVR");
+                       if (err != 0)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
+                               return 0;
+                       }
+ #endif
+-                      psEnvData->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++                      psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+                       return 1;
+               }
+               case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+-                      if (psEnvData->abPCIResourceInUse[ui32Index])
++                      if (psPVRPCI->abPCIResourceInUse[ui32Index])
+                       {
+-                              pci_release_region(psEnvData->psPCIDev, ui32Index);
+-                              psEnvData->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++                              pci_release_region(psPVRPCI->psPCIDev, ui32Index);
++                              psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+                       }
+                       return 1;
+               default:
+@@ -1314,62 +1311,160 @@
+       return 0;
+ }
+-IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++IMG_UINT32 OSPCIAddrRangeLen(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, pvSysData, ui32Index); 
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index); 
+ }
+-IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++IMG_UINT32 OSPCIAddrRangeStart(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, pvSysData, ui32Index); 
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); 
+ }
+-IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, pvSysData, ui32Index); 
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); 
+ }
+-PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData,
+-                                                                 IMG_UINT32 ui32Index
+-                                                                 
+-)
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_HANDLE hPVRPCI,
++                                                                 IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
+ }
+-PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
+ }
+-PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData)
++PVRSRV_ERROR OSPCIReleaseDev(IMG_HANDLE hPVRPCI)
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       int i;
+-      if (psEnvData->psPCIDev == NULL)
++      PVR_TRACE(("OSPCIReleaseDev"));
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+-              return PVRSRV_OK;
++              if (psPVRPCI->abPCIResourceInUse[i])
++              {
++                      PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++                      pci_release_region(psPVRPCI->psPCIDev, i);
++                      psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++              }
+       }
++      pci_disable_device(psPVRPCI->psPCIDev);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCISuspendDev(IMG_HANDLE hPVRPCI)
++{
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++      int i;
++      int err;
++
++      PVR_TRACE(("OSPCISuspendDev"));
++
+       
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+-              if (psEnvData->abPCIResourceInUse[i])
++              if (psPVRPCI->abPCIResourceInUse[i])
+               {
+-                      PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
+-                      pci_release_region(psEnvData->psPCIDev, i);
+-                      psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++                      pci_release_region(psPVRPCI->psPCIDev, i);
+               }
+       }
+-      pci_disable_device(psEnvData->psPCIDev);
++      err = pci_save_state(psPVRPCI->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
+-      psEnvData->psPCIDev = NULL;
++      pci_disable_device(psPVRPCI->psPCIDev);
++
++      err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D3cold);
++      switch(err)
++      {
++              case 0:
++                      break;
++              case -EIO:
++                      PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
++                      break;
++              case -EINVAL:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
++                      break;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
++                      break;
++      }
+       return PVRSRV_OK;
+ }
++
++PVRSRV_ERROR OSPCIResumeDev(IMG_HANDLE hPVRPCI)
++{
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++      int err;
++      int i;
++
++      PVR_TRACE(("OSPCIResumeDev"));
++
++      err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D0);
++      switch(err)
++      {
++              case 0:
++                      break;
++              case -EIO:
++                      PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
++                      break;
++              case -EINVAL:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
++                      return PVRSRV_ERROR_GENERIC;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
++                      return PVRSRV_ERROR_GENERIC;
++      }
++
++      err = pci_restore_state(psPVRPCI->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      err = pci_enable_device(psPVRPCI->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++              pci_set_master(psPVRPCI->psPCIDev);
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++      {
++              if (psPVRPCI->abPCIResourceInUse[i])
++              {
++                      err = pci_request_region(psPVRPCI->psPCIDev, i, "PowerVR");
++                      if (err != 0)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err));
++                      }
++              }
++
++      }
++
++      return PVRSRV_OK;
++}
++
+ #endif 
+ typedef struct TIMER_CALLBACK_DATA_TAG
+@@ -1418,7 +1513,7 @@
+       psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+       psTimerCBData->pvData = pvData;
+-      psTimerCBData->bActive = IMG_TRUE;
++      psTimerCBData->bActive = IMG_FALSE;
+       
+       
+@@ -1434,14 +1529,36 @@
+       psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
+       psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+       
++      return (IMG_HANDLE)psTimerCBData;
++}
++
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
++      
++      
++      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), psTimerCBData, IMG_NULL);
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
++      
++      
++      psTimerCBData->bActive = IMG_TRUE;
++
+       
+       add_timer(&psTimerCBData->sTimer);
+       
+-      return (IMG_HANDLE)psTimerCBData;
++      return PVRSRV_OK;
+ }
+-PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+ {
+       TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
+       
+@@ -1451,21 +1568,17 @@
+       
+       del_timer_sync(&psTimerCBData->sTimer); 
+       
+-      
+-      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), psTimerCBData, IMG_NULL);
+-      
+       return PVRSRV_OK;
+ }
+ PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
+ {
++
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       
+       if(psEventObject)
+       {
+-              struct completion *psCompletion;
+-
+               if(pszName)
+               {
+                       
+@@ -1478,26 +1591,20 @@
+                       snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
+               }
+               
+-              
+-              if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
+-                                      sizeof(struct completion), 
+-                                      (IMG_VOID **)&psCompletion, IMG_NULL) != PVRSRV_OK)
++              if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
+               {
+-                      PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: failed to allocate memory for completion variable"));             
+-                      return PVRSRV_ERROR_OUT_OF_MEMORY;      
++                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;   
+               }
+-              init_completion(psCompletion);
+-      
+-              psEventObject->hOSEventKM = (IMG_HANDLE) psCompletion;
+       }
+       else
+       {
+         PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
+-        eError = PVRSRV_ERROR_INVALID_PARAMS;
++              eError = PVRSRV_ERROR_GENERIC;  
+       }
+       
+       return eError;
++
+ }
+@@ -1509,8 +1616,7 @@
+       {
+               if(psEventObject->hOSEventKM)
+               {
+-                      struct completion *psCompletion = (struct completion *) psEventObject->hOSEventKM;
+-                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(struct completion), psCompletion, IMG_NULL);
++                      LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
+               }
+               else
+               {
+@@ -1527,19 +1633,13 @@
+       return eError;
+ }
+-PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout)
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+ {
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       
+       if(hOSEventKM)
+       {
+-              LinuxUnLockMutex(&gPVRSRVLock);         
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))             
+-              wait_for_completion_timeout((struct completion *)hOSEventKM, msecs_to_jiffies(ui32MSTimeout));
+-#else
+-              wait_for_completion((struct completion *)hOSEventKM);
+-#endif        
+-              LinuxLockMutex(&gPVRSRVLock);
++              eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
+       }
+       else
+       {
+@@ -1550,13 +1650,60 @@
+       return eError;
+ }
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE *phOSEvent)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(psEventObject)
++      {
++              if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++              eError = PVRSRV_ERROR_INVALID_PARAMS;
++              }
++
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE hOSEventKM)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(psEventObject)
++      {
++              if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM, IMG_FALSE) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
++              eError = PVRSRV_ERROR_INVALID_PARAMS;
++              }
++
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++      
++}
++
+ PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
+ {
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       
+       if(hOSEventKM)
+       {
+-              complete_all((struct completion *) hOSEventKM);         
++              eError = LinuxEventObjectSignal(hOSEventKM);
+       }
+       else
+       {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c      2008-12-18 15:47:29.000000000 +0100
+@@ -1205,15 +1205,14 @@
+       {
+               ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
+-#if 0
+               
+               if (ui32Written == 0)
+               {
+-                      ZwYieldExecution();
++                      OSReleaseThreadQuanta();
+               }
+-#endif
++
+               if (ui32Written != 0xFFFFFFFF)
+               {
+                       ui32Off += ui32Written;
+@@ -1302,6 +1301,14 @@
+       return bFrameDumped;
+ }
++IMG_VOID PDumpRegRead(const IMG_UINT32 ui32RegOffset, IMG_UINT32 ui32Flags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
+ IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
+ {
+       __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c       2008-12-18 15:47:29.000000000 +0100
+@@ -46,6 +46,11 @@
+ #ifdef DEBUG
+ int PVRDebugProcSetLevel(struct file *file, const char *buffer, unsigned long count, void *data);
+ int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++int PVRProcSetPowerLevel(struct file *file, const char *buffer, unsigned long count, void *data);
++int PVRProcGetPowerLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
+ #endif
+ static struct proc_dir_entry * dir;
+@@ -198,6 +203,15 @@
+         return -ENOMEM;
+     }
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++      if (CreateProcEntry("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/pvr/power_control"));
++
++        return -ENOMEM;
++    }
++#endif
+ #endif
+     return 0;
+@@ -219,6 +233,9 @@
+ {
+ #ifdef DEBUG
+     RemoveProcEntry("debug_level");
++#ifdef PVR_MANUAL_POWER_CONTROL
++    RemoveProcEntry("power_control");
++#endif
+ #endif
+     RemoveProcEntry("queue");
+     RemoveProcEntry("nodes");
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c  2008-12-18 15:47:29.000000000 +0100
+@@ -161,7 +161,7 @@
+ void PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
+ {
+-      printk(KERN_INFO "PVR: Setting Debug Level = 0x%x",(unsigned int)uDebugLevel);
++      printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",(unsigned int)uDebugLevel);
+       gPVRDebugLevel = uDebugLevel;
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h       2008-12-18 15:47:29.000000000 +0100
+@@ -33,12 +33,16 @@
+ #if defined(SGX535)
+ #include "sgx535defs.h"
+ #else
++#if defined(SGX520)
++#include "sgx520defs.h"
++#else
+ #if defined(SGX535_V1_1)
+ #include "sgx535defs.h"
+ #else
+ #endif
+ #endif
+ #endif
++#endif
+ #include "sgxerrata.h"
+ #include "sgxfeaturedefs.h"
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h     2008-12-18 15:47:29.000000000 +0100
+@@ -43,6 +43,8 @@
+       #else
+       #if SGX_CORE_REV == 120
+       #else
++      #if SGX_CORE_REV == 121
++      #else
+       #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+               
+       #else
+@@ -51,6 +53,7 @@
+       #endif
+       #endif
+       #endif
++      #endif
+         #endif
+       
+       #define SGX_CORE_DEFINED
+@@ -69,16 +72,22 @@
+               #define FIX_HW_BRN_23281
+               #define FIX_HW_BRN_23410
+               #define FIX_HW_BRN_22693
++              #define FIX_HW_BRN_22997
++              #define FIX_HW_BRN_23030
+       #else
+       #if SGX_CORE_REV == 1111
+               #define FIX_HW_BRN_23281
+               #define FIX_HW_BRN_23410
+               #define FIX_HW_BRN_22693
++              #define FIX_HW_BRN_22997
++              #define FIX_HW_BRN_23030
+       #else
+       #if SGX_CORE_REV == 112
+               #define FIX_HW_BRN_23281
+               #define FIX_HW_BRN_23410
+               #define FIX_HW_BRN_22693
++              #define FIX_HW_BRN_22997
++              #define FIX_HW_BRN_23030
+       #else
+       #if SGX_CORE_REV == 113
+               #define FIX_HW_BRN_23281
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h        2008-12-18 15:47:29.000000000 +0100
+@@ -24,6 +24,12 @@
+  *
+  ******************************************************************************/
++#if defined(SGX520)
++      #define SGX_CORE_FRIENDLY_NAME                                                  "SGX520"
++      #define SGX_CORE_ID                                                                             SGX_CORE_ID_520
++      #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (28)
++      #define SGX_FEATURE_AUTOCLOCKGATING
++#else
+ #if defined(SGX530)
+       #define SGX_CORE_FRIENDLY_NAME                                                  "SGX530"
+       #define SGX_CORE_ID                                                                             SGX_CORE_ID_530
+@@ -36,8 +42,9 @@
+       #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (32)
+       #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+       #define SGX_FEATURE_2D_HARDWARE
+-              #define SGX_FEATURE_AUTOCLOCKGATING
+-
++      #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#endif
+ #endif
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/device.h git/drivers/gpu/pvr/services4/srvkm/include/device.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/device.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/device.h       2008-12-18 15:47:29.000000000 +0100
+@@ -225,39 +225,40 @@
+       struct _PVRSRV_DEVICE_NODE_     *psNext;
+ } PVRSRV_DEVICE_NODE;
+-PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,
+-                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
+-                                                                IMG_UINT32 ui32SOCInterruptBit,
+-                                                                IMG_UINT32 *pui32DeviceIndex );
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++                                                                                        PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                                        IMG_UINT32 ui32SOCInterruptBit,
++                                                                                        IMG_UINT32 *pui32DeviceIndex );
+-PVRSRV_ERROR PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
+-PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
+ #if !defined(USE_CODE)
+-IMG_IMPORT PVRSRV_ERROR PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
+-                                                                         IMG_UINT32 ui32Value,
+-                                                                         IMG_UINT32 ui32Mask,
+-                                                                         IMG_UINT32 ui32Waitus,
+-                                                                         IMG_UINT32 ui32Tries);
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++                                                                                                 IMG_UINT32 ui32Value,
++                                                                                                 IMG_UINT32 ui32Mask,
++                                                                                                 IMG_UINT32 ui32Waitus,
++                                                                                                 IMG_UINT32 ui32Tries);
+ #endif 
+ #if defined (USING_ISR_INTERRUPTS)
+-PVRSRV_ERROR PollForInterruptKM(IMG_UINT32 ui32Value,
++PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value,
+                                                               IMG_UINT32 ui32Mask,
+                                                               IMG_UINT32 ui32Waitus,
+                                                               IMG_UINT32 ui32Tries);
+ #endif 
+-PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData);
+-IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData);
+-IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
+-IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData);
+-IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData);
+ #if defined(__cplusplus)
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/handle.h git/drivers/gpu/pvr/services4/srvkm/include/handle.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/handle.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/handle.h       2008-12-18 15:47:29.000000000 +0100
+@@ -50,10 +50,13 @@
+       PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+       PVRSRV_HANDLE_TYPE_BUF_BUFFER,
+       PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++      PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++      PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
+       PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
+       PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+       PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+-      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT
++      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++      PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT
+ } PVRSRV_HANDLE_TYPE;
+ typedef enum
+@@ -126,6 +129,11 @@
+       
+       IMG_UINT32 ui32LastFreeIndexPlusOne;
++
++#ifdef        __linux__
++      
++      IMG_BOOL bVmallocUsed;
++#endif
+ } PVRSRV_HANDLE_BASE;
+ #ifdef        PVR_SECURE_HANDLES
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h       2008-12-18 15:47:29.000000000 +0100
+@@ -148,14 +148,16 @@
+ IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
+ IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...);
+ #define OSStringLength(pszString) strlen(pszString)
+-PVRSRV_ERROR OSPowerManagerConnect(IMG_VOID);
+-PVRSRV_ERROR OSPowerManagerDisconnect(IMG_VOID);
+ PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+                                                                PVRSRV_EVENTOBJECT *psEventObject);
+ PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
+ PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
+-PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE *phOSEvent);
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE hOSEventKM);
+ PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
+@@ -203,6 +205,8 @@
+ typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
+ IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
+ PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer);
+ PVRSRV_ERROR OSGetSysMemSize(IMG_UINT32 *pui32Bytes);
+@@ -211,17 +215,17 @@
+       HOST_PCI_INIT_FLAG_BUS_MASTER = 0x1,
+       HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
+ } HOST_PCI_INIT_FLAGS;
+-PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+-PVRSRV_ERROR OSPCISetDev(IMG_VOID *pvSysData, IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+-PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData);
+-PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ);
+-IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-PVRSRV_ERROR OSPCISuspendDev(IMG_VOID *pvSysData);
+-PVRSRV_ERROR OSPCIResumeDev(IMG_VOID *pvSysData);
++IMG_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++IMG_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(IMG_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(IMG_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(IMG_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(IMG_HANDLE hPVRPCI);
+ PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h     2008-12-18 15:47:29.000000000 +0100
+@@ -180,6 +180,8 @@
+       void PDump3DSignatureRegisters(IMG_UINT32       ui32DumpFrameNum,
+                                                                  IMG_BOOL             bLastFrame);
++      IMG_VOID PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32  ui32Flags);
++      
+       IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
+       void PDumpPerformanceCounterRegisters(IMG_UINT32        ui32DumpFrameNum,
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/resman.h git/drivers/gpu/pvr/services4/srvkm/include/resman.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/resman.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/resman.h       2008-12-18 15:47:29.000000000 +0100
+@@ -34,7 +34,9 @@
+ enum {
+       
+       RESMAN_TYPE_SHARED_PB_DESC = 1,                                 
+-      RESMAN_TYPE_HW_RENDER_CONTEXT,                                          
++      RESMAN_TYPE_HW_RENDER_CONTEXT,                                  
++      RESMAN_TYPE_HW_TRANSFER_CONTEXT,                                
++      RESMAN_TYPE_HW_2D_CONTEXT,                                              
+       RESMAN_TYPE_TRANSFER_CONTEXT,                                   
+       
+@@ -57,6 +59,7 @@
+       RESMAN_TYPE_DEVICEMEM_WRAP,                                             
+       RESMAN_TYPE_DEVICEMEM_ALLOCATION,                               
+       RESMAN_TYPE_RESOURCE_PERPROC_DATA,                              
++      RESMAN_TYPE_EVENT_OBJECT,                                               
+     RESMAN_TYPE_SHARED_MEM_INFO,                    
+       
+       
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h        2008-12-18 15:47:29.000000000 +0100
+@@ -33,9 +33,9 @@
+ #endif
+-IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State);
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
+-PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave);
+ #if defined (__cplusplus)
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/Makefile git/drivers/gpu/pvr/services4/srvkm/Makefile
+--- git/drivers/gpu/pvr/services4/srvkm/Makefile       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/Makefile       1970-01-01 01:00:00.000000000 +0100
+@@ -1,68 +0,0 @@
+-#
+-# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+-#
+-# This program is free software; you can redistribute it and/or modify it
+-# under the terms and conditions of the GNU General Public License,
+-# version 2, as published by the Free Software Foundation.
+-#
+-# This program is distributed in the hope it will be useful but, except
+-# as otherwise stated in writing, without any warranty; without even the
+-# implied warranty of merchantability or fitness for a particular purpose.
+-# See the GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License along with
+-# this program; if not, write to the Free Software Foundation, Inc.,
+-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-#
+-# The full GNU General Public License is included in this distribution in
+-# the file called "COPYING".
+-#
+-# Contact Information:
+-# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+-# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+-#
+-#
+-
+-obj-y +=      env/linux/osfunc.o              \
+-              env/linux/mmap.o                \
+-              env/linux/mod.o                 \
+-              env/linux/pdump.o               \
+-              env/linux/proc.o                \
+-              env/linux/pvr_bridge_k.o        \
+-              env/linux/pvr_debug.o           \
+-              env/linux/mm.o                  \
+-              env/linux/mutex.o
+-
+-obj-y +=      common/buffer_manager.o         \
+-              common/devicemem.o              \
+-              common/deviceclass.o            \
+-              common/handle.o                 \
+-              common/hash.o                   \
+-              common/metrics.o                \
+-              common/pvrsrv.o                 \
+-              common/queue.o                  \
+-              common/ra.o                     \
+-              common/resman.o                 \
+-              common/power.o                  \
+-              common/mem.o                    \
+-              bridged/bridged_pvr_bridge.o    \
+-              devices/sgx/sgxinit.o           \
+-              devices/sgx/sgxutils.o          \
+-              devices/sgx/sgxkick.o           \
+-              devices/sgx/sgxtransfer.o       \
+-              devices/sgx/mmu.o               \
+-              devices/sgx/pb.o                \
+-              common/perproc.o                \
+-              ../system/$(CONFIG_PVR_SYSTEM)/sysconfig.o      \
+-              ../system/$(CONFIG_PVR_SYSTEM)/sysutils.o       \
+-              devices/sgx/sgx2dcore.o
+-
+-INCLUDES =    -I$(src)/env/linux      \
+-              -I$(src)/include        \
+-              -I$(src)/bridged        \
+-              -I$(src)/devices/sgx    \
+-              -I$(src)/include        \
+-              -I$(src)/hwdefs
+-
+-ccflags-y += $(CONFIG_PVR_OPTS) $(INCLUDES)
+-
+diff -Nurd git/drivers/gpu/pvr/services4/system/include/syscommon.h git/drivers/gpu/pvr/services4/system/include/syscommon.h
+--- git/drivers/gpu/pvr/services4/system/include/syscommon.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/include/syscommon.h   2008-12-18 15:47:29.000000000 +0100
+@@ -83,11 +83,13 @@
+       RA_ARENA                                        *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS]; 
+     IMG_CHAR                    *pszVersionString;          
++      PVRSRV_EVENTOBJECT                      *psGlobalEventObject;                   
+ } SYS_DATA;
+ PVRSRV_ERROR SysInitialise(IMG_VOID);
++PVRSRV_ERROR SysFinalise(IMG_VOID);
+ IMG_UINT32 GetCPUTranslatedAddress(IMG_VOID);
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c  2008-12-18 15:47:29.000000000 +0100
+@@ -360,8 +360,15 @@
+       }
+       gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_INITDEV;
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
+ #if defined(SYS_USING_INTERRUPTS)
++      PVRSRV_ERROR eError;
++
+       eError = OSInstallMISR(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+@@ -388,12 +395,12 @@
+       
+       gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+       if (!gpsSysData->pszVersionString)
+-      { 
+-              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
+       }
+       else
+       {
+-              PVR_DPF((PVR_DBG_WARNING, "SysInitialise: Version string: %s", gpsSysData->pszVersionString));
++              PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+       }
+ #if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+@@ -641,7 +648,7 @@
+                       }
+                       gsSysSpecificData.ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_LISR;
+               }
+-#endif        
++#endif
+               if (gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)
+               {
+                       DisableSystemClocks(gpsSysData);
+@@ -682,7 +689,7 @@
+                       }
+                       gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LISR;
+               }
+-#endif        
++#endif
+       }
+       return eError;
+ }
+@@ -706,7 +713,7 @@
+               DisableSGXClocks(gpsSysData);
+       }
+ #else 
+-      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState );
+ #endif 
+       return PVRSRV_OK;
+ }
+@@ -718,12 +725,13 @@
+ {
+       PVRSRV_ERROR eError = PVRSRV_OK;
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++
+       if (ui32DeviceIndex != gui32SGXDeviceID)
+       {
+               return eError;
+       }
+-      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+ #if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       if (eCurrentPowerState == PVRSRV_POWER_STATE_D3)
+@@ -734,7 +742,7 @@
+ #else 
+       PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+ #endif        
+-      
++
+       return eError;
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h  2008-12-18 15:47:29.000000000 +0100
+@@ -38,13 +38,6 @@
+ #define SYS_OMAP3430_SGX_IRQ                           21
+-#define SYS_OMAP3430_PM_REGS_SYS_PHYS_BASE     0x48306000
+-#define SYS_OMAP3430_PM_REGS_SIZE                      0x1000
+-
+-#define SYS_OMAP3430_CM_REGS_SYS_PHYS_BASE     0x48004000
+-#define SYS_OMAP3430_CM_REGS_SIZE                      0x1000
+-
+-
+ #define SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE  0x48088024
+ #define SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE      0x48088028
+ #define SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE     0x48088040
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c   2008-12-18 15:47:29.000000000 +0100
+@@ -52,7 +52,7 @@
+               return PVRSRV_OK;
+       }
+-      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Enabling SGX Clocks"));
++      PVR_TRACE(("EnableSGXClocks: Enabling SGX Clocks"));
+ #if defined(__linux__)
+       if (psSysSpecData->psSGX_FCK == IMG_NULL)
+--- /tmp/omaplfb_linux.c       2009-01-06 10:41:49.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   2009-01-06 10:42:41.000000000 +0100
+@@ -108,6 +108,8 @@
+       (void) OMAPLFBVSyncIHandler(psSwapChain);
+ }
++#define DISPC_IRQ_VSYNC 0x0002
++
+ PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+--- /tmp/Makefile      2009-01-06 11:32:47.000000000 +0100
++++ git/drivers/gpu/pvr/Makefile       2009-01-06 11:39:06.000000000 +0100
+@@ -16,6 +16,7 @@
+               services4/srvkm/env/linux/pvr_debug.o           \
+               services4/srvkm/env/linux/mm.o                  \
+               services4/srvkm/env/linux/mutex.o               \
++              services4/srvkm/env/linux/event.o \
+               services4/srvkm/common/buffer_manager.o         \
+               services4/srvkm/common/devicemem.o              \
+               services4/srvkm/common/deviceclass.o            \
+@@ -30,6 +31,7 @@
+               services4/srvkm/common/mem.o                    \
+               services4/srvkm/bridged/bridged_pvr_bridge.o    \
+               services4/srvkm/devices/sgx/sgxinit.o           \
++              services4/srvkm/devices/sgx/sgxreset.o \
+               services4/srvkm/devices/sgx/sgxutils.o          \
+               services4/srvkm/devices/sgx/sgxkick.o           \
+               services4/srvkm/devices/sgx/sgxtransfer.o       \
diff --git a/packages/linux/omap3-pandora-kernel-wifi/pvr/pvr-add.patch b/packages/linux/omap3-pandora-kernel-wifi/pvr/pvr-add.patch
new file mode 100755 (executable)
index 0000000..541e869
--- /dev/null
@@ -0,0 +1,155099 @@
+diff -Nurd git/drivers/gpu/drm-tungsten/ati_pcigart.c git-nokia/drivers/gpu/drm-tungsten/ati_pcigart.c
+--- kernel-2.6.27.orig/drivers/video/Kconfig
++++ kernel-2.6.27/drivers/video/Kconfig
+@@ -7,7 +7,7 @@
+
+ source "drivers/char/agp/Kconfig"
+
+-source "drivers/gpu/drm/Kconfig"
++source "drivers/gpu/Kconfig"
+
+ config VGASTATE
+        tristate
+--- git/drivers/gpu/drm-tungsten/ati_pcigart.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ati_pcigart.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,199 @@
++/**
++ * \file ati_pcigart.c
++ * ATI PCI GART support
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++# define ATI_PCIGART_PAGE_SIZE                4096    /**< PCI GART page size */
++# define ATI_PCIGART_PAGE_MASK                (~(ATI_PCIGART_PAGE_SIZE-1))
++
++#define ATI_PCIE_WRITE 0x4
++#define ATI_PCIE_READ 0x8
++
++static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart)
++{
++      u32 page_base;
++
++      page_base = (u32)addr & ATI_PCIGART_PAGE_MASK;
++      switch(gart_info->gart_reg_if) {
++      case DRM_ATI_GART_IGP:
++              page_base |= (upper_32_bits(addr) & 0xff) << 4;
++              page_base |= 0xc;
++              break;
++      case DRM_ATI_GART_PCIE:
++              page_base >>= 8;
++              page_base |= (upper_32_bits(addr) & 0xff) << 24;
++              page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
++              break;
++      default:
++      case DRM_ATI_GART_PCI:
++              break;
++      }
++      *pci_gart = cpu_to_le32(page_base);
++}
++
++static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
++                                     struct drm_ati_pcigart_info *gart_info)
++{
++      gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
++                                              PAGE_SIZE,
++                                              gart_info->table_mask);
++      if (gart_info->table_handle == NULL)
++              return -ENOMEM;
++
++      return 0;
++}
++
++static void drm_ati_free_pcigart_table(struct drm_device *dev,
++                                     struct drm_ati_pcigart_info *gart_info)
++{
++      drm_pci_free(dev, gart_info->table_handle);
++      gart_info->table_handle = NULL;
++}
++
++int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
++{
++      struct drm_sg_mem *entry = dev->sg;
++      unsigned long pages;
++      int i;
++      int max_pages;
++
++      /* we need to support large memory configurations */
++      if (!entry) {
++              DRM_ERROR("no scatter/gather memory!\n");
++              return 0;
++      }
++
++      if (gart_info->bus_addr) {
++
++              max_pages = (gart_info->table_size / sizeof(u32));
++              pages = (entry->pages <= max_pages)
++                ? entry->pages : max_pages;
++
++              for (i = 0; i < pages; i++) {
++                      if (!entry->busaddr[i])
++                              break;
++                      pci_unmap_page(dev->pdev, entry->busaddr[i],
++                                       PAGE_SIZE, PCI_DMA_TODEVICE);
++              }
++
++              if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
++                      gart_info->bus_addr = 0;
++      }
++
++
++      if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
++          && gart_info->table_handle) {
++
++              drm_ati_free_pcigart_table(dev, gart_info);
++      }
++
++      return 1;
++}
++EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
++
++int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
++{
++      struct drm_sg_mem *entry = dev->sg;
++      void *address = NULL;
++      unsigned long pages;
++      u32 *pci_gart;
++      dma_addr_t bus_address = 0;
++      int i, j, ret = 0;
++      int max_pages;
++      dma_addr_t entry_addr;
++
++      if (!entry) {
++              DRM_ERROR("no scatter/gather memory!\n");
++              goto done;
++      }
++
++      if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
++              DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
++
++              ret = drm_ati_alloc_pcigart_table(dev, gart_info);
++              if (ret) {
++                      DRM_ERROR("cannot allocate PCI GART page!\n");
++                      goto done;
++              }
++
++              address = gart_info->table_handle->vaddr;
++              bus_address = gart_info->table_handle->busaddr;
++      } else {
++              address = gart_info->addr;
++              bus_address = gart_info->bus_addr;
++              DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
++                        bus_address, (unsigned long)address);
++      }
++
++      pci_gart = (u32 *) address;
++
++      max_pages = (gart_info->table_size / sizeof(u32));
++      pages = (entry->pages <= max_pages)
++          ? entry->pages : max_pages;
++
++      memset(pci_gart, 0, max_pages * sizeof(u32));
++
++      for (i = 0; i < pages; i++) {
++              /* we need to support large memory configurations */
++              entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
++                                               0, PAGE_SIZE, PCI_DMA_TODEVICE);
++              if (entry->busaddr[i] == 0) {
++                      DRM_ERROR("unable to map PCIGART pages!\n");
++                      drm_ati_pcigart_cleanup(dev, gart_info);
++                      address = NULL;
++                      bus_address = 0;
++                      goto done;
++              }
++
++              entry_addr = entry->busaddr[i];
++              for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
++                      gart_insert_page_into_table(gart_info, entry_addr, pci_gart);
++                      pci_gart++;
++                      entry_addr += ATI_PCIGART_PAGE_SIZE;
++              }
++      }
++
++      ret = 1;
++
++#if defined(__i386__) || defined(__x86_64__)
++      wbinvd();
++#else
++      mb();
++#endif
++
++      done:
++      gart_info->addr = address;
++      gart_info->bus_addr = bus_address;
++      return ret;
++}
++EXPORT_SYMBOL(drm_ati_pcigart_init);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_agpsupport.c git-nokia/drivers/gpu/drm-tungsten/drm_agpsupport.c
+--- git/drivers/gpu/drm-tungsten/drm_agpsupport.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_agpsupport.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,715 @@
++/**
++ * \file drm_agpsupport.c
++ * DRM support for AGP/GART backend
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include <linux/module.h>
++
++#if __OS_HAS_AGP
++
++/**
++ * Get AGP information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a (output) drm_agp_info structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device has been initialized and acquired and fills in the
++ * drm_agp_info structure with the information in drm_agp_head::agp_info.
++ */
++int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
++{
++      DRM_AGP_KERN *kern;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++
++      kern = &dev->agp->agp_info;
++      info->agp_version_major = kern->version.major;
++      info->agp_version_minor = kern->version.minor;
++      info->mode = kern->mode;
++      info->aperture_base = kern->aper_base;
++      info->aperture_size = kern->aper_size * 1024 * 1024;
++      info->memory_allowed = kern->max_memory << PAGE_SHIFT;
++      info->memory_used = kern->current_memory << PAGE_SHIFT;
++      info->id_vendor = kern->device->vendor;
++      info->id_device = kern->device->device;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_info);
++
++int drm_agp_info_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_agp_info *info = data;
++      int err;
++
++      err = drm_agp_info(dev, info);
++      if (err)
++              return err;
++
++      return 0;
++}
++
++/**
++ * Acquire the AGP device.
++ *
++ * \param dev DRM device that is to acquire AGP.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device hasn't been acquired before and calls
++ * \c agp_backend_acquire.
++ */
++int drm_agp_acquire(struct drm_device * dev)
++{
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      int retcode;
++#endif
++
++      if (!dev->agp)
++              return -ENODEV;
++      if (dev->agp->acquired)
++              return -EBUSY;
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      if ((retcode = agp_backend_acquire()))
++              return retcode;
++#else
++      if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
++              return -ENODEV;
++#endif
++
++      dev->agp->acquired = 1;
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_acquire);
++
++/**
++ * Acquire the AGP device (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device hasn't been acquired before and calls
++ * \c agp_backend_acquire.
++ */
++int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
++}
++
++/**
++ * Release the AGP device.
++ *
++ * \param dev DRM device that is to release AGP.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
++ */
++int drm_agp_release(struct drm_device *dev)
++{
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      agp_backend_release();
++#else
++      agp_backend_release(dev->agp->bridge);
++#endif
++      dev->agp->acquired = 0;
++      return 0;
++
++}
++EXPORT_SYMBOL(drm_agp_release);
++
++int drm_agp_release_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      return drm_agp_release(dev);
++}
++
++/**
++ * Enable the AGP bus.
++ *
++ * \param dev DRM device that has previously acquired AGP.
++ * \param mode Requested AGP mode.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device has been acquired but not enabled, and calls
++ * \c agp_enable.
++ */
++int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
++{
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++
++      dev->agp->mode = mode.mode;
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      agp_enable(mode.mode);
++#else
++      agp_enable(dev->agp->bridge, mode.mode);
++#endif
++      dev->agp->enabled = 1;
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_enable);
++
++int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      struct drm_agp_mode *mode = data;
++
++      return drm_agp_enable(dev, *mode);
++}
++
++/**
++ * Allocate AGP memory.
++ *
++ * \param inode device inode.
++ * \param file_priv file private pointer.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_buffer structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and has been acquired, allocates the
++ * memory via alloc_agp() and creates a drm_agp_mem entry for it.
++ */
++int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
++{
++      struct drm_agp_mem *entry;
++      DRM_AGP_MEM *memory;
++      unsigned long pages;
++      u32 type;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
++              return -ENOMEM;
++
++      memset(entry, 0, sizeof(*entry));
++
++      pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
++      type = (u32) request->type;
++      if (!(memory = drm_alloc_agp(dev, pages, type))) {
++              drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
++              return -ENOMEM;
++      }
++
++      entry->handle = (unsigned long)memory->key + 1;
++      entry->memory = memory;
++      entry->bound = 0;
++      entry->pages = pages;
++      list_add(&entry->head, &dev->agp->memory);
++
++      request->handle = entry->handle;
++      request->physical = memory->physical;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_alloc);
++
++
++int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      struct drm_agp_buffer *request = data;
++
++      return drm_agp_alloc(dev, request);
++}
++
++/**
++ * Search for the AGP memory entry associated with a handle.
++ *
++ * \param dev DRM device structure.
++ * \param handle AGP memory handle.
++ * \return pointer to the drm_agp_mem structure associated with \p handle.
++ *
++ * Walks through drm_agp_head::memory until finding a matching handle.
++ */
++static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
++                                         unsigned long handle)
++{
++      struct drm_agp_mem *entry;
++
++      list_for_each_entry(entry, &dev->agp->memory, head) {
++              if (entry->handle == handle)
++                      return entry;
++      }
++      return NULL;
++}
++
++/**
++ * Unbind AGP memory from the GATT (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_binding structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and acquired, looks-up the AGP memory
++ * entry and passes it to the unbind_agp() function.
++ */
++int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
++{
++      struct drm_agp_mem *entry;
++      int ret;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
++              return -EINVAL;
++      if (!entry->bound)
++              return -EINVAL;
++      ret = drm_unbind_agp(entry->memory);
++      if (ret == 0)
++              entry->bound = 0;
++      return ret;
++}
++EXPORT_SYMBOL(drm_agp_unbind);
++
++
++int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      struct drm_agp_binding *request = data;
++
++      return drm_agp_unbind(dev, request);
++}
++
++
++/**
++ * Bind AGP memory into the GATT (ioctl)
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_binding structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and has been acquired and that no memory
++ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
++ * it to bind_agp() function.
++ */
++int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
++{
++      struct drm_agp_mem *entry;
++      int retcode;
++      int page;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
++              return -EINVAL;
++      if (entry->bound)
++              return -EINVAL;
++      page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
++      if ((retcode = drm_bind_agp(entry->memory, page)))
++              return retcode;
++      entry->bound = dev->agp->base + (page << PAGE_SHIFT);
++      DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
++                dev->agp->base, entry->bound);
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_bind);
++
++
++int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_agp_binding *request = data;
++
++      return drm_agp_bind(dev, request);
++}
++
++
++/**
++ * Free AGP memory (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_buffer structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and has been acquired and looks up the
++ * AGP memory entry. If the memory it's currently bound, unbind it via
++ * unbind_agp(). Frees it via free_agp() as well as the entry itself
++ * and unlinks from the doubly linked list it's inserted in.
++ */
++int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
++{
++      struct drm_agp_mem *entry;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
++              return -EINVAL;
++      if (entry->bound)
++              drm_unbind_agp(entry->memory);
++
++      list_del(&entry->head);
++
++      drm_free_agp(entry->memory, entry->pages);
++      drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_free);
++
++
++
++int drm_agp_free_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_agp_buffer *request = data;
++
++      return drm_agp_free(dev, request);
++}
++
++
++/**
++ * Initialize the AGP resources.
++ *
++ * \return pointer to a drm_agp_head structure.
++ *
++ * Gets the drm_agp_t structure which is made available by the agpgart module
++ * via the inter_module_* functions. Creates and initializes a drm_agp_head
++ * structure.
++ */
++struct drm_agp_head *drm_agp_init(struct drm_device *dev)
++{
++      struct drm_agp_head *head = NULL;
++
++      if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
++              return NULL;
++      memset((void *)head, 0, sizeof(*head));
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      agp_copy_info(&head->agp_info);
++#else
++      head->bridge = agp_find_bridge(dev->pdev);
++      if (!head->bridge) {
++              if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
++                      drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
++                      return NULL;
++              }
++              agp_copy_info(head->bridge, &head->agp_info);
++              agp_backend_release(head->bridge);
++      } else {
++              agp_copy_info(head->bridge, &head->agp_info);
++      }
++#endif
++      if (head->agp_info.chipset == NOT_SUPPORTED) {
++              drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
++              return NULL;
++      }
++      INIT_LIST_HEAD(&head->memory);
++      head->cant_use_aperture = head->agp_info.cant_use_aperture;
++      head->page_mask = head->agp_info.page_mask;
++      head->base = head->agp_info.aper_base;
++      return head;
++}
++
++/** Calls agp_allocate_memory() */
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type)
++{
++      return agp_allocate_memory(pages, type);
++}
++#else
++DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
++                                   size_t pages, u32 type)
++{
++      return agp_allocate_memory(bridge, pages, type);
++}
++#endif
++
++/** Calls agp_free_memory() */
++int drm_agp_free_memory(DRM_AGP_MEM * handle)
++{
++      if (!handle)
++              return 0;
++      agp_free_memory(handle);
++      return 1;
++}
++
++/** Calls agp_bind_memory() */
++int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
++{
++      if (!handle)
++              return -EINVAL;
++      return agp_bind_memory(handle, start);
++}
++EXPORT_SYMBOL(drm_agp_bind_memory);
++
++/** Calls agp_unbind_memory() */
++int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
++{
++      if (!handle)
++              return -EINVAL;
++      return agp_unbind_memory(handle);
++}
++
++/**
++ * Binds a collection of pages into AGP memory at the given offset, returning
++ * the AGP memory structure containing them.
++ *
++ * No reference is held on the pages during this time -- it is up to the
++ * caller to handle that.
++ */
++DRM_AGP_MEM *
++drm_agp_bind_pages(struct drm_device *dev,
++                 struct page **pages,
++                 unsigned long num_pages,
++                 uint32_t gtt_offset)
++{
++      DRM_AGP_MEM *mem;
++      int ret, i;
++
++      DRM_DEBUG("drm_agp_populate_ttm\n");
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
++#else
++      mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
++                                    AGP_USER_MEMORY);
++#endif
++      if (mem == NULL) {
++              DRM_ERROR("Failed to allocate memory for %ld pages\n",
++                        num_pages);
++              return NULL;
++      }
++
++      for (i = 0; i < num_pages; i++)
++              mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
++      mem->page_count = num_pages;
++
++      mem->is_flushed = true;
++      ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
++      if (ret != 0) {
++              DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
++              agp_free_memory(mem);
++              return NULL;
++      }
++
++      return mem;
++}
++EXPORT_SYMBOL(drm_agp_bind_pages);
++
++/*
++ * AGP ttm backend interface.
++ */
++
++#ifndef AGP_USER_TYPES
++#define AGP_USER_TYPES (1 << 16)
++#define AGP_USER_MEMORY (AGP_USER_TYPES)
++#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
++#endif
++#define AGP_REQUIRED_MAJOR 0
++#define AGP_REQUIRED_MINOR 102
++
++static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
++{
++      return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
++}
++
++
++static int drm_agp_populate(struct drm_ttm_backend *backend,
++                          unsigned long num_pages, struct page **pages,
++                          struct page *dummy_read_page)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++      struct page **cur_page, **last_page = pages + num_pages;
++      DRM_AGP_MEM *mem;
++      int dummy_page_count = 0;
++
++      if (drm_alloc_memctl(num_pages * sizeof(void *)))
++              return -1;
++
++      DRM_DEBUG("drm_agp_populate_ttm\n");
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
++#else
++      mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++#endif
++      if (!mem) {
++              drm_free_memctl(num_pages * sizeof(void *));
++              return -1;
++      }
++
++      DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
++      mem->page_count = 0;
++      for (cur_page = pages; cur_page < last_page; ++cur_page) {
++              struct page *page = *cur_page;
++              if (!page) {
++                      page = dummy_read_page;
++                      ++dummy_page_count;
++              }
++              mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
++      }
++      if (dummy_page_count)
++              DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
++      agp_be->mem = mem;
++      return 0;
++}
++
++static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
++                          struct drm_bo_mem_reg *bo_mem)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++      DRM_AGP_MEM *mem = agp_be->mem;
++      int ret;
++      int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
++
++      DRM_DEBUG("drm_agp_bind_ttm\n");
++      mem->is_flushed = true;
++      mem->type = AGP_USER_MEMORY;
++      /* CACHED MAPPED implies not snooped memory */
++      if (snooped)
++              mem->type = AGP_USER_CACHED_MEMORY;
++
++      ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
++      if (ret)
++              DRM_ERROR("AGP Bind memory failed\n");
++
++      DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
++                      DRM_BE_FLAG_BOUND_CACHED : 0,
++                      DRM_BE_FLAG_BOUND_CACHED);
++      return ret;
++}
++
++static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++
++      DRM_DEBUG("drm_agp_unbind_ttm\n");
++      if (agp_be->mem->is_bound)
++              return drm_agp_unbind_memory(agp_be->mem);
++      else
++              return 0;
++}
++
++static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++      DRM_AGP_MEM *mem = agp_be->mem;
++
++      DRM_DEBUG("drm_agp_clear_ttm\n");
++      if (mem) {
++              unsigned long num_pages = mem->page_count;
++              backend->func->unbind(backend);
++              agp_free_memory(mem);
++              drm_free_memctl(num_pages * sizeof(void *));
++      }
++      agp_be->mem = NULL;
++}
++
++static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
++{
++      struct drm_agp_ttm_backend *agp_be;
++
++      if (backend) {
++              DRM_DEBUG("drm_agp_destroy_ttm\n");
++              agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
++              if (agp_be) {
++                      if (agp_be->mem)
++                              backend->func->clear(backend);
++                      drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
++              }
++      }
++}
++
++static struct drm_ttm_backend_func agp_ttm_backend = {
++      .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
++      .populate = drm_agp_populate,
++      .clear = drm_agp_clear_ttm,
++      .bind = drm_agp_bind_ttm,
++      .unbind = drm_agp_unbind_ttm,
++      .destroy =  drm_agp_destroy_ttm,
++};
++
++struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
++{
++
++      struct drm_agp_ttm_backend *agp_be;
++      struct agp_kern_info *info;
++
++      if (!dev->agp) {
++              DRM_ERROR("AGP is not initialized.\n");
++              return NULL;
++      }
++      info = &dev->agp->agp_info;
++
++      if (info->version.major != AGP_REQUIRED_MAJOR ||
++          info->version.minor < AGP_REQUIRED_MINOR) {
++              DRM_ERROR("Wrong agpgart version %d.%d\n"
++                        "\tYou need at least version %d.%d.\n",
++                        info->version.major,
++                        info->version.minor,
++                        AGP_REQUIRED_MAJOR,
++                        AGP_REQUIRED_MINOR);
++              return NULL;
++      }
++
++
++      agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
++      if (!agp_be)
++              return NULL;
++
++      agp_be->mem = NULL;
++
++      agp_be->bridge = dev->agp->bridge;
++      agp_be->populated = false;
++      agp_be->backend.func = &agp_ttm_backend;
++      agp_be->backend.dev = dev;
++
++      return &agp_be->backend;
++}
++EXPORT_SYMBOL(drm_agp_init_ttm);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
++void drm_agp_chipset_flush(struct drm_device *dev)
++{
++      agp_flush_chipset(dev->agp->bridge);
++}
++EXPORT_SYMBOL(drm_agp_chipset_flush);
++#endif
++
++#endif                                /* __OS_HAS_AGP */
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_auth.c git-nokia/drivers/gpu/drm-tungsten/drm_auth.c
+--- git/drivers/gpu/drm-tungsten/drm_auth.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_auth.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,189 @@
++/**
++ * \file drm_auth.c
++ * IOCTLs for authentication
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++/**
++ * Find the file with the given magic number.
++ *
++ * \param dev DRM device.
++ * \param magic magic number.
++ *
++ * Searches in drm_device::magiclist within all files with the same hash key
++ * the one with matching magic number, while holding the drm_device::struct_mutex
++ * lock.
++ */
++static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
++{
++      struct drm_file *retval = NULL;
++      struct drm_magic_entry *pt;
++      struct drm_hash_item *hash;
++
++      mutex_lock(&dev->struct_mutex);
++      if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
++              pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
++              retval = pt->priv;
++      }
++      mutex_unlock(&dev->struct_mutex);
++      return retval;
++}
++
++/**
++ * Adds a magic number.
++ *
++ * \param dev DRM device.
++ * \param priv file private data.
++ * \param magic magic number.
++ *
++ * Creates a drm_magic_entry structure and appends to the linked list
++ * associated the magic number hash key in drm_device::magiclist, while holding
++ * the drm_device::struct_mutex lock.
++ */
++static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
++                       drm_magic_t magic)
++{
++      struct drm_magic_entry *entry;
++
++      DRM_DEBUG("%d\n", magic);
++
++      entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
++      if (!entry)
++              return -ENOMEM;
++      memset(entry, 0, sizeof(*entry));
++      entry->priv = priv;
++      entry->hash_item.key = (unsigned long)magic;
++      mutex_lock(&dev->struct_mutex);
++      drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
++      list_add_tail(&entry->head, &dev->magicfree);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Remove a magic number.
++ *
++ * \param dev DRM device.
++ * \param magic magic number.
++ *
++ * Searches and unlinks the entry in drm_device::magiclist with the magic
++ * number hash key, while holding the drm_device::struct_mutex lock.
++ */
++static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
++{
++      struct drm_magic_entry *pt;
++      struct drm_hash_item *hash;
++
++      DRM_DEBUG("%d\n", magic);
++
++      mutex_lock(&dev->struct_mutex);
++      if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++      pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
++      drm_ht_remove_item(&dev->magiclist, hash);
++      list_del(&pt->head);
++      mutex_unlock(&dev->struct_mutex);
++
++      drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
++
++      return 0;
++}
++
++/**
++ * Get a unique magic number (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a resulting drm_auth structure.
++ * \return zero on success, or a negative number on failure.
++ *
++ * If there is a magic number in drm_file::magic then use it, otherwise
++ * searches an unique non-zero magic number and add it associating it with \p
++ * file_priv.
++ */
++int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      static drm_magic_t sequence = 0;
++      static DEFINE_SPINLOCK(lock);
++      struct drm_auth *auth = data;
++
++      /* Find unique magic */
++      if (file_priv->magic) {
++              auth->magic = file_priv->magic;
++      } else {
++              do {
++                      spin_lock(&lock);
++                      if (!sequence)
++                              ++sequence;     /* reserve 0 */
++                      auth->magic = sequence++;
++                      spin_unlock(&lock);
++              } while (drm_find_file(dev, auth->magic));
++              file_priv->magic = auth->magic;
++              drm_add_magic(dev, file_priv, auth->magic);
++      }
++
++      DRM_DEBUG("%u\n", auth->magic);
++
++      return 0;
++}
++
++/**
++ * Authenticate with a magic.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_auth structure.
++ * \return zero if authentication successed, or a negative number otherwise.
++ *
++ * Checks if \p file_priv is associated with the magic number passed in \arg.
++ */
++int drm_authmagic(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_auth *auth = data;
++      struct drm_file *file;
++
++      DRM_DEBUG("%u\n", auth->magic);
++      if ((file = drm_find_file(dev, auth->magic))) {
++              file->authenticated = 1;
++              drm_remove_magic(dev, auth->magic);
++              return 0;
++      }
++      return -EINVAL;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo.c git-nokia/drivers/gpu/drm-tungsten/drm_bo.c
+--- git/drivers/gpu/drm-tungsten/drm_bo.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2796 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++/*
++ * Locking may look a bit complicated but isn't really:
++ *
++ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
++ * when there is a chance that it can be zero before or after the operation.
++ *
++ * dev->struct_mutex also protects all lists and list heads,
++ * Hash tables and hash heads.
++ *
++ * bo->mutex protects the buffer object itself excluding the usage field.
++ * bo->mutex does also protect the buffer list heads, so to manipulate those,
++ * we need both the bo->mutex and the dev->struct_mutex.
++ *
++ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
++ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
++ * the list traversal will, in general, need to be restarted.
++ *
++ */
++
++static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
++static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
++
++static inline uint64_t drm_bo_type_flags(unsigned type)
++{
++      return (1ULL << (24 + type));
++}
++
++/*
++ * bo locked. dev->struct_mutex locked.
++ */
++
++void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
++{
++      struct drm_mem_type_manager *man;
++
++      DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
++      DRM_ASSERT_LOCKED(&bo->mutex);
++
++      man = &bo->dev->bm.man[bo->pinned_mem_type];
++      list_add_tail(&bo->pinned_lru, &man->pinned);
++}
++
++void drm_bo_add_to_lru(struct drm_buffer_object *bo)
++{
++      struct drm_mem_type_manager *man;
++
++      DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
++
++      if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
++          || bo->mem.mem_type != bo->pinned_mem_type) {
++              man = &bo->dev->bm.man[bo->mem.mem_type];
++              list_add_tail(&bo->lru, &man->lru);
++      } else {
++              INIT_LIST_HEAD(&bo->lru);
++      }
++}
++
++static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
++{
++#ifdef DRM_ODD_MM_COMPAT
++      int ret;
++
++      if (!bo->map_list.map)
++              return 0;
++
++      ret = drm_bo_lock_kmm(bo);
++      if (ret)
++              return ret;
++      drm_bo_unmap_virtual(bo);
++      if (old_is_pci)
++              drm_bo_finish_unmap(bo);
++#else
++      if (!bo->map_list.map)
++              return 0;
++
++      drm_bo_unmap_virtual(bo);
++#endif
++      return 0;
++}
++
++static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
++{
++#ifdef DRM_ODD_MM_COMPAT
++      int ret;
++
++      if (!bo->map_list.map)
++              return;
++
++      ret = drm_bo_remap_bound(bo);
++      if (ret) {
++              DRM_ERROR("Failed to remap a bound buffer object.\n"
++                        "\tThis might cause a sigbus later.\n");
++      }
++      drm_bo_unlock_kmm(bo);
++#endif
++}
++
++/*
++ * Call bo->mutex locked.
++ */
++
++static int drm_bo_add_ttm(struct drm_buffer_object *bo)
++{
++      struct drm_device *dev = bo->dev;
++      int ret = 0;
++      uint32_t page_flags = 0;
++
++      DRM_ASSERT_LOCKED(&bo->mutex);
++      bo->ttm = NULL;
++
++      if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
++              page_flags |= DRM_TTM_PAGE_WRITE;
++
++      switch (bo->type) {
++      case drm_bo_type_device:
++      case drm_bo_type_kernel:
++              bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
++                                       page_flags, dev->bm.dummy_read_page);
++              if (!bo->ttm)
++                      ret = -ENOMEM;
++              break;
++      case drm_bo_type_user:
++              bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
++                                       page_flags | DRM_TTM_PAGE_USER,
++                                       dev->bm.dummy_read_page);
++              if (!bo->ttm)
++                      ret = -ENOMEM;
++
++              ret = drm_ttm_set_user(bo->ttm, current,
++                                     bo->buffer_start,
++                                     bo->num_pages);
++              if (ret)
++                      return ret;
++
++              break;
++      default:
++              DRM_ERROR("Illegal buffer object type\n");
++              ret = -EINVAL;
++              break;
++      }
++
++      return ret;
++}
++
++static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
++                                struct drm_bo_mem_reg *mem,
++                                int evict, int no_wait)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
++      int new_is_pci = drm_mem_reg_is_pci(dev, mem);
++      struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
++      struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
++      int ret = 0;
++
++      if (old_is_pci || new_is_pci ||
++          ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
++              ret = drm_bo_vm_pre_move(bo, old_is_pci);
++      if (ret)
++              return ret;
++
++      /*
++       * Create and bind a ttm if required.
++       */
++
++      if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
++              ret = drm_bo_add_ttm(bo);
++              if (ret)
++                      goto out_err;
++
++              if (mem->mem_type != DRM_BO_MEM_LOCAL) {
++                      ret = drm_ttm_bind(bo->ttm, mem);
++                      if (ret)
++                              goto out_err;
++              }
++
++              if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
++                      
++                      struct drm_bo_mem_reg *old_mem = &bo->mem;
++                      uint64_t save_flags = old_mem->flags;
++                      uint64_t save_proposed_flags = old_mem->proposed_flags;
++                      
++                      *old_mem = *mem;
++                      mem->mm_node = NULL;
++                      old_mem->proposed_flags = save_proposed_flags;
++                      DRM_FLAG_MASKED(save_flags, mem->flags,
++                                      DRM_BO_MASK_MEMTYPE);
++                      goto moved;
++              }
++              
++      }
++
++      if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
++          !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
++              ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
++      else if (dev->driver->bo_driver->move) 
++              ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
++      else
++              ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
++
++      if (ret)
++              goto out_err;
++
++moved:
++      if (old_is_pci || new_is_pci)
++              drm_bo_vm_post_move(bo);
++
++      if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
++              ret =
++                  dev->driver->bo_driver->invalidate_caches(dev,
++                                                            bo->mem.flags);
++              if (ret)
++                      DRM_ERROR("Can not flush read caches\n");
++      }
++
++      DRM_FLAG_MASKED(bo->priv_flags,
++                      (evict) ? _DRM_BO_FLAG_EVICTED : 0,
++                      _DRM_BO_FLAG_EVICTED);
++
++      if (bo->mem.mm_node)
++              bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
++                      bm->man[bo->mem.mem_type].gpu_offset;
++
++
++      return 0;
++
++out_err:
++      if (old_is_pci || new_is_pci)
++              drm_bo_vm_post_move(bo);
++
++      new_man = &bm->man[bo->mem.mem_type];
++      if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
++              drm_ttm_unbind(bo->ttm);
++              drm_ttm_destroy(bo->ttm);
++              bo->ttm = NULL;
++      }
++
++      return ret;
++}
++
++/*
++ * Call bo->mutex locked.
++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
++ */
++
++static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
++{
++      struct drm_fence_object *fence = bo->fence;
++
++      if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++              return -EBUSY;
++
++      if (fence) {
++              if (drm_fence_object_signaled(fence, bo->fence_type)) {
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++                      return 0;
++              }
++              drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
++              if (drm_fence_object_signaled(fence, bo->fence_type)) {
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++                      return 0;
++              }
++              return -EBUSY;
++      }
++      return 0;
++}
++
++static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
++{
++      int ret;
++
++      mutex_lock(&bo->mutex);
++      ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
++      mutex_unlock(&bo->mutex);
++      return ret;
++}
++
++
++/*
++ * Call bo->mutex locked.
++ * Wait until the buffer is idle.
++ */
++
++int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
++              int no_wait, int check_unfenced)
++{
++      int ret;
++
++      DRM_ASSERT_LOCKED(&bo->mutex);
++      while(unlikely(drm_bo_busy(bo, check_unfenced))) {
++              if (no_wait)
++                      return -EBUSY;
++
++              if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
++                      mutex_unlock(&bo->mutex);
++                      wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
++                      mutex_lock(&bo->mutex);
++                      bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++              }
++
++              if (bo->fence) {
++                      struct drm_fence_object *fence;
++                      uint32_t fence_type = bo->fence_type;
++
++                      drm_fence_reference_unlocked(&fence, bo->fence);
++                      mutex_unlock(&bo->mutex);
++
++                      ret = drm_fence_object_wait(fence, lazy, !interruptible,
++                                                  fence_type);
++
++                      drm_fence_usage_deref_unlocked(&fence);
++                      mutex_lock(&bo->mutex);
++                      bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++                      if (ret)
++                              return ret;
++              }
++
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_wait);
++
++static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      if (bo->fence) {
++              if (bm->nice_mode) {
++                      unsigned long _end = jiffies + 3 * DRM_HZ;
++                      int ret;
++                      do {
++                              ret = drm_bo_wait(bo, 0, 0, 0, 0);
++                              if (ret && allow_errors)
++                                      return ret;
++
++                      } while (ret && !time_after_eq(jiffies, _end));
++
++                      if (bo->fence) {
++                              bm->nice_mode = 0;
++                              DRM_ERROR("Detected GPU lockup or "
++                                        "fence driver was taken down. "
++                                        "Evicting buffer.\n");
++                      }
++              }
++              if (bo->fence)
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++      }
++      return 0;
++}
++
++/*
++ * Call dev->struct_mutex locked.
++ * Attempts to remove all private references to a buffer by expiring its
++ * fence object and removing from lru lists and memory managers.
++ */
++
++static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      atomic_inc(&bo->usage);
++      mutex_unlock(&dev->struct_mutex);
++      mutex_lock(&bo->mutex);
++
++      DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
++
++      if (bo->fence && drm_fence_object_signaled(bo->fence,
++                                                 bo->fence_type))
++              drm_fence_usage_deref_unlocked(&bo->fence);
++
++      if (bo->fence && remove_all)
++              (void)drm_bo_expire_fence(bo, 0);
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (!atomic_dec_and_test(&bo->usage))
++              goto out;
++
++      if (!bo->fence) {
++              list_del_init(&bo->lru);
++              if (bo->mem.mm_node) {
++                      drm_mm_put_block(bo->mem.mm_node);
++                      if (bo->pinned_node == bo->mem.mm_node)
++                              bo->pinned_node = NULL;
++                      bo->mem.mm_node = NULL;
++              }
++              list_del_init(&bo->pinned_lru);
++              if (bo->pinned_node) {
++                      drm_mm_put_block(bo->pinned_node);
++                      bo->pinned_node = NULL;
++              }
++              list_del_init(&bo->ddestroy);
++              mutex_unlock(&bo->mutex);
++              drm_bo_destroy_locked(bo);
++              return;
++      }
++
++      if (list_empty(&bo->ddestroy)) {
++              drm_fence_object_flush(bo->fence, bo->fence_type);
++              list_add_tail(&bo->ddestroy, &bm->ddestroy);
++              schedule_delayed_work(&bm->wq,
++                                    ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
++      }
++
++out:
++      mutex_unlock(&bo->mutex);
++      return;
++}
++
++/*
++ * Verify that refcount is 0 and that there are no internal references
++ * to the buffer object. Then destroy it.
++ */
++
++static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
++          list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
++          list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
++              if (bo->fence != NULL) {
++                      DRM_ERROR("Fence was non-zero.\n");
++                      drm_bo_cleanup_refs(bo, 0);
++                      return;
++              }
++
++#ifdef DRM_ODD_MM_COMPAT
++              BUG_ON(!list_empty(&bo->vma_list));
++              BUG_ON(!list_empty(&bo->p_mm_list));
++#endif
++
++              if (bo->ttm) {
++                      drm_ttm_unbind(bo->ttm);
++                      drm_ttm_destroy(bo->ttm);
++                      bo->ttm = NULL;
++              }
++
++              atomic_dec(&bm->count);
++
++              drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
++
++              return;
++      }
++
++      /*
++       * Some stuff is still trying to reference the buffer object.
++       * Get rid of those references.
++       */
++
++      drm_bo_cleanup_refs(bo, 0);
++
++      return;
++}
++
++/*
++ * Call dev->struct_mutex locked.
++ */
++
++static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      struct drm_buffer_object *entry, *nentry;
++      struct list_head *list, *next;
++
++      list_for_each_safe(list, next, &bm->ddestroy) {
++              entry = list_entry(list, struct drm_buffer_object, ddestroy);
++
++              nentry = NULL;
++              if (next != &bm->ddestroy) {
++                      nentry = list_entry(next, struct drm_buffer_object,
++                                          ddestroy);
++                      atomic_inc(&nentry->usage);
++              }
++
++              drm_bo_cleanup_refs(entry, remove_all);
++
++              if (nentry)
++                      atomic_dec(&nentry->usage);
++      }
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++static void drm_bo_delayed_workqueue(void *data)
++#else
++static void drm_bo_delayed_workqueue(struct work_struct *work)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      struct drm_device *dev = (struct drm_device *) data;
++      struct drm_buffer_manager *bm = &dev->bm;
++#else
++      struct drm_buffer_manager *bm =
++          container_of(work, struct drm_buffer_manager, wq.work);
++      struct drm_device *dev = container_of(bm, struct drm_device, bm);
++#endif
++
++      DRM_DEBUG("Delayed delete Worker\n");
++
++      mutex_lock(&dev->struct_mutex);
++      if (!bm->initialized) {
++              mutex_unlock(&dev->struct_mutex);
++              return;
++      }
++      drm_bo_delayed_delete(dev, 0);
++      if (bm->initialized && !list_empty(&bm->ddestroy)) {
++              schedule_delayed_work(&bm->wq,
++                                    ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++
++void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
++{
++      struct drm_buffer_object *tmp_bo = *bo;
++      bo = NULL;
++
++      DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
++
++      if (atomic_dec_and_test(&tmp_bo->usage))
++              drm_bo_destroy_locked(tmp_bo);
++}
++EXPORT_SYMBOL(drm_bo_usage_deref_locked);
++
++static void drm_bo_base_deref_locked(struct drm_file *file_priv,
++                                   struct drm_user_object *uo)
++{
++      struct drm_buffer_object *bo =
++          drm_user_object_entry(uo, struct drm_buffer_object, base);
++
++      DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
++
++      drm_bo_takedown_vm_locked(bo);
++      drm_bo_usage_deref_locked(&bo);
++}
++
++void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
++{
++      struct drm_buffer_object *tmp_bo = *bo;
++      struct drm_device *dev = tmp_bo->dev;
++
++      *bo = NULL;
++      if (atomic_dec_and_test(&tmp_bo->usage)) {
++              mutex_lock(&dev->struct_mutex);
++              if (atomic_read(&tmp_bo->usage) == 0)
++                      drm_bo_destroy_locked(tmp_bo);
++              mutex_unlock(&dev->struct_mutex);
++      }
++}
++EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
++
++void drm_putback_buffer_objects(struct drm_device *dev)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct list_head *list = &bm->unfenced;
++      struct drm_buffer_object *entry, *next;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry_safe(entry, next, list, lru) {
++              atomic_inc(&entry->usage);
++              mutex_unlock(&dev->struct_mutex);
++
++              mutex_lock(&entry->mutex);
++              BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
++              mutex_lock(&dev->struct_mutex);
++
++              list_del_init(&entry->lru);
++              DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
++              wake_up_all(&entry->event_queue);
++
++              /*
++               * FIXME: Might want to put back on head of list
++               * instead of tail here.
++               */
++
++              drm_bo_add_to_lru(entry);
++              mutex_unlock(&entry->mutex);
++              drm_bo_usage_deref_locked(&entry);
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++EXPORT_SYMBOL(drm_putback_buffer_objects);
++
++
++/*
++ * Note. The caller has to register (if applicable)
++ * and deregister fence object usage.
++ */
++
++int drm_fence_buffer_objects(struct drm_device *dev,
++                           struct list_head *list,
++                           uint32_t fence_flags,
++                           struct drm_fence_object *fence,
++                           struct drm_fence_object **used_fence)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_buffer_object *entry;
++      uint32_t fence_type = 0;
++      uint32_t fence_class = ~0;
++      int count = 0;
++      int ret = 0;
++      struct list_head *l;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (!list)
++              list = &bm->unfenced;
++
++      if (fence)
++              fence_class = fence->fence_class;
++
++      list_for_each_entry(entry, list, lru) {
++              BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
++              fence_type |= entry->new_fence_type;
++              if (fence_class == ~0)
++                      fence_class = entry->new_fence_class;
++              else if (entry->new_fence_class != fence_class) {
++                      DRM_ERROR("Unmatching fence classes on unfenced list: "
++                                "%d and %d.\n",
++                                fence_class,
++                                entry->new_fence_class);
++                      ret = -EINVAL;
++                      goto out;
++              }
++              count++;
++      }
++
++      if (!count) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      if (fence) {
++              if ((fence_type & fence->type) != fence_type ||
++                  (fence->fence_class != fence_class)) {
++                      DRM_ERROR("Given fence doesn't match buffers "
++                                "on unfenced list.\n");
++                      ret = -EINVAL;
++                      goto out;
++              }
++      } else {
++              mutex_unlock(&dev->struct_mutex);
++              ret = drm_fence_object_create(dev, fence_class, fence_type,
++                                            fence_flags | DRM_FENCE_FLAG_EMIT,
++                                            &fence);
++              mutex_lock(&dev->struct_mutex);
++              if (ret)
++                      goto out;
++      }
++
++      count = 0;
++      l = list->next;
++      while (l != list) {
++              prefetch(l->next);
++              entry = list_entry(l, struct drm_buffer_object, lru);
++              atomic_inc(&entry->usage);
++              mutex_unlock(&dev->struct_mutex);
++              mutex_lock(&entry->mutex);
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(l);
++              if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
++                      count++;
++                      if (entry->fence)
++                              drm_fence_usage_deref_locked(&entry->fence);
++                      entry->fence = drm_fence_reference_locked(fence);
++                      entry->fence_class = entry->new_fence_class;
++                      entry->fence_type = entry->new_fence_type;
++                      DRM_FLAG_MASKED(entry->priv_flags, 0,
++                                      _DRM_BO_FLAG_UNFENCED);
++                      wake_up_all(&entry->event_queue);
++                      drm_bo_add_to_lru(entry);
++              }
++              mutex_unlock(&entry->mutex);
++              drm_bo_usage_deref_locked(&entry);
++              l = list->next;
++      }
++      DRM_DEBUG("Fenced %d buffers\n", count);
++out:
++      mutex_unlock(&dev->struct_mutex);
++      *used_fence = fence;
++      return ret;
++}
++EXPORT_SYMBOL(drm_fence_buffer_objects);
++
++/*
++ * Evict a buffer object out of memory type @mem_type so its space can
++ * be reused.  Called with bo->mutex locked; takes and releases
++ * dev->struct_mutex internally.
++ */
++
++static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
++                      int no_wait)
++{
++      int ret = 0;
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg evict_mem;
++
++      /*
++       * Someone might have modified the buffer before we took the
++       * buffer mutex.
++       */
++
++      do {
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++              /* Pinned, unfenced, or already-moved buffers are not
++               * eviction candidates. */
++              if (unlikely(bo->mem.flags &
++                           (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
++                      goto out_unlock;
++              if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++                      goto out_unlock;
++              if (unlikely(bo->mem.mem_type != mem_type))
++                      goto out_unlock;
++              ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
++              if (ret)
++                      goto out_unlock;
++
++      } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++
++      evict_mem = bo->mem;
++      evict_mem.mm_node = NULL;
++
++      /* NOTE(review): this second copy overwrites the mm_node = NULL
++       * set just above with bo->mem.mm_node again; looks redundant —
++       * confirm against the out: cleanup below, which tests
++       * evict_mem.mm_node. */
++      evict_mem = bo->mem;
++      evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
++
++      mutex_lock(&dev->struct_mutex);
++      list_del_init(&bo->lru);
++      mutex_unlock(&dev->struct_mutex);
++
++      ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
++
++      if (ret) {
++              if (ret != -EAGAIN)
++                      DRM_ERROR("Failed to find memory space for "
++                                "buffer 0x%p eviction.\n", bo);
++              goto out;
++      }
++
++      ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
++
++      if (ret) {
++              if (ret != -EAGAIN)
++                      DRM_ERROR("Buffer eviction failed\n");
++              goto out;
++      }
++
++      /* Record that this buffer has been evicted. */
++      DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
++                      _DRM_BO_FLAG_EVICTED);
++
++out:
++      mutex_lock(&dev->struct_mutex);
++      if (evict_mem.mm_node) {
++              /* Never free the pinned node here; it is managed by the
++               * pin/unpin paths in validate. */
++              if (evict_mem.mm_node != bo->pinned_node)
++                      drm_mm_put_block(evict_mem.mm_node);
++              evict_mem.mm_node = NULL;
++      }
++      drm_bo_add_to_lru(bo);
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++out_unlock:
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ *
++ * On success, fills in mem->mm_node and mem->mem_type and returns 0.
++ * Returns -ENOMEM when the type is exhausted, or the error from
++ * drm_bo_evict().
++ */
++static int drm_bo_mem_force_space(struct drm_device *dev,
++                                struct drm_bo_mem_reg *mem,
++                                uint32_t mem_type, int no_wait)
++{
++      struct drm_mm_node *node;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_buffer_object *entry;
++      struct drm_mem_type_manager *man = &bm->man[mem_type];
++      struct list_head *lru;
++      unsigned long num_pages = mem->num_pages;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      do {
++              node = drm_mm_search_free(&man->manager, num_pages,
++                                        mem->page_alignment, 1);
++              if (node)
++                      break;
++
++              /* lru->next == lru means the LRU list is empty: nothing
++               * left to evict. */
++              lru = &man->lru;
++              if (lru->next == lru)
++                      break;
++
++              /* Hold a usage reference across the unlock so the entry
++               * cannot be destroyed while we evict it. */
++              entry = list_entry(lru->next, struct drm_buffer_object, lru);
++              atomic_inc(&entry->usage);
++              mutex_unlock(&dev->struct_mutex);
++              mutex_lock(&entry->mutex);
++              ret = drm_bo_evict(entry, mem_type, no_wait);
++              mutex_unlock(&entry->mutex);
++              drm_bo_usage_deref_unlocked(&entry);
++              if (ret)
++                      return ret;
++              mutex_lock(&dev->struct_mutex);
++      } while (1);
++
++      if (!node) {
++              mutex_unlock(&dev->struct_mutex);
++              return -ENOMEM;
++      }
++
++      node = drm_mm_get_block(node, num_pages, mem->page_alignment);
++      if (unlikely(!node)) {
++              mutex_unlock(&dev->struct_mutex);
++              return -ENOMEM;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++      mem->mm_node = node;
++      mem->mem_type = mem_type;
++      return 0;
++}
++
++/*
++ * Check whether memory manager @man (for type @mem_type) can satisfy the
++ * placement mask @mask.  Returns 1 if compatible and stores the resulting
++ * placement flags in *res_mask; returns 0 otherwise.  @disallow_fixed
++ * rejects fixed (non-pageable) memory types, used for user buffers.
++ */
++static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
++                              int disallow_fixed,
++                              uint32_t mem_type,
++                              uint64_t mask, uint32_t *res_mask)
++{
++      uint64_t cur_flags = drm_bo_type_flags(mem_type);
++      uint64_t flag_diff;
++
++      if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
++              return 0;
++      if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
++              cur_flags |= DRM_BO_FLAG_CACHED;
++      if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
++              cur_flags |= DRM_BO_FLAG_MAPPABLE;
++      /* Caching selectable by the caller: copy the CACHED bit from the
++       * requested mask into the result. */
++      if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
++              DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
++
++      /* The caller must have asked for this memory type at all. */
++      if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
++              return 0;
++
++      /* Local (system) memory is always acceptable once the type bit
++       * matched; no further flag checks apply. */
++      if (mem_type == DRM_BO_MEM_LOCAL) {
++              *res_mask = cur_flags;
++              return 1;
++      }
++
++      flag_diff = (mask ^ cur_flags);
++      if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
++              cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
++
++      /* A caching mismatch is fatal only when caching was explicitly
++       * required or forced. */
++      if ((flag_diff & DRM_BO_FLAG_CACHED) &&
++          (!(mask & DRM_BO_FLAG_CACHED) ||
++           (mask & DRM_BO_FLAG_FORCE_CACHING)))
++              return 0;
++
++      if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
++          ((mask & DRM_BO_FLAG_MAPPABLE) ||
++           (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
++              return 0;
++
++      *res_mask = cur_flags;
++      return 1;
++}
++
++/**
++ * Creates space for memory region @mem according to its type.
++ *
++ * This function first searches for free space in compatible memory types in
++ * the priority order defined by the driver.  If free space isn't found, then
++ * drm_bo_mem_force_space is attempted in priority order to evict and find
++ * space.
++ *
++ * Returns 0 on success (mem->mm_node, mem->mem_type and mem->flags
++ * filled in), -EINVAL when no initialized compatible type exists,
++ * -EAGAIN when eviction should be retried, or -ENOMEM.
++ */
++int drm_bo_mem_space(struct drm_buffer_object *bo,
++                   struct drm_bo_mem_reg *mem, int no_wait)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man;
++
++      uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
++      const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
++      uint32_t i;
++      uint32_t mem_type = DRM_BO_MEM_LOCAL;
++      uint32_t cur_flags;
++      int type_found = 0;
++      int type_ok = 0;
++      int has_eagain = 0;
++      struct drm_mm_node *node = NULL;
++      int ret;
++
++      /* First pass: look for free space without evicting anything. */
++      mem->mm_node = NULL;
++      for (i = 0; i < num_prios; ++i) {
++              mem_type = prios[i];
++              man = &bm->man[mem_type];
++
++              /* User buffers may not be placed in fixed memory types. */
++              type_ok = drm_bo_mt_compatible(man,
++                                             bo->type == drm_bo_type_user,
++                                             mem_type, mem->proposed_flags,
++                                             &cur_flags);
++
++              if (!type_ok)
++                      continue;
++
++              /* Local memory needs no drm_mm allocation. */
++              if (mem_type == DRM_BO_MEM_LOCAL)
++                      break;
++
++              /* A pinned buffer can reuse its own pinned node. */
++              if ((mem_type == bo->pinned_mem_type) &&
++                  (bo->pinned_node != NULL)) {
++                      node = bo->pinned_node;
++                      break;
++              }
++
++              mutex_lock(&dev->struct_mutex);
++              if (man->has_type && man->use_type) {
++                      type_found = 1;
++                      node = drm_mm_search_free(&man->manager, mem->num_pages,
++                                                mem->page_alignment, 1);
++                      if (node)
++                              node = drm_mm_get_block(node, mem->num_pages,
++                                                      mem->page_alignment);
++              }
++              mutex_unlock(&dev->struct_mutex);
++              if (node)
++                      break;
++      }
++
++      if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
++              mem->mm_node = node;
++              mem->mem_type = mem_type;
++              mem->flags = cur_flags;
++              return 0;
++      }
++
++      if (!type_found)
++              return -EINVAL;
++
++      /* Second pass: evict from the busy-priority list to make room. */
++      num_prios = dev->driver->bo_driver->num_mem_busy_prio;
++      prios = dev->driver->bo_driver->mem_busy_prio;
++
++      for (i = 0; i < num_prios; ++i) {
++              mem_type = prios[i];
++              man = &bm->man[mem_type];
++
++              if (!man->has_type)
++                      continue;
++
++              if (!drm_bo_mt_compatible(man,
++                                        bo->type == drm_bo_type_user,
++                                        mem_type,
++                                        mem->proposed_flags,
++                                        &cur_flags))
++                      continue;
++
++              ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
++
++              if (ret == 0 && mem->mm_node) {
++                      mem->flags = cur_flags;
++                      return 0;
++              }
++
++              /* Remember transient failures; report -EAGAIN rather than
++               * -ENOMEM if any type could succeed on retry. */
++              if (ret == -EAGAIN)
++                      has_eagain = 1;
++      }
++
++      ret = (has_eagain) ? -EAGAIN : -ENOMEM;
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_mem_space);
++
++/*
++ * drm_bo_modify_proposed_flags:
++ *
++ * @bo: the buffer object getting new flags
++ *
++ * @new_flags: the new set of proposed flag bits
++ *
++ * @new_mask: the mask of bits changed in new_flags
++ *
++ * Modify the proposed_flag bits in @bo after validating the request.
++ * Returns 0 on success, -EINVAL for invalid combinations, or -EPERM
++ * when the caller lacks the privilege for NO_EVICT / attempts NO_MOVE.
++ */
++static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
++                                       uint64_t new_flags, uint64_t new_mask)
++{
++      uint32_t new_access;
++
++      /* Copy unchanging bits from existing proposed_flags */
++      DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
++       
++      if (bo->type == drm_bo_type_user &&
++          ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
++           (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
++              DRM_ERROR("User buffers require cache-coherent memory.\n");
++              return -EINVAL;
++      }
++
++      /* NOTE(review): "priviliged" below is a user-visible typo in the
++       * error message; left untouched here since it is runtime output. */
++      if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
++              DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
++              return -EPERM;
++      }
++
++      /* An unprivileged caller may not move an existing NO_EVICT buffer
++       * to a different memory type; keep the current type bits if they
++       * are still among the requested ones. */
++      if (likely(new_mask & DRM_BO_MASK_MEM) &&
++          (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
++          !DRM_SUSER(DRM_CURPROC)) {
++              if (likely(bo->mem.flags & new_flags & new_mask &
++                         DRM_BO_MASK_MEM))
++                      new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
++                              (bo->mem.flags & DRM_BO_MASK_MEM);
++              else {
++                      DRM_ERROR("Incompatible memory type specification "
++                                "for NO_EVICT buffer.\n");
++                      return -EPERM;
++              }
++      }
++
++      if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
++              DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
++              return -EPERM;
++      }
++
++      /* At least one of read/write/execute must be requested. */
++      new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
++                                DRM_BO_FLAG_READ);
++
++      if (new_access == 0) {
++              DRM_ERROR("Invalid buffer object rwx properties\n");
++              return -EINVAL;
++      }
++
++      bo->mem.proposed_flags = new_flags;
++      return 0;
++}
++
++/*
++ * Look up a buffer object by user handle and take a usage reference on it.
++ * Call with dev->struct_mutex locked.
++ *
++ * Returns NULL when the handle does not name a buffer object, or when
++ * @check_owner is set and the caller is neither the owner nor holds a
++ * _DRM_REF_USE reference on it.
++ */
++
++struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
++                                            uint32_t handle, int check_owner)
++{
++      struct drm_user_object *uo;
++      struct drm_buffer_object *bo;
++
++      uo = drm_lookup_user_object(file_priv, handle);
++
++      if (!uo || (uo->type != drm_buffer_type)) {
++              DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
++              return NULL;
++      }
++
++      if (check_owner && file_priv != uo->owner) {
++              if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
++                      return NULL;
++      }
++
++      bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
++      /* Caller must balance this with drm_bo_usage_deref_*(). */
++      atomic_inc(&bo->usage);
++      return bo;
++}
++EXPORT_SYMBOL(drm_lookup_buffer_object);
++
++/*
++ * Call bo->mutex locked.
++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
++ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
++ *
++ * Side effect: if the fence has already signaled, the buffer's fence
++ * reference is dropped here.
++ */
++
++static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
++{
++      struct drm_fence_object *fence = bo->fence;
++
++      if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++              return -EBUSY;
++
++      if (fence) {
++              if (drm_fence_object_signaled(fence, bo->fence_type)) {
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++                      return 0;
++              }
++              return -EBUSY;
++      }
++      return 0;
++}
++
++/*
++ * Evict the buffer out of DRM_BO_MEM_TT without blocking (no_wait = 1),
++ * but only when it currently holds a memory node.  The buffer must not
++ * be on the unfenced list (BUG_ON).
++ */
++int drm_bo_evict_cached(struct drm_buffer_object *bo)
++{
++      int ret = 0;
++
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
++      if (bo->mem.mm_node)
++              ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
++      return ret;
++}
++
++EXPORT_SYMBOL(drm_bo_evict_cached);
++/*
++ * Wait until a buffer is unmapped (bo->mapped drops to zero).
++ * Called with bo->mutex held; the mutex is dropped while sleeping and
++ * _DRM_BO_FLAG_UNLOCKED is set so callers' retry loops re-validate state.
++ * Returns 0, -EBUSY when @no_wait is set, or -EAGAIN on interruption.
++ */
++
++static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
++{
++      int ret = 0;
++
++      /* NOTE(review): likely() wraps only atomic_read(), not the whole
++       * comparison with 0 — functionally harmless, but the prediction
++       * hint covers the wrong expression. */
++      if (likely(atomic_read(&bo->mapped)) == 0)
++              return 0;
++
++      if (unlikely(no_wait))
++              return -EBUSY;
++
++      do {
++              mutex_unlock(&bo->mutex);
++              ret = wait_event_interruptible(bo->event_queue,
++                                             atomic_read(&bo->mapped) == 0);
++              mutex_lock(&bo->mutex);
++              bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++
++              if (ret == -ERESTARTSYS)
++                      ret = -EAGAIN;
++      } while((ret == 0) && atomic_read(&bo->mapped) > 0);
++
++      return ret;
++}
++
++/*
++ * Fill in the ioctl reply argument with buffer info.
++ * Bo locked.  @rep may be NULL, in which case this is a no-op.
++ */
++
++void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
++                       struct drm_bo_info_rep *rep)
++{
++      if (!rep)
++              return;
++
++      rep->handle = bo->base.hash.key;
++      rep->flags = bo->mem.flags;
++      rep->size = bo->num_pages * PAGE_SIZE;
++      rep->offset = bo->offset;
++
++      /*
++       * drm_bo_type_device buffers have user-visible
++       * handles which can be used to share across
++       * processes. Hand that back to the application
++       */
++      if (bo->type == drm_bo_type_device)
++              rep->arg_handle = bo->map_list.user_token;
++      else
++              rep->arg_handle = 0;
++
++      rep->proposed_flags = bo->mem.proposed_flags;
++      rep->buffer_start = bo->buffer_start;
++      rep->fence_flags = bo->fence_type;
++      rep->rep_flags = 0;
++      rep->page_alignment = bo->mem.page_alignment;
++
++      /* Report busy when unfenced or when a non-signaled fence is still
++       * attached (drm_bo_quick_busy does no fence flushing). */
++      if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
++              DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
++                              DRM_BO_REP_BUSY);
++      }
++}
++EXPORT_SYMBOL(drm_bo_fill_rep_arg);
++
++/*
++ * Wait for buffer idle and register that we've mapped the buffer.
++ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
++ * so that if the client dies, the mapping is automatically
++ * unregistered.
++ *
++ * Returns 0 on success with @rep filled in, -EINVAL for a bad handle,
++ * or the error from drm_bo_wait() / drm_add_ref_object().
++ */
++
++static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
++                               uint32_t map_flags, unsigned hint,
++                               struct drm_bo_info_rep *rep)
++{
++      struct drm_buffer_object *bo;
++      struct drm_device *dev = file_priv->minor->dev;
++      int ret = 0;
++      int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      mutex_lock(&bo->mutex);
++      /* Retry while the wait/evict path had to drop bo->mutex
++       * (_DRM_BO_FLAG_UNLOCKED set by helpers). */
++      do {
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++              ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
++              if (unlikely(ret))
++                      goto out;
++
++              if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
++                      drm_bo_evict_cached(bo);
++
++      } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
++
++      atomic_inc(&bo->mapped);
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret) {
++              /* Roll back the map count and wake waiters in
++               * drm_bo_wait_unmapped(). */
++              if (atomic_dec_and_test(&bo->mapped))
++                      wake_up_all(&bo->event_queue);
++
++      } else
++              drm_bo_fill_rep_arg(bo, rep);
++
++ out:
++      mutex_unlock(&bo->mutex);
++      drm_bo_usage_deref_unlocked(&bo);
++
++      return ret;
++}
++
++/*
++ * Drop the _DRM_REF_TYPE1 map reference previously added by
++ * drm_buffer_object_map(), then release the lookup reference.
++ * Returns -EINVAL when the handle or the map reference does not exist.
++ */
++static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++      struct drm_ref_object *ro;
++      int ret = 0;
++
++      mutex_lock(&dev->struct_mutex);
++
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      if (!bo) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
++      if (!ro) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      /* Removing the ref object triggers drm_buffer_user_object_unmap(). */
++      drm_remove_ref_object(file_priv, ro);
++      drm_bo_usage_deref_locked(&bo);
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/*
++ * Call struct-sem locked.
++ * Ref-object destruction callback for _DRM_REF_TYPE1 map references:
++ * drops the map count and wakes anyone in drm_bo_wait_unmapped().
++ */
++
++static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
++                                       struct drm_user_object *uo,
++                                       enum drm_ref_type action)
++{
++      struct drm_buffer_object *bo =
++          drm_user_object_entry(uo, struct drm_buffer_object, base);
++
++      /*
++       * We DON'T want to take the bo->lock here, because we want to
++       * hold it when we wait for unmapped buffer.
++       */
++
++      BUG_ON(action != _DRM_REF_TYPE1);
++
++      if (atomic_dec_and_test(&bo->mapped))
++              wake_up_all(&bo->event_queue);
++}
++
++/*
++ * bo->mutex locked.
++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
++ *
++ * Find space matching @new_mem_flags and move the buffer there.  On
++ * success with @move_unfenced set, the buffer goes onto the unfenced
++ * list awaiting a fence; otherwise it returns to the LRU.
++ */
++
++int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
++                     int no_wait, int move_unfenced)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = 0;
++      struct drm_bo_mem_reg mem;
++
++      BUG_ON(bo->fence != NULL);
++
++      mem.num_pages = bo->num_pages;
++      mem.size = mem.num_pages << PAGE_SHIFT;
++      mem.proposed_flags = new_mem_flags;
++      mem.page_alignment = bo->mem.page_alignment;
++
++      /* evict_mutex serializes this move against concurrent evictions. */
++      mutex_lock(&bm->evict_mutex);
++      mutex_lock(&dev->struct_mutex);
++      list_del_init(&bo->lru);
++      mutex_unlock(&dev->struct_mutex);
++
++      /*
++       * Determine where to move the buffer.
++       */
++      ret = drm_bo_mem_space(bo, &mem, no_wait);
++      if (ret)
++              goto out_unlock;
++
++      ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
++
++out_unlock:
++      mutex_lock(&dev->struct_mutex);
++      if (ret || !move_unfenced) {
++              /* On failure, release any node we acquired (but never the
++               * pinned node) and put the buffer back on the LRU. */
++              if (mem.mm_node) {
++                      if (mem.mm_node != bo->pinned_node)
++                              drm_mm_put_block(mem.mm_node);
++                      mem.mm_node = NULL;
++              }
++              drm_bo_add_to_lru(bo);
++              if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
++                      wake_up_all(&bo->event_queue);
++                      DRM_FLAG_MASKED(bo->priv_flags, 0,
++                                      _DRM_BO_FLAG_UNFENCED);
++              }
++      } else {
++              list_add_tail(&bo->lru, &bm->unfenced);
++              DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
++                              _DRM_BO_FLAG_UNFENCED);
++      }
++      mutex_unlock(&dev->struct_mutex);
++      mutex_unlock(&bm->evict_mutex);
++      return ret;
++}
++
++/*
++ * Return 1 when the buffer's current placement (mem->flags) already
++ * satisfies its proposed_flags, i.e. no move is required; 0 otherwise.
++ */
++static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
++{
++      uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
++
++      if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
++              return 0;
++      /* A caching mismatch only forces a move when FORCE_CACHING is
++       * requested (the stricter check is deliberately commented out). */
++      if ((flag_diff & DRM_BO_FLAG_CACHED) &&
++          (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
++           (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
++              return 0;
++
++      if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
++          ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
++           (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
++              return 0;
++      return 1;
++}
++
++/**
++ * drm_buffer_object_validate:
++ *
++ * @bo: the buffer object to modify
++ *
++ * @fence_class: the new fence class covering this buffer
++ *
++ * @move_unfenced: a boolean indicating whether switching the
++ * memory space of this buffer should cause the buffer to
++ * be placed on the unfenced list.
++ *
++ * @no_wait: whether this function should return -EBUSY instead
++ * of waiting.
++ *
++ * @move_buffer: whether the buffer must actually be moved
++ * (as decided by drm_bo_prepare_for_validate).
++ *
++ * Change buffer access parameters. This can involve moving
++ * the buffer to the correct memory type, pinning the buffer
++ * or changing the class/type of fence covering this buffer
++ *
++ * Must be called with bo locked.
++ */
++
++static int drm_buffer_object_validate(struct drm_buffer_object *bo,
++                                    uint32_t fence_class,
++                                    int move_unfenced, int no_wait,
++                                    int move_buffer)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret;
++
++      if (move_buffer) {
++              ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
++                                       move_unfenced);
++              if (ret) {
++                      if (ret != -EAGAIN)
++                              DRM_ERROR("Failed moving buffer.\n");
++                      if (ret == -ENOMEM)
++                              DRM_ERROR("Out of aperture space or "
++                                        "DRM memory quota.\n");
++                      return ret;
++              }
++      }
++
++      /*
++       * Pinned buffers.
++       */
++
++      if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
++              /* Pin: move to the pinned LRU and claim the current mm
++               * node as the pinned node, freeing a stale one. */
++              bo->pinned_mem_type = bo->mem.mem_type;
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(&bo->pinned_lru);
++              drm_bo_add_to_pinned_lru(bo);
++
++              if (bo->pinned_node != bo->mem.mm_node) {
++                      if (bo->pinned_node != NULL)
++                              drm_mm_put_block(bo->pinned_node);
++                      bo->pinned_node = bo->mem.mm_node;
++              }
++
++              mutex_unlock(&dev->struct_mutex);
++
++      } else if (bo->pinned_node != NULL) {
++
++              /* Unpin: release the pinned node unless it is still the
++               * buffer's active node. */
++              mutex_lock(&dev->struct_mutex);
++
++              if (bo->pinned_node != bo->mem.mm_node)
++                      drm_mm_put_block(bo->pinned_node);
++
++              list_del_init(&bo->pinned_lru);
++              bo->pinned_node = NULL;
++              mutex_unlock(&dev->struct_mutex);
++
++      }
++
++      /*
++       * We might need to add a TTM.
++       */
++
++      if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
++              ret = drm_bo_add_ttm(bo);
++              if (ret)
++                      return ret;
++      }
++      /*
++       * Validation has succeeded, move the access and other
++       * non-mapping-related flag bits from the proposed flags to
++       * the active flags
++       */
++
++      DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
++
++      /*
++       * Finally, adjust lru to be sure.
++       */
++
++      mutex_lock(&dev->struct_mutex);
++      list_del(&bo->lru);
++      if (move_unfenced) {
++              list_add_tail(&bo->lru, &bm->unfenced);
++              DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
++                              _DRM_BO_FLAG_UNFENCED);
++      } else {
++              drm_bo_add_to_lru(bo);
++              if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
++                      wake_up_all(&bo->event_queue);
++                      DRM_FLAG_MASKED(bo->priv_flags, 0,
++                                      _DRM_BO_FLAG_UNFENCED);
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/*
++ * This function is called with bo->mutex locked, but may release it
++ * temporarily to wait for events.
++ *
++ * Applies the caller's flag changes, waits for unmap, negotiates the
++ * fence class/type with the driver, and decides (via *move_buffer)
++ * whether drm_buffer_object_validate must relocate the buffer.
++ */
++
++static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
++                                     uint64_t flags,
++                                     uint64_t mask,
++                                     uint32_t hint,
++                                     uint32_t fence_class,
++                                     int no_wait,
++                                     int *move_buffer)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      uint32_t ftype;
++
++      int ret;
++
++      DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
++                (unsigned long long) bo->mem.proposed_flags,
++                (unsigned long long) bo->mem.flags);
++
++      ret = drm_bo_modify_proposed_flags (bo, flags, mask);
++      if (ret)
++              return ret;
++
++      ret = drm_bo_wait_unmapped(bo, no_wait);
++      if (ret)
++              return ret;
++
++      ret = driver->fence_type(bo, &fence_class, &ftype);
++
++      if (ret) {
++              DRM_ERROR("Driver did not support given buffer permissions.\n");
++              return ret;
++      }
++
++      /*
++       * We're switching command submission mechanism,
++       * or cannot simply rely on the hardware serializing for us.
++       * Insert a driver-dependant barrier or wait for buffer idle.
++       */
++
++      if ((fence_class != bo->fence_class) ||
++          ((ftype ^ bo->fence_type) & bo->fence_type)) {
++
++              ret = -EINVAL;
++              if (driver->command_stream_barrier) {
++                      ret = driver->command_stream_barrier(bo,
++                                                           fence_class,
++                                                           ftype,
++                                                           no_wait);
++              }
++              /* Fall back to a plain idle wait when the driver has no
++               * barrier hook or it failed (other than -EAGAIN). */
++              if (ret && ret != -EAGAIN) 
++                      ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
++              
++              if (ret)
++                      return ret;
++      }
++
++      bo->new_fence_class = fence_class;
++      bo->new_fence_type = ftype;
++
++      /*
++       * Check whether we need to move buffer.
++       */
++
++      *move_buffer = 0;
++      if (!drm_bo_mem_compat(&bo->mem)) {
++              *move_buffer = 1;
++              ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
++      }
++
++      return ret;
++}
++
++/**
++ * drm_bo_do_validate:
++ *
++ * @bo:       the buffer object
++ *
++ * @flags: access rights, mapping parameters and cacheability. See
++ * the DRM_BO_FLAG_* values in drm.h
++ *
++ * @mask: Which flag values to change; this allows callers to modify
++ * things without knowing the current state of other flags.
++ *
++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
++ * values in drm.h.
++ *
++ * @fence_class: a driver-specific way of doing fences. Presumably,
++ * this would be used if the driver had more than one submission and
++ * fencing mechanism. At this point, there isn't any use of this
++ * from the user mode code.
++ *
++ * @rep: To be stuffed with the reply from validation
++ * 
++ * 'validate' a buffer object. This changes where the buffer is
++ * located, along with changing access modes.
++ */
++
++int drm_bo_do_validate(struct drm_buffer_object *bo,
++                     uint64_t flags, uint64_t mask, uint32_t hint,
++                     uint32_t fence_class,
++                     struct drm_bo_info_rep *rep)
++{
++      int ret;
++      int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
++      int move_buffer;
++
++      mutex_lock(&bo->mutex);
++
++      /* Retry the preparation while helpers had to drop bo->mutex
++       * (_DRM_BO_FLAG_UNLOCKED gets set when they do). */
++      do {
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++              ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
++                                                fence_class, no_wait,
++                                                &move_buffer);
++              if (ret)
++                      goto out;
++
++      } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
++
++      ret = drm_buffer_object_validate(bo,
++                                       fence_class,
++                                       !(hint & DRM_BO_HINT_DONT_FENCE),
++                                       no_wait,
++                                       move_buffer);
++
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++out:
++      /* Fill the reply even on failure so callers see current state. */
++      if (rep)
++              drm_bo_fill_rep_arg(bo, rep);
++
++      mutex_unlock(&bo->mutex);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_do_validate);
++
++/**
++ * drm_bo_handle_validate
++ *
++ * @file_priv: the drm file private, used to get a handle to the user context
++ *
++ * @handle: the buffer object handle
++ *
++ * @flags: access rights, mapping parameters and cacheability. See
++ * the DRM_BO_FLAG_* values in drm.h
++ *
++ * @mask: Which flag values to change; this allows callers to modify
++ * things without knowing the current state of other flags.
++ *
++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
++ * values in drm.h.
++ *
++ * @fence_class: a driver-specific way of doing fences. Presumably,
++ * this would be used if the driver had more than one submission and
++ * fencing mechanism. At this point, there isn't any use of this
++ * from the user mode code.
++ *
++ * @rep: To be stuffed with the reply from validation
++ *
++ * @bo_rep: To be stuffed with the buffer object pointer
++ *
++ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
++ * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
++ * This is a convenience wrapper only.
++ */
++
++int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
++                         uint64_t flags, uint64_t mask,
++                         uint32_t hint,
++                         uint32_t fence_class,
++                         struct drm_bo_info_rep *rep,
++                         struct drm_buffer_object **bo_rep)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      /* Only the buffer's owner may change the NO_EVICT / NO_MOVE
++       * (pinning) bits. */
++      if (bo->base.owner != file_priv)
++              mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
++
++      ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
++
++      /* On success the lookup reference is handed to the caller via
++       * *bo_rep; otherwise drop it here. */
++      if (!ret && bo_rep)
++              *bo_rep = bo;
++      else
++              drm_bo_usage_deref_unlocked(&bo);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_handle_validate);
++
++
++/*
++ * Fill @rep with information about the buffer named by @handle.
++ * drm_bo_busy() is called first, presumably to refresh fence state
++ * before reporting (see the FIXME below).
++ */
++static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
++                            struct drm_bo_info_rep *rep)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      mutex_lock(&bo->mutex);
++
++      /*
++       * FIXME: Quick busy here?
++       */
++
++      drm_bo_busy(bo, 1);
++      drm_bo_fill_rep_arg(bo, rep);
++      mutex_unlock(&bo->mutex);
++      drm_bo_usage_deref_unlocked(&bo);
++      return 0;
++}
++
++/*
++ * Wait for the buffer named by @handle to become idle, then fill @rep.
++ * DRM_BO_HINT_DONT_BLOCK makes the wait non-blocking; the lazy-wait
++ * hint is forwarded to drm_bo_wait().
++ */
++static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
++                            uint32_t hint,
++                            struct drm_bo_info_rep *rep)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++      int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      mutex_lock(&bo->mutex);
++      ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
++      if (ret)
++              goto out;
++
++      drm_bo_fill_rep_arg(bo, rep);
++out:
++      mutex_unlock(&bo->mutex);
++      drm_bo_usage_deref_unlocked(&bo);
++      return ret;
++}
++
++int drm_buffer_object_create(struct drm_device *dev,
++                           unsigned long size,
++                           enum drm_bo_type type,
++                           uint64_t flags,
++                           uint32_t hint,
++                           uint32_t page_alignment,
++                           unsigned long buffer_start,
++                           struct drm_buffer_object **buf_obj)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_buffer_object *bo;
++      int ret = 0;
++      unsigned long num_pages;
++
++      size += buffer_start & ~PAGE_MASK;
++      num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      if (num_pages == 0) {
++              DRM_ERROR("Illegal buffer object size.\n");
++              return -EINVAL;
++      }
++
++      bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
++
++      if (!bo)
++              return -ENOMEM;
++
++      mutex_init(&bo->mutex);
++      mutex_lock(&bo->mutex);
++
++      atomic_set(&bo->usage, 1);
++      atomic_set(&bo->mapped, 0);
++      DRM_INIT_WAITQUEUE(&bo->event_queue);
++      INIT_LIST_HEAD(&bo->lru);
++      INIT_LIST_HEAD(&bo->pinned_lru);
++      INIT_LIST_HEAD(&bo->ddestroy);
++#ifdef DRM_ODD_MM_COMPAT
++      INIT_LIST_HEAD(&bo->p_mm_list);
++      INIT_LIST_HEAD(&bo->vma_list);
++#endif
++      bo->dev = dev;
++      bo->type = type;
++      bo->num_pages = num_pages;
++      bo->mem.mem_type = DRM_BO_MEM_LOCAL;
++      bo->mem.num_pages = bo->num_pages;
++      bo->mem.mm_node = NULL;
++      bo->mem.page_alignment = page_alignment;
++      bo->buffer_start = buffer_start & PAGE_MASK;
++      bo->priv_flags = 0;
++      bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
++                       DRM_BO_FLAG_MAPPABLE);
++      bo->mem.proposed_flags = 0;
++      atomic_inc(&bm->count);
++      /*
++       * Use drm_bo_modify_proposed_flags to error-check the proposed flags
++       */
++      ret = drm_bo_modify_proposed_flags (bo, flags, flags);
++      if (ret)
++              goto out_err;
++
++      /*
++       * For drm_bo_type_device buffers, allocate
++       * address space from the device so that applications
++       * can mmap the buffer from there
++       */
++      if (bo->type == drm_bo_type_device) {
++              mutex_lock(&dev->struct_mutex);
++              ret = drm_bo_setup_vm_locked(bo);
++              mutex_unlock(&dev->struct_mutex);
++              if (ret)
++                      goto out_err;
++      }
++
++      mutex_unlock(&bo->mutex);
++      ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
++                               0, NULL);
++      if (ret)
++              goto out_err_unlocked;
++
++      *buf_obj = bo;
++      return 0;
++
++out_err:
++      mutex_unlock(&bo->mutex);
++out_err_unlocked:
++      drm_bo_usage_deref_unlocked(&bo);
++      return ret;
++}
++EXPORT_SYMBOL(drm_buffer_object_create);
++
++
++static int drm_bo_add_user_object(struct drm_file *file_priv,
++                                struct drm_buffer_object *bo, int shareable)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_user_object(file_priv, &bo->base, shareable);
++      if (ret)
++              goto out;
++
++      bo->base.remove = drm_bo_base_deref_locked;
++      bo->base.type = drm_buffer_type;
++      bo->base.ref_struct_locked = NULL;
++      bo->base.unref = drm_buffer_user_object_unmap;
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_create_arg *arg = data;
++      struct drm_bo_create_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      struct drm_buffer_object *entry;
++      enum drm_bo_type bo_type;
++      int ret = 0;
++
++      DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
++          (int)(req->size / 1024), req->page_alignment * 4);
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * If the buffer creation request comes in with a starting address,
++       * that points at the desired user pages to map. Otherwise, create
++       * a drm_bo_type_device buffer, which uses pages allocated from the kernel
++       */
++      bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
++
++      /*
++       * User buffers cannot be shared
++       */
++      if (bo_type == drm_bo_type_user)
++              req->flags &= ~DRM_BO_FLAG_SHAREABLE;
++
++      ret = drm_buffer_object_create(file_priv->minor->dev,
++                                     req->size, bo_type, req->flags,
++                                     req->hint, req->page_alignment,
++                                     req->buffer_start, &entry);
++      if (ret)
++              goto out;
++
++      ret = drm_bo_add_user_object(file_priv, entry,
++                                   req->flags & DRM_BO_FLAG_SHAREABLE);
++      if (ret) {
++              drm_bo_usage_deref_unlocked(&entry);
++              goto out;
++      }
++
++      mutex_lock(&entry->mutex);
++      drm_bo_fill_rep_arg(entry, rep);
++      mutex_unlock(&entry->mutex);
++
++out:
++      return ret;
++}
++
++int drm_bo_setstatus_ioctl(struct drm_device *dev,
++                         void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_map_wait_idle_arg *arg = data;
++      struct drm_bo_info_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      struct drm_buffer_object *bo;
++      int ret;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (ret)
++              return ret;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      if (bo->base.owner != file_priv)
++              req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
++
++      ret = drm_bo_do_validate(bo, req->flags, req->mask,
++                               req->hint | DRM_BO_HINT_DONT_FENCE,
++                               bo->fence_class, rep);
++
++      drm_bo_usage_deref_unlocked(&bo);
++
++      (void) drm_bo_read_unlock(&dev->bm.bm_lock);
++
++      return ret;
++}
++
++int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_map_wait_idle_arg *arg = data;
++      struct drm_bo_info_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      int ret;
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
++                                  req->hint, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_handle_arg *arg = data;
++      int ret;
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_buffer_object_unmap(file_priv, arg->handle);
++      return ret;
++}
++
++
++int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_reference_info_arg *arg = data;
++      struct drm_bo_handle_arg *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      struct drm_user_object *uo;
++      int ret;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_user_object_ref(file_priv, req->handle,
++                                drm_buffer_type, &uo);
++      if (ret)
++              return ret;
++
++      ret = drm_bo_handle_info(file_priv, req->handle, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_handle_arg *arg = data;
++      int ret = 0;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
++      return ret;
++}
++
++int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_reference_info_arg *arg = data;
++      struct drm_bo_handle_arg *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      int ret;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_handle_info(file_priv, req->handle, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_map_wait_idle_arg *arg = data;
++      struct drm_bo_info_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      int ret;
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_handle_wait(file_priv, req->handle,
++                               req->hint, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++static int drm_bo_leave_list(struct drm_buffer_object *bo,
++                           uint32_t mem_type,
++                           int free_pinned,
++                           int allow_errors)
++{
++      struct drm_device *dev = bo->dev;
++      int ret = 0;
++
++      mutex_lock(&bo->mutex);
++
++      ret = drm_bo_expire_fence(bo, allow_errors);
++      if (ret)
++              goto out;
++
++      if (free_pinned) {
++              DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(&bo->pinned_lru);
++              if (bo->pinned_node == bo->mem.mm_node)
++                      bo->pinned_node = NULL;
++              if (bo->pinned_node != NULL) {
++                      drm_mm_put_block(bo->pinned_node);
++                      bo->pinned_node = NULL;
++              }
++              mutex_unlock(&dev->struct_mutex);
++      }
++
++      if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
++              DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
++                        "cleanup. Removing flag and evicting.\n");
++              bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
++              bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
++      }
++
++      if (bo->mem.mem_type == mem_type)
++              ret = drm_bo_evict(bo, mem_type, 0);
++
++      if (ret) {
++              if (allow_errors) {
++                      goto out;
++              } else {
++                      ret = 0;
++                      DRM_ERROR("Cleanup eviction failed\n");
++              }
++      }
++
++out:
++      mutex_unlock(&bo->mutex);
++      return ret;
++}
++
++
++static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
++                                       int pinned_list)
++{
++      if (pinned_list)
++              return list_entry(list, struct drm_buffer_object, pinned_lru);
++      else
++              return list_entry(list, struct drm_buffer_object, lru);
++}
++
++/*
++ * dev->struct_mutex locked.
++ */
++
++static int drm_bo_force_list_clean(struct drm_device *dev,
++                                 struct list_head *head,
++                                 unsigned mem_type,
++                                 int free_pinned,
++                                 int allow_errors,
++                                 int pinned_list)
++{
++      struct list_head *list, *next, *prev;
++      struct drm_buffer_object *entry, *nentry;
++      int ret;
++      int do_restart;
++
++      /*
++       * The list traversal is a bit odd here, because an item may
++       * disappear from the list when we release the struct_mutex or
++       * when we decrease the usage count. Also we're not guaranteed
++       * to drain pinned lists, so we can't always restart.
++       */
++
++restart:
++      nentry = NULL;
++      list_for_each_safe(list, next, head) {
++              prev = list->prev;
++
++              entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
++              atomic_inc(&entry->usage);
++              if (nentry) {
++                      atomic_dec(&nentry->usage);
++                      nentry = NULL;
++              }
++
++              /*
++               * Protect the next item from destruction, so we can check
++               * its list pointers later on.
++               */
++
++              if (next != head) {
++                      nentry = drm_bo_entry(next, pinned_list);
++                      atomic_inc(&nentry->usage);
++              }
++              mutex_unlock(&dev->struct_mutex);
++
++              ret = drm_bo_leave_list(entry, mem_type, free_pinned,
++                                      allow_errors);
++              mutex_lock(&dev->struct_mutex);
++
++              drm_bo_usage_deref_locked(&entry);
++              if (ret)
++                      return ret;
++
++              /*
++               * Has the next item disappeared from the list?
++               */
++
++              do_restart = ((next->prev != list) && (next->prev != prev));
++
++              if (nentry != NULL && do_restart)
++                      drm_bo_usage_deref_locked(&nentry);
++
++              if (do_restart)
++                      goto restart;
++      }
++      return 0;
++}
++
++int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem_type];
++      int ret = -EINVAL;
++
++      if (mem_type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory type %d\n", mem_type);
++              return ret;
++      }
++
++      if (!man->has_type) {
++              DRM_ERROR("Trying to take down uninitialized "
++                        "memory manager type %u\n", mem_type);
++              return ret;
++      }
++
++      if ((man->kern_init_type) && (kern_clean == 0)) {
++              DRM_ERROR("Trying to take down kernel initialized "
++                        "memory manager type %u\n", mem_type);
++              return -EPERM;
++      }
++
++      man->use_type = 0;
++      man->has_type = 0;
++
++      ret = 0;
++      if (mem_type > 0) {
++              BUG_ON(!list_empty(&bm->unfenced));
++              drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
++              drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
++
++              if (drm_mm_clean(&man->manager)) {
++                      drm_mm_takedown(&man->manager);
++              } else {
++                      ret = -EBUSY;
++              }
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_clean_mm);
++
++/**
++ *Evict all buffers of a particular mem_type, but leave memory manager
++ *regions for NO_MOVE buffers intact. New buffers cannot be added at this
++ *point since we have the hardware lock.
++ */
++
++static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
++{
++      int ret;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem_type];
++
++      if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
++              return -EINVAL;
++      }
++
++      if (!man->has_type) {
++              DRM_ERROR("Memory type %u has not been initialized.\n",
++                        mem_type);
++              return 0;
++      }
++
++      ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
++      if (ret)
++              return ret;
++      ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
++
++      return ret;
++}
++
++int drm_bo_init_mm(struct drm_device *dev, unsigned type,
++                 unsigned long p_offset, unsigned long p_size,
++                 int kern_init)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = -EINVAL;
++      struct drm_mem_type_manager *man;
++
++      if (type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory type %d\n", type);
++              return ret;
++      }
++
++      man = &bm->man[type];
++      if (man->has_type) {
++              DRM_ERROR("Memory manager already initialized for type %d\n",
++                        type);
++              return ret;
++      }
++
++      ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
++      if (ret)
++              return ret;
++
++      ret = 0;
++      if (type != DRM_BO_MEM_LOCAL) {
++              if (!p_size) {
++                      DRM_ERROR("Zero size memory manager type %d\n", type);
++                      return ret;
++              }
++              ret = drm_mm_init(&man->manager, p_offset, p_size);
++              if (ret)
++                      return ret;
++      }
++      man->has_type = 1;
++      man->use_type = 1;
++      man->kern_init_type = kern_init;
++      man->size = p_size;
++
++      INIT_LIST_HEAD(&man->lru);
++      INIT_LIST_HEAD(&man->pinned);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_init_mm);
++
++/*
++ * This function is intended to be called on drm driver unload.
++ * If you decide to call it from lastclose, you must protect the call
++ * from a potentially racing drm_bo_driver_init in firstopen.
++ * (This may happen on X server restart).
++ */
++
++int drm_bo_driver_finish(struct drm_device *dev)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = 0;
++      unsigned i = DRM_BO_MEM_TYPES;
++      struct drm_mem_type_manager *man;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (!bm->initialized)
++              goto out;
++      bm->initialized = 0;
++
++      while (i--) {
++              man = &bm->man[i];
++              if (man->has_type) {
++                      man->use_type = 0;
++                      if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
++                              ret = -EBUSY;
++                              DRM_ERROR("DRM memory manager type %d "
++                                        "is not clean.\n", i);
++                      }
++                      man->has_type = 0;
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!cancel_delayed_work(&bm->wq))
++              flush_scheduled_work();
++
++      mutex_lock(&dev->struct_mutex);
++      drm_bo_delayed_delete(dev, 1);
++      if (list_empty(&bm->ddestroy))
++              DRM_DEBUG("Delayed destroy list was clean\n");
++
++      if (list_empty(&bm->man[0].lru))
++              DRM_DEBUG("Swap list was clean\n");
++
++      if (list_empty(&bm->man[0].pinned))
++              DRM_DEBUG("NO_MOVE list was clean\n");
++
++      if (list_empty(&bm->unfenced))
++              DRM_DEBUG("Unfenced list was clean\n");
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      ClearPageReserved(bm->dummy_read_page);
++#endif
++      __free_page(bm->dummy_read_page);
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/*
++ * This function is intended to be called on drm driver load.
++ * If you decide to call it from firstopen, you must protect the call
++ * from a potentially racing drm_bo_driver_finish in lastclose.
++ * (This may happen on X server restart).
++ */
++
++int drm_bo_driver_init(struct drm_device *dev)
++{
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = -EINVAL;
++
++      bm->dummy_read_page = NULL;
++      drm_bo_init_lock(&bm->bm_lock);
++      mutex_lock(&dev->struct_mutex);
++      if (!driver)
++              goto out_unlock;
++
++      bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
++      if (!bm->dummy_read_page) {
++              ret = -ENOMEM;
++              goto out_unlock;
++      }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      SetPageReserved(bm->dummy_read_page);
++#endif
++
++      /*
++       * Initialize the system memory buffer type.
++       * Other types need to be driver / IOCTL initialized.
++       */
++      ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
++      if (ret)
++              goto out_unlock;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
++#else
++      INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
++#endif
++      bm->initialized = 1;
++      bm->nice_mode = 1;
++      atomic_set(&bm->count, 0);
++      bm->cur_pages = 0;
++      INIT_LIST_HEAD(&bm->unfenced);
++      INIT_LIST_HEAD(&bm->ddestroy);
++out_unlock:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_driver_init);
++
++int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_init_arg *arg = data;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
++      if (ret)
++              return ret;
++
++      ret = -EINVAL;
++      if (arg->magic != DRM_BO_INIT_MAGIC) {
++              DRM_ERROR("You are using an old libdrm that is not compatible with\n"
++                        "\tthe kernel DRM module. Please upgrade your libdrm.\n");
++              return -EINVAL;
++      }
++      if (arg->major != DRM_BO_INIT_MAJOR) {
++              DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
++                        "\tversion don't match. Got %d, expected %d.\n",
++                        arg->major, DRM_BO_INIT_MAJOR);
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      if (!bm->initialized) {
++              DRM_ERROR("DRM memory manager was not initialized.\n");
++              goto out;
++      }
++      if (arg->mem_type == 0) {
++              DRM_ERROR("System memory buffers already initialized.\n");
++              goto out;
++      }
++      ret = drm_bo_init_mm(dev, arg->mem_type,
++                           arg->p_offset, arg->p_size, 0);
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++      (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
++
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_type_arg *arg = data;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
++      if (ret)
++              return ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = -EINVAL;
++      if (!bm->initialized) {
++              DRM_ERROR("DRM memory manager was not initialized\n");
++              goto out;
++      }
++      if (arg->mem_type == 0) {
++              DRM_ERROR("No takedown for System memory buffers.\n");
++              goto out;
++      }
++      ret = 0;
++      if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
++              if (ret == -EINVAL)
++                      DRM_ERROR("Memory manager type %d not clean. "
++                                "Delaying takedown\n", arg->mem_type);
++              ret = 0;
++      }
++out:
++      mutex_unlock(&dev->struct_mutex);
++      (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
++
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_type_arg *arg = data;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
++              DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
++              return -EINVAL;
++      }
++
++      if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
++              ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
++              if (ret)
++                      return ret;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_bo_lock_mm(dev, arg->mem_type);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret) {
++              (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
++              return ret;
++      }
++
++      return 0;
++}
++
++int drm_mm_unlock_ioctl(struct drm_device *dev,
++                      void *data,
++                      struct drm_file *file_priv)
++{
++      struct drm_mm_type_arg *arg = data;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
++              ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
++int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_info_arg *arg = data;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      struct drm_mem_type_manager *man;
++      int ret = 0;
++      int mem_type = arg->mem_type;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      if (mem_type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      if (!bm->initialized) {
++              DRM_ERROR("DRM memory manager was not initialized\n");
++              ret = -EINVAL;
++              goto out;
++      }
++
++
++      man = &bm->man[arg->mem_type];
++
++      arg->p_size = man->size;
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++     
++      return ret;
++}
++/*
++ * buffer object vm functions.
++ */
++
++int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++
++      if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
++              if (mem->mem_type == DRM_BO_MEM_LOCAL)
++                      return 0;
++
++              if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
++                      return 0;
++
++              if (mem->flags & DRM_BO_FLAG_CACHED)
++                      return 0;
++      }
++      return 1;
++}
++EXPORT_SYMBOL(drm_mem_reg_is_pci);
++
++/**
++ * \c Get the PCI offset for the buffer object memory.
++ *
++ * \param bo The buffer object.
++ * \param bus_base On return the base of the PCI region
++ * \param bus_offset On return the byte offset into the PCI region
++ * \param bus_size On return the byte size of the buffer object or zero if
++ *     the buffer object memory is not accessible through a PCI region.
++ * \return Failure indication.
++ *
++ * Returns -EINVAL if the buffer object is currently not mappable.
++ * Otherwise returns zero.
++ */
++
++int drm_bo_pci_offset(struct drm_device *dev,
++                    struct drm_bo_mem_reg *mem,
++                    unsigned long *bus_base,
++                    unsigned long *bus_offset, unsigned long *bus_size)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++
++      *bus_size = 0;
++      if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
++              return -EINVAL;
++
++      if (drm_mem_reg_is_pci(dev, mem)) {
++              *bus_offset = mem->mm_node->start << PAGE_SHIFT;
++              *bus_size = mem->num_pages << PAGE_SHIFT;
++              *bus_base = man->io_offset;
++      }
++
++      return 0;
++}
++
++/**
++ * \c Kill all user-space virtual mappings of this buffer object.
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
++{
++      struct drm_device *dev = bo->dev;
++      loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
++      loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
++
++      if (!dev->dev_mapping)
++              return;
++
++      unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
++}
++
++/**
++ * drm_bo_takedown_vm_locked:
++ *
++ * @bo: the buffer object to remove any drm device mapping
++ *
++ * Remove any associated vm mapping on the drm device node that
++ * would have been created for a drm_bo_type_device buffer
++ */
++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
++{
++      struct drm_map_list *list;
++      drm_local_map_t *map;
++      struct drm_device *dev = bo->dev;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++      if (bo->type != drm_bo_type_device)
++              return;
++
++      list = &bo->map_list;
++      if (list->user_token) {
++              drm_ht_remove_item(&dev->map_hash, &list->hash);
++              list->user_token = 0;
++      }
++      if (list->file_offset_node) {
++              drm_mm_put_block(list->file_offset_node);
++              list->file_offset_node = NULL;
++      }
++
++      map = list->map;
++      if (!map)
++              return;
++
++      drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
++      list->map = NULL;
++      list->user_token = 0ULL;
++      drm_bo_usage_deref_locked(&bo);
++}
++
++/**
++ * drm_bo_setup_vm_locked:
++ *
++ * @bo: the buffer to allocate address space for
++ *
++ * Allocate address space in the drm device so that applications
++ * can mmap the buffer and access the contents. This only
++ * applies to drm_bo_type_device objects as others are not
++ * placed in the drm device address space.
++ */
++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
++{
++      struct drm_map_list *list = &bo->map_list;
++      drm_local_map_t *map;
++      struct drm_device *dev = bo->dev;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++      list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
++      if (!list->map)
++              return -ENOMEM;
++
++      map = list->map;
++      map->offset = 0;
++      map->type = _DRM_TTM;
++      map->flags = _DRM_REMOVABLE;
++      map->size = bo->mem.num_pages * PAGE_SIZE;
++      atomic_inc(&bo->usage);
++      map->handle = (void *)bo;
++
++      list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
++                                                  bo->mem.num_pages, 0, 0);
++
++      if (unlikely(!list->file_offset_node)) {
++              drm_bo_takedown_vm_locked(bo);
++              return -ENOMEM;
++      }
++
++      list->file_offset_node = drm_mm_get_block(list->file_offset_node,
++                                                bo->mem.num_pages, 0);
++
++      if (unlikely(!list->file_offset_node)) {
++              drm_bo_takedown_vm_locked(bo);
++              return -ENOMEM;
++      }
++              
++      list->hash.key = list->file_offset_node->start;
++      if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
++              drm_bo_takedown_vm_locked(bo);
++              return -ENOMEM;
++      }
++
++      list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
++
++      return 0;
++}
++
++int drm_bo_version_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
++
++      arg->major = DRM_BO_INIT_MAJOR;
++      arg->minor = DRM_BO_INIT_MINOR;
++      arg->patchlevel = DRM_BO_INIT_PATCH;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo_lock.c git-nokia/drivers/gpu/drm-tungsten/drm_bo_lock.c
+--- git/drivers/gpu/drm-tungsten/drm_bo_lock.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo_lock.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,189 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++/*
++ * This file implements a simple replacement for the buffer manager use
++ * of the heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode is fast, and
++ * intended for in-kernel use only.
++ * Taking it in write mode is slow.
++ *
++ * The write mode is used only when there is a need to block all
++ * user-space processes from allocating a
++ * new memory area.
++ * Typical use in write mode is X server VT switching, and it's allowed
++ * to leave kernel space with the write lock held. If a user-space process
++ * dies while having the write-lock, it will be released during the file
++ * descriptor release.
++ *
++ * The read lock is typically placed at the start of an IOCTL- or
++ * user-space callable function that may end up allocating a memory area.
++ * This includes setstatus, super-ioctls and no_pfn; the latter may move
++ * unmappable regions to mappable. It's a bug to leave kernel space with the
++ * read lock held.
++ *
++ * Both read- and write lock taking may be interruptible for low signal-delivery
++ * latency. The locking functions will return -EAGAIN if interrupted by a
++ * signal.
++ *
++ * Locking order: The lock should be taken BEFORE any kernel mutexes
++ * or spinlocks.
++ */
++
++#include "drmP.h"
++
++void drm_bo_init_lock(struct drm_bo_lock *lock)
++{
++      DRM_INIT_WAITQUEUE(&lock->queue);
++      atomic_set(&lock->write_lock_pending, 0);
++      atomic_set(&lock->readers, 0);
++}
++
++void drm_bo_read_unlock(struct drm_bo_lock *lock)
++{
++      if (atomic_dec_and_test(&lock->readers))
++              wake_up_all(&lock->queue);
++}
++EXPORT_SYMBOL(drm_bo_read_unlock);
++
++int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
++{
++      while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
++              int ret;
++              
++              if (!interruptible) {
++                      wait_event(lock->queue,
++                                 atomic_read(&lock->write_lock_pending) == 0);
++                      continue;
++              }
++              ret = wait_event_interruptible
++                  (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
++              if (ret)
++                      return -EAGAIN;
++      }
++
++      while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
++              int ret;
++              if (!interruptible) {
++                      wait_event(lock->queue,
++                                 atomic_read(&lock->readers) != -1);
++                      continue;
++              }
++              ret = wait_event_interruptible
++                      (lock->queue, atomic_read(&lock->readers) != -1);
++              if (ret)
++                      return -EAGAIN;
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_read_lock);
++
++static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
++{
++      if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
++              return -EINVAL;
++      wake_up_all(&lock->queue);
++      return 0;
++}
++
++static void drm_bo_write_lock_remove(struct drm_file *file_priv,
++                                   struct drm_user_object *item)
++{
++      struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
++      int ret;
++
++      ret = __drm_bo_write_unlock(lock);
++      BUG_ON(ret);
++}
++
++int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
++                    struct drm_file *file_priv)
++{
++      int ret = 0;
++      struct drm_device *dev;
++
++      atomic_inc(&lock->write_lock_pending);
++
++      while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
++              if (!interruptible) {
++                      wait_event(lock->queue,
++                                 atomic_read(&lock->readers) == 0);
++                      continue;
++              }
++              ret = wait_event_interruptible
++                  (lock->queue, atomic_read(&lock->readers) == 0);
++
++              if (ret) {
++                      atomic_dec(&lock->write_lock_pending);
++                      wake_up_all(&lock->queue);
++                      return -EAGAIN;
++              }
++      }
++
++      /*
++       * Add a dummy user-object, the destructor of which will
++       * make sure the lock is released if the client dies
++       * while holding it.
++       */
++
++      if (atomic_dec_and_test(&lock->write_lock_pending))
++              wake_up_all(&lock->queue);
++      dev = file_priv->minor->dev;
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_user_object(file_priv, &lock->base, 0);
++      lock->base.remove = &drm_bo_write_lock_remove;
++      lock->base.type = drm_lock_type;
++      if (ret)
++              (void)__drm_bo_write_unlock(lock);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_ref_object *ro;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (lock->base.owner != file_priv) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++      ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
++      BUG_ON(!ro);
++      drm_remove_ref_object(file_priv, ro);
++      lock->base.owner = NULL;
++
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo_move.c git-nokia/drivers/gpu/drm-tungsten/drm_bo_move.c
+--- git/drivers/gpu/drm-tungsten/drm_bo_move.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo_move.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,630 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++/**
++ * Free the old memory node unless it's a pinned region and we
++ * have not been requested to free also pinned regions.
++ */
++
++static void drm_bo_free_old_node(struct drm_buffer_object *bo)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++      if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
++              mutex_lock(&bo->dev->struct_mutex);
++              drm_mm_put_block(old_mem->mm_node);
++              mutex_unlock(&bo->dev->struct_mutex);
++      }
++      old_mem->mm_node = NULL;
++}
++
++int drm_bo_move_ttm(struct drm_buffer_object *bo,
++                  int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_ttm *ttm = bo->ttm;
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      uint64_t save_flags = old_mem->flags;
++      uint64_t save_proposed_flags = old_mem->proposed_flags;
++      int ret;
++
++      if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
++              if (evict)
++                      drm_ttm_evict(ttm);
++              else
++                      drm_ttm_unbind(ttm);
++
++              drm_bo_free_old_node(bo);
++              DRM_FLAG_MASKED(old_mem->flags,
++                              DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
++                              DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
++              old_mem->mem_type = DRM_BO_MEM_LOCAL;
++              save_flags = old_mem->flags;
++      }
++      if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
++              ret = drm_ttm_bind(ttm, new_mem);
++              if (ret)
++                      return ret;
++      }
++
++      *old_mem = *new_mem;
++      new_mem->mm_node = NULL;
++      old_mem->proposed_flags = save_proposed_flags;
++      DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_move_ttm);
++
++/**
++ * \c Return a kernel virtual address to the buffer object PCI memory.
++ *
++ * \param bo The buffer object.
++ * \return Failure indication.
++ *
++ * Returns -EINVAL if the buffer object is currently not mappable.
++ * Returns -ENOMEM if the ioremap operation failed.
++ * Otherwise returns zero.
++ *
++ * After a successfull call, bo->iomap contains the virtual address, or NULL
++ * if the buffer object content is not accessible through PCI space.
++ * Call bo->mutex locked.
++ */
++
++int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
++                      void **virtual)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long bus_base;
++      int ret;
++      void *addr;
++
++      *virtual = NULL;
++      ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
++      if (ret || bus_size == 0)
++              return ret;
++
++      if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
++              addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++      else {
++              addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++              if (!addr)
++                      return -ENOMEM;
++      }
++      *virtual = addr;
++      return 0;
++}
++EXPORT_SYMBOL(drm_mem_reg_ioremap);
++
++/**
++ * \c Unmap mapping obtained using drm_bo_ioremap
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
++                       void *virtual)
++{
++      struct drm_buffer_manager *bm;
++      struct drm_mem_type_manager *man;
++
++      bm = &dev->bm;
++      man = &bm->man[mem->mem_type];
++
++      if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
++              iounmap(virtual);
++}
++
++static int drm_copy_io_page(void *dst, void *src, unsigned long page)
++{
++      uint32_t *dstP =
++          (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
++      uint32_t *srcP =
++          (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
++
++      int i;
++      for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
++              iowrite32(ioread32(srcP++), dstP++);
++      return 0;
++}
++
++static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
++                              unsigned long page)
++{
++      struct page *d = drm_ttm_get_page(ttm, page);
++      void *dst;
++
++      if (!d)
++              return -ENOMEM;
++
++      src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
++      dst = kmap(d);
++      if (!dst)
++              return -ENOMEM;
++
++      memcpy_fromio(dst, src, PAGE_SIZE);
++      kunmap(d);
++      return 0;
++}
++
++static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
++{
++      struct page *s = drm_ttm_get_page(ttm, page);
++      void *src;
++
++      if (!s)
++              return -ENOMEM;
++
++      dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
++      src = kmap(s);
++      if (!src)
++              return -ENOMEM;
++
++      memcpy_toio(dst, src, PAGE_SIZE);
++      kunmap(s);
++      return 0;
++}
++
++int drm_bo_move_memcpy(struct drm_buffer_object *bo,
++                     int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
++      struct drm_ttm *ttm = bo->ttm;
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      struct drm_bo_mem_reg old_copy = *old_mem;
++      void *old_iomap;
++      void *new_iomap;
++      int ret;
++      uint64_t save_flags = old_mem->flags;
++      uint64_t save_proposed_flags = old_mem->proposed_flags;
++      unsigned long i;
++      unsigned long page;
++      unsigned long add = 0;
++      int dir;
++
++      ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
++      if (ret)
++              return ret;
++      ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
++      if (ret)
++              goto out;
++
++      if (old_iomap == NULL && new_iomap == NULL)
++              goto out2;
++      if (old_iomap == NULL && ttm == NULL)
++              goto out2;
++
++      add = 0;
++      dir = 1;
++
++      if ((old_mem->mem_type == new_mem->mem_type) &&
++          (new_mem->mm_node->start <
++           old_mem->mm_node->start + old_mem->mm_node->size)) {
++              dir = -1;
++              add = new_mem->num_pages - 1;
++      }
++
++      for (i = 0; i < new_mem->num_pages; ++i) {
++              page = i * dir + add;
++              if (old_iomap == NULL)
++                      ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
++              else if (new_iomap == NULL)
++                      ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
++              else
++                      ret = drm_copy_io_page(new_iomap, old_iomap, page);
++              if (ret)
++                      goto out1;
++      }
++      mb();
++out2:
++      drm_bo_free_old_node(bo);
++
++      *old_mem = *new_mem;
++      new_mem->mm_node = NULL;
++      old_mem->proposed_flags = save_proposed_flags;
++      DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++
++      if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
++              drm_ttm_unbind(ttm);
++              drm_ttm_destroy(ttm);
++              bo->ttm = NULL;
++      }
++
++out1:
++      drm_mem_reg_iounmap(dev, new_mem, new_iomap);
++out:
++      drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_move_memcpy);
++
++/*
++ * Transfer a buffer object's memory and LRU status to a newly
++ * created object. User-space references remains with the old
++ * object. Call bo->mutex locked.
++ */
++
++int drm_buffer_object_transfer(struct drm_buffer_object *bo,
++                             struct drm_buffer_object **new_obj)
++{
++      struct drm_buffer_object *fbo;
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
++      if (!fbo)
++              return -ENOMEM;
++
++      *fbo = *bo;
++      mutex_init(&fbo->mutex);
++      mutex_lock(&fbo->mutex);
++      mutex_lock(&dev->struct_mutex);
++
++      DRM_INIT_WAITQUEUE(&bo->event_queue);
++      INIT_LIST_HEAD(&fbo->ddestroy);
++      INIT_LIST_HEAD(&fbo->lru);
++      INIT_LIST_HEAD(&fbo->pinned_lru);
++#ifdef DRM_ODD_MM_COMPAT
++      INIT_LIST_HEAD(&fbo->vma_list);
++      INIT_LIST_HEAD(&fbo->p_mm_list);
++#endif
++
++      fbo->fence = drm_fence_reference_locked(bo->fence);
++      fbo->pinned_node = NULL;
++      fbo->mem.mm_node->private = (void *)fbo;
++      atomic_set(&fbo->usage, 1);
++      atomic_inc(&bm->count);
++      mutex_unlock(&dev->struct_mutex);
++      mutex_unlock(&fbo->mutex);
++
++      *new_obj = fbo;
++      return 0;
++}
++
++/*
++ * Since move is underway, we need to block signals in this function.
++ * We cannot restart until it has finished.
++ */
++
++int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
++                            int evict, int no_wait, uint32_t fence_class,
++                            uint32_t fence_type, uint32_t fence_flags,
++                            struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      int ret;
++      uint64_t save_flags = old_mem->flags;
++      uint64_t save_proposed_flags = old_mem->proposed_flags;
++      struct drm_buffer_object *old_obj;
++
++      if (bo->fence)
++              drm_fence_usage_deref_unlocked(&bo->fence);
++      ret = drm_fence_object_create(dev, fence_class, fence_type,
++                                    fence_flags | DRM_FENCE_FLAG_EMIT,
++                                    &bo->fence);
++      bo->fence_type = fence_type;
++      if (ret)
++              return ret;
++
++#ifdef DRM_ODD_MM_COMPAT
++      /*
++       * In this mode, we don't allow pipelining a copy blit,
++       * since the buffer will be accessible from user space
++       * the moment we return and rebuild the page tables.
++       *
++       * With normal vm operation, page tables are rebuilt
++       * on demand using fault(), which waits for buffer idle.
++       */
++      if (1)
++#else
++      if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
++                    bo->mem.mm_node != NULL))
++#endif
++      {
++              if (bo->fence) {
++                      (void) drm_fence_object_wait(bo->fence, 0, 1,
++                                                  bo->fence_type);
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++              }
++              drm_bo_free_old_node(bo);
++
++              if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
++                      drm_ttm_unbind(bo->ttm);
++                      drm_ttm_destroy(bo->ttm);
++                      bo->ttm = NULL;
++              }
++      } else {
++
++              /* This should help pipeline ordinary buffer moves.
++               *
++               * Hang old buffer memory on a new buffer object,
++               * and leave it to be released when the GPU
++               * operation has completed.
++               */
++
++              ret = drm_buffer_object_transfer(bo, &old_obj);
++
++              if (ret)
++                      return ret;
++
++              if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
++                      old_obj->ttm = NULL;
++              else
++                      bo->ttm = NULL;
++
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(&old_obj->lru);
++              DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
++              drm_bo_add_to_lru(old_obj);
++
++              drm_bo_usage_deref_locked(&old_obj);
++              mutex_unlock(&dev->struct_mutex);
++
++      }
++
++      *old_mem = *new_mem;
++      new_mem->mm_node = NULL;
++      old_mem->proposed_flags = save_proposed_flags;
++      DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
++
++int drm_bo_same_page(unsigned long offset,
++                   unsigned long offset2)
++{
++      return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
++}
++EXPORT_SYMBOL(drm_bo_same_page);
++
++unsigned long drm_bo_offset_end(unsigned long offset,
++                              unsigned long end)
++{
++      offset = (offset + PAGE_SIZE) & PAGE_MASK;
++      return (end < offset) ? end : offset;
++}
++EXPORT_SYMBOL(drm_bo_offset_end);
++
++static pgprot_t drm_kernel_io_prot(uint32_t map_type)
++{
++      pgprot_t tmp = PAGE_KERNEL;
++
++#if defined(__i386__) || defined(__x86_64__)
++#ifdef USE_PAT_WC
++#warning using pat
++      if (drm_use_pat() && map_type == _DRM_TTM) {
++              pgprot_val(tmp) |= _PAGE_PAT;
++              return tmp;
++      }
++#endif
++      if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
++              pgprot_val(tmp) |= _PAGE_PCD;
++              pgprot_val(tmp) &= ~_PAGE_PWT;
++      }
++#elif defined(__powerpc__)
++      pgprot_val(tmp) |= _PAGE_NO_CACHE;
++      if (map_type == _DRM_REGISTERS)
++              pgprot_val(tmp) |= _PAGE_GUARDED;
++#endif
++#if defined(__ia64__)
++      if (map_type == _DRM_TTM)
++              tmp = pgprot_writecombine(tmp);
++      else
++              tmp = pgprot_noncached(tmp);
++#endif
++      return tmp;
++}
++
++static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
++                        unsigned long bus_offset, unsigned long bus_size,
++                        struct drm_bo_kmap_obj *map)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg *mem = &bo->mem;
++      struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++
++      if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
++              map->bo_kmap_type = bo_map_premapped;
++              map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
++      } else {
++              map->bo_kmap_type = bo_map_iomap;
++              map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
++      }
++      return (!map->virtual) ? -ENOMEM : 0;
++}
++
++static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
++                         unsigned long start_page, unsigned long num_pages,
++                         struct drm_bo_kmap_obj *map)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg *mem = &bo->mem;
++      struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++      pgprot_t prot;
++      struct drm_ttm *ttm = bo->ttm;
++      struct page *d;
++      int i;
++
++      BUG_ON(!ttm);
++
++      if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
++
++              /*
++               * We're mapping a single page, and the desired
++               * page protection is consistent with the bo.
++               */
++
++              map->bo_kmap_type = bo_map_kmap;
++              map->page = drm_ttm_get_page(ttm, start_page);
++              map->virtual = kmap(map->page);
++      } else {
++              /*
++               * Populate the part we're mapping;
++               */
++
++              for (i = start_page; i < start_page + num_pages; ++i) {
++                      d = drm_ttm_get_page(ttm, i);
++                      if (!d)
++                              return -ENOMEM;
++              }
++
++              /*
++               * We need to use vmap to get the desired page protection
++               * or to make the buffer object look contigous.
++               */
++
++              prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
++                      PAGE_KERNEL :
++                      drm_kernel_io_prot(man->drm_bus_maptype);
++              map->bo_kmap_type = bo_map_vmap;
++              map->virtual = vmap(ttm->pages + start_page,
++                                  num_pages, 0, prot);
++      }
++      return (!map->virtual) ? -ENOMEM : 0;
++}
++
++/*
++ * This function is to be used for kernel mapping of buffer objects.
++ * It chooses the appropriate mapping method depending on the memory type
++ * and caching policy the buffer currently has.
++ * Mapping multiple pages or buffers that live in io memory is a bit slow and
++ * consumes vmalloc space. Be restrictive with such mappings.
++ * Mapping single pages usually returns the logical kernel address,
++ * (which is fast)
++ * BUG may use slower temporary mappings for high memory pages or
++ * uncached / write-combined pages.
++ *
++ * The function fills in a drm_bo_kmap_obj which can be used to return the
++ * kernel virtual address of the buffer.
++ *
++ * Code servicing a non-priviliged user request is only allowed to map one
++ * page at a time. We might need to implement a better scheme to stop such
++ * processes from consuming all vmalloc space.
++ */
++
++int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
++              unsigned long num_pages, struct drm_bo_kmap_obj *map)
++{
++      int ret;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++
++      map->virtual = NULL;
++
++      if (num_pages > bo->num_pages)
++              return -EINVAL;
++      if (start_page > bo->num_pages)
++              return -EINVAL;
++#if 0
++      if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++              return -EPERM;
++#endif
++      ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
++                              &bus_offset, &bus_size);
++
++      if (ret)
++              return ret;
++
++      if (bus_size == 0) {
++              return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
++      } else {
++              bus_offset += start_page << PAGE_SHIFT;
++              bus_size = num_pages << PAGE_SHIFT;
++              return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
++      }
++}
++EXPORT_SYMBOL(drm_bo_kmap);
++
++void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
++{
++      if (!map->virtual)
++              return;
++
++      switch (map->bo_kmap_type) {
++      case bo_map_iomap:
++              iounmap(map->virtual);
++              break;
++      case bo_map_vmap:
++              vunmap(map->virtual);
++              break;
++      case bo_map_kmap:
++              kunmap(map->page);
++              break;
++      case bo_map_premapped:
++              break;
++      default:
++              BUG();
++      }
++      map->virtual = NULL;
++      map->page = NULL;
++}
++EXPORT_SYMBOL(drm_bo_kunmap);
++
++int drm_bo_pfn_prot(struct drm_buffer_object *bo,
++                  unsigned long dst_offset,
++                  unsigned long *pfn,
++                  pgprot_t *prot)
++{
++      struct drm_bo_mem_reg *mem = &bo->mem;
++      struct drm_device *dev = bo->dev;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long bus_base;
++      struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++      int ret;
++
++      ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
++                              &bus_size);
++      if (ret)
++              return -EINVAL;
++
++      if (bus_size != 0)
++              *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++      else if (!bo->ttm)
++              return -EINVAL;
++      else
++              *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
++
++      *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
++              PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_pfn_prot);
++
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bufs.c git-nokia/drivers/gpu/drm-tungsten/drm_bufs.c
+--- git/drivers/gpu/drm-tungsten/drm_bufs.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bufs.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1608 @@
++/**
++ * \file drm_bufs.c
++ * Generic buffer template
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
++{
++      return pci_resource_start(dev->pdev, resource);
++}
++EXPORT_SYMBOL(drm_get_resource_start);
++
++unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
++{
++      return pci_resource_len(dev->pdev, resource);
++}
++EXPORT_SYMBOL(drm_get_resource_len);
++
++struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map)
++{
++      struct drm_map_list *entry;
++      list_for_each_entry(entry, &dev->maplist, head) {
++              if (entry->map && map->type == entry->map->type &&
++                  ((entry->map->offset == map->offset) ||
++                   (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
++                      return entry;
++              }
++      }
++
++      return NULL;
++}
++EXPORT_SYMBOL(drm_find_matching_map);
++
++static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
++                        unsigned long user_token, int hashed_handle)
++{
++      int use_hashed_handle;
++
++#if (BITS_PER_LONG == 64)
++      use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
++#elif (BITS_PER_LONG == 32)
++      use_hashed_handle = hashed_handle;
++#else
++#error Unsupported long size. Neither 64 nor 32 bits.
++#endif
++
++      if (!use_hashed_handle) {
++              int ret;
++              hash->key = user_token >> PAGE_SHIFT;
++              ret = drm_ht_insert_item(&dev->map_hash, hash);
++              if (ret != -EINVAL)
++                      return ret;
++      }
++      return drm_ht_just_insert_please(&dev->map_hash, hash,
++                                       user_token, 32 - PAGE_SHIFT - 3,
++                                       0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
++}
++
++/**
++ * Ioctl to specify a range of memory that is available for mapping by a non-root process.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_map structure.
++ * \return zero on success or a negative value on error.
++ *
++ * Adjusts the memory offset to its absolute value according to the mapping
++ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
++ * applicable and if supported by the kernel.
++ */
++static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
++                         unsigned int size, enum drm_map_type type,
++                         enum drm_map_flags flags,
++                         struct drm_map_list **maplist)
++{
++      struct drm_map *map;
++      struct drm_map_list *list;
++      drm_dma_handle_t *dmah;
++      unsigned long user_token;
++      int ret;
++
++      map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
++      if (!map)
++              return -ENOMEM;
++
++      map->offset = offset;
++      map->size = size;
++      map->flags = flags;
++      map->type = type;
++
++      /* Only allow shared memory to be removable since we only keep enough
++       * book keeping information about shared memory to allow for removal
++       * when processes fork.
++       */
++      if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++      DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
++                map->offset, map->size, map->type);
++      if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++      map->mtrr = -1;
++      map->handle = NULL;
++
++      switch (map->type) {
++      case _DRM_REGISTERS:
++      case _DRM_FRAME_BUFFER:
++#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
++              if (map->offset + (map->size - 1) < map->offset ||
++                  map->offset < virt_to_phys(high_memory)) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EINVAL;
++              }
++#endif
++#ifdef __alpha__
++              map->offset += dev->hose->mem_space->start;
++#endif
++              /* Some drivers preinitialize some maps, without the X Server
++               * needing to be aware of it.  Therefore, we just return success
++               * when the server tries to create a duplicate map.
++               */
++              list = drm_find_matching_map(dev, map);
++              if (list != NULL) {
++                      if (list->map->size != map->size) {
++                              DRM_DEBUG("Matching maps of type %d with "
++                                        "mismatched sizes, (%ld vs %ld)\n",
++                                        map->type, map->size,
++                                        list->map->size);
++                              list->map->size = map->size;
++                      }
++
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      *maplist = list;
++                      return 0;
++              }
++
++              if (drm_core_has_MTRR(dev)) {
++                      if (map->type == _DRM_FRAME_BUFFER ||
++                          (map->flags & _DRM_WRITE_COMBINING)) {
++                              map->mtrr = mtrr_add(map->offset, map->size,
++                                                   MTRR_TYPE_WRCOMB, 1);
++                      }
++              }
++              if (map->type == _DRM_REGISTERS) {
++                      map->handle = ioremap(map->offset, map->size);
++                      if (!map->handle) {
++                              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                              return -ENOMEM;
++                      }
++              }
++              break;
++      case _DRM_SHM:
++              list = drm_find_matching_map(dev, map);
++              if (list != NULL) {
++                      if(list->map->size != map->size) {
++                              DRM_DEBUG("Matching maps of type %d with "
++                                 "mismatched sizes, (%ld vs %ld)\n",
++                                  map->type, map->size, list->map->size);
++                              list->map->size = map->size;
++                      }
++
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      *maplist = list;
++                      return 0;
++              }
++              map->handle = vmalloc_user(map->size);
++              DRM_DEBUG("%lu %d %p\n",
++                        map->size, drm_order(map->size), map->handle);
++              if (!map->handle) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -ENOMEM;
++              }
++              map->offset = (unsigned long)map->handle;
++              if (map->flags & _DRM_CONTAINS_LOCK) {
++                      /* Prevent a 2nd X Server from creating a 2nd lock */
++                      if (dev->lock.hw_lock != NULL) {
++                              vfree(map->handle);
++                              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                              return -EBUSY;
++                      }
++                      dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
++              }
++              break;
++      case _DRM_AGP: {
++              struct drm_agp_mem *entry;
++              int valid = 0;
++
++              if (!drm_core_has_AGP(dev)) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EINVAL;
++              }
++#ifdef __alpha__
++              map->offset += dev->hose->mem_space->start;
++#endif
++              /* In some cases (i810 driver), user space may have already
++               * added the AGP base itself, because dev->agp->base previously
++               * only got set during AGP enable.  So, only add the base
++               * address if the map's offset isn't already within the
++               * aperture.
++               */
++              if (map->offset < dev->agp->base ||
++                  map->offset > dev->agp->base +
++                  dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
++                      map->offset += dev->agp->base;
++              }
++              map->mtrr = dev->agp->agp_mtrr; /* for getmap */
++
++              /* This assumes the DRM is in total control of AGP space.
++               * It's not always the case as AGP can be in the control
++               * of user space (i.e. i810 driver). So this loop will get
++               * skipped and we double check that dev->agp->memory is
++               * actually set as well as being invalid before EPERM'ing
++               */
++              list_for_each_entry(entry, &dev->agp->memory, head) {
++                      if ((map->offset >= entry->bound) &&
++                          (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
++                              valid = 1;
++                              break;
++                      }
++              }
++              if (!list_empty(&dev->agp->memory) && !valid) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EPERM;
++              }
++              DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
++              break;
++      }
++      case _DRM_SCATTER_GATHER:
++              if (!dev->sg) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EINVAL;
++              }
++              map->offset += (unsigned long)dev->sg->virtual;
++              break;
++      case _DRM_CONSISTENT:
++              /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
++               * As we're limiting the address to 2^32-1 (or less),
++               * casting it down to 32 bits is no problem, but we
++               * need to point to a 64bit variable first. */
++              dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
++              if (!dmah) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -ENOMEM;
++              }
++              map->handle = dmah->vaddr;
++              map->offset = (unsigned long)dmah->busaddr;
++              kfree(dmah);
++              break;
++      default:
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++
++      list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
++      if (!list) {
++              if (map->type == _DRM_REGISTERS)
++                      iounmap(map->handle);
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++      memset(list, 0, sizeof(*list));
++      list->map = map;
++
++      mutex_lock(&dev->struct_mutex);
++      list_add(&list->head, &dev->maplist);
++
++      /* Assign a 32-bit handle */
++
++      user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
++              map->offset;
++      ret = drm_map_handle(dev, &list->hash, user_token, 0);
++
++      if (ret) {
++              if (map->type == _DRM_REGISTERS)
++                      iounmap(map->handle);
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              drm_free(list, sizeof(*list), DRM_MEM_MAPS);
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      list->user_token = list->hash.key << PAGE_SHIFT;
++      mutex_unlock(&dev->struct_mutex);
++
++      *maplist = list;
++      return 0;
++}
++
++int drm_addmap(struct drm_device *dev, unsigned int offset,
++             unsigned int size, enum drm_map_type type,
++             enum drm_map_flags flags, drm_local_map_t ** map_ptr)
++{
++      struct drm_map_list *list;
++      int rc;
++
++      rc = drm_addmap_core(dev, offset, size, type, flags, &list);
++      if (!rc)
++              *map_ptr = list->map;
++      return rc;
++}
++
++EXPORT_SYMBOL(drm_addmap);
++
++int drm_addmap_ioctl(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_map *map = data;
++      struct drm_map_list *maplist;
++      int err;
++
++      if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
++              return -EPERM;
++
++      err = drm_addmap_core(dev, map->offset, map->size, map->type,
++                            map->flags, &maplist);
++
++      if (err)
++              return err;
++
++      /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
++      map->handle = (void *)(unsigned long)maplist->user_token;
++      return 0;
++}
++
++/**
++ * Remove a map private from list and deallocate resources if the mapping
++ * isn't in use.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a struct drm_map structure.
++ * \return zero on success or a negative value on error.
++ *
++ * Searches for the map on drm_device::maplist, removes it from the list, sees
++ * if it's being used, and frees any associated resources (such as MTRR's) if
++ * it's not in use.
++ *
++ * \sa drm_addmap
++ */
++int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
++{
++      struct drm_map_list *r_list = NULL, *list_t;
++      drm_dma_handle_t dmah;
++      int found = 0;
++
++      /* Find the list entry for the map and remove it */
++      list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
++              if (r_list->map == map) {
++                      list_del(&r_list->head);
++                      drm_ht_remove_key(&dev->map_hash,
++                                        r_list->user_token >> PAGE_SHIFT);
++                      drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
++                      found = 1;
++                      break;
++              }
++      }
++
++      if (!found)
++              return -EINVAL;
++
++      /* List has wrapped around to the head pointer, or it's empty and we
++       * didn't find anything.
++       */
++
++      switch (map->type) {
++      case _DRM_REGISTERS:
++              iounmap(map->handle);
++              /* FALLTHROUGH */
++      case _DRM_FRAME_BUFFER:
++              if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
++                      int retcode;
++                      retcode = mtrr_del(map->mtrr, map->offset, map->size);
++                      DRM_DEBUG("mtrr_del=%d\n", retcode);
++              }
++              break;
++      case _DRM_SHM:
++              vfree(map->handle);
++              break;
++      case _DRM_AGP:
++      case _DRM_SCATTER_GATHER:
++              break;
++      case _DRM_CONSISTENT:
++              dmah.vaddr = map->handle;
++              dmah.busaddr = map->offset;
++              dmah.size = map->size;
++              __drm_pci_free(dev, &dmah);
++              break;
++      case _DRM_TTM:
++              BUG_ON(1);
++      }
++      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_rmmap_locked);
++
++int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
++{
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_rmmap_locked(dev, map);
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_rmmap);
++
++/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
++ * the last close of the device, and this is necessary for cleanup when things
++ * exit uncleanly.  Therefore, having userland manually remove mappings seems
++ * like a pointless exercise since they're going away anyway.
++ *
++ * One use case might be after addmap is allowed for normal users for SHM and
++ * gets used by drivers that the server doesn't need to care about.  This seems
++ * unlikely.
++ */
++int drm_rmmap_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_map *request = data;
++      drm_local_map_t *map = NULL;
++      struct drm_map_list *r_list;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              if (r_list->map &&
++                  r_list->user_token == (unsigned long)request->handle &&
++                  r_list->map->flags & _DRM_REMOVABLE) {
++                      map = r_list->map;
++                      break;
++              }
++      }
++
++      /* List has wrapped around to the head pointer, or it's empty and we
++       * didn't find anything.
++       */
++      if (list_empty(&dev->maplist) || !map) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      /* Register and framebuffer maps are permanent */
++      if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
++              mutex_unlock(&dev->struct_mutex);
++              return 0;
++      }
++
++      ret = drm_rmmap_locked(dev, map);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * Cleanup after an error on one of the addbufs() functions.
++ *
++ * \param dev DRM device.
++ * \param entry buffer entry where the error occurred.
++ *
++ * Frees any pages and buffers associated with the given entry.
++ */
++static void drm_cleanup_buf_error(struct drm_device *dev,
++                                struct drm_buf_entry *entry)
++{
++      int i;
++
++      if (entry->seg_count) {
++              for (i = 0; i < entry->seg_count; i++) {
++                      if (entry->seglist[i]) {
++                              drm_pci_free(dev, entry->seglist[i]);
++                      }
++              }
++              drm_free(entry->seglist,
++                       entry->seg_count *
++                       sizeof(*entry->seglist), DRM_MEM_SEGS);
++
++              entry->seg_count = 0;
++      }
++
++      if (entry->buf_count) {
++              for (i = 0; i < entry->buf_count; i++) {
++                      if (entry->buflist[i].dev_private) {
++                              drm_free(entry->buflist[i].dev_private,
++                                       entry->buflist[i].dev_priv_size,
++                                       DRM_MEM_BUFS);
++                      }
++              }
++              drm_free(entry->buflist,
++                       entry->buf_count *
++                       sizeof(*entry->buflist), DRM_MEM_BUFS);
++
++              entry->buf_count = 0;
++      }
++}
++
++#if __OS_HAS_AGP
++/**
++ * Add AGP buffers for DMA transfers.
++ *
++ * \param dev struct drm_device to which the buffers are to be added.
++ * \param request pointer to a struct drm_buf_desc describing the request.
++ * \return zero on success or a negative number on failure.
++ *
++ * After some sanity checks creates a drm_buf structure for each buffer and
++ * reallocates the buffer list of the same size order to accommodate the new
++ * buffers.
++ */
++int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_entry *entry;
++      struct drm_agp_mem *agp_entry;
++      struct drm_buf *buf;
++      unsigned long offset;
++      unsigned long agp_offset;
++      int count;
++      int order;
++      int size;
++      int alignment;
++      int page_order;
++      int total;
++      int byte_count;
++      int i, valid;
++      struct drm_buf **temp_buflist;
++
++      if (!dma)
++              return -EINVAL;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      byte_count = 0;
++      agp_offset = dev->agp->base + request->agp_start;
++
++      DRM_DEBUG("count:      %d\n", count);
++      DRM_DEBUG("order:      %d\n", order);
++      DRM_DEBUG("size:       %d\n", size);
++      DRM_DEBUG("agp_offset: %lx\n", agp_offset);
++      DRM_DEBUG("alignment:  %d\n", alignment);
++      DRM_DEBUG("page_order: %d\n", page_order);
++      DRM_DEBUG("total:      %d\n", total);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      /* Make sure buffers are located in AGP memory that we own */
++      valid = 0;
++      list_for_each_entry(agp_entry, &dev->agp->memory, head) {
++              if ((agp_offset >= agp_entry->bound) &&
++                  (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
++                      valid = 1;
++                      break;
++              }
++      }
++      if (!list_empty(&dev->agp->memory) && !valid) {
++              DRM_DEBUG("zone invalid\n");
++              return -EINVAL;
++      }
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++
++      offset = 0;
++
++      while (entry->buf_count < count) {
++              buf = &entry->buflist[entry->buf_count];
++              buf->idx = dma->buf_count + entry->buf_count;
++              buf->total = alignment;
++              buf->order = order;
++              buf->used = 0;
++
++              buf->offset = (dma->byte_count + offset);
++              buf->bus_address = agp_offset + offset;
++              buf->address = (void *)(agp_offset + offset);
++              buf->next = NULL;
++              buf->waiting = 0;
++              buf->pending = 0;
++              init_waitqueue_head(&buf->dma_wait);
++              buf->file_priv = NULL;
++
++              buf->dev_priv_size = dev->driver->dev_priv_size;
++              buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
++              if (!buf->dev_private) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++              memset(buf->dev_private, 0, buf->dev_priv_size);
++
++              DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++              offset += alignment;
++              entry->buf_count++;
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      DRM_DEBUG("byte_count: %d\n", byte_count);
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += byte_count >> PAGE_SHIFT;
++      dma->byte_count += byte_count;
++
++      DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++      DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      dma->flags = _DRM_DMA_USE_AGP;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++}
++EXPORT_SYMBOL(drm_addbufs_agp);
++#endif                                /* __OS_HAS_AGP */
++
++int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int count;
++      int order;
++      int size;
++      int total;
++      int page_order;
++      struct drm_buf_entry *entry;
++      drm_dma_handle_t *dmah;
++      struct drm_buf *buf;
++      int alignment;
++      unsigned long offset;
++      int i;
++      int byte_count;
++      int page_count;
++      unsigned long *temp_pagelist;
++      struct drm_buf **temp_buflist;
++
++      if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
++                request->count, request->size, size, order, dev->queue_count);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
++                                 DRM_MEM_SEGS);
++      if (!entry->seglist) {
++              drm_free(entry->buflist,
++                       count * sizeof(*entry->buflist), DRM_MEM_BUFS);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->seglist, 0, count * sizeof(*entry->seglist));
++
++      /* Keep the original pagelist until we know all the allocations
++       * have succeeded
++       */
++      temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
++                                * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++      if (!temp_pagelist) {
++              drm_free(entry->buflist,
++                       count * sizeof(*entry->buflist), DRM_MEM_BUFS);
++              drm_free(entry->seglist,
++                       count * sizeof(*entry->seglist), DRM_MEM_SEGS);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memcpy(temp_pagelist,
++             dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
++      DRM_DEBUG("pagelist: %d entries\n",
++                dma->page_count + (count << page_order));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++      byte_count = 0;
++      page_count = 0;
++
++      while (entry->buf_count < count) {
++
++              dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
++
++              if (!dmah) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      entry->seg_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      drm_free(temp_pagelist,
++                               (dma->page_count + (count << page_order))
++                               * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++              entry->seglist[entry->seg_count++] = dmah;
++              for (i = 0; i < (1 << page_order); i++) {
++                      DRM_DEBUG("page %d @ 0x%08lx\n",
++                                dma->page_count + page_count,
++                                (unsigned long)dmah->vaddr + PAGE_SIZE * i);
++                      temp_pagelist[dma->page_count + page_count++]
++                              = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
++              }
++              for (offset = 0;
++                   offset + size <= total && entry->buf_count < count;
++                   offset += alignment, ++entry->buf_count) {
++                      buf = &entry->buflist[entry->buf_count];
++                      buf->idx = dma->buf_count + entry->buf_count;
++                      buf->total = alignment;
++                      buf->order = order;
++                      buf->used = 0;
++                      buf->offset = (dma->byte_count + byte_count + offset);
++                      buf->address = (void *)(dmah->vaddr + offset);
++                      buf->bus_address = dmah->busaddr + offset;
++                      buf->next = NULL;
++                      buf->waiting = 0;
++                      buf->pending = 0;
++                      init_waitqueue_head(&buf->dma_wait);
++                      buf->file_priv = NULL;
++
++                      buf->dev_priv_size = dev->driver->dev_priv_size;
++                      buf->dev_private = drm_alloc(buf->dev_priv_size,
++                                                   DRM_MEM_BUFS);
++                      if (!buf->dev_private) {
++                              /* Set count correctly so we free the proper amount. */
++                              entry->buf_count = count;
++                              entry->seg_count = count;
++                              drm_cleanup_buf_error(dev, entry);
++                              drm_free(temp_pagelist,
++                                       (dma->page_count +
++                                        (count << page_order))
++                                       * sizeof(*dma->pagelist),
++                                       DRM_MEM_PAGES);
++                              mutex_unlock(&dev->struct_mutex);
++                              atomic_dec(&dev->buf_alloc);
++                              return -ENOMEM;
++                      }
++                      memset(buf->dev_private, 0, buf->dev_priv_size);
++
++                      DRM_DEBUG("buffer %d @ %p\n",
++                                entry->buf_count, buf->address);
++              }
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              drm_free(temp_pagelist,
++                       (dma->page_count + (count << page_order))
++                       * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      /* No allocations failed, so now we can replace the original pagelist
++       * with the new one.
++       */
++      if (dma->page_count) {
++              drm_free(dma->pagelist,
++                       dma->page_count * sizeof(*dma->pagelist),
++                       DRM_MEM_PAGES);
++      }
++      dma->pagelist = temp_pagelist;
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += entry->seg_count << page_order;
++      dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      if (request->flags & _DRM_PCI_BUFFER_RO)
++              dma->flags = _DRM_DMA_USE_PCI_RO;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++
++}
++EXPORT_SYMBOL(drm_addbufs_pci);
++
++static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_entry *entry;
++      struct drm_buf *buf;
++      unsigned long offset;
++      unsigned long agp_offset;
++      int count;
++      int order;
++      int size;
++      int alignment;
++      int page_order;
++      int total;
++      int byte_count;
++      int i;
++      struct drm_buf **temp_buflist;
++
++      if (!drm_core_check_feature(dev, DRIVER_SG))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      byte_count = 0;
++      agp_offset = request->agp_start;
++
++      DRM_DEBUG("count:      %d\n", count);
++      DRM_DEBUG("order:      %d\n", order);
++      DRM_DEBUG("size:       %d\n", size);
++      DRM_DEBUG("agp_offset: %lu\n", agp_offset);
++      DRM_DEBUG("alignment:  %d\n", alignment);
++      DRM_DEBUG("page_order: %d\n", page_order);
++      DRM_DEBUG("total:      %d\n", total);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++
++      offset = 0;
++
++      while (entry->buf_count < count) {
++              buf = &entry->buflist[entry->buf_count];
++              buf->idx = dma->buf_count + entry->buf_count;
++              buf->total = alignment;
++              buf->order = order;
++              buf->used = 0;
++
++              buf->offset = (dma->byte_count + offset);
++              buf->bus_address = agp_offset + offset;
++              buf->address = (void *)(agp_offset + offset
++                                      + (unsigned long)dev->sg->virtual);
++              buf->next = NULL;
++              buf->waiting = 0;
++              buf->pending = 0;
++              init_waitqueue_head(&buf->dma_wait);
++              buf->file_priv = NULL;
++
++              buf->dev_priv_size = dev->driver->dev_priv_size;
++              buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
++              if (!buf->dev_private) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++
++              memset(buf->dev_private, 0, buf->dev_priv_size);
++
++              DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++              offset += alignment;
++              entry->buf_count++;
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      DRM_DEBUG("byte_count: %d\n", byte_count);
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += byte_count >> PAGE_SHIFT;
++      dma->byte_count += byte_count;
++
++      DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++      DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      dma->flags = _DRM_DMA_USE_SG;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++}
++
++int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_entry *entry;
++      struct drm_buf *buf;
++      unsigned long offset;
++      unsigned long agp_offset;
++      int count;
++      int order;
++      int size;
++      int alignment;
++      int page_order;
++      int total;
++      int byte_count;
++      int i;
++      struct drm_buf **temp_buflist;
++
++      if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      byte_count = 0;
++      agp_offset = request->agp_start;
++
++      DRM_DEBUG("count:      %d\n", count);
++      DRM_DEBUG("order:      %d\n", order);
++      DRM_DEBUG("size:       %d\n", size);
++      DRM_DEBUG("agp_offset: %lu\n", agp_offset);
++      DRM_DEBUG("alignment:  %d\n", alignment);
++      DRM_DEBUG("page_order: %d\n", page_order);
++      DRM_DEBUG("total:      %d\n", total);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++
++      offset = 0;
++
++      while (entry->buf_count < count) {
++              buf = &entry->buflist[entry->buf_count];
++              buf->idx = dma->buf_count + entry->buf_count;
++              buf->total = alignment;
++              buf->order = order;
++              buf->used = 0;
++
++              buf->offset = (dma->byte_count + offset);
++              buf->bus_address = agp_offset + offset;
++              buf->address = (void *)(agp_offset + offset);
++              buf->next = NULL;
++              buf->waiting = 0;
++              buf->pending = 0;
++              init_waitqueue_head(&buf->dma_wait);
++              buf->file_priv = NULL;
++
++              buf->dev_priv_size = dev->driver->dev_priv_size;
++              buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
++              if (!buf->dev_private) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++              memset(buf->dev_private, 0, buf->dev_priv_size);
++
++              DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++              offset += alignment;
++              entry->buf_count++;
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      DRM_DEBUG("byte_count: %d\n", byte_count);
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += byte_count >> PAGE_SHIFT;
++      dma->byte_count += byte_count;
++
++      DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++      DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      dma->flags = _DRM_DMA_USE_FB;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++}
++EXPORT_SYMBOL(drm_addbufs_fb);
++
++
++/**
++ * Add buffers for DMA transfers (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a struct drm_buf_desc request.
++ * \return zero on success or a negative number on failure.
++ *
++ * According with the memory type specified in drm_buf_desc::flags and the
++ * build options, it dispatches the call either to addbufs_agp(),
++ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
++ * PCI memory respectively.
++ */
++int drm_addbufs(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_buf_desc *request = data;
++      int ret;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++#if __OS_HAS_AGP
++      if (request->flags & _DRM_AGP_BUFFER)
++              ret = drm_addbufs_agp(dev, request);
++      else
++#endif
++      if (request->flags & _DRM_SG_BUFFER)
++              ret = drm_addbufs_sg(dev, request);
++      else if (request->flags & _DRM_FB_BUFFER)
++              ret = drm_addbufs_fb(dev, request);
++      else
++              ret = drm_addbufs_pci(dev, request);
++
++      return ret;
++}
++
++/**
++ * Get information about the buffer mappings.
++ *
++ * This was originally meant for debugging purposes, or by a sophisticated
++ * client library to determine how best to use the available buffers (e.g.,
++ * large buffers can be used for image transfer).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_info structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Increments drm_device::buf_use while holding the drm_device::count_lock
++ * lock, preventing allocation of more buffers after this call. Information
++ * about each requested buffer is then copied into user space.
++ */
++int drm_infobufs(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_info *request = data;
++      int i;
++      int count;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      spin_lock(&dev->count_lock);
++      if (atomic_read(&dev->buf_alloc)) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      ++dev->buf_use;         /* Can't allocate more after this call */
++      spin_unlock(&dev->count_lock);
++
++      for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
++              if (dma->bufs[i].buf_count)
++                      ++count;
++      }
++
++      DRM_DEBUG("count = %d\n", count);
++
++      if (request->count >= count) {
++              for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
++                      if (dma->bufs[i].buf_count) {
++                              struct drm_buf_desc __user *to =
++                                  &request->list[count];
++                              struct drm_buf_entry *from = &dma->bufs[i];
++                              struct drm_freelist *list = &dma->bufs[i].freelist;
++                              if (copy_to_user(&to->count,
++                                               &from->buf_count,
++                                               sizeof(from->buf_count)) ||
++                                  copy_to_user(&to->size,
++                                               &from->buf_size,
++                                               sizeof(from->buf_size)) ||
++                                  copy_to_user(&to->low_mark,
++                                               &list->low_mark,
++                                               sizeof(list->low_mark)) ||
++                                  copy_to_user(&to->high_mark,
++                                               &list->high_mark,
++                                               sizeof(list->high_mark)))
++                                      return -EFAULT;
++
++                              DRM_DEBUG("%d %d %d %d %d\n",
++                                        i,
++                                        dma->bufs[i].buf_count,
++                                        dma->bufs[i].buf_size,
++                                        dma->bufs[i].freelist.low_mark,
++                                        dma->bufs[i].freelist.high_mark);
++                              ++count;
++                      }
++              }
++      }
++      request->count = count;
++
++      return 0;
++}
++
++/**
++ * Specifies a low and high water mark for buffer allocation
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg a pointer to a drm_buf_desc structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies that the size order is bounded between the admissible orders and
++ * updates the respective drm_device_dma::bufs entry low and high water mark.
++ *
++ * \note This ioctl is deprecated and mostly never used.
++ */
++int drm_markbufs(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_desc *request = data;
++      int order;
++      struct drm_buf_entry *entry;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      DRM_DEBUG("%d, %d, %d\n",
++                request->size, request->low_mark, request->high_mark);
++      order = drm_order(request->size);
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      entry = &dma->bufs[order];
++
++      if (request->low_mark < 0 || request->low_mark > entry->buf_count)
++              return -EINVAL;
++      if (request->high_mark < 0 || request->high_mark > entry->buf_count)
++              return -EINVAL;
++
++      entry->freelist.low_mark = request->low_mark;
++      entry->freelist.high_mark = request->high_mark;
++
++      return 0;
++}
++
++/**
++ * Unreserve the buffers in list, previously reserved using drmDMA.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_free structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls free_buffer() for each used buffer.
++ * This function is primarily used for debugging.
++ */
++int drm_freebufs(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_free *request = data;
++      int i;
++      int idx;
++      struct drm_buf *buf;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      DRM_DEBUG("%d\n", request->count);
++      for (i = 0; i < request->count; i++) {
++              if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
++                      return -EFAULT;
++              if (idx < 0 || idx >= dma->buf_count) {
++                      DRM_ERROR("Index %d (of %d max)\n",
++                                idx, dma->buf_count - 1);
++                      return -EINVAL;
++              }
++              buf = dma->buflist[idx];
++              if (buf->file_priv != file_priv) {
++                      DRM_ERROR("Process %d freeing buffer not owned\n",
++                                current->pid);
++                      return -EINVAL;
++              }
++              drm_free_buffer(dev, buf);
++      }
++
++      return 0;
++}
++
++/**
++ * Maps all of the DMA buffers into client-virtual space (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_map structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
++ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
++ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
++ * drm_mmap_dma().
++ */
++int drm_mapbufs(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int retcode = 0;
++      const int zero = 0;
++      unsigned long virtual;
++      unsigned long address;
++      struct drm_buf_map *request = data;
++      int i;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      spin_lock(&dev->count_lock);
++      if (atomic_read(&dev->buf_alloc)) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      dev->buf_use++;         /* Can't allocate more after this call */
++      spin_unlock(&dev->count_lock);
++
++      if (request->count >= dma->buf_count) {
++              if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
++                  || (drm_core_check_feature(dev, DRIVER_SG)
++                      && (dma->flags & _DRM_DMA_USE_SG))
++                  || (drm_core_check_feature(dev, DRIVER_FB_DMA)
++                      && (dma->flags & _DRM_DMA_USE_FB))) {
++                      struct drm_map *map = dev->agp_buffer_map;
++                      unsigned long token = dev->agp_buffer_token;
++
++                      if (!map) {
++                              retcode = -EINVAL;
++                              goto done;
++                      }
++                      down_write(&current->mm->mmap_sem);
++                      virtual = do_mmap(file_priv->filp, 0, map->size,
++                                        PROT_READ | PROT_WRITE,
++                                        MAP_SHARED,
++                                        token);
++                      up_write(&current->mm->mmap_sem);
++              } else {
++                      down_write(&current->mm->mmap_sem);
++                      virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
++                                        PROT_READ | PROT_WRITE,
++                                        MAP_SHARED, 0);
++                      up_write(&current->mm->mmap_sem);
++              }
++              if (virtual > -1024UL) {
++                      /* Real error */
++                      retcode = (signed long)virtual;
++                      goto done;
++              }
++              request->virtual = (void __user *)virtual;
++
++              for (i = 0; i < dma->buf_count; i++) {
++                      if (copy_to_user(&request->list[i].idx,
++                                       &dma->buflist[i]->idx,
++                                       sizeof(request->list[0].idx))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++                      if (copy_to_user(&request->list[i].total,
++                                       &dma->buflist[i]->total,
++                                       sizeof(request->list[0].total))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++                      if (copy_to_user(&request->list[i].used,
++                                       &zero, sizeof(zero))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++                      address = virtual + dma->buflist[i]->offset;    /* *** */
++                      if (copy_to_user(&request->list[i].address,
++                                       &address, sizeof(address))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++              }
++      }
++      done:
++      request->count = dma->buf_count;
++      DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
++
++      return retcode;
++}
++
++/**
++ * Compute size order.  Returns the exponent of the smaller power of two which
++ * is greater or equal to given number.
++ *
++ * \param size size.
++ * \return order.
++ *
++ * \todo Can be made faster.
++ */
++int drm_order(unsigned long size)
++{
++      int order;
++      unsigned long tmp;
++
++      for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
++
++      if (size & (size - 1))
++              ++order;
++
++      return order;
++}
++EXPORT_SYMBOL(drm_order);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_compat.c git-nokia/drivers/gpu/drm-tungsten/drm_compat.c
+--- git/drivers/gpu/drm-tungsten/drm_compat.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_compat.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,860 @@
++/**************************************************************************
++ *
++ * This kernel module is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ **************************************************************************/
++/*
++ * This code provides access to unexported mm kernel features. It is necessary
++ * to use the new DRM memory manager code with kernels that don't support it
++ * directly.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ *          Linux kernel mm subsystem authors.
++ *          (Most code taken from there).
++ */
++
++#include "drmP.h"
++
++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * These have bad performance in the AGP module for the indicated kernel versions.
++ */
++
++int drm_map_page_into_agp(struct page *page)
++{
++        int i;
++        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
++        /* Caller's responsibility to call global_flush_tlb() for
++         * performance reasons */
++        return i;
++}
++
++int drm_unmap_page_from_agp(struct page *page)
++{
++        int i;
++        i = change_page_attr(page, 1, PAGE_KERNEL);
++        /* Caller's responsibility to call global_flush_tlb() for
++         * performance reasons */
++        return i;
++}
++#endif
++
++
++#if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++
++/*
++ * The protection map was exported in 2.6.19
++ */
++
++pgprot_t vm_get_page_prot(unsigned long vm_flags)
++{
++#ifdef MODULE
++      static pgprot_t drm_protection_map[16] = {
++              __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
++              __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
++      };
++
++      return drm_protection_map[vm_flags & 0x0F];
++#else
++      extern pgprot_t protection_map[];
++      return protection_map[vm_flags & 0x0F];
++#endif
++};
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * vm code for kernels below 2.6.15 in which version a major vm write
++ * occurred. This implements a simple straightforward
++ * version similar to what's going to be
++ * in kernel 2.6.19+
++ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
++ * nopfn.
++ */
++
++static struct {
++      spinlock_t lock;
++      struct page *dummy_page;
++      atomic_t present;
++} drm_np_retry =
++{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
++
++
++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
++                                  struct fault_data *data);
++
++
++struct page * get_nopage_retry(void)
++{
++      if (atomic_read(&drm_np_retry.present) == 0) {
++              struct page *page = alloc_page(GFP_KERNEL);
++              if (!page)
++                      return NOPAGE_OOM;
++              spin_lock(&drm_np_retry.lock);
++              drm_np_retry.dummy_page = page;
++              atomic_set(&drm_np_retry.present,1);
++              spin_unlock(&drm_np_retry.lock);
++      }
++      get_page(drm_np_retry.dummy_page);
++      return drm_np_retry.dummy_page;
++}
++
++void free_nopage_retry(void)
++{
++      if (atomic_read(&drm_np_retry.present) == 1) {
++              spin_lock(&drm_np_retry.lock);
++              __free_page(drm_np_retry.dummy_page);
++              drm_np_retry.dummy_page = NULL;
++              atomic_set(&drm_np_retry.present, 0);
++              spin_unlock(&drm_np_retry.lock);
++      }
++}
++
++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++                             unsigned long address,
++                             int *type)
++{
++      struct fault_data data;
++
++      if (type)
++              *type = VM_FAULT_MINOR;
++
++      data.address = address;
++      data.vma = vma;
++      drm_bo_vm_fault(vma, &data);
++      switch (data.type) {
++      case VM_FAULT_OOM:
++              return NOPAGE_OOM;
++      case VM_FAULT_SIGBUS:
++              return NOPAGE_SIGBUS;
++      default:
++              break;
++      }
++
++      return NOPAGE_REFAULT;
++}
++
++#endif
++
++#if !defined(DRM_FULL_MM_COMPAT) && \
++  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
++   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
++
++static int drm_pte_is_clear(struct vm_area_struct *vma,
++                          unsigned long addr)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      int ret = 1;
++      pte_t *pte;
++      pmd_t *pmd;
++      pud_t *pud;
++      pgd_t *pgd;
++
++      spin_lock(&mm->page_table_lock);
++      pgd = pgd_offset(mm, addr);
++      if (pgd_none(*pgd))
++              goto unlock;
++      pud = pud_offset(pgd, addr);
++        if (pud_none(*pud))
++              goto unlock;
++      pmd = pmd_offset(pud, addr);
++      if (pmd_none(*pmd))
++              goto unlock;
++      pte = pte_offset_map(pmd, addr);
++      if (!pte)
++              goto unlock;
++      ret = pte_none(*pte);
++      pte_unmap(pte);
++ unlock:
++      spin_unlock(&mm->page_table_lock);
++      return ret;
++}
++
++static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
++                unsigned long pfn)
++{
++      int ret;
++      if (!drm_pte_is_clear(vma, addr))
++              return -EBUSY;
++
++      ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
++      return ret;
++}
++
++
++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
++                                  struct fault_data *data)
++{
++      unsigned long address = data->address;
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page = NULL;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++      unsigned long pfn;
++      int err;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++
++      dev = bo->dev;
++      drm_bo_read_lock(&dev->bm.bm_lock, 0);
++
++      mutex_lock(&bo->mutex);
++
++      err = drm_bo_wait(bo, 0, 1, 0);
++      if (err) {
++              data->type = (err == -EAGAIN) ?
++                      VM_FAULT_MINOR : VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++
++      /*
++       * If buffer happens to be in a non-mappable location,
++       * move it to a mappable.
++       */
++
++      if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
++              unsigned long _end = jiffies + 3*DRM_HZ;
++              uint32_t new_mask = bo->mem.proposed_flags |
++                      DRM_BO_FLAG_MAPPABLE |
++                      DRM_BO_FLAG_FORCE_MAPPABLE;
++
++              do {
++                      err = drm_bo_move_buffer(bo, new_mask, 0, 0);
++              } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
++
++              if (err) {
++                      DRM_ERROR("Timeout moving buffer to mappable location.\n");
++                      data->type = VM_FAULT_SIGBUS;
++                      goto out_unlock;
++              }
++      }
++
++      if (address > vma->vm_end) {
++              data->type = VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++      dev = bo->dev;
++      err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
++                              &bus_size);
++
++      if (err) {
++              data->type = VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++      page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
++
++              pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
++              vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
++      } else {
++              ttm = bo->ttm;
++
++              drm_ttm_fixup_caching(ttm);
++              page = drm_ttm_get_page(ttm, page_offset);
++              if (!page) {
++                      data->type = VM_FAULT_OOM;
++                      goto out_unlock;
++              }
++              pfn = page_to_pfn(page);
++              vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
++                      vm_get_page_prot(vma->vm_flags) :
++                      drm_io_prot(_DRM_TTM, vma);
++      }
++
++      err = vm_insert_pfn(vma, address, pfn);
++
++      if (!err || err == -EBUSY)
++              data->type = VM_FAULT_MINOR;
++      else
++              data->type = VM_FAULT_OOM;
++out_unlock:
++      mutex_unlock(&bo->mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return NULL;
++}
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
++  !defined(DRM_FULL_MM_COMPAT)
++
++/* drm_bo_vm_nopfn - delegates to drm_bo_vm_fault() and maps the resulting
++ * fault type to the corresponding NOPFN_* return code. */
++
++unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
++                         unsigned long address)
++{
++      struct fault_data data;
++      data.address = address;
++
++      (void) drm_bo_vm_fault(vma, &data);
++      if (data.type == VM_FAULT_OOM)
++              return NOPFN_OOM;
++      else if (data.type == VM_FAULT_SIGBUS)
++              return NOPFN_SIGBUS;
++
++      /*
++       * pfn already set.
++       */
++
++      return 0;
++}
++#endif
++
++
++#ifdef DRM_ODD_MM_COMPAT
++
++/*
++ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
++ * workaround for a single BUG statement in do_no_page in these versions. The
++ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
++ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
++ * first take the dev->struct_mutex, and then trylock all mmap_sems. If this
++ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
++ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
++ * phew.
++ */
++
++typedef struct p_mm_entry {
++      struct list_head head;
++      struct mm_struct *mm;
++      atomic_t refcount;
++        int locked;
++} p_mm_entry_t;
++
++typedef struct vma_entry {
++      struct list_head head;
++      struct vm_area_struct *vma;
++} vma_entry_t;
++
++
++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++                             unsigned long address,
++                             int *type)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++
++      mutex_lock(&bo->mutex);
++
++      if (type)
++              *type = VM_FAULT_MINOR;
++
++      if (address > vma->vm_end) {
++              page = NOPAGE_SIGBUS;
++              goto out_unlock;
++      }
++
++      dev = bo->dev;
++
++      if (drm_mem_reg_is_pci(dev, &bo->mem)) {
++              DRM_ERROR("Invalid compat nopage.\n");
++              page = NOPAGE_SIGBUS;
++              goto out_unlock;
++      }
++
++      ttm = bo->ttm;
++      drm_ttm_fixup_caching(ttm);
++      page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
++      page = drm_ttm_get_page(ttm, page_offset);
++      if (!page) {
++              page = NOPAGE_OOM;
++              goto out_unlock;
++      }
++
++      get_page(page);
++out_unlock:
++      mutex_unlock(&bo->mutex);
++      return page;
++}
++
++
++
++
++int drm_bo_map_bound(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
++      int ret = 0;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++
++      ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
++                              &bus_offset, &bus_size);
++      BUG_ON(ret);
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
++              unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
++              pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
++              ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
++                                       vma->vm_end - vma->vm_start,
++                                       pgprot);
++      }
++
++      return ret;
++}
++
++
++int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
++{
++      p_mm_entry_t *entry, *n_entry;
++      vma_entry_t *v_entry;
++      struct mm_struct *mm = vma->vm_mm;
++
++      v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
++      if (!v_entry) {
++              DRM_ERROR("Allocation of vma pointer entry failed\n");
++              return -ENOMEM;
++      }
++      v_entry->vma = vma;
++
++      list_add_tail(&v_entry->head, &bo->vma_list);
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              if (mm == entry->mm) {
++                      atomic_inc(&entry->refcount);
++                      return 0;
++              } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
++      }
++
++      n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
++      if (!n_entry) {
++              DRM_ERROR("Allocation of process mm pointer entry failed\n");
++              return -ENOMEM;
++      }
++      INIT_LIST_HEAD(&n_entry->head);
++      n_entry->mm = mm;
++      n_entry->locked = 0;
++      atomic_set(&n_entry->refcount, 0);
++      list_add_tail(&n_entry->head, &entry->head);
++
++      return 0;
++}
++
++void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
++{
++      p_mm_entry_t *entry, *n;
++      vma_entry_t *v_entry, *v_n;
++      int found = 0;
++      struct mm_struct *mm = vma->vm_mm;
++
++      list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
++              if (v_entry->vma == vma) {
++                      found = 1;
++                      list_del(&v_entry->head);
++                      drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
++                      break;
++              }
++      }
++      BUG_ON(!found);
++
++      list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
++              if (mm == entry->mm) {
++                      if (atomic_add_negative(-1, &entry->refcount)) {
++                              list_del(&entry->head);
++                              BUG_ON(entry->locked);
++                              drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
++                      }
++                      return;
++              }
++      }
++      BUG_ON(1);
++}
++
++
++
++int drm_bo_lock_kmm(struct drm_buffer_object * bo)
++{
++      p_mm_entry_t *entry;
++      int lock_ok = 1;
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              BUG_ON(entry->locked);
++              if (!down_write_trylock(&entry->mm->mmap_sem)) {
++                      lock_ok = 0;
++                      break;
++              }
++              entry->locked = 1;
++      }
++
++      if (lock_ok)
++              return 0;
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              if (!entry->locked)
++                      break;
++              up_write(&entry->mm->mmap_sem);
++              entry->locked = 0;
++      }
++
++      /*
++       * Possible deadlock. Try again. Our callers should handle this
++       * and restart.
++       */
++
++      return -EAGAIN;
++}
++
++void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
++{
++      p_mm_entry_t *entry;
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              BUG_ON(!entry->locked);
++              up_write(&entry->mm->mmap_sem);
++              entry->locked = 0;
++      }
++}
++
++int drm_bo_remap_bound(struct drm_buffer_object *bo)
++{
++      vma_entry_t *v_entry;
++      int ret = 0;
++
++      if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
++              list_for_each_entry(v_entry, &bo->vma_list, head) {
++                      ret = drm_bo_map_bound(v_entry->vma);
++                      if (ret)
++                              break;
++              }
++      }
++
++      return ret;
++}
++
++void drm_bo_finish_unmap(struct drm_buffer_object *bo)
++{
++      vma_entry_t *v_entry;
++
++      list_for_each_entry(v_entry, &bo->vma_list, head) {
++              v_entry->vma->vm_flags &= ~VM_PFNMAP;
++      }
++}
++
++#endif
++
++#ifdef DRM_IDR_COMPAT_FN
++/* only called when idp->lock is held */
++static void __free_layer(struct idr *idp, struct idr_layer *p)
++{
++      p->ary[0] = idp->id_free;
++      idp->id_free = p;
++      idp->id_free_cnt++;
++}
++
++static void free_layer(struct idr *idp, struct idr_layer *p)
++{
++      unsigned long flags;
++
++      /*
++       * Depends on the return element being zeroed.
++       */
++      spin_lock_irqsave(&idp->lock, flags);
++      __free_layer(idp, p);
++      spin_unlock_irqrestore(&idp->lock, flags);
++}
++
++/**
++ * idr_for_each - iterate through all stored pointers
++ * @idp: idr handle
++ * @fn: function to be called for each pointer
++ * @data: data passed back to callback function
++ *
++ * Iterate over the pointers registered with the given idr.  The
++ * callback function will be called for each pointer currently
++ * registered, passing the id, the pointer and the data pointer passed
++ * to this function.  It is not safe to modify the idr tree while in
++ * the callback, so functions such as idr_get_new and idr_remove are
++ * not allowed.
++ *
++ * We check the return of @fn each time. If it returns anything other
++ * than 0, we break out and return that value.
++ *
++* The caller must serialize idr_find() vs idr_get_new() and idr_remove().
++ */
++int idr_for_each(struct idr *idp,
++               int (*fn)(int id, void *p, void *data), void *data)
++{
++      int n, id, max, error = 0;
++      struct idr_layer *p;
++      struct idr_layer *pa[MAX_LEVEL];
++      struct idr_layer **paa = &pa[0];
++
++      n = idp->layers * IDR_BITS;
++      p = idp->top;
++      max = 1 << n;
++
++      id = 0;
++      while (id < max) {
++              while (n > 0 && p) {
++                      n -= IDR_BITS;
++                      *paa++ = p;
++                      p = p->ary[(id >> n) & IDR_MASK];
++              }
++
++              if (p) {
++                      error = fn(id, (void *)p, data);
++                      if (error)
++                              break;
++              }
++
++              id += 1 << n;
++              while (n < fls(id)) {
++                      n += IDR_BITS;
++                      p = *--paa;
++              }
++      }
++
++      return error;
++}
++EXPORT_SYMBOL(idr_for_each);
++
++/**
++ * idr_remove_all - remove all ids from the given idr tree
++ * @idp: idr handle
++ *
++ * idr_destroy() only frees up unused, cached idp_layers, but this
++ * function will remove all id mappings and leave all idp_layers
++ * unused.
++ *
++ * A typical clean-up sequence for objects stored in an idr tree, will
++ * use idr_for_each() to free all objects, if necessary, then
++ * idr_remove_all() to remove all ids, and idr_destroy() to free
++ * up the cached idr_layers.
++ */
++void idr_remove_all(struct idr *idp)
++{
++       int n, id, max, error = 0;
++       struct idr_layer *p;
++       struct idr_layer *pa[MAX_LEVEL];
++       struct idr_layer **paa = &pa[0];
++
++       n = idp->layers * IDR_BITS;
++       p = idp->top;
++       max = 1 << n;
++
++       id = 0;
++       while (id < max && !error) {
++               while (n > IDR_BITS && p) {
++                       n -= IDR_BITS;
++                       *paa++ = p;
++                       p = p->ary[(id >> n) & IDR_MASK];
++               }
++
++               id += 1 << n;
++               while (n < fls(id)) {
++                       if (p) {
++                               memset(p, 0, sizeof *p);
++                               free_layer(idp, p);
++                       }
++                       n += IDR_BITS;
++                       p = *--paa;
++               }
++       }
++       idp->top = NULL;
++       idp->layers = 0;
++}
++EXPORT_SYMBOL(idr_remove_all);
++
++#endif /* DRM_IDR_COMPAT_FN */
++
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++/**
++ * idr_replace - replace pointer for given id
++ * @idp: idr handle
++ * @ptr: pointer you want associated with the id
++ * @id: lookup key
++ *
++ * Replace the pointer registered with an id and return the old value.
++ * A -ENOENT return indicates that @id was not found.
++ * A -EINVAL return indicates that @id was not within valid constraints.
++ *
++ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
++ */
++void *idr_replace(struct idr *idp, void *ptr, int id)
++{
++      int n;
++      struct idr_layer *p, *old_p;
++
++      n = idp->layers * IDR_BITS;
++      p = idp->top;
++
++      id &= MAX_ID_MASK;
++
++      if (id >= (1 << n))
++              return ERR_PTR(-EINVAL);
++
++      n -= IDR_BITS;
++      while ((n > 0) && p) {
++              p = p->ary[(id >> n) & IDR_MASK];
++              n -= IDR_BITS;
++      }
++
++      n = id & IDR_MASK;
++      if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
++              return ERR_PTR(-ENOENT);
++
++      old_p = p->ary[n];
++      p->ary[n] = ptr;
++
++      return (void *)old_p;
++}
++EXPORT_SYMBOL(idr_replace);
++#endif
++
++#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
++#define drm_kmap_get_fixmap_pte(vaddr)                                        \
++      pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
++                         pgprot_t protection)
++{
++      enum fixed_addresses idx;
++      unsigned long vaddr;
++      static pte_t *km_pte;
++      static int initialized = 0;
++
++      if (unlikely(!initialized)) {
++              km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
++              initialized = 1;
++      }
++
++      pagefault_disable();
++      idx = type + KM_TYPE_NR*smp_processor_id();
++      vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++      set_pte(km_pte-idx, pfn_pte(pfn, protection));
++
++      return (void*) vaddr;
++}
++
++EXPORT_SYMBOL(kmap_atomic_prot_pfn);
++
++#endif
++
++#ifdef DRM_FULL_MM_COMPAT
++#ifdef DRM_NO_FAULT
++unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
++                            unsigned long address)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page = NULL;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++      unsigned long pfn;
++      int err;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long ret = NOPFN_REFAULT;
++
++      if (address > vma->vm_end)
++              return NOPFN_SIGBUS;
++
++      dev = bo->dev;
++      err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (err)
++              return NOPFN_REFAULT;
++
++      err = mutex_lock_interruptible(&bo->mutex);
++      if (err) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              return NOPFN_REFAULT;
++      }
++
++      err = drm_bo_wait(bo, 0, 1, 0, 1);
++      if (err) {
++              ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++              goto out_unlock;
++      }
++
++      bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++      /*
++       * If buffer happens to be in a non-mappable location,
++       * move it to a mappable.
++       */
++
++      if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
++              uint32_t new_flags = bo->mem.proposed_flags |
++                      DRM_BO_FLAG_MAPPABLE |
++                      DRM_BO_FLAG_FORCE_MAPPABLE;
++              err = drm_bo_move_buffer(bo, new_flags, 0, 0);
++              if (err) {
++                      ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
++                      goto out_unlock;
++              }
++      }
++
++      err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
++                              &bus_size);
++
++      if (err) {
++              ret = NOPFN_SIGBUS;
++              goto out_unlock;
++      }
++
++      page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
++
++              pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
++              vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
++      } else {
++              ttm = bo->ttm;
++
++              drm_ttm_fixup_caching(ttm);
++              page = drm_ttm_get_page(ttm, page_offset);
++              if (!page) {
++                      ret = NOPFN_OOM;
++                      goto out_unlock;
++              }
++              pfn = page_to_pfn(page);
++              vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
++                      vm_get_page_prot(vma->vm_flags) :
++                      drm_io_prot(_DRM_TTM, vma);
++      }
++
++      err = vm_insert_pfn(vma, address, pfn);
++      if (err) {
++              ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
++              goto out_unlock;
++      }
++out_unlock:
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++      mutex_unlock(&bo->mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return ret;
++}
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_compat.h git-nokia/drivers/gpu/drm-tungsten/drm_compat.h
+--- git/drivers/gpu/drm-tungsten/drm_compat.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_compat.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,380 @@
++/**
++ * \file drm_compat.h
++ * Backward compatibility definitions for Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_COMPAT_H_
++#define _DRM_COMPAT_H_
++
++#ifndef minor
++#define minor(x) MINOR((x))
++#endif
++
++#ifndef MODULE_LICENSE
++#define MODULE_LICENSE(x)
++#endif
++
++#ifndef preempt_disable
++#define preempt_disable()
++#define preempt_enable()
++#endif
++
++#ifndef pte_offset_map
++#define pte_offset_map pte_offset
++#define pte_unmap(pte)
++#endif
++
++#ifndef module_param
++#define module_param(name, type, perm)
++#endif
++
++/* older kernels had different irq args */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#undef DRM_IRQ_ARGS
++#define DRM_IRQ_ARGS          int irq, void *arg, struct pt_regs *regs
++#endif
++
++#ifndef list_for_each_safe
++#define list_for_each_safe(pos, n, head)                              \
++      for (pos = (head)->next, n = pos->next; pos != (head);          \
++              pos = n, n = pos->next)
++#endif
++
++#ifndef list_for_each_entry
++#define list_for_each_entry(pos, head, member)                                \
++       for (pos = list_entry((head)->next, typeof(*pos), member),     \
++                    prefetch(pos->member.next);                               \
++            &pos->member != (head);                                   \
++            pos = list_entry(pos->member.next, typeof(*pos), member), \
++                    prefetch(pos->member.next))
++#endif
++
++#ifndef list_for_each_entry_safe
++#define list_for_each_entry_safe(pos, n, head, member)                  \
++        for (pos = list_entry((head)->next, typeof(*pos), member),      \
++                n = list_entry(pos->member.next, typeof(*pos), member); \
++             &pos->member != (head);                                    \
++             pos = n, n = list_entry(n->member.next, typeof(*n), member))
++#endif
++
++#ifndef __user
++#define __user
++#endif
++
++#if !defined(__put_page)
++#define __put_page(p)           atomic_dec(&(p)->count)
++#endif
++
++#if !defined(__GFP_COMP)
++#define __GFP_COMP 0
++#endif
++
++#if !defined(IRQF_SHARED)
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
++{
++  return remap_page_range(vma, from,
++                        pfn << PAGE_SHIFT,
++                        size,
++                        pgprot);
++}
++
++static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
++{
++      void *addr;
++
++      addr = kmalloc(size * nmemb, flags);
++      if (addr != NULL)
++              memset((void *)addr, 0, size * nmemb);
++
++      return addr;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++#define mutex_lock down
++#define mutex_unlock up
++
++#define mutex semaphore
++
++#define mutex_init(a) sema_init((a), 1)
++
++#endif
++
++#ifndef DEFINE_SPINLOCK
++#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
++#endif
++
++/* old architectures */
++#ifdef __AMD64__
++#define __x86_64__
++#endif
++
++/* sysfs __ATTR macro */
++#ifndef __ATTR
++#define __ATTR(_name,_mode,_show,_store) { \
++        .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE },     \
++        .show   = _show,                                        \
++        .store  = _store,                                       \
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++/* Compat vmalloc_user: zero the whole _size-byte allocation (was: bare `size`,
++ * which is unbound in this macro and zeroed the wrong length). */
++#define vmalloc_user(_size) ({void * tmp = vmalloc(_size);   \
++      if (tmp) memset(tmp, 0, (_size));                    \
++      (tmp);})
++#endif
++
++#ifndef list_for_each_entry_safe_reverse
++#define list_for_each_entry_safe_reverse(pos, n, head, member)          \
++        for (pos = list_entry((head)->prev, typeof(*pos), member),      \
++                n = list_entry(pos->member.prev, typeof(*pos), member); \
++             &pos->member != (head);                                    \
++             pos = n, n = list_entry(n->member.prev, typeof(*n), member))
++#endif
++
++#include <linux/mm.h>
++#include <asm/page.h>
++
++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
++     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
++#define DRM_ODD_MM_COMPAT
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
++#define DRM_FULL_MM_COMPAT
++#endif
++
++
++/*
++ * Flush relevant caches and clear a VMA structure so that page references
++ * will cause a page fault. Don't flush tlbs.
++ */
++
++extern void drm_clear_vma(struct vm_area_struct *vma,
++                        unsigned long addr, unsigned long end);
++
++/*
++ * Return the PTE protection map entries for the VMA flags given by
++ * flags. This is a functional interface to the kernel's protection map.
++ */
++
++extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
++
++#ifndef GFP_DMA32
++#define GFP_DMA32 GFP_KERNEL
++#endif
++#ifndef __GFP_DMA32
++#define __GFP_DMA32 GFP_KERNEL
++#endif
++
++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * These are too slow in earlier kernels.
++ */
++
++extern int drm_unmap_page_from_agp(struct page *page);
++extern int drm_map_page_into_agp(struct page *page);
++
++#define map_page_into_agp drm_map_page_into_agp
++#define unmap_page_from_agp drm_unmap_page_from_agp
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++extern struct page *get_nopage_retry(void);
++extern void free_nopage_retry(void);
++
++#define NOPAGE_REFAULT get_nopage_retry()
++#endif
++
++
++#ifndef DRM_FULL_MM_COMPAT
++
++/*
++ * For now, just return a dummy page that we've allocated out of
++ * static space. The page will be put by do_nopage() since we've already
++ * filled out the pte.
++ */
++
++struct fault_data {
++      struct vm_area_struct *vma;
++      unsigned long address;
++      pgoff_t pgoff;
++      unsigned int flags;
++
++      int type;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++                                   unsigned long address,
++                                   int *type);
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
++  !defined(DRM_FULL_MM_COMPAT)
++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
++                                   unsigned long address);
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
++#endif /* ndef DRM_FULL_MM_COMPAT */
++
++#ifdef DRM_ODD_MM_COMPAT
++
++struct drm_buffer_object;
++
++
++/*
++ * Add a vma to the ttm vma list, and the
++ * process mm pointer to the ttm mm list. Needs the ttm mutex.
++ */
++
++extern int drm_bo_add_vma(struct drm_buffer_object * bo,
++                         struct vm_area_struct *vma);
++/*
++ * Delete a vma and the corresponding mm pointer from the
++ * ttm lists. Needs the ttm mutex.
++ */
++extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
++                            struct vm_area_struct *vma);
++
++/*
++ * Attempts to lock all relevant mmap_sems for a ttm, while
++ * not releasing the ttm mutex. May return -EAGAIN to avoid
++ * deadlocks. In that case the caller shall release the ttm mutex,
++ * schedule() and try again.
++ */
++
++extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
++
++/*
++ * Unlock all relevant mmap_sems for a ttm.
++ */
++extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
++
++/*
++ * If the ttm was bound to the aperture, this function shall be called
++ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
++ * vmas mapping this ttm. This is needed just after unmapping the ptes of
++ * the vma, otherwise the do_nopage() function will bug :(. The function
++ * releases the mmap_sems for this ttm.
++ */
++
++extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
++
++/*
++ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
++ * fault these pfns in, because the first one will set the vma VM_PFNMAP
++ * flag, which will make the next fault bug in do_nopage(). The function
++ * releases the mmap_sems for this ttm.
++ */
++
++extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
++
++
++/*
++ * Remap a vma for a bound ttm. Call with the ttm mutex held and
++ * the relevant mmap_sem locked.
++ */
++extern int drm_bo_map_bound(struct vm_area_struct *vma);
++
++#endif
++
++/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
++#define DRM_IDR_COMPAT_FN
++#define DRM_NO_FAULT
++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
++                                   unsigned long address);
++#endif
++#ifdef DRM_IDR_COMPAT_FN
++int idr_for_each(struct idr *idp,
++               int (*fn)(int id, void *p, void *data), void *data);
++void idr_remove_all(struct idr *idp);
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++void *idr_replace(struct idr *idp, void *ptr, int id);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++typedef _Bool                   bool;
++#endif
++
++
++#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
++#define DRM_KMAP_ATOMIC_PROT_PFN
++extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
++                                pgprot_t protection);
++#endif
++
++#if !defined(flush_agp_mappings)
++#define flush_agp_mappings() do {} while(0)
++#endif
++
++#ifndef DMA_BIT_MASK
++#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
++#endif
++
++#ifndef VM_CAN_NONLINEAR
++#define DRM_VM_NOPAGE 1
++#endif
++
++#ifdef DRM_VM_NOPAGE
++
++extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
++                                unsigned long address, int *type);
++
++extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
++                                    unsigned long address, int *type);
++
++extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
++                                    unsigned long address, int *type);
++
++extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
++                                   unsigned long address, int *type);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
++#define drm_core_ioremap_wc drm_core_ioremap
++#endif
++
++#ifndef OS_HAS_GEM
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
++#define OS_HAS_GEM 1
++#else
++#define OS_HAS_GEM 0
++#endif
++#endif
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_context.c git-nokia/drivers/gpu/drm-tungsten/drm_context.c
+--- git/drivers/gpu/drm-tungsten/drm_context.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_context.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,472 @@
++/**
++ * \file drm_context.c
++ * IOCTLs for generic contexts
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * ChangeLog:
++ *  2001-11-16        Torsten Duwe <duwe@caldera.de>
++ *            added context constructor/destructor hooks,
++ *            needed by SiS driver's memory management.
++ */
++
++#include "drmP.h"
++
++/******************************************************************/
++/** \name Context bitmap support */
++/*@{*/
++
++/**
++ * Free a handle from the context bitmap.
++ *
++ * \param dev DRM device.
++ * \param ctx_handle context handle.
++ *
++ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
++ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
++ * lock.
++ */
++void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
++{
++      mutex_lock(&dev->struct_mutex);
++      idr_remove(&dev->ctx_idr, ctx_handle);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Context bitmap allocation.
++ *
++ * \param dev DRM device.
++ * \return (non-negative) context handle on success or a negative number on failure.
++ *
++ * Allocate a new idr from drm_device::ctx_idr while holding the
++ * drm_device::struct_mutex lock.
++ */
++static int drm_ctxbitmap_next(struct drm_device *dev)
++{
++      int new_id;
++      int ret;
++
++again:
++      if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
++              DRM_ERROR("Out of memory expanding drawable idr\n");
++              return -ENOMEM;
++      }
++      mutex_lock(&dev->struct_mutex);
++      ret = idr_get_new_above(&dev->ctx_idr, NULL,
++                              DRM_RESERVED_CONTEXTS, &new_id);
++      if (ret == -EAGAIN) {
++              mutex_unlock(&dev->struct_mutex);
++              goto again;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++      return new_id;
++}
++
++/**
++ * Context bitmap initialization.
++ *
++ * \param dev DRM device.
++ *
++ * Initialise the drm_device::ctx_idr
++ */
++int drm_ctxbitmap_init(struct drm_device *dev)
++{
++      idr_init(&dev->ctx_idr);
++      return 0;
++}
++
++/**
++ * Context bitmap cleanup.
++ *
++ * \param dev DRM device.
++ *
++ * Free all idr members using drm_ctx_sarea_free helper function
++ * while holding the drm_device::struct_mutex lock.
++ */
++void drm_ctxbitmap_cleanup(struct drm_device *dev)
++{
++      mutex_lock(&dev->struct_mutex);
++      idr_remove_all(&dev->ctx_idr);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/*@}*/
++
++/******************************************************************/
++/** \name Per Context SAREA Support */
++/*@{*/
++
++/**
++ * Get per-context SAREA.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx_priv_map structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Gets the map from drm_device::ctx_idr with the handle specified and
++ * returns its handle.
++ */
++int drm_getsareactx(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_ctx_priv_map *request = data;
++      struct drm_map *map;
++      struct drm_map_list *_entry;
++
++      mutex_lock(&dev->struct_mutex);
++
++      map = idr_find(&dev->ctx_idr, request->ctx_id);
++      if (!map) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->handle = NULL;
++      list_for_each_entry(_entry, &dev->maplist, head) {
++              if (_entry->map == map) {
++                      request->handle =
++                          (void *)(unsigned long)_entry->user_token;
++                      break;
++              }
++      }
++      if (request->handle == NULL)
++              return -EINVAL;
++
++      return 0;
++}
++
++/**
++ * Set per-context SAREA.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx_priv_map structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches the mapping specified in \p arg and update the entry in
++ * drm_device::ctx_idr with it.
++ */
++int drm_setsareactx(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_ctx_priv_map *request = data;
++      struct drm_map *map = NULL;
++      struct drm_map_list *r_list = NULL;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              if (r_list->map
++                  && r_list->user_token == (unsigned long) request->handle)
++                      goto found;
++      }
++      bad:
++      mutex_unlock(&dev->struct_mutex);
++      return -EINVAL;
++
++      found:
++      map = r_list->map;
++      if (!map)
++              goto bad;
++
++      if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
++              goto bad;
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/*@}*/
++
++/******************************************************************/
++/** \name The actual DRM context handling routines */
++/*@{*/
++
++/**
++ * Switch context.
++ *
++ * \param dev DRM device.
++ * \param old old context handle.
++ * \param new new context handle.
++ * \return zero on success or a negative number on failure.
++ *
++ * Attempt to set drm_device::context_flag.
++ */
++static int drm_context_switch(struct drm_device *dev, int old, int new)
++{
++      if (test_and_set_bit(0, &dev->context_flag)) {
++              DRM_ERROR("Reentering -- FIXME\n");
++              return -EBUSY;
++      }
++
++      DRM_DEBUG("Context switch from %d to %d\n", old, new);
++
++      if (new == dev->last_context) {
++              clear_bit(0, &dev->context_flag);
++              return 0;
++      }
++
++      return 0;
++}
++
++/**
++ * Complete context switch.
++ *
++ * \param dev DRM device.
++ * \param new new context handle.
++ * \return zero on success or a negative number on failure.
++ *
++ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
++ * hardware lock is held, clears the drm_device::context_flag and wakes up
++ * drm_device::context_wait.
++ */
++static int drm_context_switch_complete(struct drm_device *dev, int new)
++{
++      dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
++      dev->last_switch = jiffies;
++
++      if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
++              DRM_ERROR("Lock isn't held after context switch\n");
++      }
++
++      /* If a context switch is ever initiated
++         when the kernel holds the lock, release
++         that lock here. */
++      clear_bit(0, &dev->context_flag);
++      wake_up(&dev->context_wait);
++
++      return 0;
++}
++
++/**
++ * Reserve contexts.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx_res structure.
++ * \return zero on success or a negative number on failure.
++ */
++int drm_resctx(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_ctx_res *res = data;
++      struct drm_ctx ctx;
++      int i;
++
++      if (res->count >= DRM_RESERVED_CONTEXTS) {
++              memset(&ctx, 0, sizeof(ctx));
++              for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
++                      ctx.handle = i;
++                      if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
++                              return -EFAULT;
++              }
++      }
++      res->count = DRM_RESERVED_CONTEXTS;
++
++      return 0;
++}
++
++/**
++ * Add context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Get a new handle for the context and copy to userspace.
++ */
++int drm_addctx(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_ctx_list *ctx_entry;
++      struct drm_ctx *ctx = data;
++
++      ctx->handle = drm_ctxbitmap_next(dev);
++      if (ctx->handle == DRM_KERNEL_CONTEXT) {
++              /* Skip kernel's context and get a new one. */
++              ctx->handle = drm_ctxbitmap_next(dev);
++      }
++      DRM_DEBUG("%d\n", ctx->handle);
++      if (ctx->handle == -1) {
++              DRM_DEBUG("Not enough free contexts.\n");
++              /* Should this return -EBUSY instead? */
++              return -ENOMEM;
++      }
++
++      if (ctx->handle != DRM_KERNEL_CONTEXT) {
++              if (dev->driver->context_ctor)
++                      if (!dev->driver->context_ctor(dev, ctx->handle)) {
++                              DRM_DEBUG("Running out of ctxs or memory.\n");
++                              return -ENOMEM;
++                      }
++      }
++
++      ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
++      if (!ctx_entry) {
++              DRM_DEBUG("out of memory\n");
++              return -ENOMEM;
++      }
++
++      INIT_LIST_HEAD(&ctx_entry->head);
++      ctx_entry->handle = ctx->handle;
++      ctx_entry->tag = file_priv;
++
++      mutex_lock(&dev->ctxlist_mutex);
++      list_add(&ctx_entry->head, &dev->ctxlist);
++      ++dev->ctx_count;
++      mutex_unlock(&dev->ctxlist_mutex);
++
++      return 0;
++}
++
++int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      /* This does nothing */
++      return 0;
++}
++
++/**
++ * Get context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ */
++int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      /* This is 0, because we don't handle any context flags */
++      ctx->flags = 0;
++
++      return 0;
++}
++
++/**
++ * Switch context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls context_switch().
++ */
++int drm_switchctx(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      DRM_DEBUG("%d\n", ctx->handle);
++      return drm_context_switch(dev, dev->last_context, ctx->handle);
++}
++
++/**
++ * New context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls context_switch_complete().
++ */
++int drm_newctx(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      DRM_DEBUG("%d\n", ctx->handle);
++      drm_context_switch_complete(dev, ctx->handle);
++
++      return 0;
++}
++
++/**
++ * Remove context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
++ */
++int drm_rmctx(struct drm_device *dev, void *data,
++            struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      DRM_DEBUG("%d\n", ctx->handle);
++      if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
++              file_priv->remove_auth_on_close = 1;
++      }
++      if (ctx->handle != DRM_KERNEL_CONTEXT) {
++              if (dev->driver->context_dtor)
++                      dev->driver->context_dtor(dev, ctx->handle);
++              drm_ctxbitmap_free(dev, ctx->handle);
++      }
++
++      mutex_lock(&dev->ctxlist_mutex);
++      if (!list_empty(&dev->ctxlist)) {
++              struct drm_ctx_list *pos, *n;
++
++              list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
++                      if (pos->handle == ctx->handle) {
++                              list_del(&pos->head);
++                              drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
++                              --dev->ctx_count;
++                      }
++              }
++      }
++      mutex_unlock(&dev->ctxlist_mutex);
++
++      return 0;
++}
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_core.h git-nokia/drivers/gpu/drm-tungsten/drm_core.h
+--- git/drivers/gpu/drm-tungsten/drm_core.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_core.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#define CORE_AUTHOR           "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
++
++#define CORE_NAME             "drm"
++#define CORE_DESC             "DRM shared core routines"
++#define CORE_DATE             "20060810"
++
++#define DRM_IF_MAJOR  1
++#define DRM_IF_MINOR  3
++
++#define CORE_MAJOR    1
++#define CORE_MINOR    1
++#define CORE_PATCHLEVEL 0
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_dma.c git-nokia/drivers/gpu/drm-tungsten/drm_dma.c
+--- git/drivers/gpu/drm-tungsten/drm_dma.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_dma.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,179 @@
++/**
++ * \file drm_dma.c
++ * DMA IOCTL and function support
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++/**
++ * Initialize the DMA data.
++ *
++ * \param dev DRM device.
++ * \return zero on success or a negative value on failure.
++ *
++ * Allocate and initialize a drm_device_dma structure.
++ */
++int drm_dma_setup(struct drm_device *dev)
++{
++      int i;
++
++      dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
++      if (!dev->dma)
++              return -ENOMEM;
++
++      memset(dev->dma, 0, sizeof(*dev->dma));
++
++      for (i = 0; i <= DRM_MAX_ORDER; i++)
++              memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
++
++      return 0;
++}
++
++/**
++ * Cleanup the DMA resources.
++ *
++ * \param dev DRM device.
++ *
++ * Free all pages associated with DMA buffers, the buffers and pages lists, and
++ * finally the drm_device::dma structure itself.
++ */
++void drm_dma_takedown(struct drm_device *dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i, j;
++
++      if (!dma)
++              return;
++
++      /* Clear dma buffers */
++      for (i = 0; i <= DRM_MAX_ORDER; i++) {
++              if (dma->bufs[i].seg_count) {
++                      DRM_DEBUG("order %d: buf_count = %d,"
++                                " seg_count = %d\n",
++                                i,
++                                dma->bufs[i].buf_count,
++                                dma->bufs[i].seg_count);
++                      for (j = 0; j < dma->bufs[i].seg_count; j++) {
++                              if (dma->bufs[i].seglist[j]) {
++                                      drm_pci_free(dev, dma->bufs[i].seglist[j]);
++                              }
++                      }
++                      drm_free(dma->bufs[i].seglist,
++                               dma->bufs[i].seg_count
++                               * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
++              }
++              if (dma->bufs[i].buf_count) {
++                      for (j = 0; j < dma->bufs[i].buf_count; j++) {
++                              if (dma->bufs[i].buflist[j].dev_private) {
++                                      drm_free(dma->bufs[i].buflist[j].
++                                               dev_private,
++                                               dma->bufs[i].buflist[j].
++                                               dev_priv_size, DRM_MEM_BUFS);
++                              }
++                      }
++                      drm_free(dma->bufs[i].buflist,
++                               dma->bufs[i].buf_count *
++                               sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
++              }
++      }
++
++      if (dma->buflist) {
++              drm_free(dma->buflist,
++                       dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      }
++
++      if (dma->pagelist) {
++              drm_free(dma->pagelist,
++                       dma->page_count * sizeof(*dma->pagelist),
++                       DRM_MEM_PAGES);
++      }
++      drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
++      dev->dma = NULL;
++}
++
++/**
++ * Free a buffer.
++ *
++ * \param dev DRM device.
++ * \param buf buffer to free.
++ *
++ * Resets the fields of \p buf.
++ */
++void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
++{
++      if (!buf)
++              return;
++
++      buf->waiting = 0;
++      buf->pending = 0;
++      buf->file_priv = NULL;
++      buf->used = 0;
++
++      if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
++          && waitqueue_active(&buf->dma_wait)) {
++              wake_up_interruptible(&buf->dma_wait);
++      }
++}
++
++/**
++ * Reclaim the buffers.
++ *
++ * \param file_priv DRM file private.
++ *
++ * Frees each buffer associated with \p file_priv not already on the hardware.
++ */
++void drm_core_reclaim_buffers(struct drm_device *dev,
++                            struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      if (!dma)
++              return;
++      for (i = 0; i < dma->buf_count; i++) {
++              if (dma->buflist[i]->file_priv == file_priv) {
++                      switch (dma->buflist[i]->list) {
++                      case DRM_LIST_NONE:
++                              drm_free_buffer(dev, dma->buflist[i]);
++                              break;
++                      case DRM_LIST_WAIT:
++                              dma->buflist[i]->list = DRM_LIST_RECLAIM;
++                              break;
++                      default:
++                              /* Buffer already on hardware. */
++                              break;
++                      }
++              }
++      }
++}
++EXPORT_SYMBOL(drm_core_reclaim_buffers);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_drawable.c git-nokia/drivers/gpu/drm-tungsten/drm_drawable.c
+--- git/drivers/gpu/drm-tungsten/drm_drawable.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_drawable.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,192 @@
++/**
++ * \file drm_drawable.c
++ * IOCTLs for drawables
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ * \author Michel Dänzer <michel@tungstengraphics.com>
++ */
++
++/*
++ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++/**
++ * Allocate drawable ID and memory to store information about it.
++ */
++int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      unsigned long irqflags;
++      struct drm_draw *draw = data;
++      int new_id = 0;
++      int ret;
++
++again:
++      if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
++              DRM_ERROR("Out of memory expanding drawable idr\n");
++              return -ENOMEM;
++      }
++
++      spin_lock_irqsave(&dev->drw_lock, irqflags);
++      ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
++      if (ret == -EAGAIN) {
++              spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++              goto again;
++      }
++
++      spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++
++      draw->handle = new_id;
++
++      DRM_DEBUG("%d\n", draw->handle);
++
++      return 0;
++}
++
++/**
++ * Free drawable ID and memory to store information about it.
++ */
++int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_draw *draw = data;
++      unsigned long irqflags;
++
++      spin_lock_irqsave(&dev->drw_lock, irqflags);
++
++      drm_free(drm_get_drawable_info(dev, draw->handle),
++               sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
++
++      idr_remove(&dev->drw_idr, draw->handle);
++
++      spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++      DRM_DEBUG("%d\n", draw->handle);
++      return 0;
++}
++
++int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_update_draw *update = data;
++      unsigned long irqflags;
++      struct drm_clip_rect *rects;
++      struct drm_drawable_info *info;
++      int err;
++
++      info = idr_find(&dev->drw_idr, update->handle);
++      if (!info) {
++              info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
++              if (!info)
++                      return -ENOMEM;
++              if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
++                      DRM_ERROR("No such drawable %d\n", update->handle);
++                      drm_free(info, sizeof(*info), DRM_MEM_BUFS);
++                      return -EINVAL;
++              }
++      }
++
++      switch (update->type) {
++      case DRM_DRAWABLE_CLIPRECTS:
++              if (update->num != info->num_rects) {
++                      rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
++                                       DRM_MEM_BUFS);
++              } else
++                      rects = info->rects;
++
++              if (update->num && !rects) {
++                      DRM_ERROR("Failed to allocate cliprect memory\n");
++                      err = -ENOMEM;
++                      goto error;
++              }
++
++              if (update->num && DRM_COPY_FROM_USER(rects,
++                                                   (struct drm_clip_rect __user *)
++                                                   (unsigned long)update->data,
++                                                   update->num *
++                                                   sizeof(*rects))) {
++                      DRM_ERROR("Failed to copy cliprects from userspace\n");
++                      err = -EFAULT;
++                      goto error;
++              }
++
++              spin_lock_irqsave(&dev->drw_lock, irqflags);
++
++              if (rects != info->rects) {
++                      drm_free(info->rects, info->num_rects *
++                               sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
++              }
++
++              info->rects = rects;
++              info->num_rects = update->num;
++
++              spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++
++              DRM_DEBUG("Updated %d cliprects for drawable %d\n",
++                        info->num_rects, update->handle);
++              break;
++      default:
++              DRM_ERROR("Invalid update type %d\n", update->type);
++              return -EINVAL;
++      }
++
++      return 0;
++
++error:
++      if (rects != info->rects)
++              drm_free(rects, update->num * sizeof(struct drm_clip_rect),
++                       DRM_MEM_BUFS);
++
++      return err;
++}
++
++/**
++ * Caller must hold the drawable spinlock!
++ */
++struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
++{
++      return idr_find(&dev->drw_idr, id);
++}
++EXPORT_SYMBOL(drm_get_drawable_info);
++
++static int drm_drawable_free(int idr, void *p, void *data)
++{
++      struct drm_drawable_info *info = p;
++
++      if (info) {
++              drm_free(info->rects, info->num_rects *
++                       sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
++              drm_free(info, sizeof(*info), DRM_MEM_BUFS);
++      }
++
++      return 0;
++}
++
++void drm_drawable_free_all(struct drm_device *dev)
++{
++      idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
++      idr_remove_all(&dev->drw_idr);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_drv.c git-nokia/drivers/gpu/drm-tungsten/drm_drv.c
+--- git/drivers/gpu/drm-tungsten/drm_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,697 @@
++/**
++ * \file drm_drv.c
++ * Generic driver template
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ *
++ * To use this template, you must at least define the following (samples
++ * given for the MGA driver):
++ *
++ * \code
++ * #define DRIVER_AUTHOR      "VA Linux Systems, Inc."
++ *
++ * #define DRIVER_NAME                "mga"
++ * #define DRIVER_DESC                "Matrox G200/G400"
++ * #define DRIVER_DATE                "20001127"
++ *
++ * #define drm_x              mga_##x
++ * \endcode
++ */
++
++/*
++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "drm_core.h"
++
++static void drm_cleanup(struct drm_device * dev);
++int drm_fb_loaded = 0;
++
++static int drm_version(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++
++/** Ioctl table */
++static struct drm_ioctl_desc drm_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
++      /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
++      DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++#if __OS_HAS_AGP
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++#endif
++
++      DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
++
++#if OS_HAS_GEM
++      DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
++#endif
++};
++
++#define DRM_CORE_IOCTL_COUNT  ARRAY_SIZE( drm_ioctls )
++
++
++/**
++ * Take down the DRM device.
++ *
++ * \param dev DRM device structure.
++ *
++ * Frees every resource in \p dev.
++ *
++ * \sa drm_device
++ */
++int drm_lastclose(struct drm_device * dev)
++{
++      struct drm_magic_entry *pt, *next;
++      struct drm_map_list *r_list, *list_t;
++      struct drm_vma_entry *vma, *vma_temp;
++      int i;
++
++      DRM_DEBUG("\n");
++
++      /*
++       * We can't do much about this function failing.
++       */
++
++      drm_bo_driver_finish(dev);
++
++      if (dev->driver->lastclose)
++              dev->driver->lastclose(dev);
++      DRM_DEBUG("driver lastclose completed\n");
++
++      if (dev->unique) {
++              drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++              dev->unique = NULL;
++              dev->unique_len = 0;
++      }
++
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      /* Free drawable information memory */
++      mutex_lock(&dev->struct_mutex);
++
++      drm_drawable_free_all(dev);
++      del_timer(&dev->timer);
++
++      if (dev->unique) {
++              drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++              dev->unique = NULL;
++              dev->unique_len = 0;
++      }
++
++      if (dev->magicfree.next) {
++              list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
++                      list_del(&pt->head);
++                      drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
++                      drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
++              }
++              drm_ht_remove(&dev->magiclist);
++      }
++
++
++      /* Clear AGP information */
++      if (drm_core_has_AGP(dev) && dev->agp) {
++              struct drm_agp_mem *entry, *tempe;
++
++              /* Remove AGP resources, but leave dev->agp
++                 intact until drv_cleanup is called. */
++              list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
++                      if (entry->bound)
++                              drm_unbind_agp(entry->memory);
++                      drm_free_agp(entry->memory, entry->pages);
++                      drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
++              }
++              INIT_LIST_HEAD(&dev->agp->memory);
++
++              if (dev->agp->acquired)
++                      drm_agp_release(dev);
++
++              dev->agp->acquired = 0;
++              dev->agp->enabled = 0;
++      }
++      if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
++              drm_sg_cleanup(dev->sg);
++              dev->sg = NULL;
++      }
++
++      /* Clear vma list (only built for debugging) */
++      list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
++              list_del(&vma->head);
++              drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
++      }
++
++      list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
++              if (!(r_list->map->flags & _DRM_DRIVER)) {
++                      drm_rmmap_locked(dev, r_list->map);
++                      r_list = NULL;
++              }
++      }
++
++      if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
++              for (i = 0; i < dev->queue_count; i++) {
++
++                      if (dev->queuelist[i]) {
++                              drm_free(dev->queuelist[i],
++                                       sizeof(*dev->queuelist[0]),
++                                       DRM_MEM_QUEUES);
++                              dev->queuelist[i] = NULL;
++                      }
++              }
++              drm_free(dev->queuelist,
++                       dev->queue_slots * sizeof(*dev->queuelist),
++                       DRM_MEM_QUEUES);
++              dev->queuelist = NULL;
++      }
++      dev->queue_count = 0;
++
++      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              drm_dma_takedown(dev);
++
++      if (dev->lock.hw_lock) {
++              dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
++              dev->lock.file_priv = NULL;
++              wake_up_interruptible(&dev->lock.lock_queue);
++      }
++      dev->dev_mapping = NULL;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("lastclose completed\n");
++      return 0;
++}
++
++void drm_cleanup_pci(struct pci_dev *pdev)
++{
++      struct drm_device *dev = pci_get_drvdata(pdev);
++
++      pci_set_drvdata(pdev, NULL);
++      pci_release_regions(pdev);
++      if (dev)
++              drm_cleanup(dev);
++}
++EXPORT_SYMBOL(drm_cleanup_pci);
++
++/**
++ * Module initialization. Called via init_module at module load time, or via
++ * linux/init/main.c (this is not currently supported).
++ *
++ * \return zero on success or a negative number on failure.
++ *
++ * Initializes an array of drm_device structures, and attempts to
++ * initialize all available devices, using consecutive minors, registering the
++ * stubs and initializing the AGP device.
++ *
++ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
++ * after the initialization for driver customization.
++ */
++int drm_init(struct drm_driver *driver,
++                     struct pci_device_id *pciidlist)
++{
++      struct pci_dev *pdev;
++      struct pci_device_id *pid;
++      int rc, i;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
++              pid = &pciidlist[i];
++
++              pdev = NULL;
++              /* pass back in pdev to account for multiple identical cards */
++              while ((pdev =
++                      pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
++                                     pid->subdevice, pdev))) {
++                      /* Are there device class requirements? */
++                      if ((pid->class != 0)
++                              && ((pdev->class & pid->class_mask) != pid->class)) {
++                              continue;
++                      }
++                      /* is there already a driver loaded, or (short circuit saves work) */
++                      /* does something like VesaFB have control of the memory region? */
++                      if (
++#ifdef CONFIG_PCI
++                          pci_dev_driver(pdev) ||
++#endif
++                          pci_request_regions(pdev, "DRM scan")) {
++                              /* go into stealth mode */
++                              drm_fb_loaded = 1;
++                              pci_dev_put(pdev);
++                              break;
++                      }
++                      /* no fbdev or vesadev, put things back and wait for normal probe */
++                      pci_release_regions(pdev);
++              }
++      }
++
++      if (!drm_fb_loaded)
++              return pci_register_driver(&driver->pci_driver);
++      else {
++              for (i = 0; pciidlist[i].vendor != 0; i++) {
++                      pid = &pciidlist[i];
++
++                      pdev = NULL;
++                      /* pass back in pdev to account for multiple identical cards */
++                      while ((pdev =
++                              pci_get_subsys(pid->vendor, pid->device,
++                                             pid->subvendor, pid->subdevice,
++                                             pdev))) {
++                              /* Are there device class requirements? */
++                              if ((pid->class != 0)
++                                      && ((pdev->class & pid->class_mask) != pid->class)) {
++                                      continue;
++                              }
++#ifdef CONFIG_PCI
++                              /* stealth mode requires a manual probe */
++                              pci_dev_get(pdev);
++#endif
++                              if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
++                                      pci_dev_put(pdev);
++                                      return rc;
++                              }
++                      }
++              }
++              DRM_INFO("Used old pci detect: framebuffer loaded\n");
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_init);
++
++/**
++ * Called via cleanup_module() at module unload time.
++ *
++ * Cleans up all DRM device, calling drm_lastclose().
++ *
++ * \sa drm_init
++ */
++static void drm_cleanup(struct drm_device * dev)
++{
++
++      DRM_DEBUG("\n");
++      if (!dev) {
++              DRM_ERROR("cleanup called no dev\n");
++              return;
++      }
++
++      drm_lastclose(dev);
++      drm_fence_manager_takedown(dev);
++
++      if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
++          && dev->agp->agp_mtrr >= 0) {
++              int retval;
++              retval = mtrr_del(dev->agp->agp_mtrr,
++                                dev->agp->agp_info.aper_base,
++                                dev->agp->agp_info.aper_size * 1024 * 1024);
++              DRM_DEBUG("mtrr_del=%d\n", retval);
++      }
++
++      if (drm_core_has_AGP(dev) && dev->agp) {
++              drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
++              dev->agp = NULL;
++      }
++      if (dev->driver->unload)
++              dev->driver->unload(dev);
++
++      if (!drm_fb_loaded)
++              pci_disable_device(dev->pdev);
++
++      drm_ctxbitmap_cleanup(dev);
++      drm_ht_remove(&dev->map_hash);
++      drm_mm_takedown(&dev->offset_manager);
++      drm_ht_remove(&dev->object_hash);
++
++      drm_put_minor(dev);
++      if (drm_put_dev(dev))
++              DRM_ERROR("Cannot unload module\n");
++}
++
++int drm_minors_cleanup(int id, void *ptr, void *data)
++{
++      struct drm_minor *minor = ptr;
++      struct drm_device *dev;
++      struct drm_driver *driver = data;
++
++      dev = minor->dev;
++      if (minor->dev->driver != driver)
++              return 0;
++
++      if (minor->type != DRM_MINOR_LEGACY)
++              return 0;
++
++      if (dev)
++              pci_dev_put(dev->pdev);
++      drm_cleanup(dev);
++      return 1;
++}
++
++void drm_exit(struct drm_driver *driver)
++{
++      DRM_DEBUG("\n");
++      if (drm_fb_loaded) {
++              idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
++      } else
++              pci_unregister_driver(&driver->pci_driver);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      free_nopage_retry();
++#endif
++      DRM_INFO("Module unloaded\n");
++}
++EXPORT_SYMBOL(drm_exit);
++
++/** File operations structure */
++static const struct file_operations drm_stub_fops = {
++      .owner = THIS_MODULE,
++      .open = drm_stub_open
++};
++
++static int __init drm_core_init(void)
++{
++      int ret;
++      struct sysinfo si;
++      unsigned long avail_memctl_mem;
++      unsigned long max_memctl_mem;
++
++      idr_init(&drm_minors_idr);
++      si_meminfo(&si);
++
++      /*
++       * AGP only allows low / DMA32 memory ATM.
++       */
++
++      avail_memctl_mem = si.totalram - si.totalhigh;
++
++      /*
++       * Avoid overflows
++       */
++
++      max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
++      max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
++
++      if (avail_memctl_mem >= max_memctl_mem)
++              avail_memctl_mem = max_memctl_mem;
++
++      drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
++
++      ret = -ENOMEM;
++
++      if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
++              goto err_p1;
++
++      drm_class = drm_sysfs_create(THIS_MODULE, "drm");
++      if (IS_ERR(drm_class)) {
++              printk(KERN_ERR "DRM: Error creating drm class.\n");
++              ret = PTR_ERR(drm_class);
++              goto err_p2;
++      }
++
++      drm_proc_root = proc_mkdir("dri", NULL);
++      if (!drm_proc_root) {
++              DRM_ERROR("Cannot create /proc/dri\n");
++              ret = -1;
++              goto err_p3;
++      }
++
++      drm_mem_init();
++
++      DRM_INFO("Initialized %s %d.%d.%d %s\n",
++               CORE_NAME,
++               CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++      return 0;
++err_p3:
++      drm_sysfs_destroy();
++err_p2:
++      unregister_chrdev(DRM_MAJOR, "drm");
++
++      idr_destroy(&drm_minors_idr);
++err_p1:
++      return ret;
++}
++
++static void __exit drm_core_exit(void)
++{
++      remove_proc_entry("dri", NULL);
++      drm_sysfs_destroy();
++
++      unregister_chrdev(DRM_MAJOR, "drm");
++
++      idr_destroy(&drm_minors_idr);
++}
++
++module_init(drm_core_init);
++module_exit(drm_core_exit);
++
++/**
++ * Get version information
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_version structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Fills in the version information in \p arg.
++ */
++static int drm_version(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_version *version = data;
++      int len;
++
++      version->version_major = dev->driver->major;
++      version->version_minor = dev->driver->minor;
++      version->version_patchlevel = dev->driver->patchlevel;
++      DRM_COPY(version->name, dev->driver->name);
++      DRM_COPY(version->date, dev->driver->date);
++      DRM_COPY(version->desc, dev->driver->desc);
++
++      return 0;
++}
++
++/**
++ * Called whenever a process performs an ioctl on /dev/drm.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ *
++ * Looks up the ioctl function in the ::ioctls table, checking for root
++ * previleges if so required, and dispatches to the respective function.
++ *
++ * Copies data in and out according to the size and direction given in cmd,
++ * which must match the ioctl cmd known by the kernel.  The kernel uses a 512
++ * byte stack buffer to store the ioctl arguments in kernel space.  Should we
++ * ever need much larger ioctl arguments, we may need to allocate memory.
++ */
++int drm_ioctl(struct inode *inode, struct file *filp,
++            unsigned int cmd, unsigned long arg)
++{
++      return drm_unlocked_ioctl(filp, cmd, arg);
++}
++EXPORT_SYMBOL(drm_ioctl);
++
++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      struct drm_file *file_priv = filp->private_data;
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_ioctl_desc *ioctl;
++      drm_ioctl_t *func;
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      int retcode = -EINVAL;
++      char kdata[512];
++
++      atomic_inc(&dev->ioctl_count);
++      atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++      ++file_priv->ioctl_count;
++
++      DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
++                current->pid, cmd, nr, (long)old_encode_dev(file_priv->minor->device),
++                file_priv->authenticated);
++
++      if ((nr >= DRM_CORE_IOCTL_COUNT) &&
++          ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
++              goto err_i1;
++      if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++              && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
++              ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
++      else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
++              ioctl = &drm_ioctls[nr];
++              cmd = ioctl->cmd;
++      } else {
++              retcode = -EINVAL;
++              goto err_i1;
++      }
++#if 0
++      /*
++       * This check is disabled, because driver private ioctl->cmd
++       * are not the ioctl commands with size and direction bits but
++       * just the indices. The DRM core ioctl->cmd are the proper ioctl
++       * commands. The drivers' ioctl tables need to be fixed.
++       */
++      if (ioctl->cmd != cmd) {
++              retcode = -EINVAL;
++              goto err_i1;
++      }
++#endif
++
++      func = ioctl->func;
++      /* is there a local override? */
++      if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
++              func = dev->driver->dma_ioctl;
++
++      if (cmd & IOC_IN) {
++              if (copy_from_user(kdata, (void __user *)arg,
++                                 _IOC_SIZE(cmd)) != 0) {
++                      retcode = -EACCES;
++                      goto err_i1;
++              }
++      }
++
++      if (!func) {
++              DRM_DEBUG("no function\n");
++              retcode = -EINVAL;
++      } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
++                 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
++                 ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
++              retcode = -EACCES;
++      } else {
++              retcode = func(dev, kdata, file_priv);
++      }
++
++      if (cmd & IOC_OUT) {
++              if (copy_to_user((void __user *)arg, kdata,
++                               _IOC_SIZE(cmd)) != 0)
++                      retcode = -EACCES;
++      }
++
++err_i1:
++      atomic_dec(&dev->ioctl_count);
++      if (retcode)
++              DRM_DEBUG("ret = %d\n", retcode);
++      return retcode;
++}
++EXPORT_SYMBOL(drm_unlocked_ioctl);
++
++drm_local_map_t *drm_getsarea(struct drm_device *dev)
++{
++      struct drm_map_list *entry;
++
++      list_for_each_entry(entry, &dev->maplist, head) {
++              if (entry->map && entry->map->type == _DRM_SHM &&
++                  (entry->map->flags & _DRM_CONTAINS_LOCK)) {
++                      return entry->map;
++              }
++      }
++      return NULL;
++}
++EXPORT_SYMBOL(drm_getsarea);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_fence.c git-nokia/drivers/gpu/drm-tungsten/drm_fence.c
+--- git/drivers/gpu/drm-tungsten/drm_fence.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_fence.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,829 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++
++/*
++ * Convenience function to be called by fence::wait methods that
++ * need polling.
++ */
++
++int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
++                         int interruptible, uint32_t mask, 
++                         unsigned long end_jiffies)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      uint32_t count = 0;
++      int ret;
++
++      DECLARE_WAITQUEUE(entry, current);
++      add_wait_queue(&fc->fence_queue, &entry);
++
++      ret = 0;
++      
++      for (;;) {
++              __set_current_state((interruptible) ? 
++                                  TASK_INTERRUPTIBLE :
++                                  TASK_UNINTERRUPTIBLE);
++              if (drm_fence_object_signaled(fence, mask))
++                      break;
++              if (time_after_eq(jiffies, end_jiffies)) {
++                      ret = -EBUSY;
++                      break;
++              }
++              if (lazy)
++                      schedule_timeout(1);
++              else if ((++count & 0x0F) == 0){
++                      __set_current_state(TASK_RUNNING);
++                      schedule();
++                      __set_current_state((interruptible) ? 
++                                          TASK_INTERRUPTIBLE :
++                                          TASK_UNINTERRUPTIBLE);
++              }                       
++              if (interruptible && signal_pending(current)) {
++                      ret = -EAGAIN;
++                      break;
++              }
++      }
++      __set_current_state(TASK_RUNNING);
++      remove_wait_queue(&fc->fence_queue, &entry);
++      return ret;
++}
++EXPORT_SYMBOL(drm_fence_wait_polling);
++
++/*
++ * Typically called by the IRQ handler.
++ */
++
++void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
++                     uint32_t sequence, uint32_t type, uint32_t error)
++{
++      int wake = 0;
++      uint32_t diff;
++      uint32_t relevant_type;
++      uint32_t new_type;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      struct list_head *head;
++      struct drm_fence_object *fence, *next;
++      int found = 0;
++
++      if (list_empty(&fc->ring))
++              return;
++
++      list_for_each_entry(fence, &fc->ring, ring) {
++              diff = (sequence - fence->sequence) & driver->sequence_mask;
++              if (diff > driver->wrap_diff) {
++                      found = 1;
++                      break;
++              }
++      }
++
++      fc->waiting_types &= ~type;
++      head = (found) ? &fence->ring : &fc->ring;
++
++      list_for_each_entry_safe_reverse(fence, next, head, ring) {
++              if (&fence->ring == &fc->ring)
++                      break;
++
++              if (error) {
++                      fence->error = error;
++                      fence->signaled_types = fence->type;
++                      list_del_init(&fence->ring);
++                      wake = 1;
++                      break;
++              }
++
++              if (type & DRM_FENCE_TYPE_EXE)
++                      type |= fence->native_types;
++
++              relevant_type = type & fence->type;
++              new_type = (fence->signaled_types | relevant_type) ^
++                      fence->signaled_types;
++
++              if (new_type) {
++                      fence->signaled_types |= new_type;
++                      DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
++                                fence->base.hash.key, fence->signaled_types);
++
++                      if (driver->needed_flush)
++                              fc->pending_flush |= driver->needed_flush(fence);
++
++                      if (new_type & fence->waiting_types)
++                              wake = 1;
++              }
++
++              fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
++
++              if (!(fence->type & ~fence->signaled_types)) {
++                      DRM_DEBUG("Fence completely signaled 0x%08lx\n",
++                                fence->base.hash.key);
++                      list_del_init(&fence->ring);
++              }
++      }
++
++      /*
++       * Reinstate lost waiting types.
++       */
++
++      if ((fc->waiting_types & type) != type) {
++              head = head->prev;
++              list_for_each_entry(fence, head, ring) {
++                      if (&fence->ring == &fc->ring)
++                              break;
++                      diff = (fc->highest_waiting_sequence - fence->sequence) &
++                              driver->sequence_mask;
++                      if (diff > driver->wrap_diff)
++                              break;
++                      
++                      fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
++              }
++      }
++
++      if (wake) 
++              wake_up_all(&fc->fence_queue);
++}
++EXPORT_SYMBOL(drm_fence_handler);
++
++static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      unsigned long flags;
++
++      write_lock_irqsave(&fm->lock, flags);
++      list_del_init(ring);
++      write_unlock_irqrestore(&fm->lock, flags);
++}
++
++void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
++{
++      struct drm_fence_object *tmp_fence = *fence;
++      struct drm_device *dev = tmp_fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++      *fence = NULL;
++      if (atomic_dec_and_test(&tmp_fence->usage)) {
++              drm_fence_unring(dev, &tmp_fence->ring);
++              DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
++                        tmp_fence->base.hash.key);
++              atomic_dec(&fm->count);
++              BUG_ON(!list_empty(&tmp_fence->base.list));
++              drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
++      }
++}
++EXPORT_SYMBOL(drm_fence_usage_deref_locked);
++
++void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
++{
++      struct drm_fence_object *tmp_fence = *fence;
++      struct drm_device *dev = tmp_fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      *fence = NULL;
++      if (atomic_dec_and_test(&tmp_fence->usage)) {
++              mutex_lock(&dev->struct_mutex);
++              if (atomic_read(&tmp_fence->usage) == 0) {
++                      drm_fence_unring(dev, &tmp_fence->ring);
++                      atomic_dec(&fm->count);
++                      BUG_ON(!list_empty(&tmp_fence->base.list));
++                      drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
++              }
++              mutex_unlock(&dev->struct_mutex);
++      }
++}
++EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
++
++struct drm_fence_object
++*drm_fence_reference_locked(struct drm_fence_object *src)
++{
++      DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
++
++      atomic_inc(&src->usage);
++      return src;
++}
++
++void drm_fence_reference_unlocked(struct drm_fence_object **dst,
++                                struct drm_fence_object *src)
++{
++      mutex_lock(&src->dev->struct_mutex);
++      *dst = src;
++      atomic_inc(&src->usage);
++      mutex_unlock(&src->dev->struct_mutex);
++}
++EXPORT_SYMBOL(drm_fence_reference_unlocked);
++
++static void drm_fence_object_destroy(struct drm_file *priv,
++                                   struct drm_user_object *base)
++{
++      struct drm_fence_object *fence =
++          drm_user_object_entry(base, struct drm_fence_object, base);
++
++      drm_fence_usage_deref_locked(&fence);
++}
++
++int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
++{
++      unsigned long flags;
++      int signaled;
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      
++      mask &= fence->type;
++      read_lock_irqsave(&fm->lock, flags);
++      signaled = (mask & fence->signaled_types) == mask;
++      read_unlock_irqrestore(&fm->lock, flags);
++      if (!signaled && driver->poll) {
++              write_lock_irqsave(&fm->lock, flags);
++              driver->poll(dev, fence->fence_class, mask);
++              signaled = (mask & fence->signaled_types) == mask;
++              write_unlock_irqrestore(&fm->lock, flags);
++      }
++      return signaled;
++}
++EXPORT_SYMBOL(drm_fence_object_signaled);
++
++
++int drm_fence_object_flush(struct drm_fence_object *fence,
++                         uint32_t type)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      unsigned long irq_flags;
++      uint32_t saved_pending_flush;
++      uint32_t diff;
++      int call_flush;
++
++      if (type & ~fence->type) {
++              DRM_ERROR("Flush trying to extend fence type, "
++                        "0x%x, 0x%x\n", type, fence->type);
++              return -EINVAL;
++      }
++
++      write_lock_irqsave(&fm->lock, irq_flags);
++      fence->waiting_types |= type;
++      fc->waiting_types |= fence->waiting_types;
++      diff = (fence->sequence - fc->highest_waiting_sequence) & 
++              driver->sequence_mask;
++
++      if (diff < driver->wrap_diff)
++              fc->highest_waiting_sequence = fence->sequence;
++
++      /*
++       * fence->waiting_types has changed. Determine whether
++       * we need to initiate some kind of flush as a result of this.
++       */
++
++      saved_pending_flush = fc->pending_flush;
++      if (driver->needed_flush) 
++              fc->pending_flush |= driver->needed_flush(fence);
++
++      if (driver->poll)
++              driver->poll(dev, fence->fence_class, fence->waiting_types);
++
++      call_flush = fc->pending_flush;
++      write_unlock_irqrestore(&fm->lock, irq_flags);
++
++      if (call_flush && driver->flush)
++              driver->flush(dev, fence->fence_class);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_flush);
++
++/*
++ * Make sure old fence objects are signaled before their fence sequences are
++ * wrapped around and reused.
++ */
++
++void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
++                       uint32_t sequence)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
++      struct drm_fence_object *fence;
++      unsigned long irq_flags;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      int call_flush;
++
++      uint32_t diff;
++
++      write_lock_irqsave(&fm->lock, irq_flags);
++
++      list_for_each_entry_reverse(fence, &fc->ring, ring) {
++              diff = (sequence - fence->sequence) & driver->sequence_mask;
++              if (diff <= driver->flush_diff)
++                      break;
++      
++              fence->waiting_types = fence->type;
++              fc->waiting_types |= fence->type;
++
++              if (driver->needed_flush)
++                      fc->pending_flush |= driver->needed_flush(fence);
++      }       
++      
++      if (driver->poll)
++              driver->poll(dev, fence_class, fc->waiting_types);
++
++      call_flush = fc->pending_flush;
++      write_unlock_irqrestore(&fm->lock, irq_flags);
++
++      if (call_flush && driver->flush)
++              driver->flush(dev, fence_class);
++
++      /*
++       * FIXME: Should we implement a wait here for really old fences?
++       */
++
++}
++EXPORT_SYMBOL(drm_fence_flush_old);
++
++int drm_fence_object_wait(struct drm_fence_object *fence,
++                        int lazy, int ignore_signals, uint32_t mask)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      int ret = 0;
++      unsigned long _end = 3 * DRM_HZ;
++
++      if (mask & ~fence->type) {
++              DRM_ERROR("Wait trying to extend fence type"
++                        " 0x%08x 0x%08x\n", mask, fence->type);
++              BUG();
++              return -EINVAL;
++      }
++
++      if (driver->wait)
++              return driver->wait(fence, lazy, !ignore_signals, mask);
++
++
++      drm_fence_object_flush(fence, mask);
++      if (driver->has_irq(dev, fence->fence_class, mask)) {
++              if (!ignore_signals)
++                      ret = wait_event_interruptible_timeout
++                              (fc->fence_queue, 
++                               drm_fence_object_signaled(fence, mask), 
++                               3 * DRM_HZ);
++              else 
++                      ret = wait_event_timeout
++                              (fc->fence_queue, 
++                               drm_fence_object_signaled(fence, mask), 
++                               3 * DRM_HZ);
++
++              if (unlikely(ret == -ERESTARTSYS))
++                      return -EAGAIN;
++
++              if (unlikely(ret == 0))
++                      return -EBUSY;
++
++              return 0;
++      }
++
++      return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
++                                    _end);
++}
++EXPORT_SYMBOL(drm_fence_object_wait);
++
++
++
++int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
++                        uint32_t fence_class, uint32_t type)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      unsigned long flags;
++      uint32_t sequence;
++      uint32_t native_types;
++      int ret;
++
++      drm_fence_unring(dev, &fence->ring);
++      ret = driver->emit(dev, fence_class, fence_flags, &sequence,
++                         &native_types);
++      if (ret)
++              return ret;
++
++      write_lock_irqsave(&fm->lock, flags);
++      fence->fence_class = fence_class;
++      fence->type = type;
++      fence->waiting_types = 0;
++      fence->signaled_types = 0;
++      fence->error = 0;
++      fence->sequence = sequence;
++      fence->native_types = native_types;
++      if (list_empty(&fc->ring))
++              fc->highest_waiting_sequence = sequence - 1;
++      list_add_tail(&fence->ring, &fc->ring);
++      fc->latest_queued_sequence = sequence;
++      write_unlock_irqrestore(&fm->lock, flags);
++      return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_emit);
++
++static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
++                               uint32_t type,
++                               uint32_t fence_flags,
++                               struct drm_fence_object *fence)
++{
++      int ret = 0;
++      unsigned long flags;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      mutex_lock(&dev->struct_mutex);
++      atomic_set(&fence->usage, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      write_lock_irqsave(&fm->lock, flags);
++      INIT_LIST_HEAD(&fence->ring);
++
++      /*
++       *  Avoid hitting BUG() for kernel-only fence objects.
++       */
++
++      INIT_LIST_HEAD(&fence->base.list);
++      fence->fence_class = fence_class;
++      fence->type = type;
++      fence->signaled_types = 0;
++      fence->waiting_types = 0;
++      fence->sequence = 0;
++      fence->error = 0;
++      fence->dev = dev;
++      write_unlock_irqrestore(&fm->lock, flags);
++      if (fence_flags & DRM_FENCE_FLAG_EMIT) {
++              ret = drm_fence_object_emit(fence, fence_flags,
++                                          fence->fence_class, type);
++      }
++      return ret;
++}
++
++int drm_fence_add_user_object(struct drm_file *priv,
++                            struct drm_fence_object *fence, int shareable)
++{
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_user_object(priv, &fence->base, shareable);
++      if (ret)
++              goto out;
++      atomic_inc(&fence->usage);
++      fence->base.type = drm_fence_type;
++      fence->base.remove = &drm_fence_object_destroy;
++      DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++EXPORT_SYMBOL(drm_fence_add_user_object);
++
++int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
++                          uint32_t type, unsigned flags,
++                          struct drm_fence_object **c_fence)
++{
++      struct drm_fence_object *fence;
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
++      if (!fence) {
++              DRM_ERROR("Out of memory creating fence object\n");
++              return -ENOMEM;
++      }
++      ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
++      if (ret) {
++              drm_fence_usage_deref_unlocked(&fence);
++              return ret;
++      }
++      *c_fence = fence;
++      atomic_inc(&fm->count);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_create);
++
++void drm_fence_manager_init(struct drm_device *dev)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fence_class;
++      struct drm_fence_driver *fed = dev->driver->fence_driver;
++      int i;
++      unsigned long flags;
++
++      rwlock_init(&fm->lock);
++      write_lock_irqsave(&fm->lock, flags);
++      fm->initialized = 0;
++      if (!fed)
++          goto out_unlock;
++
++      fm->initialized = 1;
++      fm->num_classes = fed->num_classes;
++      BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
++
++      for (i = 0; i < fm->num_classes; ++i) {
++          fence_class = &fm->fence_class[i];
++
++          memset(fence_class, 0, sizeof(*fence_class));
++          INIT_LIST_HEAD(&fence_class->ring);
++          DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
++      }
++
++      atomic_set(&fm->count, 0);
++ out_unlock:
++      write_unlock_irqrestore(&fm->lock, flags);
++}
++
++void drm_fence_fill_arg(struct drm_fence_object *fence,
++                      struct drm_fence_arg *arg)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      unsigned long irq_flags;
++
++      read_lock_irqsave(&fm->lock, irq_flags);
++      arg->handle = fence->base.hash.key;
++      arg->fence_class = fence->fence_class;
++      arg->type = fence->type;
++      arg->signaled = fence->signaled_types;
++      arg->error = fence->error;
++      arg->sequence = fence->sequence;
++      read_unlock_irqrestore(&fm->lock, irq_flags);
++}
++EXPORT_SYMBOL(drm_fence_fill_arg);
++
++void drm_fence_manager_takedown(struct drm_device *dev)
++{
++}
++
++struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
++                                               uint32_t handle)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_user_object *uo;
++      struct drm_fence_object *fence;
++
++      mutex_lock(&dev->struct_mutex);
++      uo = drm_lookup_user_object(priv, handle);
++      if (!uo || (uo->type != drm_fence_type)) {
++              mutex_unlock(&dev->struct_mutex);
++              return NULL;
++      }
++      fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
++      mutex_unlock(&dev->struct_mutex);
++      return fence;
++}
++
++int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      if (arg->flags & DRM_FENCE_FLAG_EMIT)
++              LOCK_TEST_WITH_RETURN(dev, file_priv);
++      ret = drm_fence_object_create(dev, arg->fence_class,
++                                    arg->type, arg->flags, &fence);
++      if (ret)
++              return ret;
++      ret = drm_fence_add_user_object(file_priv, fence,
++                                      arg->flags &
++                                      DRM_FENCE_FLAG_SHAREABLE);
++      if (ret) {
++              drm_fence_usage_deref_unlocked(&fence);
++              return ret;
++      }
++
++      /*
++       * usage > 0. No need to lock dev->struct_mutex;
++       */
++
++      arg->handle = fence->base.hash.key;
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      struct drm_user_object *uo;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
++      if (ret)
++              return ret;
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++
++int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
++}
++
++int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++      ret = drm_fence_object_flush(fence, arg->type);
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++
++int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++      ret = drm_fence_object_wait(fence,
++                                  arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
++                                  0, arg->type);
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++
++int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++      ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
++                                  arg->type);
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized\n");
++              return -EINVAL;
++      }
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
++                                     NULL, &fence);
++      if (ret)
++              return ret;
++
++      if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
++              ret = drm_fence_add_user_object(file_priv, fence,
++                                              arg->flags &
++                                              DRM_FENCE_FLAG_SHAREABLE);
++              if (ret)
++                      return ret;
++      }
++
++      arg->handle = fence->base.hash.key;
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_fops.c git-nokia/drivers/gpu/drm-tungsten/drm_fops.c
+--- git/drivers/gpu/drm-tungsten/drm_fops.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_fops.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,532 @@
++/**
++ * \file drm_fops.c
++ * File operations for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Daryll Strauss <daryll@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_sarea.h"
++#include <linux/poll.h>
++
++static int drm_open_helper(struct inode *inode, struct file *filp,
++                         struct drm_device * dev);
++
++static int drm_setup(struct drm_device * dev)
++{
++      drm_local_map_t *map;
++      int i;
++      int ret;
++      int sareapage;
++
++      if (dev->driver->firstopen) {
++              ret = dev->driver->firstopen(dev);
++              if (ret != 0)
++                      return ret;
++      }
++
++      dev->magicfree.next = NULL;
++
++      /* prebuild the SAREA */
++      sareapage = max(SAREA_MAX, PAGE_SIZE);
++      i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
++      if (i != 0)
++              return i;
++
++      atomic_set(&dev->ioctl_count, 0);
++      atomic_set(&dev->vma_count, 0);
++      dev->buf_use = 0;
++      atomic_set(&dev->buf_alloc, 0);
++
++      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
++              i = drm_dma_setup(dev);
++              if (i < 0)
++                      return i;
++      }
++
++      for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
++              atomic_set(&dev->counts[i], 0);
++
++      drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
++      INIT_LIST_HEAD(&dev->magicfree);
++
++      dev->sigdata.lock = NULL;
++      init_waitqueue_head(&dev->lock.lock_queue);
++      dev->queue_count = 0;
++      dev->queue_reserved = 0;
++      dev->queue_slots = 0;
++      dev->queuelist = NULL;
++      dev->context_flag = 0;
++      dev->interrupt_flag = 0;
++      dev->dma_flag = 0;
++      dev->last_context = 0;
++      dev->last_switch = 0;
++      dev->last_checked = 0;
++      init_waitqueue_head(&dev->context_wait);
++      dev->if_version = 0;
++
++      dev->ctx_start = 0;
++      dev->lck_start = 0;
++
++      dev->buf_async = NULL;
++      init_waitqueue_head(&dev->buf_readers);
++      init_waitqueue_head(&dev->buf_writers);
++
++      DRM_DEBUG("\n");
++
++      /*
++       * The kernel's context could be created here, but is now created
++       * in drm_dma_enqueue.  This is more resource-efficient for
++       * hardware that does not do DMA, but may mean that
++       * drm_select_queue fails between the time the interrupt is
++       * initialized and the time the queues are initialized.
++       */
++
++      return 0;
++}
++
++/**
++ * Open file.
++ *
++ * \param inode device inode
++ * \param filp file pointer.
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches the DRM device with the same minor number, calls open_helper(), and
++ * increments the device open count. If the open count was previous at zero,
++ * i.e., it's the first that the device is open, then calls setup().
++ */
++int drm_open(struct inode *inode, struct file *filp)
++{
++      struct drm_device *dev = NULL;
++      int minor_id = iminor(inode);
++      struct drm_minor *minor;
++      int retcode = 0;
++
++      minor = idr_find(&drm_minors_idr, minor_id);
++      if (!minor)
++              return -ENODEV;
++
++      if (!(dev = minor->dev))
++              return -ENODEV;
++
++      retcode = drm_open_helper(inode, filp, dev);
++      if (!retcode) {
++              atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++              spin_lock(&dev->count_lock);
++              if (!dev->open_count++) {
++                      spin_unlock(&dev->count_lock);
++                      retcode = drm_setup(dev);
++                      goto out;
++              }
++              spin_unlock(&dev->count_lock);
++      }
++
++out:
++      mutex_lock(&dev->struct_mutex);
++      BUG_ON((dev->dev_mapping != NULL) &&
++             (dev->dev_mapping != inode->i_mapping));
++      if (dev->dev_mapping == NULL)
++              dev->dev_mapping = inode->i_mapping;
++      mutex_unlock(&dev->struct_mutex);
++
++      return retcode;
++}
++EXPORT_SYMBOL(drm_open);
++
++/**
++ * File \c open operation.
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ *
++ * Puts the dev->fops corresponding to the device minor number into
++ * \p filp, call the \c open method, and restore the file operations.
++ */
++int drm_stub_open(struct inode *inode, struct file *filp)
++{
++      struct drm_device *dev = NULL;
++      struct drm_minor *minor;
++      int minor_id = iminor(inode);
++      int err = -ENODEV;
++      const struct file_operations *old_fops;
++
++      DRM_DEBUG("\n");
++
++      minor = idr_find(&drm_minors_idr, minor_id);
++      if (!minor)
++              return -ENODEV;
++      
++      if (!(dev = minor->dev))
++              return -ENODEV;
++
++      old_fops = filp->f_op;
++      filp->f_op = fops_get(&dev->driver->fops);
++      if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
++              fops_put(filp->f_op);
++              filp->f_op = fops_get(old_fops);
++      }
++      fops_put(old_fops);
++
++      return err;
++}
++
++/**
++ * Check whether DRI will run on this CPU.
++ *
++ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
++ */
++static int drm_cpu_valid(void)
++{
++#if defined(__i386__)
++      if (boot_cpu_data.x86 == 3)
++              return 0;       /* No cmpxchg on a 386 */
++#endif
++#if defined(__sparc__) && !defined(__sparc_v9__)
++      return 0;               /* No cmpxchg before v9 sparc. */
++#endif
++      return 1;
++}
++
++/**
++ * Called whenever a process opens /dev/drm.
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ * \param dev device.
++ * \return zero on success or a negative number on failure.
++ *
++ * Creates and initializes a drm_file structure for the file private data in \p
++ * filp and add it into the double linked list in \p dev.
++ */
++static int drm_open_helper(struct inode *inode, struct file *filp,
++                         struct drm_device * dev)
++{
++      int minor_id = iminor(inode);
++      struct drm_file *priv;
++      int ret;
++      int i, j;
++
++      if (filp->f_flags & O_EXCL)
++              return -EBUSY;  /* No exclusive opens */
++      if (!drm_cpu_valid())
++              return -EINVAL;
++
++      DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor_id);
++
++      priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
++      if (!priv)
++              return -ENOMEM;
++
++      memset(priv, 0, sizeof(*priv));
++      filp->private_data = priv;
++      priv->filp = filp;
++      priv->uid = current->euid;
++      priv->pid = current->pid;
++      priv->minor = idr_find(&drm_minors_idr, minor_id);
++      priv->ioctl_count = 0;
++      /* for compatibility root is always authenticated */
++      priv->authenticated = capable(CAP_SYS_ADMIN);
++      priv->lock_count = 0;
++
++      INIT_LIST_HEAD(&priv->lhead);
++      INIT_LIST_HEAD(&priv->refd_objects);
++
++      for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
++              ret = drm_ht_create(&priv->refd_object_hash[i],
++                                  DRM_FILE_HASH_ORDER);
++              if (ret)
++                      break;
++      }
++
++      if (ret) {
++              for (j = 0; j < i; ++j)
++                      drm_ht_remove(&priv->refd_object_hash[j]);
++              goto out_free;
++      }
++
++      if (dev->driver->driver_features & DRIVER_GEM)
++              drm_gem_open(dev, priv);
++
++      if (dev->driver->open) {
++              ret = dev->driver->open(dev, priv);
++              if (ret < 0)
++                      goto out_free;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      if (list_empty(&dev->filelist))
++              priv->master = 1;
++
++      list_add(&priv->lhead, &dev->filelist);
++      mutex_unlock(&dev->struct_mutex);
++
++#ifdef __alpha__
++      /*
++       * Default the hose
++       */
++      if (!dev->hose) {
++              struct pci_dev *pci_dev;
++              pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
++              if (pci_dev) {
++                      dev->hose = pci_dev->sysdata;
++                      pci_dev_put(pci_dev);
++              }
++              if (!dev->hose) {
++                      struct pci_bus *b = pci_bus_b(pci_root_buses.next);
++                      if (b)
++                              dev->hose = b->sysdata;
++              }
++      }
++#endif
++
++      return 0;
++      out_free:
++      drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
++      filp->private_data = NULL;
++      return ret;
++}
++
++/** No-op. */
++int drm_fasync(int fd, struct file *filp, int on)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      int retcode;
++
++      DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
++                (long)old_encode_dev(priv->minor->device));
++      retcode = fasync_helper(fd, filp, on, &dev->buf_async);
++      if (retcode < 0)
++              return retcode;
++      return 0;
++}
++EXPORT_SYMBOL(drm_fasync);
++
++static void drm_object_release(struct file *filp)
++{
++      struct drm_file *priv = filp->private_data;
++      struct list_head *head;
++      struct drm_ref_object *ref_object;
++      int i;
++
++      /*
++       * Free leftover ref objects created by me. Note that we cannot use
++       * list_for_each() here, as the struct_mutex may be temporarily
++       * released by the remove_() functions, and thus the lists may be
++       * altered.
++       * Also, a drm_remove_ref_object() will not remove it
++       * from the list unless its refcount is 1.
++       */
++
++      head = &priv->refd_objects;
++      while (head->next != head) {
++              ref_object = list_entry(head->next, struct drm_ref_object, list);
++              drm_remove_ref_object(priv, ref_object);
++              head = &priv->refd_objects;
++      }
++
++      for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
++              drm_ht_remove(&priv->refd_object_hash[i]);
++}
++
++/**
++ * Release file.
++ *
++ * \param inode device inode
++ * \param file_priv DRM file private.
++ * \return zero on success or a negative number on failure.
++ *
++ * If the hardware lock is held then free it, and take it again for the kernel
++ * context since it's necessary to reclaim buffers. Unlink the file private
++ * data from its list and free it. Decreases the open count and if it reaches
++ * zero calls drm_lastclose().
++ */
++int drm_release(struct inode *inode, struct file *filp)
++{
++      struct drm_file *file_priv = filp->private_data;
++      struct drm_device *dev = file_priv->minor->dev;
++      int retcode = 0;
++
++      lock_kernel();
++
++      DRM_DEBUG("open_count = %d\n", dev->open_count);
++
++      if (dev->driver->preclose)
++              dev->driver->preclose(dev, file_priv);
++
++      /* ========================================================
++       * Begin inline drm_release
++       */
++
++      DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
++                current->pid, (long)old_encode_dev(file_priv->minor->device),
++                dev->open_count);
++
++      if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
++              if (drm_i_have_hw_lock(dev, file_priv)) {
++                      dev->driver->reclaim_buffers_locked(dev, file_priv);
++              } else {
++                      unsigned long _end=jiffies + 3*DRM_HZ;
++                      int locked = 0;
++
++                      drm_idlelock_take(&dev->lock);
++
++                      /*
++                       * Wait for a while.
++                       */
++
++                      do{
++                              spin_lock_bh(&dev->lock.spinlock);
++                              locked = dev->lock.idle_has_lock;
++                              spin_unlock_bh(&dev->lock.spinlock);
++                              if (locked)
++                                      break;
++                              schedule();
++                      } while (!time_after_eq(jiffies, _end));
++
++                      if (!locked) {
++                              DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
++                                        "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
++                                        "\tI will go on reclaiming the buffers anyway.\n");
++                      }
++
++                      dev->driver->reclaim_buffers_locked(dev, file_priv);
++                      drm_idlelock_release(&dev->lock);
++              }
++      }
++
++      if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
++
++              drm_idlelock_take(&dev->lock);
++              dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
++              drm_idlelock_release(&dev->lock);
++
++      }
++
++      if (drm_i_have_hw_lock(dev, file_priv)) {
++              DRM_DEBUG("File %p released, freeing lock for context %d\n",
++                        filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
++
++              drm_lock_free(&dev->lock,
++                            _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
++      }
++
++
++      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
++          !dev->driver->reclaim_buffers_locked) {
++              dev->driver->reclaim_buffers(dev, file_priv);
++      }
++
++      if (dev->driver->driver_features & DRIVER_GEM)
++              drm_gem_release(dev, file_priv);
++
++      drm_fasync(-1, filp, 0);
++
++      mutex_lock(&dev->ctxlist_mutex);
++
++      if (!list_empty(&dev->ctxlist)) {
++              struct drm_ctx_list *pos, *n;
++
++              list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
++                      if (pos->tag == file_priv &&
++                          pos->handle != DRM_KERNEL_CONTEXT) {
++                              if (dev->driver->context_dtor)
++                                      dev->driver->context_dtor(dev,
++                                                                pos->handle);
++
++                              drm_ctxbitmap_free(dev, pos->handle);
++
++                              list_del(&pos->head);
++                              drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
++                              --dev->ctx_count;
++                      }
++              }
++      }
++      mutex_unlock(&dev->ctxlist_mutex);
++
++      mutex_lock(&dev->struct_mutex);
++      drm_object_release(filp);
++      if (file_priv->remove_auth_on_close == 1) {
++              struct drm_file *temp;
++
++              list_for_each_entry(temp, &dev->filelist, lhead)
++                      temp->authenticated = 0;
++      }
++      list_del(&file_priv->lhead);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (dev->driver->postclose)
++              dev->driver->postclose(dev, file_priv);
++      drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
++
++      /* ========================================================
++       * End inline drm_release
++       */
++
++      atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
++      spin_lock(&dev->count_lock);
++      if (!--dev->open_count) {
++              if (atomic_read(&dev->ioctl_count) || dev->blocked) {
++                      DRM_ERROR("Device busy: %d %d\n",
++                                atomic_read(&dev->ioctl_count), dev->blocked);
++                      spin_unlock(&dev->count_lock);
++                      unlock_kernel();
++                      return -EBUSY;
++              }
++              spin_unlock(&dev->count_lock);
++              unlock_kernel();
++              return drm_lastclose(dev);
++      }
++      spin_unlock(&dev->count_lock);
++
++      unlock_kernel();
++
++      return retcode;
++}
++EXPORT_SYMBOL(drm_release);
++
++/** No-op. */
++/* This is to deal with older X servers that believe 0 means data is
++ * available which is not the correct return for a poll function.
++ * This cannot be fixed until the Xserver is fixed. Xserver will need
++ * to set a newer interface version to avoid breaking older Xservers.
++ * Without fixing the Xserver you get: "WaitForSomething(): select: errno=22"
++ * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try
++ * to return the correct response.
++ */
++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
++{
++      /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */
++      return 0;
++}
++EXPORT_SYMBOL(drm_poll);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_gem.c git-nokia/drivers/gpu/drm-tungsten/drm_gem.c
+--- git/drivers/gpu/drm-tungsten/drm_gem.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_gem.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,444 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include <linux/version.h>
++
++#include "drmP.h"
++
++#if OS_HAS_GEM
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/pagemap.h>
++
++/** @file drm_gem.c
++ *
++ * This file provides some of the base ioctls and library routines for
++ * the graphics memory manager implemented by each device driver.
++ *
++ * Because various devices have different requirements in terms of
++ * synchronization and migration strategies, implementing that is left up to
++ * the driver, and all that the general API provides should be generic --
++ * allocating objects, reading/writing data with the cpu, freeing objects.
++ * Even there, platform-dependent optimizations for reading/writing data with
++ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
++ * the DRI2 implementation wants to have at least allocate/mmap be generic.
++ *
++ * The goal was to have swap-backed object allocation managed through
++ * struct file.  However, file descriptors as handles to a struct file have
++ * two major failings:
++ * - Process limits prevent more than 1024 or so being used at a time by
++ *   default.
++ * - Inability to allocate high fds will aggravate the X Server's select()
++ *   handling, and likely that of many GL client applications as well.
++ *
++ * This led to a plan of using our own integer IDs (called handles, following
++ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
++ * ioctls.  The objects themselves will still include the struct file so
++ * that we can transition to fds if the required kernel infrastructure shows
++ * up at a later date, and as our interface with shmfs for memory allocation.
++ */
++
++/**
++ * Initialize the GEM device fields
++ */
++
++int
++drm_gem_init(struct drm_device *dev)
++{
++      spin_lock_init(&dev->object_name_lock);
++      idr_init(&dev->object_name_idr);
++      atomic_set(&dev->object_count, 0);
++      atomic_set(&dev->object_memory, 0);
++      atomic_set(&dev->pin_count, 0);
++      atomic_set(&dev->pin_memory, 0);
++      atomic_set(&dev->gtt_count, 0);
++      atomic_set(&dev->gtt_memory, 0);
++      return 0;
++}
++
++/**
++ * Allocate a GEM object of the specified size with shmfs backing store
++ */
++struct drm_gem_object *
++drm_gem_object_alloc(struct drm_device *dev, size_t size)
++{
++      struct drm_gem_object *obj;
++
++      BUG_ON((size & (PAGE_SIZE - 1)) != 0);
++
++      obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
++
++      obj->dev = dev;
++      obj->filp = shmem_file_setup("drm mm object", size, 0);
++      if (IS_ERR(obj->filp)) {
++              kfree(obj);
++              return NULL;
++      }
++
++      kref_init(&obj->refcount);
++      kref_init(&obj->handlecount);
++      obj->size = size;
++      if (dev->driver->gem_init_object != NULL &&
++          dev->driver->gem_init_object(obj) != 0) {
++              fput(obj->filp);
++              kfree(obj);
++              return NULL;
++      }
++      atomic_inc(&dev->object_count);
++      atomic_add(obj->size, &dev->object_memory);
++      return obj;
++}
++EXPORT_SYMBOL(drm_gem_object_alloc);
++
++/**
++ * Removes the mapping from handle to filp for this object.
++ */
++static int
++drm_gem_handle_delete(struct drm_file *filp, int handle)
++{
++      struct drm_device *dev;
++      struct drm_gem_object *obj;
++
++      /* This is gross. The idr system doesn't let us try a delete and
++       * return an error code.  It just spews if you fail at deleting.
++       * So, we have to grab a lock around finding the object and then
++       * doing the delete on it and dropping the refcount, or the user
++       * could race us to double-decrement the refcount and cause a
++       * use-after-free later.  Given the frequency of our handle lookups,
++       * we may want to use ida for number allocation and a hash table
++       * for the pointers, anyway.
++       */
++      spin_lock(&filp->table_lock);
++
++      /* Check if we currently have a reference on the object */
++      obj = idr_find(&filp->object_idr, handle);
++      if (obj == NULL) {
++              spin_unlock(&filp->table_lock);
++              return -EINVAL;
++      }
++      dev = obj->dev;
++
++      /* Release reference and decrement refcount. */
++      idr_remove(&filp->object_idr, handle);
++      spin_unlock(&filp->table_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_handle_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Create a handle for this object. This adds a handle reference
++ * to the object, which includes a regular reference count. Callers
++ * will likely want to dereference the object afterwards.
++ */
++int
++drm_gem_handle_create(struct drm_file *file_priv,
++                     struct drm_gem_object *obj,
++                     int *handlep)
++{
++      int     ret;
++
++      /*
++       * Get the user-visible handle using idr.
++       */
++again:
++      /* ensure there is space available to allocate a handle */
++      if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
++              return -ENOMEM;
++
++      /* do the allocation under our spinlock */
++      spin_lock(&file_priv->table_lock);
++      ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
++      spin_unlock(&file_priv->table_lock);
++      if (ret == -EAGAIN)
++              goto again;
++
++      if (ret != 0)
++              return ret;
++
++      drm_gem_object_handle_reference(obj);
++      return 0;
++}
++EXPORT_SYMBOL(drm_gem_handle_create);
++
++/** Returns a reference to the object named by the handle. */
++struct drm_gem_object *
++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
++                    int handle)
++{
++      struct drm_gem_object *obj;
++
++      spin_lock(&filp->table_lock);
++
++      /* Check if we currently have a reference on the object */
++      obj = idr_find(&filp->object_idr, handle);
++      if (obj == NULL) {
++              spin_unlock(&filp->table_lock);
++              return NULL;
++      }
++
++      drm_gem_object_reference(obj);
++
++      spin_unlock(&filp->table_lock);
++
++      return obj;
++}
++EXPORT_SYMBOL(drm_gem_object_lookup);
++
++/**
++ * Releases the handle to an mm object.
++ */
++int
++drm_gem_close_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_gem_close *args = data;
++      int ret;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      ret = drm_gem_handle_delete(file_priv, args->handle);
++
++      return ret;
++}
++
++/**
++ * Create a global name for an object, returning the name.
++ *
++ * Note that the name does not hold a reference; when the object
++ * is freed, the name goes away.
++ */
++int
++drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_gem_flink *args = data;
++      struct drm_gem_object *obj;
++      int ret;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EINVAL;
++
++again:
++      if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
++              return -ENOMEM;
++
++      spin_lock(&dev->object_name_lock);
++      if (obj->name) {
++              spin_unlock(&dev->object_name_lock);
++              return -EEXIST;
++      }
++      ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
++                               &obj->name);
++      spin_unlock(&dev->object_name_lock);
++      if (ret == -EAGAIN)
++              goto again;
++
++      if (ret != 0) {
++              mutex_lock(&dev->struct_mutex);
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      /*
++       * Leave the reference from the lookup around as the
++       * name table now holds one
++       */
++      args->name = (uint64_t) obj->name;
++
++      return 0;
++}
++
++/**
++ * Open an object using the global name, returning a handle and the size.
++ *
++ * This handle (of course) holds a reference to the object, so the object
++ * will not go away until the handle is deleted.
++ */
++int
++drm_gem_open_ioctl(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_gem_open *args = data;
++      struct drm_gem_object *obj;
++      int ret;
++      int handle;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      spin_lock(&dev->object_name_lock);
++      obj = idr_find(&dev->object_name_idr, (int) args->name);
++      if (obj)
++              drm_gem_object_reference(obj);
++      spin_unlock(&dev->object_name_lock);
++      if (!obj)
++              return -ENOENT;
++
++      ret = drm_gem_handle_create(file_priv, obj, &handle);
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret)
++              return ret;
++
++      args->handle = handle;
++      args->size = obj->size;
++
++      return 0;
++}
++
++/**
++ * Called at device open time, sets up the structure for handling refcounting
++ * of mm objects.
++ */
++void
++drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
++{
++      idr_init(&file_private->object_idr);
++      spin_lock_init(&file_private->table_lock);
++}
++
++/**
++ * Called at device close to release the file's
++ * handle references on objects.
++ */
++static int
++drm_gem_object_release_handle(int id, void *ptr, void *data)
++{
++      struct drm_gem_object *obj = ptr;
++
++      drm_gem_object_handle_unreference(obj);
++
++      return 0;
++}
++
++/**
++ * Called at close time when the filp is going away.
++ *
++ * Releases any remaining references on objects by this filp.
++ */
++void
++drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
++{
++      mutex_lock(&dev->struct_mutex);
++      idr_for_each(&file_private->object_idr,
++                   &drm_gem_object_release_handle, NULL);
++
++      idr_destroy(&file_private->object_idr);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Called after the last reference to the object has been lost.
++ *
++ * Frees the object
++ */
++void
++drm_gem_object_free(struct kref *kref)
++{
++      struct drm_gem_object *obj = (struct drm_gem_object *) kref;
++      struct drm_device *dev = obj->dev;
++
++      BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++
++      if (dev->driver->gem_free_object != NULL)
++              dev->driver->gem_free_object(obj);
++
++      fput(obj->filp);
++      atomic_dec(&dev->object_count);
++      atomic_sub(obj->size, &dev->object_memory);
++      kfree(obj);
++}
++EXPORT_SYMBOL(drm_gem_object_free);
++
++/**
++ * Called after the last handle to the object has been closed
++ *
++ * Removes any name for the object. Note that this must be
++ * called before drm_gem_object_free or we'll be touching
++ * freed memory
++ */
++void
++drm_gem_object_handle_free(struct kref *kref)
++{
++      struct drm_gem_object *obj = container_of(kref,
++                                                struct drm_gem_object,
++                                                handlecount);
++      struct drm_device *dev = obj->dev;
++
++      /* Remove any name for this object */
++      spin_lock(&dev->object_name_lock);
++      if (obj->name) {
++              idr_remove(&dev->object_name_idr, obj->name);
++              spin_unlock(&dev->object_name_lock);
++              /*
++               * The object name held a reference to this object, drop
++               * that now.
++               */
++              drm_gem_object_unreference(obj);
++      } else
++              spin_unlock(&dev->object_name_lock);
++
++}
++EXPORT_SYMBOL(drm_gem_object_handle_free);
++
++#else
++
++int drm_gem_init(struct drm_device *dev)
++{
++      return 0;
++}
++
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
++{
++
++}
++
++void
++drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
++{
++
++}
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm.h git-nokia/drivers/gpu/drm-tungsten/drm.h
+--- git/drivers/gpu/drm-tungsten/drm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1145 @@
++/**
++ * \file drm.h
++ * Header for the Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ *
++ * \par Acknowledgments:
++ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/**
++ * \mainpage
++ *
++ * The Direct Rendering Manager (DRM) is a device-independent kernel-level
++ * device driver that provides support for the XFree86 Direct Rendering
++ * Infrastructure (DRI).
++ *
++ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
++ * ways:
++ *     -# The DRM provides synchronized access to the graphics hardware via
++ *        the use of an optimized two-tiered lock.
++ *     -# The DRM enforces the DRI security policy for access to the graphics
++ *        hardware by only allowing authenticated X11 clients access to
++ *        restricted regions of memory.
++ *     -# The DRM provides a generic DMA engine, complete with multiple
++ *        queues and the ability to detect the need for an OpenGL context
++ *        switch.
++ *     -# The DRM is extensible via the use of small device-specific modules
++ *        that rely extensively on the API exported by the DRM module.
++ *
++ */
++
++#ifndef _DRM_H_
++#define _DRM_H_
++
++#ifndef __user
++#define __user
++#endif
++#ifndef __iomem
++#define __iomem
++#endif
++
++#ifdef __GNUC__
++# define DEPRECATED  __attribute__ ((deprecated))
++#else
++# define DEPRECATED
++#endif
++
++#if defined(__linux__)
++#include <asm/ioctl.h>                /* For _IO* macros */
++#define DRM_IOCTL_NR(n)               _IOC_NR(n)
++#define DRM_IOC_VOID          _IOC_NONE
++#define DRM_IOC_READ          _IOC_READ
++#define DRM_IOC_WRITE         _IOC_WRITE
++#define DRM_IOC_READWRITE     _IOC_READ|_IOC_WRITE
++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
++#include <sys/ioccom.h>
++#define DRM_IOCTL_NR(n)               ((n) & 0xff)
++#define DRM_IOC_VOID          IOC_VOID
++#define DRM_IOC_READ          IOC_OUT
++#define DRM_IOC_WRITE         IOC_IN
++#define DRM_IOC_READWRITE     IOC_INOUT
++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#endif
++
++#ifdef __OpenBSD__
++#define DRM_MAJOR       81
++#endif
++#if defined(__linux__) || defined(__NetBSD__)
++#define DRM_MAJOR       226
++#endif
++#define DRM_MAX_MINOR   15
++
++#define DRM_NAME      "drm"     /**< Name in kernel, /dev, and /proc */
++#define DRM_MIN_ORDER 5         /**< At least 2^5 bytes = 32 bytes */
++#define DRM_MAX_ORDER 22        /**< Up to 2^22 bytes = 4MB */
++#define DRM_RAM_PERCENT 10      /**< How much system ram can we lock? */
++
++#define _DRM_LOCK_HELD        0x80000000U /**< Hardware lock is held */
++#define _DRM_LOCK_CONT        0x40000000U /**< Hardware lock is contended */
++#define _DRM_LOCK_IS_HELD(lock)          ((lock) & _DRM_LOCK_HELD)
++#define _DRM_LOCK_IS_CONT(lock)          ((lock) & _DRM_LOCK_CONT)
++#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
++
++#if defined(__linux__)
++typedef unsigned int drm_handle_t;
++#else
++#include <sys/types.h>
++typedef unsigned long drm_handle_t;   /**< To mapped regions */
++#endif
++typedef unsigned int drm_context_t;   /**< GLXContext handle */
++typedef unsigned int drm_drawable_t;
++typedef unsigned int drm_magic_t;     /**< Magic for authentication */
++
++/**
++ * Cliprect.
++ *
++ * \warning If you change this structure, make sure you change
++ * XF86DRIClipRectRec in the server as well
++ *
++ * \note KW: Actually it's illegal to change either for
++ * backwards-compatibility reasons.
++ */
++struct drm_clip_rect {
++      unsigned short x1;
++      unsigned short y1;
++      unsigned short x2;
++      unsigned short y2;
++};
++
++/**
++ * Texture region,
++ */
++struct drm_tex_region {
++      unsigned char next;
++      unsigned char prev;
++      unsigned char in_use;
++      unsigned char padding;
++      unsigned int age;
++};
++
++/**
++ * Hardware lock.
++ *
++ * The lock structure is a simple cache-line aligned integer.  To avoid
++ * processor bus contention on a multiprocessor system, there should not be any
++ * other data stored in the same cache line.
++ */
++struct drm_hw_lock {
++      __volatile__ unsigned int lock;         /**< lock variable */
++      char padding[60];                       /**< Pad to cache line */
++};
++
++/* This is beyond ugly, and only works on GCC.  However, it allows me to use
++ * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
++ * fix is to use uint32_t instead of size_t, but that fix will break existing
++ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
++ * eventually happen, though.  I chose 'unsigned long' to be the fallback type
++ * because that works on all the platforms I know about.  Hopefully, the
++ * real fix will happen before that bites us.
++ */
++
++#ifdef __SIZE_TYPE__
++# define DRM_SIZE_T __SIZE_TYPE__
++#else
++# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
++# define DRM_SIZE_T unsigned long
++#endif
++
++/**
++ * DRM_IOCTL_VERSION ioctl argument type.
++ *
++ * \sa drmGetVersion().
++ */
++struct drm_version {
++      int version_major;        /**< Major version */
++      int version_minor;        /**< Minor version */
++      int version_patchlevel;   /**< Patch level */
++      DRM_SIZE_T name_len;      /**< Length of name buffer */
++      char __user *name;                /**< Name of driver */
++      DRM_SIZE_T date_len;      /**< Length of date buffer */
++      char __user *date;                /**< User-space buffer to hold date */
++      DRM_SIZE_T desc_len;      /**< Length of desc buffer */
++      char __user *desc;                /**< User-space buffer to hold desc */
++};
++
++/**
++ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
++ *
++ * \sa drmGetBusid() and drmSetBusId().
++ */
++struct drm_unique {
++      DRM_SIZE_T unique_len;    /**< Length of unique */
++      char __user *unique;              /**< Unique name for driver instantiation */
++};
++
++#undef DRM_SIZE_T
++
++struct drm_list {
++      int count;                /**< Length of user-space structures */
++      struct drm_version __user *version;
++};
++
++struct drm_block {
++      int unused;
++};
++
++/**
++ * DRM_IOCTL_CONTROL ioctl argument type.
++ *
++ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
++ */
++struct drm_control {
++      enum {
++              DRM_ADD_COMMAND,
++              DRM_RM_COMMAND,
++              DRM_INST_HANDLER,
++              DRM_UNINST_HANDLER
++      } func;
++      int irq;
++};
++
++/**
++ * Type of memory to map.
++ */
++enum drm_map_type {
++      _DRM_FRAME_BUFFER = 0,    /**< WC (no caching), no core dump */
++      _DRM_REGISTERS = 1,       /**< no caching, no core dump */
++      _DRM_SHM = 2,             /**< shared, cached */
++      _DRM_AGP = 3,             /**< AGP/GART */
++      _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
++      _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
++      _DRM_TTM = 6
++};
++
++/**
++ * Memory mapping flags.
++ */
++enum drm_map_flags {
++      _DRM_RESTRICTED = 0x01,      /**< Cannot be mapped to user-virtual */
++      _DRM_READ_ONLY = 0x02,
++      _DRM_LOCKED = 0x04,          /**< shared, cached, locked */
++      _DRM_KERNEL = 0x08,          /**< kernel requires access */
++      _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
++      _DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
++      _DRM_REMOVABLE = 0x40,       /**< Removable mapping */
++      _DRM_DRIVER = 0x80           /**< Managed by driver */
++};
++
++struct drm_ctx_priv_map {
++      unsigned int ctx_id;     /**< Context requesting private mapping */
++      void *handle;            /**< Handle of map */
++};
++
++/**
++ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
++ * argument type.
++ *
++ * \sa drmAddMap().
++ */
++struct drm_map {
++      unsigned long offset;    /**< Requested physical address (0 for SAREA)*/
++      unsigned long size;      /**< Requested physical size (bytes) */
++      enum drm_map_type type;  /**< Type of memory to map */
++      enum drm_map_flags flags;        /**< Flags */
++      void *handle;            /**< User-space: "Handle" to pass to mmap() */
++                               /**< Kernel-space: kernel-virtual address */
++      int mtrr;                /**< MTRR slot used */
++      /*   Private data */
++};
++
++/**
++ * DRM_IOCTL_GET_CLIENT ioctl argument type.
++ */
++struct drm_client {
++      int idx;                /**< Which client desired? */
++      int auth;               /**< Is client authenticated? */
++      unsigned long pid;      /**< Process ID */
++      unsigned long uid;      /**< User ID */
++      unsigned long magic;    /**< Magic */
++      unsigned long iocs;     /**< Ioctl count */
++};
++
++enum drm_stat_type {
++      _DRM_STAT_LOCK,
++      _DRM_STAT_OPENS,
++      _DRM_STAT_CLOSES,
++      _DRM_STAT_IOCTLS,
++      _DRM_STAT_LOCKS,
++      _DRM_STAT_UNLOCKS,
++      _DRM_STAT_VALUE,        /**< Generic value */
++      _DRM_STAT_BYTE,         /**< Generic byte counter (1024bytes/K) */
++      _DRM_STAT_COUNT,        /**< Generic non-byte counter (1000/k) */
++
++      _DRM_STAT_IRQ,          /**< IRQ */
++      _DRM_STAT_PRIMARY,      /**< Primary DMA bytes */
++      _DRM_STAT_SECONDARY,    /**< Secondary DMA bytes */
++      _DRM_STAT_DMA,          /**< DMA */
++      _DRM_STAT_SPECIAL,      /**< Special DMA (e.g., priority or polled) */
++      _DRM_STAT_MISSED        /**< Missed DMA opportunity */
++          /* Add to the *END* of the list */
++};
++
++/**
++ * DRM_IOCTL_GET_STATS ioctl argument type.
++ */
++struct drm_stats {
++      unsigned long count;
++      struct {
++              unsigned long value;
++              enum drm_stat_type type;
++      } data[15];
++};
++
++/**
++ * Hardware locking flags.
++ */
++enum drm_lock_flags {
++      _DRM_LOCK_READY = 0x01,      /**< Wait until hardware is ready for DMA */
++      _DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
++      _DRM_LOCK_FLUSH = 0x04,      /**< Flush this context's DMA queue first */
++      _DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
++      /* These *HALT* flags aren't supported yet
++         -- they will be used to support the
++         full-screen DGA-like mode. */
++      _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
++      _DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
++};
++
++/**
++ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
++ *
++ * \sa drmGetLock() and drmUnlock().
++ */
++struct drm_lock {
++      int context;
++      enum drm_lock_flags flags;
++};
++
++/**
++ * DMA flags
++ *
++ * \warning
++ * These values \e must match xf86drm.h.
++ *
++ * \sa drm_dma.
++ */
++enum drm_dma_flags {
++      /* Flags for DMA buffer dispatch */
++      _DRM_DMA_BLOCK = 0x01,        /**<
++                                     * Block until buffer dispatched.
++                                     *
++                                     * \note The buffer may not yet have
++                                     * been processed by the hardware --
++                                     * getting a hardware lock with the
++                                     * hardware quiescent will ensure
++                                     * that the buffer has been
++                                     * processed.
++                                     */
++      _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
++      _DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */
++
++      /* Flags for DMA buffer request */
++      _DRM_DMA_WAIT = 0x10,         /**< Wait for free buffers */
++      _DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
++      _DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
++};
++
++/**
++ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
++ *
++ * \sa drmAddBufs().
++ */
++struct drm_buf_desc {
++      int count;               /**< Number of buffers of this size */
++      int size;                /**< Size in bytes */
++      int low_mark;            /**< Low water mark */
++      int high_mark;           /**< High water mark */
++      enum {
++              _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
++              _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
++              _DRM_SG_BUFFER  = 0x04, /**< Scatter/gather memory buffer */
++              _DRM_FB_BUFFER  = 0x08, /**< Buffer is in frame buffer */
++              _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
++      } flags;
++      unsigned long agp_start; /**<
++                                * Start address of where the AGP buffers are
++                                * in the AGP aperture
++                                */
++};
++
++/**
++ * DRM_IOCTL_INFO_BUFS ioctl argument type.
++ */
++struct drm_buf_info {
++      int count;                /**< Number of buffers described in list */
++      struct drm_buf_desc __user *list; /**< List of buffer descriptions */
++};
++
++/**
++ * DRM_IOCTL_FREE_BUFS ioctl argument type.
++ */
++struct drm_buf_free {
++      int count;
++      int __user *list;
++};
++
++/**
++ * Buffer information
++ *
++ * \sa drm_buf_map.
++ */
++struct drm_buf_pub {
++      int idx;                       /**< Index into the master buffer list */
++      int total;                     /**< Buffer size */
++      int used;                      /**< Amount of buffer in use (for DMA) */
++      void __user *address;          /**< Address of buffer */
++};
++
++/**
++ * DRM_IOCTL_MAP_BUFS ioctl argument type.
++ */
++struct drm_buf_map {
++      int count;              /**< Length of the buffer list */
++#if defined(__cplusplus)
++      void __user *c_virtual;
++#else
++      void __user *virtual;           /**< Mmap'd area in user-virtual */
++#endif
++      struct drm_buf_pub __user *list;        /**< Buffer information */
++};
++
++/**
++ * DRM_IOCTL_DMA ioctl argument type.
++ *
++ * Indices here refer to the offset into the buffer list in drm_buf_get.
++ *
++ * \sa drmDMA().
++ */
++struct drm_dma {
++      int context;                      /**< Context handle */
++      int send_count;                   /**< Number of buffers to send */
++      int __user *send_indices;         /**< List of handles to buffers */
++      int __user *send_sizes;           /**< Lengths of data to send */
++      enum drm_dma_flags flags;         /**< Flags */
++      int request_count;                /**< Number of buffers requested */
++      int request_size;                 /**< Desired size for buffers */
++      int __user *request_indices;     /**< Buffer information */
++      int __user *request_sizes;
++      int granted_count;                /**< Number of buffers granted */
++};
++
++enum drm_ctx_flags {
++      _DRM_CONTEXT_PRESERVED = 0x01,
++      _DRM_CONTEXT_2DONLY = 0x02
++};
++
++/**
++ * DRM_IOCTL_ADD_CTX ioctl argument type.
++ *
++ * \sa drmCreateContext() and drmDestroyContext().
++ */
++struct drm_ctx {
++      drm_context_t handle;
++      enum drm_ctx_flags flags;
++};
++
++/**
++ * DRM_IOCTL_RES_CTX ioctl argument type.
++ */
++struct drm_ctx_res {
++      int count;
++      struct drm_ctx __user *contexts;
++};
++
++/**
++ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
++ */
++struct drm_draw {
++      drm_drawable_t handle;
++};
++
++/**
++ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
++ */
++typedef enum {
++      DRM_DRAWABLE_CLIPRECTS,
++} drm_drawable_info_type_t;
++
++struct drm_update_draw {
++      drm_drawable_t handle;
++      unsigned int type;
++      unsigned int num;
++      unsigned long long data;
++};
++
++/**
++ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
++ */
++struct drm_auth {
++      drm_magic_t magic;
++};
++
++/**
++ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
++ *
++ * \sa drmGetInterruptFromBusID().
++ */
++struct drm_irq_busid {
++      int irq;        /**< IRQ number */
++      int busnum;     /**< bus number */
++      int devnum;     /**< device number */
++      int funcnum;    /**< function number */
++};
++
++enum drm_vblank_seq_type {
++      _DRM_VBLANK_ABSOLUTE = 0x0,     /**< Wait for specific vblank sequence number */
++      _DRM_VBLANK_RELATIVE = 0x1,     /**< Wait for given number of vblanks */
++      _DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
++      _DRM_VBLANK_NEXTONMISS = 0x10000000,    /**< If missed, wait for next vblank */
++      _DRM_VBLANK_SECONDARY = 0x20000000,     /**< Secondary display controller */
++      _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
++};
++
++#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
++#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
++                              _DRM_VBLANK_NEXTONMISS)
++
++struct drm_wait_vblank_request {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      unsigned long signal;
++};
++
++struct drm_wait_vblank_reply {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      long tval_sec;
++      long tval_usec;
++};
++
++/**
++ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
++ *
++ * \sa drmWaitVBlank().
++ */
++union drm_wait_vblank {
++      struct drm_wait_vblank_request request;
++      struct drm_wait_vblank_reply reply;
++};
++
++
++#define _DRM_PRE_MODESET 1
++#define _DRM_POST_MODESET 2
++
++/**
++ * DRM_IOCTL_MODESET_CTL ioctl argument type
++ *
++ * \sa drmModesetCtl().
++ */
++struct drm_modeset_ctl {
++      uint32_t crtc;
++      uint32_t cmd;
++};
++
++/**
++ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
++ *
++ * \sa drmAgpEnable().
++ */
++struct drm_agp_mode {
++      unsigned long mode;     /**< AGP mode */
++};
++
++/**
++ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
++ *
++ * \sa drmAgpAlloc() and drmAgpFree().
++ */
++struct drm_agp_buffer {
++      unsigned long size;     /**< In bytes -- will round to page boundary */
++      unsigned long handle;   /**< Used for binding / unbinding */
++      unsigned long type;     /**< Type of memory to allocate */
++      unsigned long physical; /**< Physical used by i810 */
++};
++
++/**
++ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
++ *
++ * \sa drmAgpBind() and drmAgpUnbind().
++ */
++struct drm_agp_binding {
++      unsigned long handle;   /**< From drm_agp_buffer */
++      unsigned long offset;   /**< In bytes -- will round to page boundary */
++};
++
++/**
++ * DRM_IOCTL_AGP_INFO ioctl argument type.
++ *
++ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
++ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
++ * drmAgpVendorId() and drmAgpDeviceId().
++ */
++struct drm_agp_info {
++      int agp_version_major;
++      int agp_version_minor;
++      unsigned long mode;
++      unsigned long aperture_base;   /**< physical address */
++      unsigned long aperture_size;   /**< bytes */
++      unsigned long memory_allowed;  /**< bytes */
++      unsigned long memory_used;
++
++      /** \name PCI information */
++      /*@{ */
++      unsigned short id_vendor;
++      unsigned short id_device;
++      /*@} */
++};
++
++/**
++ * DRM_IOCTL_SG_ALLOC ioctl argument type.
++ */
++struct drm_scatter_gather {
++      unsigned long size;     /**< In bytes -- will round to page boundary */
++      unsigned long handle;   /**< Used for mapping / unmapping */
++};
++
++/**
++ * DRM_IOCTL_SET_VERSION ioctl argument type.
++ */
++struct drm_set_version {
++      int drm_di_major;
++      int drm_di_minor;
++      int drm_dd_major;
++      int drm_dd_minor;
++};
++
++
++#define DRM_FENCE_FLAG_EMIT                0x00000001
++#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
++/**
++ * On hardware with no interrupt events for operation completion,
++ * indicates that the kernel should sleep while waiting for any blocking
++ * operation to complete rather than spinning.
++ *
++ * Has no effect otherwise.
++ */
++#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
++#define DRM_FENCE_FLAG_NO_USER             0x00000010
++
++/* Reserved for driver use */
++#define DRM_FENCE_MASK_DRIVER              0xFF000000
++
++#define DRM_FENCE_TYPE_EXE                 0x00000001
++
++struct drm_fence_arg {
++      unsigned int handle;
++      unsigned int fence_class;
++      unsigned int type;
++      unsigned int flags;
++      unsigned int signaled;
++      unsigned int error;
++      unsigned int sequence;
++      unsigned int pad64;
++      uint64_t expand_pad[2]; /*Future expansion */
++};
++
++/* Buffer permissions, referring to how the GPU uses the buffers.
++ * these translate to fence types used for the buffers.
++ * Typically a texture buffer is read, A destination buffer is write and
++ *  a command (batch-) buffer is exe. Can be or-ed together.
++ */
++
++#define DRM_BO_FLAG_READ        (1ULL << 0)
++#define DRM_BO_FLAG_WRITE       (1ULL << 1)
++#define DRM_BO_FLAG_EXE         (1ULL << 2)
++
++/*
++ * All of the bits related to access mode
++ */
++#define DRM_BO_MASK_ACCESS    (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
++/*
++ * Status flags. Can be read to determine the actual state of a buffer.
++ * Can also be set in the buffer mask before validation.
++ */
++
++/*
++ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
++ * available to root and must be manually removed before buffer manager shutdown
++ * or lock.
++ * Flags: Acknowledge
++ */
++#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
++
++/*
++ * Mask: Require that the buffer is placed in mappable memory when validated.
++ *       If not set the buffer may or may not be in mappable memory when validated.
++ * Flags: If set, the buffer is in mappable memory.
++ */
++#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
++
++/* Mask: The buffer should be shareable with other processes.
++ * Flags: The buffer is shareable with other processes.
++ */
++#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
++
++/* Mask: If set, place the buffer in cache-coherent memory if available.
++ *       If clear, never place the buffer in cache coherent memory if validated.
++ * Flags: The buffer is currently in cache-coherent memory.
++ */
++#define DRM_BO_FLAG_CACHED      (1ULL << 7)
++
++/* Mask: Make sure that every time this buffer is validated,
++ *       it ends up on the same location provided that the memory mask is the same.
++ *       The buffer will also not be evicted when claiming space for
++ *       other buffers. Basically a pinned buffer but it may be thrown out as
++ *       part of buffer manager shutdown or locking.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
++
++/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
++ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
++ * with unsnooped PTEs instead of snooped, by using chipset-specific cache
++ * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
++ * as the eviction to local memory (TTM unbind) on map is just a side effect
++ * to prevent aggressive cache prefetch from the GPU disturbing the cache
++ * management that the DRM is doing.
++ *
++ * Flags: Acknowledge.
++ * Buffers allocated with this flag should not be used for suballocators
++ * This type may have issues on CPUs with over-aggressive caching
++ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
++ */
++#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
++
++
++/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
++
++/*
++ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
++#define DRM_BO_FLAG_TILE           (1ULL << 15)
++
++/*
++ * Memory type flags that can be or'ed together in the mask, but only
++ * one appears in flags.
++ */
++
++/* System memory */
++#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
++/* Translation table memory */
++#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
++/* Vram memory */
++#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
++/* Up to the driver to define. */
++#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
++#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
++#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
++#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
++#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
++/* We can add more of these now with a 64-bit flag type */
++
++/*
++ * This is a mask covering all of the memory type flags; easier to just
++ * use a single constant than a bunch of | values. It covers
++ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
++ */
++#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
++/*
++ * This adds all of the CPU-mapping options in with the memory
++ * type to label all bits which change how the page gets mapped
++ */
++#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
++                               DRM_BO_FLAG_CACHED_MAPPED | \
++                               DRM_BO_FLAG_CACHED | \
++                               DRM_BO_FLAG_MAPPABLE)
++                               
++/* Driver-private flags */
++#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
++
++/*
++ * Don't block on validate and map. Instead, return EBUSY.
++ */
++#define DRM_BO_HINT_DONT_BLOCK  0x00000002
++/*
++ * Don't place this buffer on the unfenced list. This means
++ * that the buffer will not end up having a fence associated
++ * with it as a result of this operation
++ */
++#define DRM_BO_HINT_DONT_FENCE  0x00000004
++/**
++ * On hardware with no interrupt events for operation completion,
++ * indicates that the kernel should sleep while waiting for any blocking
++ * operation to complete rather than spinning.
++ *
++ * Has no effect otherwise.
++ */
++#define DRM_BO_HINT_WAIT_LAZY   0x00000008
++/*
++ * The client has compute relocations refering to this buffer using the
++ * offset in the presumed_offset field. If that offset ends up matching
++ * where this buffer lands, the kernel is free to skip executing those
++ * relocations
++ */
++#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
++
++#define DRM_BO_INIT_MAGIC 0xfe769812
++#define DRM_BO_INIT_MAJOR 1
++#define DRM_BO_INIT_MINOR 0
++#define DRM_BO_INIT_PATCH 0
++
++
++struct drm_bo_info_req {
++      uint64_t mask;
++      uint64_t flags;
++      unsigned int handle;
++      unsigned int hint;
++      unsigned int fence_class;
++      unsigned int desired_tile_stride;
++      unsigned int tile_info;
++      unsigned int pad64;
++      uint64_t presumed_offset;
++};
++
++struct drm_bo_create_req {
++      uint64_t flags;
++      uint64_t size;
++      uint64_t buffer_start;
++      unsigned int hint;
++      unsigned int page_alignment;
++};
++
++
++/*
++ * Reply flags
++ */
++
++#define DRM_BO_REP_BUSY 0x00000001
++
++struct drm_bo_info_rep {
++      uint64_t flags;
++      uint64_t proposed_flags;
++      uint64_t size;
++      uint64_t offset;
++      uint64_t arg_handle;
++      uint64_t buffer_start;
++      unsigned int handle;
++      unsigned int fence_flags;
++      unsigned int rep_flags;
++      unsigned int page_alignment;
++      unsigned int desired_tile_stride;
++      unsigned int hw_tile_stride;
++      unsigned int tile_info;
++      unsigned int pad64;
++      uint64_t expand_pad[4]; /*Future expansion */
++};
++
++struct drm_bo_arg_rep {
++      struct drm_bo_info_rep bo_info;
++      int ret;
++      unsigned int pad64;
++};
++
++struct drm_bo_create_arg {
++      union {
++              struct drm_bo_create_req req;
++              struct drm_bo_info_rep rep;
++      } d;
++};
++
++struct drm_bo_handle_arg {
++      unsigned int handle;
++};
++
++struct drm_bo_reference_info_arg {
++      union {
++              struct drm_bo_handle_arg req;
++              struct drm_bo_info_rep rep;
++      } d;
++};
++
++struct drm_bo_map_wait_idle_arg {
++      union {
++              struct drm_bo_info_req req;
++              struct drm_bo_info_rep rep;
++      } d;
++};
++
++struct drm_bo_op_req {
++      enum {
++              drm_bo_validate,
++              drm_bo_fence,
++              drm_bo_ref_fence,
++      } op;
++      unsigned int arg_handle;
++      struct drm_bo_info_req bo_req;
++};
++
++
++struct drm_bo_op_arg {
++      uint64_t next;
++      union {
++              struct drm_bo_op_req req;
++              struct drm_bo_arg_rep rep;
++      } d;
++      int handled;
++      unsigned int pad64;
++};
++
++
++#define DRM_BO_MEM_LOCAL 0
++#define DRM_BO_MEM_TT 1
++#define DRM_BO_MEM_VRAM 2
++#define DRM_BO_MEM_PRIV0 3
++#define DRM_BO_MEM_PRIV1 4
++#define DRM_BO_MEM_PRIV2 5
++#define DRM_BO_MEM_PRIV3 6
++#define DRM_BO_MEM_PRIV4 7
++
++#define DRM_BO_MEM_TYPES 8 /* For now. */
++
++#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
++#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
++
++struct drm_bo_version_arg {
++      uint32_t major;
++      uint32_t minor;
++      uint32_t patchlevel;
++};
++
++struct drm_mm_type_arg {
++      unsigned int mem_type;
++      unsigned int lock_flags;
++};
++
++struct drm_mm_init_arg {
++      unsigned int magic;
++      unsigned int major;
++      unsigned int minor;
++      unsigned int mem_type;
++      uint64_t p_offset;
++      uint64_t p_size;
++};
++
++struct drm_mm_info_arg {
++      unsigned int mem_type;
++      uint64_t p_size;
++};
++
++struct drm_gem_close {
++      /** Handle of the object to be closed. */
++      uint32_t handle;
++      uint32_t pad;
++};
++
++struct drm_gem_flink {
++      /** Handle for the object being named */
++      uint32_t handle;
++
++      /** Returned global name */
++      uint32_t name;
++};
++
++struct drm_gem_open {
++      /** Name of object being opened */
++      uint32_t name;
++
++      /** Returned handle for the object */
++      uint32_t handle;
++      
++      /** Returned size of the object */
++      uint64_t size;
++};
++
++/**
++ * \name Ioctls Definitions
++ */
++/*@{*/
++
++#define DRM_IOCTL_BASE                        'd'
++#define DRM_IO(nr)                    _IO(DRM_IOCTL_BASE,nr)
++#define DRM_IOR(nr,type)              _IOR(DRM_IOCTL_BASE,nr,type)
++#define DRM_IOW(nr,type)              _IOW(DRM_IOCTL_BASE,nr,type)
++#define DRM_IOWR(nr,type)             _IOWR(DRM_IOCTL_BASE,nr,type)
++
++#define DRM_IOCTL_VERSION             DRM_IOWR(0x00, struct drm_version)
++#define DRM_IOCTL_GET_UNIQUE          DRM_IOWR(0x01, struct drm_unique)
++#define DRM_IOCTL_GET_MAGIC           DRM_IOR( 0x02, struct drm_auth)
++#define DRM_IOCTL_IRQ_BUSID           DRM_IOWR(0x03, struct drm_irq_busid)
++#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
++#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
++#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
++#define DRM_IOCTL_SET_VERSION         DRM_IOWR(0x07, struct drm_set_version)
++#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08,  struct drm_modeset_ctl)
++
++#define DRM_IOCTL_GEM_CLOSE           DRM_IOW (0x09, struct drm_gem_close)
++#define DRM_IOCTL_GEM_FLINK           DRM_IOWR(0x0a, struct drm_gem_flink)
++#define DRM_IOCTL_GEM_OPEN            DRM_IOWR(0x0b, struct drm_gem_open)
++
++#define DRM_IOCTL_SET_UNIQUE          DRM_IOW( 0x10, struct drm_unique)
++#define DRM_IOCTL_AUTH_MAGIC          DRM_IOW( 0x11, struct drm_auth)
++#define DRM_IOCTL_BLOCK                       DRM_IOWR(0x12, struct drm_block)
++#define DRM_IOCTL_UNBLOCK             DRM_IOWR(0x13, struct drm_block)
++#define DRM_IOCTL_CONTROL             DRM_IOW( 0x14, struct drm_control)
++#define DRM_IOCTL_ADD_MAP             DRM_IOWR(0x15, struct drm_map)
++#define DRM_IOCTL_ADD_BUFS            DRM_IOWR(0x16, struct drm_buf_desc)
++#define DRM_IOCTL_MARK_BUFS           DRM_IOW( 0x17, struct drm_buf_desc)
++#define DRM_IOCTL_INFO_BUFS           DRM_IOWR(0x18, struct drm_buf_info)
++#define DRM_IOCTL_MAP_BUFS            DRM_IOWR(0x19, struct drm_buf_map)
++#define DRM_IOCTL_FREE_BUFS           DRM_IOW( 0x1a, struct drm_buf_free)
++
++#define DRM_IOCTL_RM_MAP              DRM_IOW( 0x1b, struct drm_map)
++
++#define DRM_IOCTL_SET_SAREA_CTX               DRM_IOW( 0x1c, struct drm_ctx_priv_map)
++#define DRM_IOCTL_GET_SAREA_CTX               DRM_IOWR(0x1d, struct drm_ctx_priv_map)
++
++#define DRM_IOCTL_ADD_CTX             DRM_IOWR(0x20, struct drm_ctx)
++#define DRM_IOCTL_RM_CTX              DRM_IOWR(0x21, struct drm_ctx)
++#define DRM_IOCTL_MOD_CTX             DRM_IOW( 0x22, struct drm_ctx)
++#define DRM_IOCTL_GET_CTX             DRM_IOWR(0x23, struct drm_ctx)
++#define DRM_IOCTL_SWITCH_CTX          DRM_IOW( 0x24, struct drm_ctx)
++#define DRM_IOCTL_NEW_CTX             DRM_IOW( 0x25, struct drm_ctx)
++#define DRM_IOCTL_RES_CTX             DRM_IOWR(0x26, struct drm_ctx_res)
++#define DRM_IOCTL_ADD_DRAW            DRM_IOWR(0x27, struct drm_draw)
++#define DRM_IOCTL_RM_DRAW             DRM_IOWR(0x28, struct drm_draw)
++#define DRM_IOCTL_DMA                 DRM_IOWR(0x29, struct drm_dma)
++#define DRM_IOCTL_LOCK                        DRM_IOW( 0x2a, struct drm_lock)
++#define DRM_IOCTL_UNLOCK              DRM_IOW( 0x2b, struct drm_lock)
++#define DRM_IOCTL_FINISH              DRM_IOW( 0x2c, struct drm_lock)
++
++#define DRM_IOCTL_AGP_ACQUIRE         DRM_IO(  0x30)
++#define DRM_IOCTL_AGP_RELEASE         DRM_IO(  0x31)
++#define DRM_IOCTL_AGP_ENABLE          DRM_IOW( 0x32, struct drm_agp_mode)
++#define DRM_IOCTL_AGP_INFO            DRM_IOR( 0x33, struct drm_agp_info)
++#define DRM_IOCTL_AGP_ALLOC           DRM_IOWR(0x34, struct drm_agp_buffer)
++#define DRM_IOCTL_AGP_FREE            DRM_IOW( 0x35, struct drm_agp_buffer)
++#define DRM_IOCTL_AGP_BIND            DRM_IOW( 0x36, struct drm_agp_binding)
++#define DRM_IOCTL_AGP_UNBIND          DRM_IOW( 0x37, struct drm_agp_binding)
++
++#define DRM_IOCTL_SG_ALLOC            DRM_IOWR(0x38, struct drm_scatter_gather)
++#define DRM_IOCTL_SG_FREE             DRM_IOW( 0x39, struct drm_scatter_gather)
++
++#define DRM_IOCTL_WAIT_VBLANK         DRM_IOWR(0x3a, union drm_wait_vblank)
++
++#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)
++
++#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
++#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
++#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
++#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)
++
++#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)
++
++#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
++#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
++#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
++#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
++#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
++#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
++#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
++#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
++#define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
++#define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)
++
++/*@}*/
++
++/**
++ * Device specific ioctls should only be in their respective headers
++ * The device specific ioctl range is from 0x40 to 0x99.
++ * Generic IOCTLS restart at 0xA0.
++ *
++ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
++ * drmCommandReadWrite().
++ */
++#define DRM_COMMAND_BASE                0x40
++#define DRM_COMMAND_END                 0xA0
++
++/* typedef area */
++#ifndef __KERNEL__
++typedef struct drm_clip_rect drm_clip_rect_t;
++typedef struct drm_tex_region drm_tex_region_t;
++typedef struct drm_hw_lock drm_hw_lock_t;
++typedef struct drm_version drm_version_t;
++typedef struct drm_unique drm_unique_t;
++typedef struct drm_list drm_list_t;
++typedef struct drm_block drm_block_t;
++typedef struct drm_control drm_control_t;
++typedef enum drm_map_type drm_map_type_t;
++typedef enum drm_map_flags drm_map_flags_t;
++typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
++typedef struct drm_map drm_map_t;
++typedef struct drm_client drm_client_t;
++typedef enum drm_stat_type drm_stat_type_t;
++typedef struct drm_stats drm_stats_t;
++typedef enum drm_lock_flags drm_lock_flags_t;
++typedef struct drm_lock drm_lock_t;
++typedef enum drm_dma_flags drm_dma_flags_t;
++typedef struct drm_buf_desc drm_buf_desc_t;
++typedef struct drm_buf_info drm_buf_info_t;
++typedef struct drm_buf_free drm_buf_free_t;
++typedef struct drm_buf_pub drm_buf_pub_t;
++typedef struct drm_buf_map drm_buf_map_t;
++typedef struct drm_dma drm_dma_t;
++typedef union drm_wait_vblank drm_wait_vblank_t;
++typedef struct drm_agp_mode drm_agp_mode_t;
++typedef enum drm_ctx_flags drm_ctx_flags_t;
++typedef struct drm_ctx drm_ctx_t;
++typedef struct drm_ctx_res drm_ctx_res_t;
++typedef struct drm_draw drm_draw_t;
++typedef struct drm_update_draw drm_update_draw_t;
++typedef struct drm_auth drm_auth_t;
++typedef struct drm_irq_busid drm_irq_busid_t;
++typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
++typedef struct drm_agp_buffer drm_agp_buffer_t;
++typedef struct drm_agp_binding drm_agp_binding_t;
++typedef struct drm_agp_info drm_agp_info_t;
++typedef struct drm_scatter_gather drm_scatter_gather_t;
++typedef struct drm_set_version drm_set_version_t;
++
++typedef struct drm_fence_arg drm_fence_arg_t;
++typedef struct drm_mm_type_arg drm_mm_type_arg_t;
++typedef struct drm_mm_init_arg drm_mm_init_arg_t;
++typedef enum drm_bo_type drm_bo_type_t;
++#endif
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_hashtab.c git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.c
+--- git/drivers/gpu/drm-tungsten/drm_hashtab.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,207 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple open hash tab implementation.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "drm_hashtab.h"
++#include <linux/hash.h>
++
++int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
++{
++      unsigned int i;
++
++      ht->size = 1 << order;
++      ht->order = order;
++      ht->fill = 0;
++      ht->table = NULL;
++      ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
++      if (!ht->use_vmalloc) {
++              ht->table = drm_calloc(ht->size, sizeof(*ht->table),
++                                     DRM_MEM_HASHTAB);
++      }
++      if (!ht->table) {
++              ht->use_vmalloc = 1;
++              ht->table = vmalloc(ht->size * sizeof(*ht->table));
++      }
++      if (!ht->table) {
++              DRM_ERROR("Out of memory for hash table\n");
++              return -ENOMEM;
++      }
++      for (i = 0; i < ht->size; ++i) {
++              INIT_HLIST_HEAD(&ht->table[i]);
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_create);
++
++void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
++{
++      struct drm_hash_item *entry;
++      struct hlist_head *h_list;
++      struct hlist_node *list;
++      unsigned int hashed_key;
++      int count = 0;
++
++      hashed_key = hash_long(key, ht->order);
++      DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
++      h_list = &ht->table[hashed_key];
++      hlist_for_each(list, h_list) {
++              entry = hlist_entry(list, struct drm_hash_item, head);
++              DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
++      }
++}
++
++static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
++                                        unsigned long key)
++{
++      struct drm_hash_item *entry;
++      struct hlist_head *h_list;
++      struct hlist_node *list;
++      unsigned int hashed_key;
++
++      hashed_key = hash_long(key, ht->order);
++      h_list = &ht->table[hashed_key];
++      hlist_for_each(list, h_list) {
++              entry = hlist_entry(list, struct drm_hash_item, head);
++              if (entry->key == key)
++                      return list;
++              if (entry->key > key)
++                      break;
++      }
++      return NULL;
++}
++
++int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
++{
++      struct drm_hash_item *entry;
++      struct hlist_head *h_list;
++      struct hlist_node *list, *parent;
++      unsigned int hashed_key;
++      unsigned long key = item->key;
++
++      hashed_key = hash_long(key, ht->order);
++      h_list = &ht->table[hashed_key];
++      parent = NULL;
++      hlist_for_each(list, h_list) {
++              entry = hlist_entry(list, struct drm_hash_item, head);
++              if (entry->key == key)
++                      return -EINVAL;
++              if (entry->key > key)
++                      break;
++              parent = list;
++      }
++      if (parent) {
++              hlist_add_after(parent, &item->head);
++      } else {
++              hlist_add_head(&item->head, h_list);
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_insert_item);
++
++/*
++ * Just insert an item and return any "bits" bit key that hasn't been
++ * used before.
++ */
++int drm_ht_just_insert_please(struct drm_open_hash *ht,
++                            struct drm_hash_item *item,
++                            unsigned long seed, int bits, int shift,
++                            unsigned long add)
++{
++      int ret;
++      unsigned long mask = (1 << bits) - 1;
++      unsigned long first, unshifted_key;
++
++      unshifted_key = hash_long(seed, bits);
++      first = unshifted_key;
++      do {
++              item->key = (unshifted_key << shift) + add;
++              ret = drm_ht_insert_item(ht, item);
++              if (ret)
++                      unshifted_key = (unshifted_key + 1) & mask;
++      } while (ret && (unshifted_key != first));
++
++      if (ret) {
++              DRM_ERROR("Available key bit space exhausted\n");
++              return -EINVAL;
++      }
++      return 0;
++}
++
++int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
++                   struct drm_hash_item **item)
++{
++      struct hlist_node *list;
++
++      list = drm_ht_find_key(ht, key);
++      if (!list)
++              return -EINVAL;
++
++      *item = hlist_entry(list, struct drm_hash_item, head);
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_find_item);
++
++int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
++{
++      struct hlist_node *list;
++
++      list = drm_ht_find_key(ht, key);
++      if (list) {
++              hlist_del_init(list);
++              ht->fill--;
++              return 0;
++      }
++      return -EINVAL;
++}
++
++int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
++{
++      hlist_del_init(&item->head);
++      ht->fill--;
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_remove_item);
++
++void drm_ht_remove(struct drm_open_hash *ht)
++{
++      if (ht->table) {
++              if (ht->use_vmalloc)
++                      vfree(ht->table);
++              else
++                      drm_free(ht->table, ht->size * sizeof(*ht->table),
++                               DRM_MEM_HASHTAB);
++              ht->table = NULL;
++      }
++}
++EXPORT_SYMBOL(drm_ht_remove);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_hashtab.h git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.h
+--- git/drivers/gpu/drm-tungsten/drm_hashtab.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple open hash tab implementation.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef DRM_HASHTAB_H
++#define DRM_HASHTAB_H
++
++#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
++
++struct drm_hash_item {
++      struct hlist_node head;
++      unsigned long key;
++};
++
++struct drm_open_hash {
++      unsigned int size;
++      unsigned int order;
++      unsigned int fill;
++      struct hlist_head *table;
++      int use_vmalloc;
++};
++
++
++extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
++extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
++extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
++                                   unsigned long seed, int bits, int shift,
++                                   unsigned long add);
++extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
++
++extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
++extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
++extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
++extern void drm_ht_remove(struct drm_open_hash *ht);
++
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_internal.h git-nokia/drivers/gpu/drm-tungsten/drm_internal.h
+--- git/drivers/gpu/drm-tungsten/drm_internal.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_internal.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2007 Red Hat, Inc
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* This header file holds function prototypes and data types that are
++ * internal to the drm (not exported to user space) but shared across
++ * drivers and platforms */
++
++#ifndef __DRM_INTERNAL_H__
++#define __DRM_INTERNAL_H__
++
++/**
++ * Drawable information.
++ */
++struct drm_drawable_info {
++      unsigned int num_rects;
++      struct drm_clip_rect *rects;
++};
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_ioc32.c git-nokia/drivers/gpu/drm-tungsten/drm_ioc32.c
+--- git/drivers/gpu/drm-tungsten/drm_ioc32.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_ioc32.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1073 @@
++/**
++ * \file drm_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the DRM.
++ *
++ * \author Paul Mackerras <paulus@samba.org>
++ *
++ * Copyright (C) Paul Mackerras 2005.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm_core.h"
++
++#define DRM_IOCTL_VERSION32           DRM_IOWR(0x00, drm_version32_t)
++#define DRM_IOCTL_GET_UNIQUE32                DRM_IOWR(0x01, drm_unique32_t)
++#define DRM_IOCTL_GET_MAP32           DRM_IOWR(0x04, drm_map32_t)
++#define DRM_IOCTL_GET_CLIENT32                DRM_IOWR(0x05, drm_client32_t)
++#define DRM_IOCTL_GET_STATS32         DRM_IOR( 0x06, drm_stats32_t)
++
++#define DRM_IOCTL_SET_UNIQUE32                DRM_IOW( 0x10, drm_unique32_t)
++#define DRM_IOCTL_ADD_MAP32           DRM_IOWR(0x15, drm_map32_t)
++#define DRM_IOCTL_ADD_BUFS32          DRM_IOWR(0x16, drm_buf_desc32_t)
++#define DRM_IOCTL_MARK_BUFS32         DRM_IOW( 0x17, drm_buf_desc32_t)
++#define DRM_IOCTL_INFO_BUFS32         DRM_IOWR(0x18, drm_buf_info32_t)
++#define DRM_IOCTL_MAP_BUFS32          DRM_IOWR(0x19, drm_buf_map32_t)
++#define DRM_IOCTL_FREE_BUFS32         DRM_IOW( 0x1a, drm_buf_free32_t)
++
++#define DRM_IOCTL_RM_MAP32            DRM_IOW( 0x1b, drm_map32_t)
++
++#define DRM_IOCTL_SET_SAREA_CTX32     DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
++#define DRM_IOCTL_GET_SAREA_CTX32     DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
++
++#define DRM_IOCTL_RES_CTX32           DRM_IOWR(0x26, drm_ctx_res32_t)
++#define DRM_IOCTL_DMA32                       DRM_IOWR(0x29, drm_dma32_t)
++
++#define DRM_IOCTL_AGP_ENABLE32                DRM_IOW( 0x32, drm_agp_mode32_t)
++#define DRM_IOCTL_AGP_INFO32          DRM_IOR( 0x33, drm_agp_info32_t)
++#define DRM_IOCTL_AGP_ALLOC32         DRM_IOWR(0x34, drm_agp_buffer32_t)
++#define DRM_IOCTL_AGP_FREE32          DRM_IOW( 0x35, drm_agp_buffer32_t)
++#define DRM_IOCTL_AGP_BIND32          DRM_IOW( 0x36, drm_agp_binding32_t)
++#define DRM_IOCTL_AGP_UNBIND32                DRM_IOW( 0x37, drm_agp_binding32_t)
++
++#define DRM_IOCTL_SG_ALLOC32          DRM_IOW( 0x38, drm_scatter_gather32_t)
++#define DRM_IOCTL_SG_FREE32           DRM_IOW( 0x39, drm_scatter_gather32_t)
++
++#define DRM_IOCTL_WAIT_VBLANK32               DRM_IOWR(0x3a, drm_wait_vblank32_t)
++
++typedef struct drm_version_32 {
++      int version_major;        /**< Major version */
++      int version_minor;        /**< Minor version */
++      int version_patchlevel;   /**< Patch level */
++      u32 name_len;             /**< Length of name buffer */
++      u32 name;                 /**< Name of driver */
++      u32 date_len;             /**< Length of date buffer */
++      u32 date;                 /**< User-space buffer to hold date */
++      u32 desc_len;             /**< Length of desc buffer */
++      u32 desc;                 /**< User-space buffer to hold desc */
++} drm_version32_t;
++
++static int compat_drm_version(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_version32_t v32;
++      struct drm_version __user *version;
++      int err;
++
++      if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
++              return -EFAULT;
++
++      version = compat_alloc_user_space(sizeof(*version));
++      if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
++              return -EFAULT;
++      if (__put_user(v32.name_len, &version->name_len)
++          || __put_user((void __user *)(unsigned long)v32.name,
++                        &version->name)
++          || __put_user(v32.date_len, &version->date_len)
++          || __put_user((void __user *)(unsigned long)v32.date,
++                        &version->date)
++          || __put_user(v32.desc_len, &version->desc_len)
++          || __put_user((void __user *)(unsigned long)v32.desc,
++                        &version->desc))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_VERSION, (unsigned long)version);
++      if (err)
++              return err;
++
++      if (__get_user(v32.version_major, &version->version_major)
++          || __get_user(v32.version_minor, &version->version_minor)
++          || __get_user(v32.version_patchlevel, &version->version_patchlevel)
++          || __get_user(v32.name_len, &version->name_len)
++          || __get_user(v32.date_len, &version->date_len)
++          || __get_user(v32.desc_len, &version->desc_len))
++              return -EFAULT;
++
++      if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
++              return -EFAULT;
++      return 0;
++}
++
++typedef struct drm_unique32 {
++      u32 unique_len; /**< Length of unique */
++      u32 unique;     /**< Unique name for driver instantiation */
++} drm_unique32_t;
++
++static int compat_drm_getunique(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_unique32_t uq32;
++      struct drm_unique __user *u;
++      int err;
++
++      if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
++              return -EFAULT;
++
++      u = compat_alloc_user_space(sizeof(*u));
++      if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
++              return -EFAULT;
++      if (__put_user(uq32.unique_len, &u->unique_len)
++          || __put_user((void __user *)(unsigned long)uq32.unique,
++                        &u->unique))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
++      if (err)
++              return err;
++
++      if (__get_user(uq32.unique_len, &u->unique_len))
++              return -EFAULT;
++      if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
++              return -EFAULT;
++      return 0;
++}
++
++static int compat_drm_setunique(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_unique32_t uq32;
++      struct drm_unique __user *u;
++
++      if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
++              return -EFAULT;
++
++      u = compat_alloc_user_space(sizeof(*u));
++      if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
++              return -EFAULT;
++      if (__put_user(uq32.unique_len, &u->unique_len)
++          || __put_user((void __user *)(unsigned long)uq32.unique,
++                        &u->unique))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
++}
++
++typedef struct drm_map32 {
++      u32 offset;             /**< Requested physical address (0 for SAREA)*/
++      u32 size;               /**< Requested physical size (bytes) */
++      enum drm_map_type type; /**< Type of memory to map */
++      enum drm_map_flags flags;       /**< Flags */
++      u32 handle;             /**< User-space: "Handle" to pass to mmap() */
++      int mtrr;               /**< MTRR slot used */
++} drm_map32_t;
++
++static int compat_drm_getmap(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_map32_t __user *argp = (void __user *)arg;
++      drm_map32_t m32;
++      struct drm_map __user *map;
++      int idx, err;
++      void *handle;
++
++      if (get_user(idx, &argp->offset))
++              return -EFAULT;
++
++      map = compat_alloc_user_space(sizeof(*map));
++      if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
++              return -EFAULT;
++      if (__put_user(idx, &map->offset))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_MAP, (unsigned long)map);
++      if (err)
++              return err;
++
++      if (__get_user(m32.offset, &map->offset)
++          || __get_user(m32.size, &map->size)
++          || __get_user(m32.type, &map->type)
++          || __get_user(m32.flags, &map->flags)
++          || __get_user(handle, &map->handle)
++          || __get_user(m32.mtrr, &map->mtrr))
++              return -EFAULT;
++
++      m32.handle = (unsigned long)handle;
++      if (copy_to_user(argp, &m32, sizeof(m32)))
++              return -EFAULT;
++      return 0;
++
++}
++
++static int compat_drm_addmap(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_map32_t __user *argp = (void __user *)arg;
++      drm_map32_t m32;
++      struct drm_map __user *map;
++      int err;
++      void *handle;
++
++      if (copy_from_user(&m32, argp, sizeof(m32)))
++              return -EFAULT;
++
++      map = compat_alloc_user_space(sizeof(*map));
++      if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
++              return -EFAULT;
++      if (__put_user(m32.offset, &map->offset)
++          || __put_user(m32.size, &map->size)
++          || __put_user(m32.type, &map->type)
++          || __put_user(m32.flags, &map->flags))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_ADD_MAP, (unsigned long)map);
++      if (err)
++              return err;
++
++      if (__get_user(m32.offset, &map->offset)
++          || __get_user(m32.mtrr, &map->mtrr)
++          || __get_user(handle, &map->handle))
++              return -EFAULT;
++
++      m32.handle = (unsigned long)handle;
++      if (m32.handle != (unsigned long)handle && printk_ratelimit())
++              printk(KERN_ERR "compat_drm_addmap truncated handle"
++                     " %p for type %d offset %x\n",
++                     handle, m32.type, m32.offset);
++
++      if (copy_to_user(argp, &m32, sizeof(m32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int compat_drm_rmmap(struct file *file, unsigned int cmd,
++                          unsigned long arg)
++{
++      drm_map32_t __user *argp = (void __user *)arg;
++      struct drm_map __user *map;
++      u32 handle;
++
++      if (get_user(handle, &argp->handle))
++              return -EFAULT;
++
++      map = compat_alloc_user_space(sizeof(*map));
++      if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
++              return -EFAULT;
++      if (__put_user((void *)(unsigned long)handle, &map->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RM_MAP, (unsigned long)map);
++}
++
++typedef struct drm_client32 {
++      int idx;        /**< Which client desired? */
++      int auth;       /**< Is client authenticated? */
++      u32 pid;        /**< Process ID */
++      u32 uid;        /**< User ID */
++      u32 magic;      /**< Magic */
++      u32 iocs;       /**< Ioctl count */
++} drm_client32_t;
++
++static int compat_drm_getclient(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_client32_t c32;
++      drm_client32_t __user *argp = (void __user *)arg;
++      struct drm_client __user *client;
++      int idx, err;
++
++      if (get_user(idx, &argp->idx))
++              return -EFAULT;
++
++      client = compat_alloc_user_space(sizeof(*client));
++      if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
++              return -EFAULT;
++      if (__put_user(idx, &client->idx))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_CLIENT, (unsigned long)client);
++      if (err)
++              return err;
++
++      if (__get_user(c32.auth, &client->auth)
++          || __get_user(c32.pid, &client->pid)
++          || __get_user(c32.uid, &client->uid)
++          || __get_user(c32.magic, &client->magic)
++          || __get_user(c32.iocs, &client->iocs))
++              return -EFAULT;
++
++      if (copy_to_user(argp, &c32, sizeof(c32)))
++              return -EFAULT;
++      return 0;
++}
++
++typedef struct drm_stats32 {
++      u32 count;
++      struct {
++              u32 value;
++              enum drm_stat_type type;
++      } data[15];
++} drm_stats32_t;
++
++static int compat_drm_getstats(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_stats32_t s32;
++      drm_stats32_t __user *argp = (void __user *)arg;
++      struct drm_stats __user *stats;
++      int i, err;
++
++      stats = compat_alloc_user_space(sizeof(*stats));
++      if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_STATS, (unsigned long)stats);
++      if (err)
++              return err;
++
++      if (__get_user(s32.count, &stats->count))
++              return -EFAULT;
++      for (i = 0; i < 15; ++i)
++              if (__get_user(s32.data[i].value, &stats->data[i].value)
++                  || __get_user(s32.data[i].type, &stats->data[i].type))
++                      return -EFAULT;
++
++      if (copy_to_user(argp, &s32, sizeof(s32)))
++              return -EFAULT;
++      return 0;
++}
++
++typedef struct drm_buf_desc32 {
++      int count;               /**< Number of buffers of this size */
++      int size;                /**< Size in bytes */
++      int low_mark;            /**< Low water mark */
++      int high_mark;           /**< High water mark */
++      int flags;
++      u32 agp_start;           /**< Start address in the AGP aperture */
++} drm_buf_desc32_t;
++
++static int compat_drm_addbufs(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_buf_desc32_t __user *argp = (void __user *)arg;
++      struct drm_buf_desc __user *buf;
++      int err;
++      unsigned long agp_start;
++
++      buf = compat_alloc_user_space(sizeof(*buf));
++      if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
++          || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
++              return -EFAULT;
++
++      if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
++          || __get_user(agp_start, &argp->agp_start)
++          || __put_user(agp_start, &buf->agp_start))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
++      if (err)
++              return err;
++
++      if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
++          || __get_user(agp_start, &buf->agp_start)
++          || __put_user(agp_start, &argp->agp_start))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int compat_drm_markbufs(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_buf_desc32_t b32;
++      drm_buf_desc32_t __user *argp = (void __user *)arg;
++      struct drm_buf_desc __user *buf;
++
++      if (copy_from_user(&b32, argp, sizeof(b32)))
++              return -EFAULT;
++
++      buf = compat_alloc_user_space(sizeof(*buf));
++      if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
++              return -EFAULT;
++
++      if (__put_user(b32.size, &buf->size)
++          || __put_user(b32.low_mark, &buf->low_mark)
++          || __put_user(b32.high_mark, &buf->high_mark))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
++}
++
++typedef struct drm_buf_info32 {
++      int count;              /**< Entries in list */
++      u32 list;
++} drm_buf_info32_t;
++
++static int compat_drm_infobufs(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_buf_info32_t req32;
++      drm_buf_info32_t __user *argp = (void __user *)arg;
++      drm_buf_desc32_t __user *to;
++      struct drm_buf_info __user *request;
++      struct drm_buf_desc __user *list;
++      size_t nbytes;
++      int i, err;
++      int count, actual;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      count = req32.count;
++      to = (drm_buf_desc32_t __user *)(unsigned long)req32.list;
++      if (count < 0)
++              count = 0;
++      if (count > 0
++          && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
++              return -EFAULT;
++
++      nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
++      request = compat_alloc_user_space(nbytes);
++      if (!access_ok(VERIFY_WRITE, request, nbytes))
++              return -EFAULT;
++      list = (struct drm_buf_desc *) (request + 1);
++
++      if (__put_user(count, &request->count)
++          || __put_user(list, &request->list))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_INFO_BUFS, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(actual, &request->count))
++              return -EFAULT;
++      if (count >= actual)
++              for (i = 0; i < actual; ++i)
++                      if (__copy_in_user(&to[i], &list[i],
++                                         offsetof(struct drm_buf_desc, flags)))
++                              return -EFAULT;
++
++      if (__put_user(actual, &argp->count))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_buf_pub32 {
++      int idx;                /**< Index into the master buffer list */
++      int total;              /**< Buffer size */
++      int used;               /**< Amount of buffer in use (for DMA) */
++      u32 address;            /**< Address of buffer */
++} drm_buf_pub32_t;
++
++typedef struct drm_buf_map32 {
++      int count;              /**< Length of the buffer list */
++      u32 virtual;            /**< Mmap'd area in user-virtual */
++      u32 list;               /**< Buffer information */
++} drm_buf_map32_t;
++
++static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_buf_map32_t __user *argp = (void __user *)arg;
++      drm_buf_map32_t req32;
++      drm_buf_pub32_t __user *list32;
++      struct drm_buf_map __user *request;
++      struct drm_buf_pub __user *list;
++      int i, err;
++      int count, actual;
++      size_t nbytes;
++      void __user *addr;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++      count = req32.count;
++      list32 = (void __user *)(unsigned long)req32.list;
++
++      if (count < 0)
++              return -EINVAL;
++      nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
++      request = compat_alloc_user_space(nbytes);
++      if (!access_ok(VERIFY_WRITE, request, nbytes))
++              return -EFAULT;
++      list = (struct drm_buf_pub *) (request + 1);
++
++      if (__put_user(count, &request->count)
++          || __put_user(list, &request->list))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_MAP_BUFS, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(actual, &request->count))
++              return -EFAULT;
++      if (count >= actual)
++              for (i = 0; i < actual; ++i)
++                      if (__copy_in_user(&list32[i], &list[i],
++                                         offsetof(struct drm_buf_pub, address))
++                          || __get_user(addr, &list[i].address)
++                          || __put_user((unsigned long)addr,
++                                        &list32[i].address))
++                              return -EFAULT;
++
++      if (__put_user(actual, &argp->count)
++          || __get_user(addr, &request->virtual)
++          || __put_user((unsigned long)addr, &argp->virtual))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_buf_free32 {
++      int count;
++      u32 list;
++} drm_buf_free32_t;
++
++static int compat_drm_freebufs(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_buf_free32_t req32;
++      struct drm_buf_free __user *request;
++      drm_buf_free32_t __user *argp = (void __user *)arg;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
++              return -EFAULT;
++      if (__put_user(req32.count, &request->count)
++          || __put_user((int __user *)(unsigned long)req32.list,
++                        &request->list))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_FREE_BUFS, (unsigned long)request);
++}
++
++typedef struct drm_ctx_priv_map32 {
++      unsigned int ctx_id;     /**< Context requesting private mapping */
++      u32 handle;             /**< Handle of map */
++} drm_ctx_priv_map32_t;
++
++static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_ctx_priv_map32_t req32;
++      struct drm_ctx_priv_map __user *request;
++      drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
++              return -EFAULT;
++      if (__put_user(req32.ctx_id, &request->ctx_id)
++          || __put_user((void *)(unsigned long)req32.handle,
++                        &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
++}
++
++static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      struct drm_ctx_priv_map __user *request;
++      drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
++      int err;
++      unsigned int ctx_id;
++      void *handle;
++
++      if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
++          || __get_user(ctx_id, &argp->ctx_id))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
++              return -EFAULT;
++      if (__put_user(ctx_id, &request->ctx_id))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(handle, &request->handle)
++          || __put_user((unsigned long)handle, &argp->handle))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_ctx_res32 {
++      int count;
++      u32 contexts;
++} drm_ctx_res32_t;
++
++static int compat_drm_resctx(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_ctx_res32_t __user *argp = (void __user *)arg;
++      drm_ctx_res32_t res32;
++      struct drm_ctx_res __user *res;
++      int err;
++
++      if (copy_from_user(&res32, argp, sizeof(res32)))
++              return -EFAULT;
++
++      res = compat_alloc_user_space(sizeof(*res));
++      if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
++              return -EFAULT;
++      if (__put_user(res32.count, &res->count)
++          || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
++                        &res->contexts))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_RES_CTX, (unsigned long)res);
++      if (err)
++              return err;
++
++      if (__get_user(res32.count, &res->count)
++          || __put_user(res32.count, &argp->count))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_dma32 {
++      int context;              /**< Context handle */
++      int send_count;           /**< Number of buffers to send */
++      u32 send_indices;         /**< List of handles to buffers */
++      u32 send_sizes;           /**< Lengths of data to send */
++      enum drm_dma_flags flags;                 /**< Flags */
++      int request_count;        /**< Number of buffers requested */
++      int request_size;         /**< Desired size for buffers */
++      u32 request_indices;      /**< Buffer information */
++      u32 request_sizes;
++      int granted_count;        /**< Number of buffers granted */
++} drm_dma32_t;
++
++static int compat_drm_dma(struct file *file, unsigned int cmd,
++                        unsigned long arg)
++{
++      drm_dma32_t d32;
++      drm_dma32_t __user *argp = (void __user *)arg;
++      struct drm_dma __user *d;
++      int err;
++
++      if (copy_from_user(&d32, argp, sizeof(d32)))
++              return -EFAULT;
++
++      d = compat_alloc_user_space(sizeof(*d));
++      if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
++              return -EFAULT;
++
++      if (__put_user(d32.context, &d->context)
++          || __put_user(d32.send_count, &d->send_count)
++          || __put_user((int __user *)(unsigned long)d32.send_indices,
++                        &d->send_indices)
++          || __put_user((int __user *)(unsigned long)d32.send_sizes,
++                        &d->send_sizes)
++          || __put_user(d32.flags, &d->flags)
++          || __put_user(d32.request_count, &d->request_count)
++          || __put_user((int __user *)(unsigned long)d32.request_indices,
++                        &d->request_indices)
++          || __put_user((int __user *)(unsigned long)d32.request_sizes,
++                        &d->request_sizes))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_DMA, (unsigned long)d);
++      if (err)
++              return err;
++
++      if (__get_user(d32.request_size, &d->request_size)
++          || __get_user(d32.granted_count, &d->granted_count)
++          || __put_user(d32.request_size, &argp->request_size)
++          || __put_user(d32.granted_count, &argp->granted_count))
++              return -EFAULT;
++
++      return 0;
++}
++
++#if __OS_HAS_AGP
++typedef struct drm_agp_mode32 {
++      u32 mode;       /**< AGP mode */
++} drm_agp_mode32_t;
++
++static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_agp_mode32_t __user *argp = (void __user *)arg;
++      drm_agp_mode32_t m32;
++      struct drm_agp_mode __user *mode;
++
++      if (get_user(m32.mode, &argp->mode))
++              return -EFAULT;
++
++      mode = compat_alloc_user_space(sizeof(*mode));
++      if (put_user(m32.mode, &mode->mode))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
++}
++
++typedef struct drm_agp_info32 {
++      int agp_version_major;
++      int agp_version_minor;
++      u32 mode;
++      u32 aperture_base;      /* physical address */
++      u32 aperture_size;      /* bytes */
++      u32 memory_allowed;     /* bytes */
++      u32 memory_used;
++
++      /* PCI information */
++      unsigned short id_vendor;
++      unsigned short id_device;
++} drm_agp_info32_t;
++
++static int compat_drm_agp_info(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_agp_info32_t __user *argp = (void __user *)arg;
++      drm_agp_info32_t i32;
++      struct drm_agp_info __user *info;
++      int err;
++
++      info = compat_alloc_user_space(sizeof(*info));
++      if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_AGP_INFO, (unsigned long)info);
++      if (err)
++              return err;
++
++      if (__get_user(i32.agp_version_major, &info->agp_version_major)
++          || __get_user(i32.agp_version_minor, &info->agp_version_minor)
++          || __get_user(i32.mode, &info->mode)
++          || __get_user(i32.aperture_base, &info->aperture_base)
++          || __get_user(i32.aperture_size, &info->aperture_size)
++          || __get_user(i32.memory_allowed, &info->memory_allowed)
++          || __get_user(i32.memory_used, &info->memory_used)
++          || __get_user(i32.id_vendor, &info->id_vendor)
++          || __get_user(i32.id_device, &info->id_device))
++              return -EFAULT;
++
++      if (copy_to_user(argp, &i32, sizeof(i32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_agp_buffer32 {
++      u32 size;       /**< In bytes -- will round to page boundary */
++      u32 handle;     /**< Used for binding / unbinding */
++      u32 type;       /**< Type of memory to allocate */
++      u32 physical;   /**< Physical used by i810 */
++} drm_agp_buffer32_t;
++
++static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_agp_buffer32_t __user *argp = (void __user *)arg;
++      drm_agp_buffer32_t req32;
++      struct drm_agp_buffer __user *request;
++      int err;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.size, &request->size)
++          || __put_user(req32.type, &request->type))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(req32.handle, &request->handle)
++          || __get_user(req32.physical, &request->physical)
++          || copy_to_user(argp, &req32, sizeof(req32))) {
++              drm_ioctl(file->f_dentry->d_inode, file,
++                        DRM_IOCTL_AGP_FREE, (unsigned long)request);
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int compat_drm_agp_free(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_agp_buffer32_t __user *argp = (void __user *)arg;
++      struct drm_agp_buffer __user *request;
++      u32 handle;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || get_user(handle, &argp->handle)
++          || __put_user(handle, &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_FREE, (unsigned long)request);
++}
++
++typedef struct drm_agp_binding32 {
++      u32 handle;     /**< From drm_agp_buffer */
++      u32 offset;     /**< In bytes -- will round to page boundary */
++} drm_agp_binding32_t;
++
++static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_agp_binding32_t __user *argp = (void __user *)arg;
++      drm_agp_binding32_t req32;
++      struct drm_agp_binding __user *request;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.handle, &request->handle)
++          || __put_user(req32.offset, &request->offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_BIND, (unsigned long)request);
++}
++
++static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_agp_binding32_t __user *argp = (void __user *)arg;
++      struct drm_agp_binding __user *request;
++      u32 handle;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || get_user(handle, &argp->handle)
++          || __put_user(handle, &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
++}
++#endif                                /* __OS_HAS_AGP */
++
++typedef struct drm_scatter_gather32 {
++      u32 size;       /**< In bytes -- will round to page boundary */
++      u32 handle;     /**< Used for mapping / unmapping */
++} drm_scatter_gather32_t;
++
++static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_scatter_gather32_t __user *argp = (void __user *)arg;
++      struct drm_scatter_gather __user *request;
++      int err;
++      unsigned long x;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
++          || __get_user(x, &argp->size)
++          || __put_user(x, &request->size))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_SG_ALLOC, (unsigned long)request);
++      if (err)
++              return err;
++
++      /* XXX not sure about the handle conversion here... */
++      if (__get_user(x, &request->handle)
++          || __put_user(x >> PAGE_SHIFT, &argp->handle))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int compat_drm_sg_free(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_scatter_gather32_t __user *argp = (void __user *)arg;
++      struct drm_scatter_gather __user *request;
++      unsigned long x;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
++          || __get_user(x, &argp->handle)
++          || __put_user(x << PAGE_SHIFT, &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_SG_FREE, (unsigned long)request);
++}
++
++struct drm_wait_vblank_request32 {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      u32 signal;
++};
++
++struct drm_wait_vblank_reply32 {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      s32 tval_sec;
++      s32 tval_usec;
++};
++
++typedef union drm_wait_vblank32 {
++      struct drm_wait_vblank_request32 request;
++      struct drm_wait_vblank_reply32 reply;
++} drm_wait_vblank32_t;
++
++static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_wait_vblank32_t __user *argp = (void __user *)arg;
++      drm_wait_vblank32_t req32;
++      union drm_wait_vblank __user *request;
++      int err;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.request.type, &request->request.type)
++          || __put_user(req32.request.sequence, &request->request.sequence)
++          || __put_user(req32.request.signal, &request->request.signal))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(req32.reply.type, &request->reply.type)
++          || __get_user(req32.reply.sequence, &request->reply.sequence)
++          || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
++          || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
++              return -EFAULT;
++
++      if (copy_to_user(argp, &req32, sizeof(req32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++drm_ioctl_compat_t *drm_compat_ioctls[] = {
++      [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
++      [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
++      [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
++      [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
++      [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
++      [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
++      [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
++#if __OS_HAS_AGP
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
++#endif
++      [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
++      [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
++      [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/drm.
++ *
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn;
++      int ret;
++
++
++      /* Assume that ioctls without an explicit compat routine will "just
++       * work".  This may not always be a good assumption, but it's better
++       * than always failing.
++       */
++      if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
++              return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++
++      fn = drm_compat_ioctls[nr];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_compat_ioctl);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_ioctl.c git-nokia/drivers/gpu/drm-tungsten/drm_ioctl.c
+--- git/drivers/gpu/drm-tungsten/drm_ioctl.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_ioctl.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,351 @@
++/**
++ * \file drm_ioctl.c
++ * IOCTL processing for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_core.h"
++
++#include "linux/pci.h"
++
++/**
++ * Get the bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_unique structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Copies the bus id from drm_device::unique into user space.
++ */
++int drm_getunique(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_unique *u = data;
++
++      if (u->unique_len >= dev->unique_len) {
++              if (copy_to_user(u->unique, dev->unique, dev->unique_len))
++                      return -EFAULT;
++      }
++      u->unique_len = dev->unique_len;
++
++      return 0;
++}
++
++/**
++ * Set the bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_unique structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Copies the bus id from userspace into drm_device::unique, and verifies that
++ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
++ * in interface version 1.1 and will return EBUSY when setversion has requested
++ * version 1.1 or greater.
++ */
++int drm_setunique(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_unique *u = data;
++      int domain, bus, slot, func, ret;
++
++      if (dev->unique_len || dev->unique)
++              return -EBUSY;
++
++      if (!u->unique_len || u->unique_len > 1024)
++              return -EINVAL;
++
++      dev->unique_len = u->unique_len;
++      dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
++      if (!dev->unique)
++              return -ENOMEM;
++      if (copy_from_user(dev->unique, u->unique, dev->unique_len))
++              return -EFAULT;
++
++      dev->unique[dev->unique_len] = '\0';
++
++      dev->devname =
++          drm_alloc(strlen(dev->driver->pci_driver.name) +
++                    strlen(dev->unique) + 2, DRM_MEM_DRIVER);
++      if (!dev->devname)
++              return -ENOMEM;
++
++      sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
++              dev->unique);
++
++      /* Return error if the busid submitted doesn't match the device's actual
++       * busid.
++       */
++      ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
++      if (ret != 3)
++              return -EINVAL;
++      domain = bus >> 8;
++      bus &= 0xff;
++
++      if ((domain != drm_get_pci_domain(dev)) ||
++          (bus != dev->pdev->bus->number) ||
++          (slot != PCI_SLOT(dev->pdev->devfn)) ||
++          (func != PCI_FUNC(dev->pdev->devfn)))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int drm_set_busid(struct drm_device * dev)
++{
++      int len;
++      if (dev->unique != NULL)
++              return -EBUSY;
++
++      dev->unique_len = 40;
++      dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
++      if (dev->unique == NULL)
++              return -ENOMEM;
++
++      len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
++                     drm_get_pci_domain(dev),
++                     dev->pdev->bus->number,
++                     PCI_SLOT(dev->pdev->devfn),
++                     PCI_FUNC(dev->pdev->devfn));
++      if (len > dev->unique_len)
++              DRM_ERROR("buffer overflow");
++
++      dev->devname =
++          drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
++                    2, DRM_MEM_DRIVER);
++      if (dev->devname == NULL)
++              return -ENOMEM;
++
++      sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
++              dev->unique);
++
++      return 0;
++}
++
++/**
++ * Get a mapping information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_map structure.
++ *
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches for the mapping with the specified offset and copies its information
++ * into userspace
++ */
++int drm_getmap(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_map *map = data;
++      struct drm_map_list *r_list = NULL;
++      struct list_head *list;
++      int idx;
++      int i;
++
++      idx = map->offset;
++
++      mutex_lock(&dev->struct_mutex);
++      if (idx < 0) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      i = 0;
++      list_for_each(list, &dev->maplist) {
++              if (i == idx) {
++                      r_list = list_entry(list, struct drm_map_list, head);
++                      break;
++              }
++              i++;
++      }
++      if (!r_list || !r_list->map) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      map->offset = r_list->map->offset;
++      map->size = r_list->map->size;
++      map->type = r_list->map->type;
++      map->flags = r_list->map->flags;
++      map->handle = (void *)(unsigned long) r_list->user_token;
++      map->mtrr = r_list->map->mtrr;
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Get client information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_client structure.
++ *
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches for the client with the specified index and copies its information
++ * into userspace
++ */
++int drm_getclient(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_client *client = data;
++      struct drm_file *pt;
++      int idx;
++      int i;
++
++      idx = client->idx;
++      mutex_lock(&dev->struct_mutex);
++
++      i = 0;
++      list_for_each_entry(pt, &dev->filelist, lhead) {
++              if (i++ >= idx) {
++                      client->auth = pt->authenticated;
++                      client->pid = pt->pid;
++                      client->uid = pt->uid;
++                      client->magic = pt->magic;
++                      client->iocs = pt->ioctl_count;
++                      mutex_unlock(&dev->struct_mutex);
++
++                      return 0;
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++      return -EINVAL;
++}
++
++/**
++ * Get statistics information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_stats structure.
++ *
++ * \return zero on success or a negative number on failure.
++ */
++int drm_getstats(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_stats *stats = data;
++      int i;
++
++      memset(stats, 0, sizeof(*stats));
++
++      mutex_lock(&dev->struct_mutex);
++
++      for (i = 0; i < dev->counters; i++) {
++              if (dev->types[i] == _DRM_STAT_LOCK)
++                      stats->data[i].value =
++                          (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
++              else
++                      stats->data[i].value = atomic_read(&dev->counts[i]);
++              stats->data[i].type = dev->types[i];
++      }
++
++      stats->count = dev->counters;
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Setversion ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Sets the requested interface version
++ */
++int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_set_version *sv = data;
++      int if_version, retcode = 0;
++
++      if (sv->drm_di_major != -1) {
++              if (sv->drm_di_major != DRM_IF_MAJOR ||
++                  sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
++                      retcode = -EINVAL;
++                      goto done;
++              }
++              if_version = DRM_IF_VERSION(sv->drm_di_major,
++                                          sv->drm_di_minor);
++              dev->if_version = max(if_version, dev->if_version);
++              if (sv->drm_di_minor >= 1) {
++                      /*
++                       * Version 1.1 includes tying of DRM to specific device
++                       */
++                      drm_set_busid(dev);
++              }
++      }
++
++      if (sv->drm_dd_major != -1) {
++              if (sv->drm_dd_major != dev->driver->major ||
++                  sv->drm_dd_minor < 0 || sv->drm_dd_minor >
++                  dev->driver->minor) {
++                      retcode = -EINVAL;
++                      goto done;
++              }
++
++              if (dev->driver->set_version)
++                      dev->driver->set_version(dev, sv);
++      }
++
++done:
++      sv->drm_di_major = DRM_IF_MAJOR;
++      sv->drm_di_minor = DRM_IF_MINOR;
++      sv->drm_dd_major = dev->driver->major;
++      sv->drm_dd_minor = dev->driver->minor;
++
++      return retcode;
++}
++
++/** No-op ioctl. */
++int drm_noop(struct drm_device *dev, void *data,
++           struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_irq.c git-nokia/drivers/gpu/drm-tungsten/drm_irq.c
+--- git/drivers/gpu/drm-tungsten/drm_irq.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_irq.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,771 @@
++/**
++ * \file drm_irq.c
++ * IRQ support
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#include <linux/interrupt.h>  /* For task queue support */
++
++/**
++ * Get interrupt from bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_irq_busid structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Finds the PCI device with the specified bus id and gets its IRQ number.
++ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
++ * to that of the device that this DRM instance attached to.
++ */
++int drm_irq_by_busid(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_irq_busid *p = data;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++              return -EINVAL;
++
++      if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
++          (p->busnum & 0xff) != dev->pdev->bus->number ||
++          p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
++              return -EINVAL;
++
++      p->irq = dev->pdev->irq;
++
++      DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
++                p->irq);
++
++      return 0;
++}
++
++static void vblank_disable_fn(unsigned long arg)
++{
++      struct drm_device *dev = (struct drm_device *)arg;
++      unsigned long irqflags;
++      int i;
++
++      if (!dev->vblank_disable_allowed)
++              return;
++
++      for (i = 0; i < dev->num_crtcs; i++) {
++              spin_lock_irqsave(&dev->vbl_lock, irqflags);
++              if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
++                  dev->vblank_enabled[i]) {
++                      DRM_DEBUG("disabling vblank on crtc %d\n", i);
++                      dev->last_vblank[i] =
++                              dev->driver->get_vblank_counter(dev, i);
++                      dev->driver->disable_vblank(dev, i);
++                      dev->vblank_enabled[i] = 0;
++              }
++              spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++      }
++}
++
++static void drm_vblank_cleanup(struct drm_device *dev)
++{
++      /* Bail if the driver didn't call drm_vblank_init() */
++      if (dev->num_crtcs == 0)
++              return;
++
++      del_timer(&dev->vblank_disable_timer);
++
++      vblank_disable_fn((unsigned long)dev);
++
++      drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
++               DRM_MEM_DRIVER);
++      drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
++               DRM_MEM_DRIVER);
++      drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++      drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++      drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++      drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
++               DRM_MEM_DRIVER);
++      drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++
++      dev->num_crtcs = 0;
++}
++
++int drm_vblank_init(struct drm_device *dev, int num_crtcs)
++{
++      int i, ret = -ENOMEM;
++
++      setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
++                  (unsigned long)dev);
++      init_timer_deferrable(&dev->vblank_disable_timer);
++      spin_lock_init(&dev->vbl_lock);
++      atomic_set(&dev->vbl_signal_pending, 0);
++      dev->num_crtcs = num_crtcs;
++
++      dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
++                                 DRM_MEM_DRIVER);
++      if (!dev->vbl_queue)
++              goto err;
++
++      dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
++                                DRM_MEM_DRIVER);
++      if (!dev->vbl_sigs)
++              goto err;
++
++      dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
++                                    DRM_MEM_DRIVER);
++      if (!dev->_vblank_count)
++              goto err;
++
++      dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
++                                       DRM_MEM_DRIVER);
++      if (!dev->vblank_refcount)
++              goto err;
++
++      dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
++                                       DRM_MEM_DRIVER);
++      if (!dev->vblank_enabled)
++              goto err;
++
++      dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
++      if (!dev->last_vblank)
++              goto err;
++
++      dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
++                                       DRM_MEM_DRIVER);
++      if (!dev->vblank_inmodeset)
++              goto err;
++
++      /* Zero per-crtc vblank stuff */
++      for (i = 0; i < num_crtcs; i++) {
++              init_waitqueue_head(&dev->vbl_queue[i]);
++              INIT_LIST_HEAD(&dev->vbl_sigs[i]);
++              atomic_set(&dev->_vblank_count[i], 0);
++              atomic_set(&dev->vblank_refcount[i], 0);
++      }
++
++      dev->vblank_disable_allowed = 0;
++
++      return 0;
++
++err:
++      drm_vblank_cleanup(dev);
++      return ret;
++}
++EXPORT_SYMBOL(drm_vblank_init);
++
++/**
++ * Install IRQ handler.
++ *
++ * \param dev DRM device.
++ *
++ * Initializes the IRQ related data. Installs the handler, calling the driver
++ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
++ * before and after the installation.
++ */
++int drm_irq_install(struct drm_device * dev)
++{
++      int ret = 0;
++      unsigned long sh_flags = 0;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++              return -EINVAL;
++
++      if (dev->pdev->irq == 0)
++              return -EINVAL;
++
++      mutex_lock(&dev->struct_mutex);
++
++      /* Driver must have been initialized */
++      if (!dev->dev_private) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      if (dev->irq_enabled) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EBUSY;
++      }
++      dev->irq_enabled = 1;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("irq=%d\n", dev->pdev->irq);
++
++      /* Before installing handler */
++      dev->driver->irq_preinstall(dev);
++
++      /* Install handler */
++      if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
++              sh_flags = IRQF_SHARED;
++
++      ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
++                        sh_flags, dev->devname, dev);
++      if (ret < 0) {
++              mutex_lock(&dev->struct_mutex);
++              dev->irq_enabled = 0;
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++      /* Expose the device irq to device drivers that want to export it for
++       * whatever reason.
++       */
++      dev->irq = dev->pdev->irq;
++
++      /* After installing handler */
++      ret = dev->driver->irq_postinstall(dev);
++      if (ret < 0) {
++              mutex_lock(&dev->struct_mutex);
++              dev->irq_enabled = 0;
++              mutex_unlock(&dev->struct_mutex);
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_irq_install);
++
++/**
++ * Uninstall the IRQ handler.
++ *
++ * \param dev DRM device.
++ *
++ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
++ */
++int drm_irq_uninstall(struct drm_device * dev)
++{
++      int irq_enabled;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++              return -EINVAL;
++
++      mutex_lock(&dev->struct_mutex);
++      irq_enabled = dev->irq_enabled;
++      dev->irq_enabled = 0;
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!irq_enabled)
++              return -EINVAL;
++
++      DRM_DEBUG("irq=%d\n", dev->pdev->irq);
++
++      dev->driver->irq_uninstall(dev);
++
++      free_irq(dev->pdev->irq, dev);
++
++      drm_vblank_cleanup(dev);
++
++      dev->locked_tasklet_func = NULL;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_irq_uninstall);
++
++/**
++ * IRQ control ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_control structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls irq_install() or irq_uninstall() according to \p arg.
++ */
++int drm_control(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_control *ctl = data;
++
++      /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
++
++
++      switch (ctl->func) {
++      case DRM_INST_HANDLER:
++              if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++                      return 0;
++              if (dev->if_version < DRM_IF_VERSION(1, 2) &&
++                  ctl->irq != dev->pdev->irq)
++                      return -EINVAL;
++              return drm_irq_install(dev);
++      case DRM_UNINST_HANDLER:
++              if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++                      return 0;
++              return drm_irq_uninstall(dev);
++      default:
++              return -EINVAL;
++      }
++}
++
++/**
++ * drm_vblank_count - retrieve "cooked" vblank counter value
++ * @dev: DRM device
++ * @crtc: which counter to retrieve
++ *
++ * Fetches the "cooked" vblank count value that represents the number of
++ * vblank events since the system was booted, including lost events due to
++ * modesetting activity.
++ */
++u32 drm_vblank_count(struct drm_device *dev, int crtc)
++{
++      return atomic_read(&dev->_vblank_count[crtc]);
++}
++EXPORT_SYMBOL(drm_vblank_count);
++
++/**
++ * drm_update_vblank_count - update the master vblank counter
++ * @dev: DRM device
++ * @crtc: counter to update
++ *
++ * Call back into the driver to update the appropriate vblank counter
++ * (specified by @crtc).  Deal with wraparound, if it occurred, and
++ * update the last read value so we can deal with wraparound on the next
++ * call if necessary.
++ *
++ * Only necessary when going from off->on, to account for frames we
++ * didn't get an interrupt for.
++ *
++ * Note: caller must hold dev->vbl_lock since this reads & writes
++ * device vblank fields.
++ */
++static void drm_update_vblank_count(struct drm_device *dev, int crtc)
++{
++      u32 cur_vblank, diff;
++
++      /*
++       * Interrupts were disabled prior to this call, so deal with counter
++       * wrap if needed.
++       * NOTE!  It's possible we lost a full dev->max_vblank_count events
++       * here if the register is small or we had vblank interrupts off for
++       * a long time.
++       */
++      cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
++      diff = cur_vblank - dev->last_vblank[crtc];
++      if (cur_vblank < dev->last_vblank[crtc]) {
++              diff += dev->max_vblank_count;
++
++              DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
++                        crtc, dev->last_vblank[crtc], cur_vblank, diff);
++      }
++
++      DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
++                crtc, diff);
++
++      atomic_add(diff, &dev->_vblank_count[crtc]);
++}
++
++/**
++ * drm_vblank_get - get a reference count on vblank events
++ * @dev: DRM device
++ * @crtc: which CRTC to own
++ *
++ * Acquire a reference count on vblank events to avoid having them disabled
++ * while in use.
++ *
++ * RETURNS
++ * Zero on success, nonzero on failure.
++ */
++int drm_vblank_get(struct drm_device *dev, int crtc)
++{
++      unsigned long irqflags;
++      int ret = 0;
++
++      spin_lock_irqsave(&dev->vbl_lock, irqflags);
++      /* Going from 0->1 means we have to enable interrupts again */
++      if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
++          !dev->vblank_enabled[crtc]) {
++              ret = dev->driver->enable_vblank(dev, crtc);
++              if (ret)
++                      atomic_dec(&dev->vblank_refcount[crtc]);
++              else {
++                      dev->vblank_enabled[crtc] = 1;
++                      drm_update_vblank_count(dev, crtc);
++              }
++      }
++      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_vblank_get);
++
++/**
++ * drm_vblank_put - give up ownership of vblank events
++ * @dev: DRM device
++ * @crtc: which counter to give up
++ *
++ * Release ownership of a given vblank counter, turning off interrupts
++ * if possible.
++ */
++void drm_vblank_put(struct drm_device *dev, int crtc)
++{
++      /* Last user schedules interrupt disable */
++      if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
++          mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
++}
++EXPORT_SYMBOL(drm_vblank_put);
++
++/**
++ * drm_modeset_ctl - handle vblank event counter changes across mode switch
++ * @DRM_IOCTL_ARGS: standard ioctl arguments
++ *
++ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
++ * ioctls around modesetting so that any lost vblank events are accounted for.
++ *
++ * Generally the counter will reset across mode sets.  If interrupts are
++ * enabled around this call, we don't have to do anything since the counter
++ * will have already been incremented.
++ */
++int drm_modeset_ctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_modeset_ctl *modeset = data;
++      unsigned long irqflags;
++      int crtc, ret = 0;
++
++      /* If drm_vblank_init() hasn't been called yet, just no-op */
++      if (!dev->num_crtcs)
++              goto out;
++
++      crtc = modeset->crtc;
++      if (crtc >= dev->num_crtcs) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      /*
++       * To avoid all the problems that might happen if interrupts
++       * were enabled/disabled around or between these calls, we just
++       * have the kernel take a reference on the CRTC (just once though
++       * to avoid corrupting the count if multiple, mismatch calls occur),
++       * so that interrupts remain enabled in the interim.
++       */
++      switch (modeset->cmd) {
++      case _DRM_PRE_MODESET:
++              if (!dev->vblank_inmodeset[crtc]) {
++                      dev->vblank_inmodeset[crtc] = 1;
++                      drm_vblank_get(dev, crtc);
++              }
++              break;
++      case _DRM_POST_MODESET:
++              if (dev->vblank_inmodeset[crtc]) {
++                      spin_lock_irqsave(&dev->vbl_lock, irqflags);
++                      dev->vblank_disable_allowed = 1;
++                      dev->vblank_inmodeset[crtc] = 0;
++                      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++                      drm_vblank_put(dev, crtc);
++              }
++              break;
++      default:
++              ret = -EINVAL;
++              break;
++      }
++
++out:
++      return ret;
++}
++
++/**
++ * Wait for VBLANK.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param data user argument, pointing to a drm_wait_vblank structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the IRQ is installed.
++ *
++ * If a signal is requested checks if this task has already scheduled the same signal
++ * for the same vblank sequence number - nothing to be done in
++ * that case. If the number of tasks waiting for the interrupt exceeds 100 the
++ * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
++ * task.
++ *
++ * If a signal is not requested, then calls vblank_wait().
++ */
++int drm_wait_vblank(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      union drm_wait_vblank *vblwait = data;
++      int ret = 0;
++      unsigned int flags, seq, crtc;
++
++      if ((!dev->pdev->irq) || (!dev->irq_enabled))
++              return -EINVAL;
++
++      if (vblwait->request.type &
++          ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
++              DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
++                        vblwait->request.type,
++                        (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
++              return -EINVAL;
++      }
++
++      flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
++      crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
++
++      if (crtc >= dev->num_crtcs)
++              return -EINVAL;
++
++      ret = drm_vblank_get(dev, crtc);
++      if (ret)
++              return ret;
++      seq = drm_vblank_count(dev, crtc);
++
++      switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
++      case _DRM_VBLANK_RELATIVE:
++              vblwait->request.sequence += seq;
++              vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
++      case _DRM_VBLANK_ABSOLUTE:
++              break;
++      default:
++              ret = -EINVAL;
++              goto done;
++      }
++
++      if ((flags & _DRM_VBLANK_NEXTONMISS) &&
++          (seq - vblwait->request.sequence) <= (1<<23)) {
++              vblwait->request.sequence = seq + 1;
++      }
++
++      if (flags & _DRM_VBLANK_SIGNAL) {
++              unsigned long irqflags;
++              struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
++              struct drm_vbl_sig *vbl_sig;
++
++              spin_lock_irqsave(&dev->vbl_lock, irqflags);
++
++              /* Check if this task has already scheduled the same signal
++               * for the same vblank sequence number; nothing to be done in
++               * that case
++               */
++              list_for_each_entry(vbl_sig, vbl_sigs, head) {
++                      if (vbl_sig->sequence == vblwait->request.sequence
++                          && vbl_sig->info.si_signo ==
++                          vblwait->request.signal
++                          && vbl_sig->task == current) {
++                              spin_unlock_irqrestore(&dev->vbl_lock,
++                                                     irqflags);
++                              vblwait->reply.sequence = seq;
++                              goto done;
++                      }
++              }
++
++              if (atomic_read(&dev->vbl_signal_pending) >= 100) {
++                      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++                      ret = -EBUSY;
++                      goto done;
++              }
++
++              spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++              vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
++                                   DRM_MEM_DRIVER);
++              if (!vbl_sig) {
++                      ret = -ENOMEM;
++                      goto done;
++              }
++
++              ret = drm_vblank_get(dev, crtc);
++              if (ret) {
++                      drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
++                               DRM_MEM_DRIVER);
++                      return ret;
++              }
++
++              atomic_inc(&dev->vbl_signal_pending);
++
++              vbl_sig->sequence = vblwait->request.sequence;
++              vbl_sig->info.si_signo = vblwait->request.signal;
++              vbl_sig->task = current;
++
++              spin_lock_irqsave(&dev->vbl_lock, irqflags);
++
++              list_add_tail(&vbl_sig->head, vbl_sigs);
++
++              spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++              vblwait->reply.sequence = seq;
++      } else {
++              DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
++                          ((drm_vblank_count(dev, crtc)
++                            - vblwait->request.sequence) <= (1 << 23)));
++
++              if (ret != -EINTR) {
++                      struct timeval now;
++
++                      do_gettimeofday(&now);
++
++                      vblwait->reply.tval_sec = now.tv_sec;
++                      vblwait->reply.tval_usec = now.tv_usec;
++                      vblwait->reply.sequence = drm_vblank_count(dev, crtc);
++              }
++      }
++
++done:
++      drm_vblank_put(dev, crtc);
++      return ret;
++}
++
++/**
++ * Send the VBLANK signals.
++ *
++ * \param dev DRM device.
++ * \param crtc CRTC where the vblank event occurred
++ *
++ * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
++ *
++ * If a signal is not requested, then calls vblank_wait().
++ */
++static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
++{
++      struct drm_vbl_sig *vbl_sig, *tmp;
++      struct list_head *vbl_sigs;
++      unsigned int vbl_seq;
++      unsigned long flags;
++
++      spin_lock_irqsave(&dev->vbl_lock, flags);
++
++      vbl_sigs = &dev->vbl_sigs[crtc];
++      vbl_seq = drm_vblank_count(dev, crtc);
++
++      list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
++          if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
++              vbl_sig->info.si_code = vbl_seq;
++              send_sig_info(vbl_sig->info.si_signo,
++                            &vbl_sig->info, vbl_sig->task);
++
++              list_del(&vbl_sig->head);
++
++              drm_free(vbl_sig, sizeof(*vbl_sig),
++                       DRM_MEM_DRIVER);
++              atomic_dec(&dev->vbl_signal_pending);
++              drm_vblank_put(dev, crtc);
++          }
++      }
++
++      spin_unlock_irqrestore(&dev->vbl_lock, flags);
++}
++
++/**
++ * drm_handle_vblank - handle a vblank event
++ * @dev: DRM device
++ * @crtc: where this event occurred
++ *
++ * Drivers should call this routine in their vblank interrupt handlers to
++ * update the vblank counter and send any signals that may be pending.
++ */
++void drm_handle_vblank(struct drm_device *dev, int crtc)
++{
++      atomic_inc(&dev->_vblank_count[crtc]);
++      DRM_WAKEUP(&dev->vbl_queue[crtc]);
++      drm_vbl_send_signals(dev, crtc);
++}
++EXPORT_SYMBOL(drm_handle_vblank);
++
++/**
++ * Tasklet wrapper function.
++ *
++ * \param data DRM device in disguise.
++ *
++ * Attempts to grab the HW lock and calls the driver callback on success. On
++ * failure, leave the lock marked as contended so the callback can be called
++ * from drm_unlock().
++ */
++static void drm_locked_tasklet_func(unsigned long data)
++{
++      struct drm_device *dev = (struct drm_device *)data;
++      unsigned long irqflags;
++      void (*tasklet_func)(struct drm_device *);
++      
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++      tasklet_func = dev->locked_tasklet_func;
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++
++      if (!tasklet_func ||
++          !drm_lock_take(&dev->lock,
++                         DRM_KERNEL_CONTEXT)) {
++              return;
++      }
++
++      dev->lock.lock_time = jiffies;
++      atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++      tasklet_func = dev->locked_tasklet_func;
++      dev->locked_tasklet_func = NULL;
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++      
++      if (tasklet_func != NULL)
++              tasklet_func(dev);
++
++      drm_lock_free(&dev->lock,
++                    DRM_KERNEL_CONTEXT);
++}
++
++/**
++ * Schedule a tasklet to call back a driver hook with the HW lock held.
++ *
++ * \param dev DRM device.
++ * \param func Driver callback.
++ *
++ * This is intended for triggering actions that require the HW lock from an
++ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
++ * completes. Note that the callback may be called from interrupt or process
++ * context, it must not make any assumptions about this. Also, the HW lock will
++ * be held with the kernel context or any client context.
++ */
++void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
++{
++      unsigned long irqflags;
++      static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
++          test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
++              return;
++
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++
++      if (dev->locked_tasklet_func) {
++              spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++              return;
++      }
++
++      dev->locked_tasklet_func = func;
++
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++
++      drm_tasklet.data = (unsigned long)dev;
++
++      tasklet_hi_schedule(&drm_tasklet);
++}
++EXPORT_SYMBOL(drm_locked_tasklet);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_lock.c git-nokia/drivers/gpu/drm-tungsten/drm_lock.c
+--- git/drivers/gpu/drm-tungsten/drm_lock.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_lock.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,389 @@
++/**
++ * \file drm_lock.c
++ * IOCTLs for locking
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++static int drm_notifier(void *priv);
++
++/**
++ * Lock ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Add the current task to the lock wait queue, and attempt to take to lock.
++ */
++int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      DECLARE_WAITQUEUE(entry, current);
++      struct drm_lock *lock = data;
++      int ret = 0;
++
++      ++file_priv->lock_count;
++
++      if (lock->context == DRM_KERNEL_CONTEXT) {
++              DRM_ERROR("Process %d using kernel context %d\n",
++                        current->pid, lock->context);
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
++                lock->context, current->pid,
++                dev->lock.hw_lock->lock, lock->flags);
++
++      if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
++              if (lock->context < 0)
++                      return -EINVAL;
++
++      add_wait_queue(&dev->lock.lock_queue, &entry);
++      spin_lock_bh(&dev->lock.spinlock);
++      dev->lock.user_waiters++;
++      spin_unlock_bh(&dev->lock.spinlock);
++      for (;;) {
++              __set_current_state(TASK_INTERRUPTIBLE);
++              if (!dev->lock.hw_lock) {
++                      /* Device has been unregistered */
++                      ret = -EINTR;
++                      break;
++              }
++              if (drm_lock_take(&dev->lock, lock->context)) {
++                      dev->lock.file_priv = file_priv;
++                      dev->lock.lock_time = jiffies;
++                      atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++                      break;  /* Got lock */
++              }
++
++              /* Contention */
++              schedule();
++              if (signal_pending(current)) {
++                      ret = -ERESTARTSYS;
++                      break;
++              }
++      }
++      spin_lock_bh(&dev->lock.spinlock);
++      dev->lock.user_waiters--;
++      spin_unlock_bh(&dev->lock.spinlock);
++      __set_current_state(TASK_RUNNING);
++      remove_wait_queue(&dev->lock.lock_queue, &entry);
++
++      DRM_DEBUG("%d %s\n", lock->context,
++                ret ? "interrupted" : "has lock");
++      if (ret) return ret;
++
++      /* don't set the block all signals on the master process for now 
++       * really probably not the correct answer but lets us debug xkb
++       * xserver for now */
++      if (!file_priv->master) {
++              sigemptyset(&dev->sigmask);
++              sigaddset(&dev->sigmask, SIGSTOP);
++              sigaddset(&dev->sigmask, SIGTSTP);
++              sigaddset(&dev->sigmask, SIGTTIN);
++              sigaddset(&dev->sigmask, SIGTTOU);
++              dev->sigdata.context = lock->context;
++              dev->sigdata.lock = dev->lock.hw_lock;
++              block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
++      }
++
++      if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
++              dev->driver->dma_ready(dev);
++
++      if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
++      {
++              if (dev->driver->dma_quiescent(dev)) {
++                      DRM_DEBUG("%d waiting for DMA quiescent\n",
++                                lock->context);
++                      return -EBUSY;
++              }
++      }
++
++      if (dev->driver->kernel_context_switch &&
++          dev->last_context != lock->context) {
++              dev->driver->kernel_context_switch(dev, dev->last_context,
++                                                 lock->context);
++      }
++
++      return 0;
++}
++
++/**
++ * Unlock ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Transfer and free the lock.
++ */
++int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_lock *lock = data;
++      unsigned long irqflags;
++      void (*tasklet_func)(struct drm_device *);
++
++      if (lock->context == DRM_KERNEL_CONTEXT) {
++              DRM_ERROR("Process %d using kernel context %d\n",
++                        current->pid, lock->context);
++              return -EINVAL;
++      }
++
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++      tasklet_func = dev->locked_tasklet_func;
++      dev->locked_tasklet_func = NULL;
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++      if (tasklet_func != NULL)
++              tasklet_func(dev);
++
++      atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++
++      /* kernel_context_switch isn't used by any of the x86 drm
++       * modules but is required by the Sparc driver.
++       */
++      if (dev->driver->kernel_context_switch_unlock)
++              dev->driver->kernel_context_switch_unlock(dev);
++      else {
++              if (drm_lock_free(&dev->lock,lock->context)) {
++                      /* FIXME: Should really bail out here. */
++              }
++      }
++
++      unblock_all_signals();
++      return 0;
++}
++
++/**
++ * Take the heavyweight lock.
++ *
++ * \param lock lock pointer.
++ * \param context locking context.
++ * \return one if the lock is held, or zero otherwise.
++ *
++ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
++ */
++int drm_lock_take(struct drm_lock_data *lock_data,
++                unsigned int context)
++{
++      unsigned int old, new, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      spin_lock_bh(&lock_data->spinlock);
++      do {
++              old = *lock;
++              if (old & _DRM_LOCK_HELD)
++                      new = old | _DRM_LOCK_CONT;
++              else {
++                      new = context | _DRM_LOCK_HELD |
++                              ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
++                               _DRM_LOCK_CONT : 0);
++              }
++              prev = cmpxchg(lock, old, new);
++      } while (prev != old);
++      spin_unlock_bh(&lock_data->spinlock);
++
++      /* Warn on recursive locking of user contexts. */
++      if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
++              if (context != DRM_KERNEL_CONTEXT) {
++                      DRM_ERROR("%d holds heavyweight lock\n",
++                                context);
++              }
++              return 0;
++      }
++
++      return !_DRM_LOCK_IS_HELD(old);
++}
++
++/**
++ * This takes a lock forcibly and hands it to context.        Should ONLY be used
++ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
++ *
++ * \param dev DRM device.
++ * \param lock lock pointer.
++ * \param context locking context.
++ * \return always one.
++ *
++ * Resets the lock file pointer.
++ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
++ */
++static int drm_lock_transfer(struct drm_lock_data *lock_data,
++                           unsigned int context)
++{
++      unsigned int old, new, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      lock_data->file_priv = NULL;
++      do {
++              old = *lock;
++              new = context | _DRM_LOCK_HELD;
++              prev = cmpxchg(lock, old, new);
++      } while (prev != old);
++      return 1;
++}
++
++/**
++ * Free lock.
++ *
++ * \param dev DRM device.
++ * \param lock lock.
++ * \param context context.
++ *
++ * Resets the lock file pointer.
++ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
++ * waiting on the lock queue.
++ */
++int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
++{
++      unsigned int old, new, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      spin_lock_bh(&lock_data->spinlock);
++      if (lock_data->kernel_waiters != 0) {
++              drm_lock_transfer(lock_data, 0);
++              lock_data->idle_has_lock = 1;
++              spin_unlock_bh(&lock_data->spinlock);
++              return 1;
++      }
++      spin_unlock_bh(&lock_data->spinlock);
++
++      do {
++              old = *lock;
++              new = _DRM_LOCKING_CONTEXT(old);
++              prev = cmpxchg(lock, old, new);
++      } while (prev != old);
++
++      if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
++              DRM_ERROR("%d freed heavyweight lock held by %d\n",
++                        context, _DRM_LOCKING_CONTEXT(old));
++              return 1;
++      }
++      wake_up_interruptible(&lock_data->lock_queue);
++      return 0;
++}
++
++/**
++ * If we get here, it means that the process has called DRM_IOCTL_LOCK
++ * without calling DRM_IOCTL_UNLOCK.
++ *
++ * If the lock is not held, then let the signal proceed as usual.  If the lock
++ * is held, then set the contended flag and keep the signal blocked.
++ *
++ * \param priv pointer to a drm_sigdata structure.
++ * \return one if the signal should be delivered normally, or zero if the
++ * signal should be blocked.
++ */
++static int drm_notifier(void *priv)
++{
++      struct drm_sigdata *s = (struct drm_sigdata *) priv;
++      unsigned int old, new, prev;
++
++      /* Allow signal delivery if lock isn't held */
++      if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
++          || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
++              return 1;
++
++      /* Otherwise, set flag to force call to
++         drmUnlock */
++      do {
++              old = s->lock->lock;
++              new = old | _DRM_LOCK_CONT;
++              prev = cmpxchg(&s->lock->lock, old, new);
++      } while (prev != old);
++      return 0;
++}
++
++/**
++ * This function returns immediately and takes the hw lock
++ * with the kernel context if it is free, otherwise it gets the highest priority when and if
++ * it is eventually released.
++ *
++ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
++ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
++ * a deadlock, which is why the "idlelock" was invented).
++ *
++ * This should be sufficient to wait for GPU idle without
++ * having to worry about starvation.
++ */
++
++void drm_idlelock_take(struct drm_lock_data *lock_data)
++{
++      int ret = 0;
++
++      spin_lock_bh(&lock_data->spinlock);
++      lock_data->kernel_waiters++;
++      if (!lock_data->idle_has_lock) {
++
++              spin_unlock_bh(&lock_data->spinlock);
++              ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
++              spin_lock_bh(&lock_data->spinlock);
++
++              if (ret == 1)
++                      lock_data->idle_has_lock = 1;
++      }
++      spin_unlock_bh(&lock_data->spinlock);
++}
++EXPORT_SYMBOL(drm_idlelock_take);
++
++void drm_idlelock_release(struct drm_lock_data *lock_data)
++{
++      unsigned int old, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      spin_lock_bh(&lock_data->spinlock);
++      if (--lock_data->kernel_waiters == 0) {
++              if (lock_data->idle_has_lock) {
++                      do {
++                              old = *lock;
++                              prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
++                      } while (prev != old);
++                      wake_up_interruptible(&lock_data->lock_queue);
++                      lock_data->idle_has_lock = 0;
++              }
++      }
++      spin_unlock_bh(&lock_data->spinlock);
++}
++EXPORT_SYMBOL(drm_idlelock_release);
++
++int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
++{
++
++      return (file_priv->lock_count && dev->lock.hw_lock &&
++              _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
++              dev->lock.file_priv == file_priv);
++}
++
++EXPORT_SYMBOL(drm_i_have_hw_lock);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory.c git-nokia/drivers/gpu/drm-tungsten/drm_memory.c
+--- git/drivers/gpu/drm-tungsten/drm_memory.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,374 @@
++/**
++ * \file drm_memory.c
++ * Memory management wrappers for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/highmem.h>
++#include <asm-generic/iomap.h>
++#include "drmP.h"
++
++static struct {
++      spinlock_t lock;
++      uint64_t cur_used;
++      uint64_t emer_used;
++      uint64_t low_threshold;
++      uint64_t high_threshold;
++      uint64_t emer_threshold;
++} drm_memctl = {
++      .lock = SPIN_LOCK_UNLOCKED
++};
++
++static inline size_t drm_size_align(size_t size)
++{
++      size_t tmpSize = 4;
++      if (size > PAGE_SIZE)
++              return PAGE_ALIGN(size);
++
++      while (tmpSize < size)
++              tmpSize <<= 1;
++
++      return (size_t) tmpSize;
++}
++
++int drm_alloc_memctl(size_t size)
++{
++        int ret = 0;
++      unsigned long a_size = drm_size_align(size);
++      unsigned long new_used;
++
++      spin_lock(&drm_memctl.lock);
++      new_used = drm_memctl.cur_used + a_size;
++      if (likely(new_used < drm_memctl.high_threshold)) {
++              drm_memctl.cur_used = new_used;
++              goto out;
++      }
++
++      /*
++       * Allow small allocations from root-only processes to
++       * succeed until the emergency threshold is reached.
++       */
++
++      new_used += drm_memctl.emer_used;
++      if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
++                   (a_size > 16*PAGE_SIZE) ||
++                   (new_used > drm_memctl.emer_threshold))) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      drm_memctl.cur_used = drm_memctl.high_threshold;
++      drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
++out:
++      spin_unlock(&drm_memctl.lock);
++      return ret;
++}
++EXPORT_SYMBOL(drm_alloc_memctl);
++
++
++void drm_free_memctl(size_t size)
++{
++      unsigned long a_size = drm_size_align(size);
++
++      spin_lock(&drm_memctl.lock);
++      if (likely(a_size >= drm_memctl.emer_used)) {
++              a_size -= drm_memctl.emer_used;
++              drm_memctl.emer_used = 0;
++      } else {
++              drm_memctl.emer_used -= a_size;
++              a_size = 0;
++      }
++      drm_memctl.cur_used -= a_size;
++      spin_unlock(&drm_memctl.lock);
++}
++EXPORT_SYMBOL(drm_free_memctl);
++
++void drm_query_memctl(uint64_t *cur_used,
++                    uint64_t *emer_used,
++                    uint64_t *low_threshold,
++                    uint64_t *high_threshold,
++                    uint64_t *emer_threshold)
++{
++      spin_lock(&drm_memctl.lock);
++      *cur_used = drm_memctl.cur_used;
++      *emer_used = drm_memctl.emer_used;
++      *low_threshold = drm_memctl.low_threshold;
++      *high_threshold = drm_memctl.high_threshold;
++      *emer_threshold = drm_memctl.emer_threshold;
++      spin_unlock(&drm_memctl.lock);
++}
++EXPORT_SYMBOL(drm_query_memctl);
++
++void drm_init_memctl(size_t p_low_threshold,
++                   size_t p_high_threshold,
++                   size_t unit_size)
++{
++      spin_lock(&drm_memctl.lock);
++      drm_memctl.emer_used = 0;
++      drm_memctl.cur_used = 0;
++      drm_memctl.low_threshold = p_low_threshold * unit_size;
++      drm_memctl.high_threshold = p_high_threshold * unit_size;
++      drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
++              drm_memctl.high_threshold;
++      spin_unlock(&drm_memctl.lock);
++}
++
++
++#ifndef DEBUG_MEMORY
++
++/** No-op. */
++void drm_mem_init(void)
++{
++}
++
++/**
++ * Called when "/proc/dri/%dev%/mem" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param len requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * No-op.
++ */
++int drm_mem_info(char *buf, char **start, off_t offset,
++               int len, int *eof, void *data)
++{
++      return 0;
++}
++
++/** Wrapper around kmalloc() */
++void *drm_calloc(size_t nmemb, size_t size, int area)
++{
++      return kcalloc(nmemb, size, GFP_KERNEL);
++}
++EXPORT_SYMBOL(drm_calloc);
++
++/** Wrapper around kmalloc() and kfree() */
++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
++{
++      void *pt;
++
++      if (!(pt = kmalloc(size, GFP_KERNEL)))
++              return NULL;
++      if (oldpt && oldsize) {
++              memcpy(pt, oldpt, DRM_MIN(oldsize,size));
++              kfree(oldpt);
++      }
++      return pt;
++}
++
++/**
++ * Allocate pages.
++ *
++ * \param order size order.
++ * \param area memory area. (Not used.)
++ * \return page address on success, or zero on failure.
++ *
++ * Allocate and reserve free pages.
++ */
++unsigned long drm_alloc_pages(int order, int area)
++{
++      unsigned long address;
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      address = __get_free_pages(GFP_KERNEL, order);
++      if (!address)
++              return 0;
++
++      /* Zero */
++      memset((void *)address, 0, bytes);
++
++      /* Reserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return address;
++}
++
++/**
++ * Free pages.
++ *
++ * \param address address of the pages to free.
++ * \param order size order.
++ * \param area memory area. (Not used.)
++ *
++ * Unreserve and free pages allocated by alloc_pages().
++ */
++void drm_free_pages(unsigned long address, int order, int area)
++{
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      if (!address)
++              return;
++
++      /* Unreserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              ClearPageReserved(virt_to_page(addr));
++      }
++
++      free_pages(address, order);
++}
++
++#if __OS_HAS_AGP
++static void *agp_remap(unsigned long offset, unsigned long size,
++                            struct drm_device * dev)
++{
++      unsigned long *phys_addr_map, i, num_pages =
++          PAGE_ALIGN(size) / PAGE_SIZE;
++      struct drm_agp_mem *agpmem;
++      struct page **page_map;
++      void *addr;
++
++      size = PAGE_ALIGN(size);
++
++#ifdef __alpha__
++      offset -= dev->hose->mem_space->start;
++#endif
++
++      list_for_each_entry(agpmem, &dev->agp->memory, head)
++              if (agpmem->bound <= offset
++                  && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
++                  (offset + size))
++                      break;
++      if (!agpmem)
++              return NULL;
++
++      /*
++       * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
++       * the CPU do not get remapped by the GART.  We fix this by using the kernel's
++       * page-table instead (that's probably faster anyhow...).
++       */
++      /* note: use vmalloc() because num_pages could be large... */
++      page_map = vmalloc(num_pages * sizeof(struct page *));
++      if (!page_map)
++              return NULL;
++
++      phys_addr_map =
++          agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
++      for (i = 0; i < num_pages; ++i)
++              page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
++      addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
++      vfree(page_map);
++
++      return addr;
++}
++
++/** Wrapper around agp_allocate_memory() */
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++      return drm_agp_allocate_memory(pages, type);
++}
++#else
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++      return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
++}
++#endif
++
++/** Wrapper around agp_free_memory() */
++int drm_free_agp(DRM_AGP_MEM * handle, int pages)
++{
++      return drm_agp_free_memory(handle) ? 0 : -EINVAL;
++}
++EXPORT_SYMBOL(drm_free_agp);
++
++/** Wrapper around agp_bind_memory() */
++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
++{
++      return drm_agp_bind_memory(handle, start);
++}
++
++/** Wrapper around agp_unbind_memory() */
++int drm_unbind_agp(DRM_AGP_MEM * handle)
++{
++      return drm_agp_unbind_memory(handle);
++}
++EXPORT_SYMBOL(drm_unbind_agp);
++
++#else  /* __OS_HAS_AGP*/
++static void *agp_remap(unsigned long offset, unsigned long size,
++                     struct drm_device * dev)
++{
++      return NULL;
++}
++#endif                                /* agp */
++#else
++static void *agp_remap(unsigned long offset, unsigned long size,
++                     struct drm_device * dev)
++{
++      return NULL;
++}
++#endif                                /* debug_memory */
++
++void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
++{
++      if (drm_core_has_AGP(dev) &&
++          dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
++              map->handle = agp_remap(map->offset, map->size, dev);
++      else
++              map->handle = ioremap(map->offset, map->size);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremap);
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
++{
++      map->handle = ioremap_wc(map->offset, map->size);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremap_wc);
++#endif
++
++void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
++{
++      if (!map->handle || !map->size)
++              return;
++
++      if (drm_core_has_AGP(dev) &&
++          dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
++              vunmap(map->handle);
++      else
++              iounmap(map->handle);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory_debug.c git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.c
+--- git/drivers/gpu/drm-tungsten/drm_memory_debug.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,403 @@
++/**
++ * \file drm_memory_debug.c
++ * Memory management wrappers for DRM.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#ifdef DEBUG_MEMORY
++
++typedef struct drm_mem_stats {
++      const char *name;
++      int succeed_count;
++      int free_count;
++      int fail_count;
++      unsigned long bytes_allocated;
++      unsigned long bytes_freed;
++} drm_mem_stats_t;
++
++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
++static unsigned long drm_ram_available = 0;   /* In pages */
++static unsigned long drm_ram_used = 0;
++static drm_mem_stats_t drm_mem_stats[] = {
++      [DRM_MEM_DMA] = {"dmabufs"},
++      [DRM_MEM_SAREA] = {"sareas"},
++      [DRM_MEM_DRIVER] = {"driver"},
++      [DRM_MEM_MAGIC] = {"magic"},
++      [DRM_MEM_IOCTLS] = {"ioctltab"},
++      [DRM_MEM_MAPS] = {"maplist"},
++      [DRM_MEM_VMAS] = {"vmalist"},
++      [DRM_MEM_BUFS] = {"buflist"},
++      [DRM_MEM_SEGS] = {"seglist"},
++      [DRM_MEM_PAGES] = {"pagelist"},
++      [DRM_MEM_FILES] = {"files"},
++      [DRM_MEM_QUEUES] = {"queues"},
++      [DRM_MEM_CMDS] = {"commands"},
++      [DRM_MEM_MAPPINGS] = {"mappings"},
++      [DRM_MEM_BUFLISTS] = {"buflists"},
++      [DRM_MEM_AGPLISTS] = {"agplist"},
++      [DRM_MEM_SGLISTS] = {"sglist"},
++      [DRM_MEM_TOTALAGP] = {"totalagp"},
++      [DRM_MEM_BOUNDAGP] = {"boundagp"},
++      [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
++      [DRM_MEM_CTXLIST] = {"ctxlist"},
++      [DRM_MEM_STUB] = {"stub"},
++      {NULL, 0,}              /* Last entry must be null */
++};
++
++void drm_mem_init(void)
++{
++      drm_mem_stats_t *mem;
++      struct sysinfo si;
++
++      for (mem = drm_mem_stats; mem->name; ++mem) {
++              mem->succeed_count = 0;
++              mem->free_count = 0;
++              mem->fail_count = 0;
++              mem->bytes_allocated = 0;
++              mem->bytes_freed = 0;
++      }
++
++      si_meminfo(&si);
++      drm_ram_available = si.totalram;
++      drm_ram_used = 0;
++}
++
++/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
++
++static int drm__mem_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data)
++{
++      drm_mem_stats_t *pt;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *eof = 0;
++      *start = &buf[offset];
++
++      DRM_PROC_PRINT("                  total counts                  "
++                     " |    outstanding  \n");
++      DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
++                     " | allocs      bytes\n\n");
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "system", 0, 0, 0,
++                     drm_ram_available << (PAGE_SHIFT - 10));
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "locked", 0, 0, 0, drm_ram_used >> 10);
++      DRM_PROC_PRINT("\n");
++      for (pt = drm_mem_stats; pt->name; pt++) {
++              DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
++                             pt->name,
++                             pt->succeed_count,
++                             pt->free_count,
++                             pt->fail_count,
++                             pt->bytes_allocated,
++                             pt->bytes_freed,
++                             pt->succeed_count - pt->free_count,
++                             (long)pt->bytes_allocated
++                             - (long)pt->bytes_freed);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++int drm_mem_info(char *buf, char **start, off_t offset,
++               int len, int *eof, void *data)
++{
++      int ret;
++
++      spin_lock(&drm_mem_lock);
++      ret = drm__mem_info(buf, start, offset, len, eof, data);
++      spin_unlock(&drm_mem_lock);
++      return ret;
++}
++
++void *drm_alloc(size_t size, int area)
++{
++      void *pt;
++
++      if (!size) {
++              DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
++              return NULL;
++      }
++
++      if (!(pt = kmalloc(size, GFP_KERNEL))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return NULL;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += size;
++      spin_unlock(&drm_mem_lock);
++      return pt;
++}
++EXPORT_SYMBOL(drm_alloc);
++
++void *drm_calloc(size_t nmemb, size_t size, int area)
++{
++      void *addr;
++
++      addr = drm_alloc(nmemb * size, area);
++      if (addr != NULL)
++              memset((void *)addr, 0, size * nmemb);
++
++      return addr;
++}
++EXPORT_SYMBOL(drm_calloc);
++
++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
++{
++      void *pt;
++
++      if (!(pt = drm_alloc(size, area)))
++              return NULL;
++      if (oldpt && oldsize) {
++              memcpy(pt, oldpt, oldsize);
++              drm_free(oldpt, oldsize, area);
++      }
++      return pt;
++}
++EXPORT_SYMBOL(drm_realloc);
++
++void drm_free(void *pt, size_t size, int area)
++{
++      int alloc_count;
++      int free_count;
++
++      if (!pt)
++              DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
++      else
++              kfree(pt);
++      spin_lock(&drm_mem_lock);
++      drm_mem_stats[area].bytes_freed += size;
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++EXPORT_SYMBOL(drm_free);
++
++unsigned long drm_alloc_pages(int order, int area)
++{
++      unsigned long address;
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      spin_lock(&drm_mem_lock);
++      if ((drm_ram_used >> PAGE_SHIFT)
++          > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_unlock(&drm_mem_lock);
++
++      address = __get_free_pages(GFP_KERNEL, order);
++      if (!address) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += bytes;
++      drm_ram_used += bytes;
++      spin_unlock(&drm_mem_lock);
++
++      /* Zero outside the lock */
++      memset((void *)address, 0, bytes);
++
++      /* Reserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return address;
++}
++
++void drm_free_pages(unsigned long address, int order, int area)
++{
++      unsigned long bytes = PAGE_SIZE << order;
++      int alloc_count;
++      int free_count;
++      unsigned long addr;
++      unsigned int sz;
++
++      if (!address) {
++              DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++      } else {
++              /* Unreserve */
++              for (addr = address, sz = bytes;
++                   sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++                      ClearPageReserved(virt_to_page(addr));
++              }
++              free_pages(address, order);
++      }
++
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_freed += bytes;
++      drm_ram_used -= bytes;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++
++#if __OS_HAS_AGP
++
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++      DRM_AGP_MEM *handle;
++
++      if (!pages) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
++              return NULL;
++      }
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      if ((handle = drm_agp_allocate_memory(pages, type))) {
++#else
++      if ((handle = drm_agp_allocate_memory(dev->agp->bridge, pages, type))) {
++#endif
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return handle;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return NULL;
++}
++
++int drm_free_agp(DRM_AGP_MEM * handle, int pages)
++{
++      int alloc_count;
++      int free_count;
++      int retval = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                            "Attempt to free NULL AGP handle\n");
++              return retval;
++      }
++
++      if (drm_agp_free_memory(handle)) {
++              spin_lock(&drm_mem_lock);
++              free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
++              alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              if (free_count > alloc_count) {
++                      DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                                    "Excess frees: %d frees, %d allocs\n",
++                                    free_count, alloc_count);
++              }
++              return 0;
++      }
++      return retval;
++}
++
++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
++{
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to bind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if (!(retcode = drm_agp_bind_memory(handle, start))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
++                  += handle->page_count << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return retcode;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return retcode;
++}
++
++int drm_unbind_agp(DRM_AGP_MEM * handle)
++{
++      int alloc_count;
++      int free_count;
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to unbind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if ((retcode = drm_agp_unbind_memory(handle)))
++              return retcode;
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
++      alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++      drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
++          += handle->page_count << PAGE_SHIFT;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++      return retcode;
++}
++
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory_debug.h git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.h
+--- git/drivers/gpu/drm-tungsten/drm_memory_debug.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,379 @@
++/**
++ * \file drm_memory_debug.h
++ * Memory management wrappers for DRM.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++typedef struct drm_mem_stats {
++      const char *name;
++      int succeed_count;
++      int free_count;
++      int fail_count;
++      unsigned long bytes_allocated;
++      unsigned long bytes_freed;
++} drm_mem_stats_t;
++
++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
++static unsigned long drm_ram_available = 0;   /* In pages */
++static unsigned long drm_ram_used = 0;
++static drm_mem_stats_t drm_mem_stats[] =
++{
++      [DRM_MEM_DMA] = {"dmabufs"},
++      [DRM_MEM_SAREA] = {"sareas"},
++      [DRM_MEM_DRIVER] = {"driver"},
++      [DRM_MEM_MAGIC] = {"magic"},
++      [DRM_MEM_IOCTLS] = {"ioctltab"},
++      [DRM_MEM_MAPS] = {"maplist"},
++      [DRM_MEM_VMAS] = {"vmalist"},
++      [DRM_MEM_BUFS] = {"buflist"},
++      [DRM_MEM_SEGS] = {"seglist"},
++      [DRM_MEM_PAGES] = {"pagelist"},
++      [DRM_MEM_FILES] = {"files"},
++      [DRM_MEM_QUEUES] = {"queues"},
++      [DRM_MEM_CMDS] = {"commands"},
++      [DRM_MEM_MAPPINGS] = {"mappings"},
++      [DRM_MEM_BUFLISTS] = {"buflists"},
++      [DRM_MEM_AGPLISTS] = {"agplist"},
++      [DRM_MEM_SGLISTS] = {"sglist"},
++      [DRM_MEM_TOTALAGP] = {"totalagp"},
++      [DRM_MEM_BOUNDAGP] = {"boundagp"},
++      [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
++      [DRM_MEM_CTXLIST] = {"ctxlist"},
++      [DRM_MEM_STUB] = {"stub"},
++      {NULL, 0,}              /* Last entry must be null */
++};
++
++void drm_mem_init (void) {
++      drm_mem_stats_t *mem;
++      struct sysinfo si;
++
++      for (mem = drm_mem_stats; mem->name; ++mem) {
++              mem->succeed_count = 0;
++              mem->free_count = 0;
++              mem->fail_count = 0;
++              mem->bytes_allocated = 0;
++              mem->bytes_freed = 0;
++      }
++
++      si_meminfo(&si);
++      drm_ram_available = si.totalram;
++      drm_ram_used = 0;
++}
++
++/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
++
++static int drm__mem_info (char *buf, char **start, off_t offset,
++                         int request, int *eof, void *data) {
++      drm_mem_stats_t *pt;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *eof = 0;
++      *start = &buf[offset];
++
++      DRM_PROC_PRINT("                  total counts                  "
++                     " |    outstanding  \n");
++      DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
++                     " | allocs      bytes\n\n");
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "system", 0, 0, 0,
++                     drm_ram_available << (PAGE_SHIFT - 10));
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "locked", 0, 0, 0, drm_ram_used >> 10);
++      DRM_PROC_PRINT("\n");
++      for (pt = drm_mem_stats; pt->name; pt++) {
++              DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
++                             pt->name,
++                             pt->succeed_count,
++                             pt->free_count,
++                             pt->fail_count,
++                             pt->bytes_allocated,
++                             pt->bytes_freed,
++                             pt->succeed_count - pt->free_count,
++                             (long)pt->bytes_allocated
++                             - (long)pt->bytes_freed);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++int drm_mem_info (char *buf, char **start, off_t offset,
++                 int len, int *eof, void *data) {
++      int ret;
++
++      spin_lock(&drm_mem_lock);
++      ret = drm__mem_info (buf, start, offset, len, eof, data);
++      spin_unlock(&drm_mem_lock);
++      return ret;
++}
++
++void *drm_alloc (size_t size, int area) {
++      void *pt;
++
++      if (!size) {
++              DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
++              return NULL;
++      }
++
++      if (!(pt = kmalloc(size, GFP_KERNEL))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return NULL;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += size;
++      spin_unlock(&drm_mem_lock);
++      return pt;
++}
++
++void *drm_calloc (size_t nmemb, size_t size, int area) {
++      void *addr;
++
++      addr = drm_alloc (nmemb * size, area);
++      if (addr != NULL)
++              memset((void *)addr, 0, size * nmemb);
++
++      return addr;
++}
++
++void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
++      void *pt;
++
++      if (!(pt = drm_alloc (size, area)))
++              return NULL;
++      if (oldpt && oldsize) {
++              memcpy(pt, oldpt, oldsize);
++              drm_free (oldpt, oldsize, area);
++      }
++      return pt;
++}
++
++void drm_free (void *pt, size_t size, int area) {
++      int alloc_count;
++      int free_count;
++
++      if (!pt)
++              DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
++      else
++              kfree(pt);
++      spin_lock(&drm_mem_lock);
++      drm_mem_stats[area].bytes_freed += size;
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++
++unsigned long drm_alloc_pages (int order, int area) {
++      unsigned long address;
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      spin_lock(&drm_mem_lock);
++      if ((drm_ram_used >> PAGE_SHIFT)
++          > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_unlock(&drm_mem_lock);
++
++      address = __get_free_pages(GFP_KERNEL, order);
++      if (!address) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += bytes;
++      drm_ram_used += bytes;
++      spin_unlock(&drm_mem_lock);
++
++      /* Zero outside the lock */
++      memset((void *)address, 0, bytes);
++
++      /* Reserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return address;
++}
++
++void drm_free_pages (unsigned long address, int order, int area) {
++      unsigned long bytes = PAGE_SIZE << order;
++      int alloc_count;
++      int free_count;
++      unsigned long addr;
++      unsigned int sz;
++
++      if (!address) {
++              DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++      } else {
++              /* Unreserve */
++              for (addr = address, sz = bytes;
++                   sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++                      ClearPageReserved(virt_to_page(addr));
++              }
++              free_pages(address, order);
++      }
++
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_freed += bytes;
++      drm_ram_used -= bytes;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++
++#if __OS_HAS_AGP
++
++DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) {
++      DRM_AGP_MEM *handle;
++
++      if (!pages) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
++              return NULL;
++      }
++
++      if ((handle = drm_agp_allocate_memory (pages, type))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return handle;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return NULL;
++}
++
++int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
++      int alloc_count;
++      int free_count;
++      int retval = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                            "Attempt to free NULL AGP handle\n");
++              return retval;
++      }
++
++      if (drm_agp_free_memory (handle)) {
++              spin_lock(&drm_mem_lock);
++              free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
++              alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              if (free_count > alloc_count) {
++                      DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                                    "Excess frees: %d frees, %d allocs\n",
++                                    free_count, alloc_count);
++              }
++              return 0;
++      }
++      return retval;
++}
++
++int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to bind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if (!(retcode = drm_agp_bind_memory (handle, start))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
++                  += handle->page_count << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return retcode;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return retcode;
++}
++
++int drm_unbind_agp (DRM_AGP_MEM * handle) {
++      int alloc_count;
++      int free_count;
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to unbind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if ((retcode = drm_agp_unbind_memory (handle)))
++              return retcode;
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
++      alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++      drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
++          += handle->page_count << PAGE_SHIFT;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++      return retcode;
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory.h git-nokia/drivers/gpu/drm-tungsten/drm_memory.h
+--- git/drivers/gpu/drm-tungsten/drm_memory.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,61 @@
++/**
++ * \file drm_memory.h
++ * Memory management wrappers for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++/**
++ * Cut down version of drm_memory_debug.h, which used to be called
++ * drm_memory.h.
++ */
++
++#if __OS_HAS_AGP
++
++#include <linux/vmalloc.h>
++
++#ifdef HAVE_PAGE_AGP
++#include <asm/agp.h>
++#else
++# ifdef __powerpc__
++#  define PAGE_AGP    __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
++# else
++#  define PAGE_AGP    PAGE_KERNEL
++# endif
++#endif
++
++#else                         /* __OS_HAS_AGP */
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_mm.c git-nokia/drivers/gpu/drm-tungsten/drm_mm.c
+--- git/drivers/gpu/drm-tungsten/drm_mm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_mm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,298 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++
++/*
++ * Generic simple memory manager implementation. Intended to be used as a base
++ * class implementation for more advanced memory managers.
++ *
++ * Note that the algorithm used is quite simple and there might be substantial
++ * performance gains if a smarter free list is implemented. Currently it is just an
++ * unordered stack of free regions. This could easily be improved if an RB-tree
++ * is used instead. At least if we expect heavy fragmentation.
++ *
++ * Aligned allocations can also see improvement.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include <linux/slab.h>
++
++unsigned long drm_mm_tail_space(struct drm_mm *mm)
++{
++      struct list_head *tail_node;
++      struct drm_mm_node *entry;
++
++      tail_node = mm->ml_entry.prev;
++      entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++      if (!entry->free)
++              return 0;
++
++      return entry->size;
++}
++
++int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
++{
++      struct list_head *tail_node;
++      struct drm_mm_node *entry;
++
++      tail_node = mm->ml_entry.prev;
++      entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++      if (!entry->free)
++              return -ENOMEM;
++
++      if (entry->size <= size)
++              return -ENOMEM;
++
++      entry->size -= size;
++      return 0;
++}
++
++
++static int drm_mm_create_tail_node(struct drm_mm *mm,
++                          unsigned long start,
++                          unsigned long size)
++{
++      struct drm_mm_node *child;
++
++      child = (struct drm_mm_node *)
++              drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
++      if (!child)
++              return -ENOMEM;
++
++      child->free = 1;
++      child->size = size;
++      child->start = start;
++      child->mm = mm;
++
++      list_add_tail(&child->ml_entry, &mm->ml_entry);
++      list_add_tail(&child->fl_entry, &mm->fl_entry);
++
++      return 0;
++}
++
++
++int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
++{
++      struct list_head *tail_node;
++      struct drm_mm_node *entry;
++
++      tail_node = mm->ml_entry.prev;
++      entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++      if (!entry->free) {
++              return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
++      }
++      entry->size += size;
++      return 0;
++}
++
++static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
++                                          unsigned long size)
++{
++      struct drm_mm_node *child;
++
++      child = (struct drm_mm_node *)
++              drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
++      if (!child)
++              return NULL;
++
++      INIT_LIST_HEAD(&child->fl_entry);
++
++      child->free = 0;
++      child->size = size;
++      child->start = parent->start;
++      child->mm = parent->mm;
++
++      list_add_tail(&child->ml_entry, &parent->ml_entry);
++      INIT_LIST_HEAD(&child->fl_entry);
++
++      parent->size -= size;
++      parent->start += size;
++      return child;
++}
++
++struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
++                              unsigned long size, unsigned alignment)
++{
++
++      struct drm_mm_node *align_splitoff = NULL;
++      struct drm_mm_node *child;
++      unsigned tmp = 0;
++
++      if (alignment)
++              tmp = parent->start % alignment;
++
++      if (tmp) {
++              align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
++              if (!align_splitoff)
++                      return NULL;
++      }
++
++      if (parent->size == size) {
++              list_del_init(&parent->fl_entry);
++              parent->free = 0;
++              return parent;
++      } else {
++              child = drm_mm_split_at_start(parent, size);
++      }
++
++      if (align_splitoff)
++              drm_mm_put_block(align_splitoff);
++
++      return child;
++}
++EXPORT_SYMBOL(drm_mm_get_block);
++
++/*
++ * Put a block. Merge with the previous and / or next block if they are free.
++ * Otherwise add to the free stack.
++ */
++
++void drm_mm_put_block(struct drm_mm_node * cur)
++{
++
++      struct drm_mm *mm = cur->mm;
++      struct list_head *cur_head = &cur->ml_entry;
++      struct list_head *root_head = &mm->ml_entry;
++      struct drm_mm_node *prev_node = NULL;
++      struct drm_mm_node *next_node;
++
++      int merged = 0;
++
++      if (cur_head->prev != root_head) {
++              prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
++              if (prev_node->free) {
++                      prev_node->size += cur->size;
++                      merged = 1;
++              }
++      }
++      if (cur_head->next != root_head) {
++              next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
++              if (next_node->free) {
++                      if (merged) {
++                              prev_node->size += next_node->size;
++                              list_del(&next_node->ml_entry);
++                              list_del(&next_node->fl_entry);
++                              drm_ctl_free(next_node, sizeof(*next_node),
++                                           DRM_MEM_MM);
++                      } else {
++                              next_node->size += cur->size;
++                              next_node->start = cur->start;
++                              merged = 1;
++                      }
++              }
++      }
++      if (!merged) {
++              cur->free = 1;
++              list_add(&cur->fl_entry, &mm->fl_entry);
++      } else {
++              list_del(&cur->ml_entry);
++              drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
++      }
++}
++EXPORT_SYMBOL(drm_mm_put_block);
++
++struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
++                                unsigned long size,
++                                unsigned alignment, int best_match)
++{
++      struct list_head *list;
++      const struct list_head *free_stack = &mm->fl_entry;
++      struct drm_mm_node *entry;
++      struct drm_mm_node *best;
++      unsigned long best_size;
++      unsigned wasted;
++
++      best = NULL;
++      best_size = ~0UL;
++
++      list_for_each(list, free_stack) {
++              entry = list_entry(list, struct drm_mm_node, fl_entry);
++              wasted = 0;
++
++              if (entry->size < size)
++                      continue;
++
++              if (alignment) {
++                      register unsigned tmp = entry->start % alignment;
++                      if (tmp)
++                              wasted += alignment - tmp;
++              }
++
++
++              if (entry->size >= size + wasted) {
++                      if (!best_match)
++                              return entry;
++                      if (size < best_size) {
++                              best = entry;
++                              best_size = entry->size;
++                      }
++              }
++      }
++
++      return best;
++}
++EXPORT_SYMBOL(drm_mm_search_free);
++
++int drm_mm_clean(struct drm_mm * mm)
++{
++      struct list_head *head = &mm->ml_entry;
++
++      return (head->next->next == head);
++}
++
++int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
++{
++      INIT_LIST_HEAD(&mm->ml_entry);
++      INIT_LIST_HEAD(&mm->fl_entry);
++
++      return drm_mm_create_tail_node(mm, start, size);
++}
++
++EXPORT_SYMBOL(drm_mm_init);
++
++void drm_mm_takedown(struct drm_mm * mm)
++{
++      struct list_head *bnode = mm->fl_entry.next;
++      struct drm_mm_node *entry;
++
++      entry = list_entry(bnode, struct drm_mm_node, fl_entry);
++
++      if (entry->ml_entry.next != &mm->ml_entry ||
++          entry->fl_entry.next != &mm->fl_entry) {
++              DRM_ERROR("Memory manager not clean. Delaying takedown\n");
++              return;
++      }
++
++      list_del(&entry->fl_entry);
++      list_del(&entry->ml_entry);
++      drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
++}
++
++EXPORT_SYMBOL(drm_mm_takedown);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_object.c git-nokia/drivers/gpu/drm-tungsten/drm_object.c
+--- git/drivers/gpu/drm-tungsten/drm_object.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_object.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,294 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
++                      int shareable)
++{
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      /* The refcount will be bumped to 1 when we add the ref object below. */
++      atomic_set(&item->refcount, 0);
++      item->shareable = shareable;
++      item->owner = priv;
++
++      ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
++                                      (unsigned long)item, 31, 0, 0);
++      if (ret)
++              return ret;
++
++      ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
++      if (ret)
++              ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_add_user_object);
++
++struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_hash_item *hash;
++      int ret;
++      struct drm_user_object *item;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      ret = drm_ht_find_item(&dev->object_hash, key, &hash);
++      if (ret)
++              return NULL;
++
++      item = drm_hash_entry(hash, struct drm_user_object, hash);
++
++      if (priv != item->owner) {
++              struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
++              ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
++              if (ret) {
++                      DRM_ERROR("Object not registered for usage\n");
++                      return NULL;
++              }
++      }
++      return item;
++}
++EXPORT_SYMBOL(drm_lookup_user_object);
++
++static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
++{
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      if (atomic_dec_and_test(&item->refcount)) {
++              ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
++              BUG_ON(ret);
++              item->remove(priv, item);
++      }
++}
++
++static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
++                               enum drm_ref_type action)
++{
++      int ret = 0;
++
++      switch (action) {
++      case _DRM_REF_USE:
++              atomic_inc(&ro->refcount);
++              break;
++      default:
++              if (!ro->ref_struct_locked) {
++                      break;
++              } else {
++                      ro->ref_struct_locked(priv, ro, action);
++              }
++      }
++      return ret;
++}
++
++int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
++                     enum drm_ref_type ref_action)
++{
++      int ret = 0;
++      struct drm_ref_object *item;
++      struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
++
++      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
++      if (!referenced_object->shareable && priv != referenced_object->owner) {
++              DRM_ERROR("Not allowed to reference this object\n");
++              return -EINVAL;
++      }
++
++      /*
++       * If this is not a usage reference, check that usage has been registered
++       * first. Otherwise strange things may happen on destruction.
++       */
++
++      if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
++              item =
++                  drm_lookup_ref_object(priv, referenced_object,
++                                        _DRM_REF_USE);
++              if (!item) {
++                      DRM_ERROR
++                          ("Object not registered for usage by this client\n");
++                      return -EINVAL;
++              }
++      }
++
++      if (NULL !=
++          (item =
++           drm_lookup_ref_object(priv, referenced_object, ref_action))) {
++              atomic_inc(&item->refcount);
++              return drm_object_ref_action(priv, referenced_object,
++                                           ref_action);
++      }
++
++      item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
++      if (item == NULL) {
++              DRM_ERROR("Could not allocate reference object\n");
++              return -ENOMEM;
++      }
++
++      atomic_set(&item->refcount, 1);
++      item->hash.key = (unsigned long)referenced_object;
++      ret = drm_ht_insert_item(ht, &item->hash);
++      item->unref_action = ref_action;
++
++      if (ret)
++              goto out;
++
++      list_add(&item->list, &priv->refd_objects);
++      ret = drm_object_ref_action(priv, referenced_object, ref_action);
++out:
++      return ret;
++}
++
++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
++                                      struct drm_user_object *referenced_object,
++                                      enum drm_ref_type ref_action)
++{
++      struct drm_hash_item *hash;
++      int ret;
++
++      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
++      ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
++                             (unsigned long)referenced_object, &hash);
++      if (ret)
++              return NULL;
++
++      return drm_hash_entry(hash, struct drm_ref_object, hash);
++}
++EXPORT_SYMBOL(drm_lookup_ref_object);
++
++static void drm_remove_other_references(struct drm_file *priv,
++                                      struct drm_user_object *ro)
++{
++      int i;
++      struct drm_open_hash *ht;
++      struct drm_hash_item *hash;
++      struct drm_ref_object *item;
++
++      for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
++              ht = &priv->refd_object_hash[i];
++              while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
++                      item = drm_hash_entry(hash, struct drm_ref_object, hash);
++                      drm_remove_ref_object(priv, item);
++              }
++      }
++}
++
++void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
++{
++      int ret;
++      struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
++      struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
++      enum drm_ref_type unref_action;
++
++      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
++      unref_action = item->unref_action;
++      if (atomic_dec_and_test(&item->refcount)) {
++              ret = drm_ht_remove_item(ht, &item->hash);
++              BUG_ON(ret);
++              list_del_init(&item->list);
++              if (unref_action == _DRM_REF_USE)
++                      drm_remove_other_references(priv, user_object);
++              drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
++      }
++
++      switch (unref_action) {
++      case _DRM_REF_USE:
++              drm_deref_user_object(priv, user_object);
++              break;
++      default:
++              BUG_ON(!user_object->unref);
++              user_object->unref(priv, user_object, unref_action);
++              break;
++      }
++
++}
++EXPORT_SYMBOL(drm_remove_ref_object);
++
++int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
++                      enum drm_object_type type, struct drm_user_object **object)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_user_object *uo;
++      struct drm_hash_item *hash;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
++      if (ret) {
++              DRM_ERROR("Could not find user object to reference.\n");
++              goto out_err;
++      }
++      uo = drm_hash_entry(hash, struct drm_user_object, hash);
++      if (uo->type != type) {
++              ret = -EINVAL;
++              goto out_err;
++      }
++      ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
++      if (ret)
++              goto out_err;
++      mutex_unlock(&dev->struct_mutex);
++      *object = uo;
++      return 0;
++out_err:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
++                        enum drm_object_type type)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_user_object *uo;
++      struct drm_ref_object *ro;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      uo = drm_lookup_user_object(priv, user_token);
++      if (!uo || (uo->type != type)) {
++              ret = -EINVAL;
++              goto out_err;
++      }
++      ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
++      if (!ro) {
++              ret = -EINVAL;
++              goto out_err;
++      }
++      drm_remove_ref_object(priv, ro);
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++out_err:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_objects.h git-nokia/drivers/gpu/drm-tungsten/drm_objects.h
+--- git/drivers/gpu/drm-tungsten/drm_objects.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_objects.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,832 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _DRM_OBJECTS_H
++#define _DRM_OBJECTS_H
++
++struct drm_device;
++struct drm_bo_mem_reg;
++
++/***************************************************
++ * User space objects. (drm_object.c)
++ */
++
++#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
++
++enum drm_object_type {
++      drm_fence_type,
++      drm_buffer_type,
++      drm_lock_type,
++          /*
++           * Add other user space object types here.
++           */
++      drm_driver_type0 = 256,
++      drm_driver_type1,
++      drm_driver_type2,
++      drm_driver_type3,
++      drm_driver_type4
++};
++
++/*
++ * A user object is a structure that helps the drm give out user handles
++ * to kernel internal objects and to keep track of these objects so that
++ * they can be destroyed, for example when the user space process exits.
++ * Designed to be accessible using a user space 32-bit handle.
++ */
++
++struct drm_user_object {
++      struct drm_hash_item hash;
++      struct list_head list;
++      enum drm_object_type type;
++      atomic_t refcount;
++      int shareable;
++      struct drm_file *owner;
++      void (*ref_struct_locked) (struct drm_file *priv,
++                                 struct drm_user_object *obj,
++                                 enum drm_ref_type ref_action);
++      void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
++                     enum drm_ref_type unref_action);
++      void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
++};
++
++/*
++ * A ref object is a structure which is used to
++ * keep track of references to user objects and to keep track of these
++ * references so that they can be destroyed for example when the user space
++ * process exits. Designed to be accessible using a pointer to the _user_ object.
++ */
++
++struct drm_ref_object {
++      struct drm_hash_item hash;
++      struct list_head list;
++      atomic_t refcount;
++      enum drm_ref_type unref_action;
++};
++
++/**
++ * Must be called with the struct_mutex held.
++ */
++
++extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
++                             int shareable);
++/**
++ * Must be called with the struct_mutex held.
++ */
++
++extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
++                                               uint32_t key);
++
++/*
++ * Must be called with the struct_mutex held. May temporarily release it.
++ */
++
++extern int drm_add_ref_object(struct drm_file *priv,
++                            struct drm_user_object *referenced_object,
++                            enum drm_ref_type ref_action);
++
++/*
++ * Must be called with the struct_mutex held.
++ */
++
++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
++                                      struct drm_user_object *referenced_object,
++                                      enum drm_ref_type ref_action);
++/*
++ * Must be called with the struct_mutex held.
++ * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
++ * release the struct_mutex before calling drm_remove_ref_object.
++ * This function may temporarily release the struct_mutex.
++ */
++
++extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
++extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
++                             enum drm_object_type type,
++                             struct drm_user_object **object);
++extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
++                               enum drm_object_type type);
++
++/***************************************************
++ * Fence objects. (drm_fence.c)
++ */
++
++struct drm_fence_object {
++      struct drm_user_object base;
++      struct drm_device *dev;
++      atomic_t usage;
++
++      /*
++       * The below three fields are protected by the fence manager spinlock.
++       */
++
++      struct list_head ring;
++      int fence_class;
++      uint32_t native_types;
++      uint32_t type;
++      uint32_t signaled_types;
++      uint32_t sequence;
++      uint32_t waiting_types;
++      uint32_t error;
++};
++
++#define _DRM_FENCE_CLASSES 8
++#define _DRM_FENCE_TYPE_EXE 0x00
++
++struct drm_fence_class_manager {
++      struct list_head ring;
++      uint32_t pending_flush;
++      uint32_t waiting_types;
++      wait_queue_head_t fence_queue;
++      uint32_t highest_waiting_sequence;
++        uint32_t latest_queued_sequence;
++};
++
++struct drm_fence_manager {
++      int initialized;
++      rwlock_t lock;
++      struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
++      uint32_t num_classes;
++      atomic_t count;
++};
++
++struct drm_fence_driver {
++      unsigned long *waiting_jiffies;
++      uint32_t num_classes;
++      uint32_t wrap_diff;
++      uint32_t flush_diff;
++      uint32_t sequence_mask;
++
++      /*
++       * Driver implemented functions:
++       * has_irq() : 1 if the hardware can update the indicated type_flags using an
++       * irq handler. 0 if polling is required.
++       *
++       * emit() : Emit a sequence number to the command stream.
++       * Return the sequence number.
++       *
++       * flush() : Make sure the flags indicated in fc->pending_flush will eventually
++       * signal for fc->highest_received_sequence and all preceding sequences.
++       * Acknowledge by clearing the flags fc->pending_flush.
++       *
++       * poll() : Call drm_fence_handler with any new information.
++       *
++       * needed_flush() : Given the current state of the fence->type flags and previously
++       * executed or queued flushes, return the type_flags that need flushing.
++       *
++       * wait(): Wait for the "mask" flags to signal on a given fence, performing
++       * whatever's necessary to make this happen.
++       */
++
++      int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
++                      uint32_t flags);
++      int (*emit) (struct drm_device *dev, uint32_t fence_class,
++                   uint32_t flags, uint32_t *breadcrumb,
++                   uint32_t *native_type);
++      void (*flush) (struct drm_device *dev, uint32_t fence_class);
++      void (*poll) (struct drm_device *dev, uint32_t fence_class,
++              uint32_t types);
++      uint32_t (*needed_flush) (struct drm_fence_object *fence);
++      int (*wait) (struct drm_fence_object *fence, int lazy,
++                   int interruptible, uint32_t mask);
++};
++
++extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
++                                int interruptible, uint32_t mask,
++                                unsigned long end_jiffies);
++extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
++                            uint32_t sequence, uint32_t type,
++                            uint32_t error);
++extern void drm_fence_manager_init(struct drm_device *dev);
++extern void drm_fence_manager_takedown(struct drm_device *dev);
++extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
++                              uint32_t sequence);
++extern int drm_fence_object_flush(struct drm_fence_object *fence,
++                                uint32_t type);
++extern int drm_fence_object_signaled(struct drm_fence_object *fence,
++                                   uint32_t type);
++extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
++extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
++extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
++extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
++                                       struct drm_fence_object *src);
++extern int drm_fence_object_wait(struct drm_fence_object *fence,
++                               int lazy, int ignore_signals, uint32_t mask);
++extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
++                                 uint32_t fence_flags, uint32_t fence_class,
++                                 struct drm_fence_object **c_fence);
++extern int drm_fence_object_emit(struct drm_fence_object *fence,
++                               uint32_t fence_flags, uint32_t class,
++                               uint32_t type);
++extern void drm_fence_fill_arg(struct drm_fence_object *fence,
++                             struct drm_fence_arg *arg);
++
++extern int drm_fence_add_user_object(struct drm_file *priv,
++                                   struct drm_fence_object *fence,
++                                   int shareable);
++
++extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
++                                struct drm_file *file_priv);
++extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv);
++extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
++                                   struct drm_file *file_priv);
++extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
++                                     struct drm_file *file_priv);
++extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
++                                  struct drm_file *file_priv);
++extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv);
++/**************************************************
++ *TTMs
++ */
++
++/*
++ * The ttm backend GTT interface. (In our case AGP).
++ * Any similar type of device (PCIE?)
++ * needs only to implement these functions to be usable with the TTM interface.
++ * The AGP backend implementation lives in drm_agpsupport.c
++ * basically maps these calls to available functions in agpgart.
++ * Each drm device driver gets an
++ * additional function pointer that creates these types,
++ * so that the device can choose the correct aperture.
++ * (Multiple AGP apertures, etc.)
++ * Most device drivers will let this point to the standard AGP implementation.
++ */
++
++#define DRM_BE_FLAG_NEEDS_FREE     0x00000001
++#define DRM_BE_FLAG_BOUND_CACHED   0x00000002
++
++struct drm_ttm_backend;
++struct drm_ttm_backend_func {
++      int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
++      int (*populate) (struct drm_ttm_backend *backend,
++                       unsigned long num_pages, struct page **pages,
++                       struct page *dummy_read_page);
++      void (*clear) (struct drm_ttm_backend *backend);
++      int (*bind) (struct drm_ttm_backend *backend,
++                   struct drm_bo_mem_reg *bo_mem);
++      int (*unbind) (struct drm_ttm_backend *backend);
++      void (*destroy) (struct drm_ttm_backend *backend);
++};
++
++/**
++ * This structure associates a set of flags and methods with a drm_ttm
++ * object, and will also be subclassed by the particular backend.
++ *
++ * \sa #drm_agp_ttm_backend
++ */
++struct drm_ttm_backend {
++      struct drm_device *dev;
++      uint32_t flags;
++      struct drm_ttm_backend_func *func;
++};
++
++struct drm_ttm {
++      struct page *dummy_read_page;
++      struct page **pages;
++      long first_himem_page;
++      long last_lomem_page;
++      uint32_t page_flags;
++      unsigned long num_pages;
++      atomic_t vma_count;
++      struct drm_device *dev;
++      int destroy;
++      uint32_t mapping_offset;
++      struct drm_ttm_backend *be;
++      unsigned long highest_lomem_entry;
++      unsigned long lowest_himem_entry;
++      enum {
++              ttm_bound,
++              ttm_evicted,
++              ttm_unbound,
++              ttm_unpopulated,
++      } state;
++
++};
++
++extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
++                                    uint32_t page_flags,
++                                    struct page *dummy_read_page);
++extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
++extern void drm_ttm_unbind(struct drm_ttm *ttm);
++extern void drm_ttm_evict(struct drm_ttm *ttm);
++extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
++extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
++extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
++extern int drm_ttm_populate(struct drm_ttm *ttm);
++extern int drm_ttm_set_user(struct drm_ttm *ttm,
++                          struct task_struct *tsk,
++                          unsigned long start,
++                          unsigned long num_pages);
++
++/*
++ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
++ * this which calls this function iff there are no vmas referencing it anymore.
++ * Otherwise it is called when the last vma exits.
++ */
++
++extern int drm_ttm_destroy(struct drm_ttm *ttm);
++
++#define DRM_FLAG_MASKED(_old, _new, _mask) {\
++(_old) ^= (((_old) ^ (_new)) & (_mask)); \
++}
++
++#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
++#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
++
++/*
++ * Page flags.
++ */
++
++/*
++ * This ttm should not be cached by the CPU
++ */
++#define DRM_TTM_PAGE_UNCACHED   (1 << 0)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_USED       (1 << 1)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_BOUND      (1 << 2)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_PRESENT    (1 << 3)
++/*
++ * The array of page pointers was allocated with vmalloc
++ * instead of drm_calloc.
++ */
++#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4)
++/*
++ * This ttm is mapped from user space
++ */
++#define DRM_TTM_PAGE_USER       (1 << 5)
++/*
++ * This ttm will be written to by the GPU
++ */
++#define DRM_TTM_PAGE_WRITE    (1 << 6)
++/*
++ * This ttm was mapped to the GPU, and so the contents may have
++ * been modified
++ */
++#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was.
++ */
++#define DRM_TTM_PAGE_USER_DMA   (1 << 8)
++
++/***************************************************
++ * Buffer objects. (drm_bo.c, drm_bo_move.c)
++ */
++
++struct drm_bo_mem_reg {
++      struct drm_mm_node *mm_node;
++      unsigned long size;
++      unsigned long num_pages;
++      uint32_t page_alignment;
++      uint32_t mem_type;
++      /*
++       * Current buffer status flags, indicating
++       * where the buffer is located and which
++       * access modes are in effect
++       */
++      uint64_t flags;
++      /**
++       * These are the flags proposed for
++       * a validate operation. If the
++       * validate succeeds, they'll get moved
++       * into the flags field
++       */
++      uint64_t proposed_flags;
++      
++      uint32_t desired_tile_stride;
++      uint32_t hw_tile_stride;
++};
++
++enum drm_bo_type {
++      /*
++       * drm_bo_type_device are 'normal' drm allocations,
++       * pages are allocated from within the kernel automatically
++       * and the objects can be mmap'd from the drm device. Each
++       * drm_bo_type_device object has a unique name which can be
++       * used by other processes to share access to the underlying
++       * buffer.
++       */
++      drm_bo_type_device,
++      /*
++       * drm_bo_type_user are buffers of pages that already exist
++       * in the process address space. They are more limited than
++       * drm_bo_type_device buffers in that they must always
++       * remain cached (as we assume the user pages are mapped cached),
++       * and they are not sharable to other processes through DRM
++       * (although, regular shared memory should still work fine).
++       */
++      drm_bo_type_user,
++      /*
++       * drm_bo_type_kernel are buffers that exist solely for use
++       * within the kernel. The pages cannot be mapped into the
++       * process. One obvious use would be for the ring
++       * buffer where user access would not (ideally) be required.
++       */
++      drm_bo_type_kernel,
++};
++
++struct drm_buffer_object {
++      struct drm_device *dev;
++      struct drm_user_object base;
++
++      /*
++       * If there is a possibility that the usage variable is zero,
++       * then dev->struct_mutex should be locked before incrementing it.
++       */
++
++      atomic_t usage;
++      unsigned long buffer_start;
++      enum drm_bo_type type;
++      unsigned long offset;
++      atomic_t mapped;
++      struct drm_bo_mem_reg mem;
++
++      struct list_head lru;
++      struct list_head ddestroy;
++
++      uint32_t fence_type;
++      uint32_t fence_class;
++      uint32_t new_fence_type;
++      uint32_t new_fence_class;
++      struct drm_fence_object *fence;
++      uint32_t priv_flags;
++      wait_queue_head_t event_queue;
++      struct mutex mutex;
++      unsigned long num_pages;
++
++      /* For pinned buffers */
++      struct drm_mm_node *pinned_node;
++      uint32_t pinned_mem_type;
++      struct list_head pinned_lru;
++
++      /* For vm */
++      struct drm_ttm *ttm;
++      struct drm_map_list map_list;
++      uint32_t memory_type;
++      unsigned long bus_offset;
++      uint32_t vm_flags;
++      void *iomap;
++
++#ifdef DRM_ODD_MM_COMPAT
++      /* dev->struct_mutex only protected. */
++      struct list_head vma_list;
++      struct list_head p_mm_list;
++#endif
++
++};
++
++#define _DRM_BO_FLAG_UNFENCED 0x00000001
++#define _DRM_BO_FLAG_EVICTED  0x00000002
++
++/*
++ * This flag indicates that a flag called with bo->mutex held has
++ * temporarily released the buffer object mutex, (usually to wait for something).
++ * and thus any post-lock validation needs to be rerun.
++ */
++
++#define _DRM_BO_FLAG_UNLOCKED 0x00000004
++
++struct drm_mem_type_manager {
++      int has_type;
++      int use_type;
++      int kern_init_type;
++      struct drm_mm manager;
++      struct list_head lru;
++      struct list_head pinned;
++      uint32_t flags;
++      uint32_t drm_bus_maptype;
++      unsigned long gpu_offset;
++      unsigned long io_offset;
++      unsigned long io_size;
++      void *io_addr;
++      uint64_t size; /* size of managed area for reporting to userspace */
++};
++
++struct drm_bo_lock {
++      struct drm_user_object base;
++      wait_queue_head_t queue;
++      atomic_t write_lock_pending;
++      atomic_t readers;
++};
++
++#define _DRM_FLAG_MEMTYPE_FIXED     0x00000001        /* Fixed (on-card) PCI memory */
++#define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002        /* Memory mappable */
++#define _DRM_FLAG_MEMTYPE_CACHED    0x00000004        /* Cached binding */
++#define _DRM_FLAG_NEEDS_IOREMAP     0x00000008        /* Fixed memory needs ioremap
++                                                 before kernel access. */
++#define _DRM_FLAG_MEMTYPE_CMA       0x00000010        /* Can't map aperture */
++#define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020        /* Select caching */
++
++struct drm_buffer_manager {
++      struct drm_bo_lock bm_lock;
++      struct mutex evict_mutex;
++      int nice_mode;
++      int initialized;
++      struct drm_file *last_to_validate;
++      struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
++      struct list_head unfenced;
++      struct list_head ddestroy;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      struct work_struct wq;
++#else
++      struct delayed_work wq;
++#endif
++      uint32_t fence_type;
++      unsigned long cur_pages;
++      atomic_t count;
++      struct page *dummy_read_page;
++};
++
++struct drm_bo_driver {
++      const uint32_t *mem_type_prio;
++      const uint32_t *mem_busy_prio;
++      uint32_t num_mem_type_prio;
++      uint32_t num_mem_busy_prio;
++      struct drm_ttm_backend *(*create_ttm_backend_entry)
++       (struct drm_device *dev);
++      int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
++                         uint32_t *type);
++      int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
++      int (*init_mem_type) (struct drm_device *dev, uint32_t type,
++                            struct drm_mem_type_manager *man);
++      /*
++       * evict_flags:
++       *
++       * @bo: the buffer object to be evicted
++       *
++       * Return the bo flags for a buffer which is not mapped to the hardware.
++       * These will be placed in proposed_flags so that when the move is
++       * finished, they'll end up in bo->mem.flags
++       */
++      uint64_t(*evict_flags) (struct drm_buffer_object *bo);
++      /*
++       * move:
++       *
++       * @bo: the buffer to move
++       *
++       * @evict: whether this motion is evicting the buffer from
++       * the graphics address space
++       *
++       * @no_wait: whether this should give up and return -EBUSY
++       * if this move would require sleeping
++       *
++       * @new_mem: the new memory region receiving the buffer
++       *
++       * Move a buffer between two memory regions.
++       */
++      int (*move) (struct drm_buffer_object *bo,
++                   int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
++      /*
++       * ttm_cache_flush
++       */
++      void (*ttm_cache_flush)(struct drm_ttm *ttm);
++
++      /*
++       * command_stream_barrier
++       *
++       * @dev: The drm device.
++       *
++       * @bo: The buffer object to validate.
++       *
++       * @new_fence_class: The new fence class for the buffer object.
++       *
++       * @new_fence_type: The new fence type for the buffer object.
++       *
++       * @no_wait: whether this should give up and return -EBUSY
++       * if this operation would require sleeping
++       *
++       * Insert a command stream barrier that makes sure that the
++       * buffer is idle once the commands associated with the
++       * current validation are starting to execute. If an error
++       * condition is returned, or the function pointer is NULL,
++       * the drm core will force buffer idle
++       * during validation.
++       */
++
++      int (*command_stream_barrier) (struct drm_buffer_object *bo,
++                                     uint32_t new_fence_class,
++                                     uint32_t new_fence_type,
++                                     int no_wait);                                   
++};
++
++/*
++ * buffer objects (drm_bo.c)
++ */
++
++extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_driver_finish(struct drm_device *dev);
++extern int drm_bo_driver_init(struct drm_device *dev);
++extern int drm_bo_pci_offset(struct drm_device *dev,
++                           struct drm_bo_mem_reg *mem,
++                           unsigned long *bus_base,
++                           unsigned long *bus_offset,
++                           unsigned long *bus_size);
++extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
++
++extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
++extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
++extern void drm_putback_buffer_objects(struct drm_device *dev);
++extern int drm_fence_buffer_objects(struct drm_device *dev,
++                                  struct list_head *list,
++                                  uint32_t fence_flags,
++                                  struct drm_fence_object *fence,
++                                  struct drm_fence_object **used_fence);
++extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
++extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
++                                  enum drm_bo_type type, uint64_t flags,
++                                  uint32_t hint, uint32_t page_alignment,
++                                  unsigned long buffer_start,
++                                  struct drm_buffer_object **bo);
++extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
++                     int no_wait, int check_unfenced);
++extern int drm_bo_mem_space(struct drm_buffer_object *bo,
++                          struct drm_bo_mem_reg *mem, int no_wait);
++extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
++                            uint64_t new_mem_flags,
++                            int no_wait, int move_unfenced);
++extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean);
++extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
++                        unsigned long p_offset, unsigned long p_size,
++                        int kern_init);
++extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
++                                uint64_t flags, uint64_t mask, uint32_t hint,
++                                uint32_t fence_class,
++                                struct drm_bo_info_rep *rep,
++                                struct drm_buffer_object **bo_rep);
++extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
++                                                        uint32_t handle,
++                                                        int check_owner);
++extern int drm_bo_do_validate(struct drm_buffer_object *bo,
++                            uint64_t flags, uint64_t mask, uint32_t hint,
++                            uint32_t fence_class,
++                            struct drm_bo_info_rep *rep);
++extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
++/*
++ * Buffer object memory move- and map helpers.
++ * drm_bo_move.c
++ */
++
++extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
++                         int evict, int no_wait,
++                         struct drm_bo_mem_reg *new_mem);
++extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
++                            int evict,
++                            int no_wait, struct drm_bo_mem_reg *new_mem);
++extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
++                                   int evict, int no_wait,
++                                   uint32_t fence_class, uint32_t fence_type,
++                                   uint32_t fence_flags,
++                                   struct drm_bo_mem_reg *new_mem);
++extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
++extern unsigned long drm_bo_offset_end(unsigned long offset,
++                                     unsigned long end);
++
++struct drm_bo_kmap_obj {
++      void *virtual;
++      struct page *page;
++      enum {
++              bo_map_iomap,
++              bo_map_vmap,
++              bo_map_kmap,
++              bo_map_premapped,
++      } bo_kmap_type;
++};
++
++static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
++{
++      *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
++                   map->bo_kmap_type == bo_map_premapped);
++      return map->virtual;
++}
++extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
++extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
++                     unsigned long num_pages, struct drm_bo_kmap_obj *map);
++extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
++                         unsigned long dst_offset,
++                         unsigned long *pfn,
++                         pgprot_t *prot);
++extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
++                              struct drm_bo_info_rep *rep);
++
++
++/*
++ * drm_regman.c
++ */
++
++struct drm_reg {
++      struct list_head head;
++      struct drm_fence_object *fence;
++      uint32_t fence_type;
++      uint32_t new_fence_type;
++};
++
++struct drm_reg_manager {
++      struct list_head free;
++      struct list_head lru;
++      struct list_head unfenced;
++
++      int (*reg_reusable)(const struct drm_reg *reg, const void *data);
++      void (*reg_destroy)(struct drm_reg *reg);
++};
++
++extern int drm_regs_alloc(struct drm_reg_manager *manager,
++                        const void *data,
++                        uint32_t fence_class,
++                        uint32_t fence_type,
++                        int interruptible,
++                        int no_wait,
++                        struct drm_reg **reg);
++
++extern void drm_regs_fence(struct drm_reg_manager *regs,
++                         struct drm_fence_object *fence);
++
++extern void drm_regs_free(struct drm_reg_manager *manager);
++extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
++extern void drm_regs_init(struct drm_reg_manager *manager,
++                        int (*reg_reusable)(const struct drm_reg *,
++                                            const void *),
++                        void (*reg_destroy)(struct drm_reg *));
++
++/*
++ * drm_bo_lock.c
++ * Simple replacement for the hardware lock on buffer manager init and clean.
++ */
++
++
++extern void drm_bo_init_lock(struct drm_bo_lock *lock);
++extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
++extern int drm_bo_read_lock(struct drm_bo_lock *lock,
++                          int interruptible);
++extern int drm_bo_write_lock(struct drm_bo_lock *lock,
++                           int interruptible,
++                           struct drm_file *file_priv);
++
++extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
++                             struct drm_file *file_priv);
++
++#ifdef CONFIG_DEBUG_MUTEXES
++#define DRM_ASSERT_LOCKED(_mutex)                                     \
++      BUG_ON(!mutex_is_locked(_mutex) ||                              \
++             ((_mutex)->owner != current_thread_info()))
++#else
++#define DRM_ASSERT_LOCKED(_mutex)
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_os_linux.h git-nokia/drivers/gpu/drm-tungsten/drm_os_linux.h
+--- git/drivers/gpu/drm-tungsten/drm_os_linux.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_os_linux.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,145 @@
++/**
++ * \file drm_os_linux.h
++ * OS abstraction macros.
++ */
++
++#include <linux/interrupt.h>  /* For task queue support */
++#include <linux/delay.h>
++
++/** Current process ID */
++#define DRM_CURRENTPID                        current->pid
++#define DRM_SUSER(p)                  capable(CAP_SYS_ADMIN)
++#define DRM_UDELAY(d)                 udelay(d)
++#if LINUX_VERSION_CODE <= 0x020608    /* KERNEL_VERSION(2,6,8) */
++#ifndef __iomem
++#define __iomem
++#endif
++/** Read a byte from a MMIO region */
++#define DRM_READ8(map, offset)                readb(((void __iomem *)(map)->handle) + (offset))
++/** Read a word from a MMIO region */
++#define DRM_READ16(map, offset)               readw(((void __iomem *)(map)->handle) + (offset))
++/** Read a dword from a MMIO region */
++#define DRM_READ32(map, offset)               readl(((void __iomem *)(map)->handle) + (offset))
++/** Write a byte into a MMIO region */
++#define DRM_WRITE8(map, offset, val)  writeb(val, ((void __iomem *)(map)->handle) + (offset))
++/** Write a word into a MMIO region */
++#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
++/** Write a dword into a MMIO region */
++#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
++#else
++/** Read a byte from a MMIO region */
++#define DRM_READ8(map, offset)                readb((map)->handle + (offset))
++/** Read a word from a MMIO region */
++#define DRM_READ16(map, offset)               readw((map)->handle + (offset))
++/** Read a dword from a MMIO region */
++#define DRM_READ32(map, offset)               readl((map)->handle + (offset))
++/** Write a byte into a MMIO region */
++#define DRM_WRITE8(map, offset, val)  writeb(val, (map)->handle + (offset))
++/** Write a word into a MMIO region */
++#define DRM_WRITE16(map, offset, val) writew(val, (map)->handle + (offset))
++/** Write a dword into a MMIO region */
++#define DRM_WRITE32(map, offset, val) writel(val, (map)->handle + (offset))
++#endif
++/** Read memory barrier */
++#define DRM_READMEMORYBARRIER()               rmb()
++/** Write memory barrier */
++#define DRM_WRITEMEMORYBARRIER()      wmb()
++/** Read/write memory barrier */
++#define DRM_MEMORYBARRIER()           mb()
++
++/** IRQ handler arguments and return type and values */
++#define DRM_IRQ_ARGS          int irq, void *arg
++/** backwards compatibility with old irq return values */
++#ifndef IRQ_HANDLED
++typedef void irqreturn_t;
++#define IRQ_HANDLED           /* nothing */
++#define IRQ_NONE              /* nothing */
++#endif
++
++/** AGP types */
++#if __OS_HAS_AGP
++#define DRM_AGP_MEM           struct agp_memory
++#define DRM_AGP_KERN          struct agp_kern_info
++#else
++/* define some dummy types for non AGP supporting kernels */
++struct no_agp_kern {
++      unsigned long aper_base;
++      unsigned long aper_size;
++};
++#define DRM_AGP_MEM           int
++#define DRM_AGP_KERN          struct no_agp_kern
++#endif
++
++#if !(__OS_HAS_MTRR)
++static __inline__ int mtrr_add(unsigned long base, unsigned long size,
++                             unsigned int type, char increment)
++{
++      return -ENODEV;
++}
++
++static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++      return -ENODEV;
++}
++
++#define MTRR_TYPE_WRCOMB     1
++#endif
++
++/** Other copying of data to kernel space */
++#define DRM_COPY_FROM_USER(arg1, arg2, arg3)          \
++      copy_from_user(arg1, arg2, arg3)
++/** Other copying of data from kernel space */
++#define DRM_COPY_TO_USER(arg1, arg2, arg3)            \
++      copy_to_user(arg1, arg2, arg3)
++/* Macros for copyfrom user, but checking readability only once */
++#define DRM_VERIFYAREA_READ( uaddr, size )            \
++      (access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
++#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)        \
++      __copy_from_user(arg1, arg2, arg3)
++#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)  \
++      __copy_to_user(arg1, arg2, arg3)
++#define DRM_GET_USER_UNCHECKED(val, uaddr)            \
++      __get_user(val, uaddr)
++
++#define DRM_HZ HZ
++
++#define DRM_WAIT_ON( ret, queue, timeout, condition )         \
++do {                                                          \
++      DECLARE_WAITQUEUE(entry, current);                      \
++      unsigned long end = jiffies + (timeout);                \
++      add_wait_queue(&(queue), &entry);                       \
++                                                              \
++      for (;;) {                                              \
++              __set_current_state(TASK_INTERRUPTIBLE);        \
++              if (condition)                                  \
++                      break;                                  \
++              if (time_after_eq(jiffies, end)) {              \
++                      ret = -EBUSY;                           \
++                      break;                                  \
++              }                                               \
++              schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);    \
++              if (signal_pending(current)) {                  \
++                      ret = -EINTR;                           \
++                      break;                                  \
++              }                                               \
++      }                                                       \
++      __set_current_state(TASK_RUNNING);                      \
++      remove_wait_queue(&(queue), &entry);                    \
++} while (0)
++
++#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
++#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
++
++/** Type for the OS's non-sleepable mutex lock */
++#define DRM_SPINTYPE          spinlock_t
++/**
++ * Initialize the lock for use.  name is an optional string describing the
++ * lock
++ */
++#define DRM_SPININIT(l,name)  spin_lock_init(l)
++#define DRM_SPINUNINIT(l)
++#define DRM_SPINLOCK(l)               spin_lock(l)
++#define DRM_SPINUNLOCK(l)     spin_unlock(l)
++#define DRM_SPINLOCK_IRQSAVE(l, _flags)       spin_lock_irqsave(l, _flags);
++#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
++#define DRM_SPINLOCK_ASSERT(l)                do {} while (0)
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_pci.c git-nokia/drivers/gpu/drm-tungsten/drm_pci.c
+--- git/drivers/gpu/drm-tungsten/drm_pci.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_pci.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,177 @@
++/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
++/**
++ * \file drm_pci.c
++ * \brief Functions and ioctls to manage PCI memory
++ *
++ * \warning These interfaces aren't stable yet.
++ *
++ * \todo Implement the remaining ioctl's for the PCI pools.
++ * \todo The wrappers here are so thin that they would be better off inlined..
++ *
++ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
++ * \author Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++/*
++ * Copyright 2003 Jos�Fonseca.
++ * Copyright 2003 Leif Delgass.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
++ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include "drmP.h"
++
++/**********************************************************************/
++/** \name PCI memory */
++/*@{*/
++
++/**
++ * \brief Allocate a PCI consistent memory block, for DMA.
++ */
++drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
++                              dma_addr_t maxaddr)
++{
++      drm_dma_handle_t *dmah;
++      unsigned long addr;
++      size_t sz;
++#ifdef DRM_DEBUG_MEMORY
++      int area = DRM_MEM_DMA;
++
++      spin_lock(&drm_mem_lock);
++      if ((drm_ram_used >> PAGE_SHIFT)
++          > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_unlock(&drm_mem_lock);
++#endif
++
++      /* pci_alloc_consistent only guarantees alignment to the smallest
++       * PAGE_SIZE order which is greater than or equal to the requested size.
++       * Return NULL here for now to make sure nobody tries for larger alignment
++       */
++      if (align > size)
++              return NULL;
++
++      if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
++              DRM_ERROR("Setting pci dma mask failed\n");
++              return NULL;
++      }
++
++      dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
++      if (!dmah)
++              return NULL;
++
++      dmah->size = size;
++      dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
++
++#ifdef DRM_DEBUG_MEMORY
++      if (dmah->vaddr == NULL) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              kfree(dmah);
++              return NULL;
++      }
++
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += size;
++      drm_ram_used += size;
++      spin_unlock(&drm_mem_lock);
++#else
++      if (dmah->vaddr == NULL) {
++              kfree(dmah);
++              return NULL;
++      }
++#endif
++
++      memset(dmah->vaddr, 0, size);
++
++      /* XXX - Is virt_to_page() legal for consistent mem? */
++      /* Reserve */
++      for (addr = (unsigned long)dmah->vaddr, sz = size;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return dmah;
++}
++EXPORT_SYMBOL(drm_pci_alloc);
++
++/**
++ * \brief Free a PCI consistent memory block without freeing its descriptor.
++ *
++ * This function is for internal use in the Linux-specific DRM core code.
++ */
++void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
++{
++      unsigned long addr;
++      size_t sz;
++#ifdef DRM_DEBUG_MEMORY
++      int area = DRM_MEM_DMA;
++      int alloc_count;
++      int free_count;
++#endif
++
++      if (!dmah->vaddr) {
++#ifdef DRM_DEBUG_MEMORY
++              DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++#endif
++      } else {
++              /* XXX - Is virt_to_page() legal for consistent mem? */
++              /* Unreserve */
++              for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
++                   sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++                      ClearPageReserved(virt_to_page(addr));
++              }
++              dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
++                                dmah->busaddr);
++      }
++
++#ifdef DRM_DEBUG_MEMORY
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_freed += size;
++      drm_ram_used -= size;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++#endif
++
++}
++
++/**
++ * \brief Free a PCI consistent memory block
++ */
++void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
++{
++      __drm_pci_free(dev, dmah);
++      kfree(dmah);
++}
++EXPORT_SYMBOL(drm_pci_free);
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_pciids.h git-nokia/drivers/gpu/drm-tungsten/drm_pciids.h
+--- git/drivers/gpu/drm-tungsten/drm_pciids.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_pciids.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,614 @@
++/*
++   This file is auto-generated from the drm_pciids.txt in the DRM CVS
++   Please contact dri-devel@lists.sf.net to add new cards to this list
++*/
++#define radeon_PCI_IDS \
++      {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
++      {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
++      {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
++      {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
++      {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
++      {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
++      {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
++      {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
++      {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
++      {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
++      {0, 0, 0}
++
++#define r128_PCI_IDS \
++      {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define mga_PCI_IDS \
++      {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
++      {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
++      {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
++      {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
++      {0, 0, 0}
++
++#define mach64_PCI_IDS \
++      {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define sis_PCI_IDS \
++      {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
++      {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
++      {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
++      {0, 0, 0}
++
++#define pvr2d_PCI_IDS \
++      {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define tdfx_PCI_IDS \
++      {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define viadrv_PCI_IDS \
++      {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
++      {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
++      {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
++      {0x1106, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
++      {0, 0, 0}
++
++#define i810_PCI_IDS \
++      {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define i830_PCI_IDS \
++      {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define gamma_PCI_IDS \
++      {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define savage_PCI_IDS \
++      {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
++      {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
++      {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
++      {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
++      {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
++      {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
++      {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
++      {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
++      {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
++      {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
++      {0, 0, 0}
++
++#define ffb_PCI_IDS \
++      {0, 0, 0}
++
++#define i915_PCI_IDS \
++      {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x27A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x27AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x29A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2A02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2A12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x29C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x29B2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x29D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2A42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2E02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2E12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2E22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0, 0, 0}
++
++#define imagine_PCI_IDS \
++      {0x105d, 0x2309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128}, \
++      {0x105d, 0x2339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128_2}, \
++      {0x105d, 0x493d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_T2R}, \
++      {0x105d, 0x5348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_REV4}, \
++      {0, 0, 0}
++
++#define nv_PCI_IDS \
++      {0x10DE, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x002A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x002C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x0029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x002D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x00A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x0100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0171, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0172, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0173, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0174, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0175, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0176, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0178, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0179, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x017A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x017C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x017D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0189, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x01A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x01F0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0251, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0252, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0258, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0259, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x025B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0282, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x028C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0323, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0327, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0329, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0331, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0332, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0333, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x033F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0334, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0338, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0342, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0345, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x004E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10de, 0x00f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10de, 0x00f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0161, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0163, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0164, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0165, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0166, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0167, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0212, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0215, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0228, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0091, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0092, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0094, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0098, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0099, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x009C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x009D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x009E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0, 0, 0}
++
++#define xgi_PCI_IDS \
++      {0x18ca, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x18ca, 0x0047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
+diff -Nurd git/drivers/gpu/drm-tungsten/drmP.h git-nokia/drivers/gpu/drm-tungsten/drmP.h
+--- git/drivers/gpu/drm-tungsten/drmP.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drmP.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1507 @@
++/**
++ * \file drmP.h
++ * Private header for Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_P_H_
++#define _DRM_P_H_
++
++#ifdef __KERNEL__
++#ifdef __alpha__
++/* add include of current.h so that "current" is defined
++ * before static inline funcs in wait.h. Doing this so we
++ * can build the DRM (part of PI DRI). 4/21/2000 S + B */
++#include <asm/current.h>
++#endif                                /* __alpha__ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/init.h>
++#include <linux/file.h>
++#include <linux/pci.h>
++#include <linux/version.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>   /* For (un)lock_kernel */
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <linux/kref.h>
++#include <linux/pagemap.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++#include <linux/mutex.h>
++#endif
++#if defined(__alpha__) || defined(__powerpc__)
++#include <asm/pgtable.h>      /* For pte_wrprotect */
++#endif
++#include <asm/io.h>
++#include <asm/mman.h>
++#include <asm/uaccess.h>
++#ifdef CONFIG_MTRR
++#include <asm/mtrr.h>
++#endif
++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
++#include <asm/agp.h>
++#include <linux/types.h>
++#include <linux/agp_backend.h>
++#endif
++#include <linux/workqueue.h>
++#include <linux/poll.h>
++#include <asm/pgalloc.h>
++#include "drm.h"
++#include <linux/slab.h>
++#include <linux/idr.h>
++
++#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
++
++#include "drm_os_linux.h"
++#include "drm_hashtab.h"
++#include "drm_internal.h"
++
++struct drm_device;
++struct drm_file;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++typedef unsigned long uintptr_t;
++#endif
++
++/* If you want the memory alloc debug functionality, change define below */
++/* #define DEBUG_MEMORY */
++
++/***********************************************************************/
++/** \name DRM template customization defaults */
++/*@{*/
++
++/* driver capabilities and requirements mask */
++#define DRIVER_USE_AGP     0x1
++#define DRIVER_REQUIRE_AGP 0x2
++#define DRIVER_USE_MTRR    0x4
++#define DRIVER_PCI_DMA     0x8
++#define DRIVER_SG          0x10
++#define DRIVER_HAVE_DMA    0x20
++#define DRIVER_HAVE_IRQ    0x40
++#define DRIVER_IRQ_SHARED  0x80
++#define DRIVER_DMA_QUEUE   0x100
++#define DRIVER_FB_DMA      0x200
++#define DRIVER_GEM       0x400
++
++/*@}*/
++
++/***********************************************************************/
++/** \name Begin the DRM... */
++/*@{*/
++
++#define DRM_DEBUG_CODE 2        /**< Include debugging code if > 1, then
++                                   also include looping detection. */
++
++#define DRM_MAGIC_HASH_ORDER  4 /**< Size of key hash table. Must be power of 2. */
++#define DRM_KERNEL_CONTEXT    0        /**< Change drm_resctx if changed */
++#define DRM_RESERVED_CONTEXTS 1        /**< Change drm_resctx if changed */
++#define DRM_LOOPING_LIMIT     5000000
++#define DRM_TIME_SLICE              (HZ/20)  /**< Time slice for GLXContexts */
++#define DRM_LOCK_SLICE              1 /**< Time slice for lock, in jiffies */
++
++#define DRM_FLAG_DEBUG          0x01
++
++#define DRM_MEM_DMA      0
++#define DRM_MEM_SAREA    1
++#define DRM_MEM_DRIVER           2
++#define DRM_MEM_MAGIC    3
++#define DRM_MEM_IOCTLS           4
++#define DRM_MEM_MAPS     5
++#define DRM_MEM_VMAS     6
++#define DRM_MEM_BUFS     7
++#define DRM_MEM_SEGS     8
++#define DRM_MEM_PAGES    9
++#define DRM_MEM_FILES   10
++#define DRM_MEM_QUEUES          11
++#define DRM_MEM_CMDS    12
++#define DRM_MEM_MAPPINGS  13
++#define DRM_MEM_BUFLISTS  14
++#define DRM_MEM_AGPLISTS  15
++#define DRM_MEM_TOTALAGP  16
++#define DRM_MEM_BOUNDAGP  17
++#define DRM_MEM_CTXBITMAP 18
++#define DRM_MEM_STUB      19
++#define DRM_MEM_SGLISTS   20
++#define DRM_MEM_CTXLIST   21
++#define DRM_MEM_MM        22
++#define DRM_MEM_HASHTAB   23
++#define DRM_MEM_OBJECTS   24
++#define DRM_MEM_FENCE     25
++#define DRM_MEM_TTM       26
++#define DRM_MEM_BUFOBJ    27
++
++#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
++#define DRM_MAP_HASH_OFFSET 0x10000000
++#define DRM_MAP_HASH_ORDER 12
++#define DRM_OBJECT_HASH_ORDER 12
++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
++/*
++ * This should be small enough to allow the use of kmalloc for hash tables
++ * instead of vmalloc.
++ */
++
++#define DRM_FILE_HASH_ORDER 8
++#define DRM_MM_INIT_MAX_PAGES 256
++
++/*@}*/
++
++#include "drm_compat.h"
++
++/***********************************************************************/
++/** \name Macros to make printk easier */
++/*@{*/
++
++/**
++ * Error output.
++ *
++ * \param fmt printf() like format string.
++ * \param arg arguments
++ */
++#define DRM_ERROR(fmt, arg...) \
++      printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg)
++
++/**
++ * Memory error output.
++ *
++ * \param area memory area where the error occurred.
++ * \param fmt printf() like format string.
++ * \param arg arguments
++ */
++#define DRM_MEM_ERROR(area, fmt, arg...) \
++      printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \
++             drm_mem_stats[area].name , ##arg)
++#define DRM_INFO(fmt, arg...)  printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
++
++/**
++ * Debug output.
++ *
++ * \param fmt printf() like format string.
++ * \param arg arguments
++ */
++#if DRM_DEBUG_CODE
++#define DRM_DEBUG(fmt, arg...)                                                \
++      do {                                                            \
++              if ( drm_debug )                                        \
++                      printk(KERN_DEBUG                               \
++                             "[" DRM_NAME ":%s] " fmt ,               \
++                             __FUNCTION__ , ##arg);                   \
++      } while (0)
++#else
++#define DRM_DEBUG(fmt, arg...)                 do { } while (0)
++#endif
++
++#define DRM_PROC_LIMIT (PAGE_SIZE-80)
++
++#define DRM_PROC_PRINT(fmt, arg...)                                   \
++   len += sprintf(&buf[len], fmt , ##arg);                            \
++   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
++
++#define DRM_PROC_PRINT_RET(ret, fmt, arg...)                          \
++   len += sprintf(&buf[len], fmt , ##arg);                            \
++   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
++
++/*@}*/
++
++/***********************************************************************/
++/** \name Internal types and structures */
++/*@{*/
++
++#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
++#define DRM_MIN(a,b) min(a,b)
++#define DRM_MAX(a,b) max(a,b)
++
++#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
++#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
++#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
++
++#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
++/**
++ * Get the private SAREA mapping.
++ *
++ * \param _dev DRM device.
++ * \param _ctx context number.
++ * \param _map output mapping.
++ */
++#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {     \
++      (_map) = (_dev)->context_sareas[_ctx];          \
++} while(0)
++
++/**
++ * Test that the hardware lock is held by the caller, returning otherwise.
++ *
++ * \param dev DRM device.
++ * \param file_priv DRM file private pointer of the caller.
++ */
++#define LOCK_TEST_WITH_RETURN( dev, file_priv )                               \
++do {                                                                  \
++      if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||           \
++           dev->lock.file_priv != file_priv ) {                       \
++              DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
++                         __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
++                         dev->lock.file_priv, file_priv );            \
++              return -EINVAL;                                         \
++      }                                                               \
++} while (0)
++
++/**
++ * Copy and IOCTL return string to user space
++ */
++#define DRM_COPY( name, value )                                               \
++      len = strlen( value );                                          \
++      if ( len > name##_len ) len = name##_len;                       \
++      name##_len = strlen( value );                                   \
++      if ( len && name ) {                                            \
++              if ( copy_to_user( name, value, len ) )                 \
++                      return -EFAULT;                                 \
++      }
++
++/**
++ * Ioctl function type.
++ *
++ * \param dev DRM device structure
++ * \param data pointer to kernel-space stored data, copied in and out according
++ *           to ioctl description.
++ * \param file_priv DRM file private pointer.
++ */
++typedef int drm_ioctl_t(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++
++typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
++                             unsigned long arg);
++
++#define DRM_AUTH        0x1
++#define DRM_MASTER      0x2
++#define DRM_ROOT_ONLY   0x4
++
++struct drm_ioctl_desc {
++      unsigned int cmd;
++      drm_ioctl_t *func;
++      int flags;
++};
++/**
++ * Creates a driver or general drm_ioctl_desc array entry for the given
++ * ioctl, for use by drm_ioctl().
++ */
++#define DRM_IOCTL_DEF(ioctl, func, flags) \
++      [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
++
++struct drm_magic_entry {
++      struct list_head head;
++      struct drm_hash_item hash_item;
++      struct drm_file *priv;
++};
++
++struct drm_vma_entry {
++      struct list_head head;
++      struct vm_area_struct *vma;
++      pid_t pid;
++};
++
++/**
++ * DMA buffer.
++ */
++struct drm_buf {
++      int idx;                       /**< Index into master buflist */
++      int total;                     /**< Buffer size */
++      int order;                     /**< log-base-2(total) */
++      int used;                      /**< Amount of buffer in use (for DMA) */
++      unsigned long offset;          /**< Byte offset (used internally) */
++      void *address;                 /**< Address of buffer */
++      unsigned long bus_address;     /**< Bus address of buffer */
++      struct drm_buf *next;          /**< Kernel-only: used for free list */
++      __volatile__ int waiting;      /**< On kernel DMA queue */
++      __volatile__ int pending;      /**< On hardware DMA queue */
++      wait_queue_head_t dma_wait;    /**< Processes waiting */
++      struct drm_file *file_priv;    /**< Private of holding file descr */
++      int context;                   /**< Kernel queue for this buffer */
++      int while_locked;              /**< Dispatch this buffer while locked */
++      enum {
++              DRM_LIST_NONE = 0,
++              DRM_LIST_FREE = 1,
++              DRM_LIST_WAIT = 2,
++              DRM_LIST_PEND = 3,
++              DRM_LIST_PRIO = 4,
++              DRM_LIST_RECLAIM = 5
++      } list;                        /**< Which list we're on */
++
++      int dev_priv_size;              /**< Size of buffer private storage */
++      void *dev_private;              /**< Per-buffer private storage */
++};
++
++/** bufs is one longer than it has to be */
++struct drm_waitlist {
++      int count;                      /**< Number of possible buffers */
++      struct drm_buf **bufs;          /**< List of pointers to buffers */
++      struct drm_buf **rp;                    /**< Read pointer */
++      struct drm_buf **wp;                    /**< Write pointer */
++      struct drm_buf **end;           /**< End pointer */
++      spinlock_t read_lock;
++      spinlock_t write_lock;
++};
++
++struct drm_freelist {
++      int initialized;               /**< Freelist in use */
++      atomic_t count;                /**< Number of free buffers */
++      struct drm_buf *next;          /**< End pointer */
++
++      wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
++      int low_mark;                  /**< Low water mark */
++      int high_mark;                 /**< High water mark */
++      atomic_t wfh;                  /**< If waiting for high mark */
++      spinlock_t lock;
++};
++
++typedef struct drm_dma_handle {
++      dma_addr_t busaddr;
++      void *vaddr;
++      size_t size;
++} drm_dma_handle_t;
++
++/**
++ * Buffer entry.  There is one of this for each buffer size order.
++ */
++struct drm_buf_entry {
++      int buf_size;                   /**< size */
++      int buf_count;                  /**< number of buffers */
++      struct drm_buf *buflist;                /**< buffer list */
++      int seg_count;
++      int page_order;
++      struct drm_dma_handle **seglist;
++      struct drm_freelist freelist;
++};
++
++
++enum drm_ref_type {
++      _DRM_REF_USE = 0,
++      _DRM_REF_TYPE1,
++      _DRM_NO_REF_TYPES
++};
++
++
++/** File private data */
++struct drm_file {
++      int authenticated;
++      int master;
++      pid_t pid;
++      uid_t uid;
++      drm_magic_t magic;
++      unsigned long ioctl_count;
++      struct list_head lhead;
++      struct drm_minor *minor;
++      int remove_auth_on_close;
++      unsigned long lock_count;
++
++      /*
++       * The user object hash table is global and resides in the
++       * drm_device structure. We protect the lists and hash tables with the
++       * device struct_mutex. A bit coarse-grained but probably the best
++       * option.
++       */
++
++      struct list_head refd_objects;
++
++      /** Mapping of mm object handles to object pointers. */
++      struct idr object_idr;
++      /** Lock for synchronization of access to object_idr. */
++      spinlock_t table_lock;
++
++      struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
++      struct file *filp;
++      void *driver_priv;
++};
++
++/** Wait queue */
++struct drm_queue {
++      atomic_t use_count;             /**< Outstanding uses (+1) */
++      atomic_t finalization;          /**< Finalization in progress */
++      atomic_t block_count;           /**< Count of processes waiting */
++      atomic_t block_read;            /**< Queue blocked for reads */
++      wait_queue_head_t read_queue;   /**< Processes waiting on block_read */
++      atomic_t block_write;           /**< Queue blocked for writes */
++      wait_queue_head_t write_queue;  /**< Processes waiting on block_write */
++#if 1
++      atomic_t total_queued;          /**< Total queued statistic */
++      atomic_t total_flushed;         /**< Total flushes statistic */
++      atomic_t total_locks;           /**< Total locks statistics */
++#endif
++      enum drm_ctx_flags flags;       /**< Context preserving and 2D-only */
++      struct drm_waitlist waitlist;   /**< Pending buffers */
++      wait_queue_head_t flush_queue;  /**< Processes waiting until flush */
++};
++
++/**
++ * Lock data.
++ */
++struct drm_lock_data {
++      struct drm_hw_lock *hw_lock;            /**< Hardware lock */
++      /** Private of lock holder's file (NULL=kernel) */
++      struct drm_file *file_priv;
++      wait_queue_head_t lock_queue;   /**< Queue of blocked processes */
++      unsigned long lock_time;        /**< Time of last lock in jiffies */
++      spinlock_t spinlock;
++      uint32_t kernel_waiters;
++      uint32_t user_waiters;
++      int idle_has_lock;
++};
++
++/**
++ * DMA data.
++ */
++struct drm_device_dma {
++
++      struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];   /**< buffers, grouped by their size order */
++      int buf_count;                  /**< total number of buffers */
++      struct drm_buf **buflist;               /**< Vector of pointers into drm_device_dma::bufs */
++      int seg_count;
++      int page_count;                 /**< number of pages */
++      unsigned long *pagelist;        /**< page list */
++      unsigned long byte_count;
++      enum {
++              _DRM_DMA_USE_AGP = 0x01,
++              _DRM_DMA_USE_SG = 0x02,
++              _DRM_DMA_USE_FB = 0x04,
++              _DRM_DMA_USE_PCI_RO = 0x08
++      } flags;
++
++};
++
++/**
++ * AGP memory entry.  Stored as a doubly linked list.
++ */
++struct drm_agp_mem {
++      unsigned long handle;           /**< handle */
++      DRM_AGP_MEM *memory;
++      unsigned long bound;            /**< address */
++      int pages;
++      struct list_head head;
++};
++
++/**
++ * AGP data.
++ *
++ * \sa drm_agp_init() and drm_device::agp.
++ */
++struct drm_agp_head {
++      DRM_AGP_KERN agp_info;          /**< AGP device information */
++      struct list_head memory;
++      unsigned long mode;             /**< AGP mode */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
++      struct agp_bridge_data *bridge;
++#endif
++      int enabled;                    /**< whether the AGP bus as been enabled */
++      int acquired;                   /**< whether the AGP device has been acquired */
++      unsigned long base;
++      int agp_mtrr;
++      int cant_use_aperture;
++      unsigned long page_mask;
++};
++
++/**
++ * Scatter-gather memory.
++ */
++struct drm_sg_mem {
++      unsigned long handle;
++      void *virtual;
++      int pages;
++      struct page **pagelist;
++      dma_addr_t *busaddr;
++};
++
++struct drm_sigdata {
++      int context;
++      struct drm_hw_lock *lock;
++};
++
++
++/*
++ * Generic memory manager structs
++ */
++
++struct drm_mm_node {
++      struct list_head fl_entry;
++      struct list_head ml_entry;
++      int free;
++      unsigned long start;
++      unsigned long size;
++      struct drm_mm *mm;
++      void *private;
++};
++
++struct drm_mm {
++      struct list_head fl_entry;
++      struct list_head ml_entry;
++};
++
++
++/**
++ * Mappings list
++ */
++struct drm_map_list {
++      struct list_head head;          /**< list head */
++      struct drm_hash_item hash;
++      struct drm_map *map;                    /**< mapping */
++      uint64_t user_token;
++      struct drm_mm_node *file_offset_node;
++};
++
++typedef struct drm_map drm_local_map_t;
++
++/**
++ * Context handle list
++ */
++struct drm_ctx_list {
++      struct list_head head;          /**< list head */
++      drm_context_t handle;           /**< context handle */
++      struct drm_file *tag;           /**< associated fd private data */
++};
++
++struct drm_vbl_sig {
++      struct list_head head;
++      unsigned int sequence;
++      struct siginfo info;
++      struct task_struct *task;
++};
++
++/* location of GART table */
++#define DRM_ATI_GART_MAIN 1
++#define DRM_ATI_GART_FB   2
++
++#define DRM_ATI_GART_PCI 1
++#define DRM_ATI_GART_PCIE 2
++#define DRM_ATI_GART_IGP 3
++
++struct drm_ati_pcigart_info {
++      int gart_table_location;
++      int gart_reg_if;
++      void *addr;
++      dma_addr_t bus_addr;
++      dma_addr_t table_mask;
++      dma_addr_t member_mask;
++      struct drm_dma_handle *table_handle;
++      drm_local_map_t mapping;
++      int table_size;
++};
++
++/**
++ * This structure defines the drm_mm memory object, which will be used by the
++ * DRM for its buffer objects.
++ */
++struct drm_gem_object {
++      /** Reference count of this object */
++      struct kref refcount;
++
++      /** Handle count of this object. Each handle also holds a reference */
++      struct kref handlecount;
++
++      /** Related drm device */
++      struct drm_device *dev;
++      
++      /** File representing the shmem storage */
++      struct file *filp;
++
++      /**
++       * Size of the object, in bytes.  Immutable over the object's
++       * lifetime.
++       */
++      size_t size;
++
++      /**
++       * Global name for this object, starts at 1. 0 means unnamed.
++       * Access is covered by the object_name_lock in the related drm_device
++       */
++      int name;
++
++      /**
++       * Memory domains. These monitor which caches contain read/write data
++       * related to the object. When transitioning from one set of domains
++       * to another, the driver is called to ensure that caches are suitably
++       * flushed and invalidated
++       */
++      uint32_t        read_domains;
++      uint32_t        write_domain;
++
++      /**
++       * While validating an exec operation, the
++       * new read/write domain values are computed here.
++       * They will be transferred to the above values
++       * at the point that any cache flushing occurs
++       */
++      uint32_t        pending_read_domains;
++      uint32_t        pending_write_domain;
++
++      void *driver_private;
++};
++
++#include "drm_objects.h"
++
++/**
++ * DRM driver structure. This structure represent the common code for
++ * a family of cards. There will one drm_device for each card present
++ * in this family
++ */
++
++struct drm_driver {
++      int (*load) (struct drm_device *, unsigned long flags);
++      int (*firstopen) (struct drm_device *);
++      int (*open) (struct drm_device *, struct drm_file *);
++      void (*preclose) (struct drm_device *, struct drm_file *file_priv);
++      void (*postclose) (struct drm_device *, struct drm_file *);
++      void (*lastclose) (struct drm_device *);
++      int (*unload) (struct drm_device *);
++      int (*suspend) (struct drm_device *, pm_message_t state);
++      int (*resume) (struct drm_device *);
++      int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
++      void (*dma_ready) (struct drm_device *);
++      int (*dma_quiescent) (struct drm_device *);
++      int (*context_ctor) (struct drm_device *dev, int context);
++      int (*context_dtor) (struct drm_device *dev, int context);
++      int (*kernel_context_switch) (struct drm_device *dev, int old,
++                                    int new);
++      void (*kernel_context_switch_unlock) (struct drm_device * dev);
++      /**
++       * get_vblank_counter - get raw hardware vblank counter
++       * @dev: DRM device
++       * @crtc: counter to fetch
++       *
++       * Driver callback for fetching a raw hardware vblank counter
++       * for @crtc.  If a device doesn't have a hardware counter, the
++       * driver can simply return the value of drm_vblank_count and
++       * make the enable_vblank() and disable_vblank() hooks into no-ops,
++       * leaving interrupts enabled at all times.
++       *
++       * Wraparound handling and loss of events due to modesetting is dealt
++       * with in the DRM core code.
++       *
++       * RETURNS
++       * Raw vblank counter value.
++       */
++      u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
++
++      /**
++       * enable_vblank - enable vblank interrupt events
++       * @dev: DRM device
++       * @crtc: which irq to enable
++       *
++       * Enable vblank interrupts for @crtc.  If the device doesn't have
++       * a hardware vblank counter, this routine should be a no-op, since
++       * interrupts will have to stay on to keep the count accurate.
++       *
++       * RETURNS
++       * Zero on success, appropriate errno if the given @crtc's vblank
++       * interrupt cannot be enabled.
++       */
++      int (*enable_vblank) (struct drm_device *dev, int crtc);
++
++      /**
++       * disable_vblank - disable vblank interrupt events
++       * @dev: DRM device
++       * @crtc: which irq to enable
++       *
++       * Disable vblank interrupts for @crtc.  If the device doesn't have
++       * a hardware vblank counter, this routine should be a no-op, since
++       * interrupts will have to stay on to keep the count accurate.
++       */
++      void (*disable_vblank) (struct drm_device *dev, int crtc);
++      int (*dri_library_name) (struct drm_device *dev, char * buf);
++
++      /**
++       * Called by \c drm_device_is_agp.  Typically used to determine if a
++       * card is really attached to AGP or not.
++       *
++       * \param dev  DRM device handle
++       *
++       * \returns
++       * One of three values is returned depending on whether or not the
++       * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
++       * (return of 1), or may or may not be AGP (return of 2).
++       */
++      int (*device_is_agp) (struct drm_device *dev);
++
++/* these have to be filled in */
++       irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
++      void (*irq_preinstall) (struct drm_device *dev);
++      int (*irq_postinstall) (struct drm_device *dev);
++      void (*irq_uninstall) (struct drm_device *dev);
++      void (*reclaim_buffers) (struct drm_device *dev,
++                               struct drm_file *file_priv);
++      void (*reclaim_buffers_locked) (struct drm_device *dev,
++                                      struct drm_file *file_priv);
++      void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
++                                          struct drm_file *file_priv);
++      unsigned long (*get_map_ofs) (struct drm_map *map);
++      unsigned long (*get_reg_ofs) (struct drm_device *dev);
++      void (*set_version) (struct drm_device *dev,
++                           struct drm_set_version *sv);
++
++      int (*proc_init)(struct drm_minor *minor);
++      void (*proc_cleanup)(struct drm_minor *minor);
++
++      /**
++       * Driver-specific constructor for drm_gem_objects, to set up
++       * obj->driver_private.
++       *
++       * Returns 0 on success.
++       */
++      int (*gem_init_object) (struct drm_gem_object *obj);
++      void (*gem_free_object) (struct drm_gem_object *obj);
++
++      struct drm_fence_driver *fence_driver;
++      struct drm_bo_driver *bo_driver;
++
++      int major;
++      int minor;
++      int patchlevel;
++      char *name;
++      char *desc;
++      char *date;
++
++/* variables */
++      u32 driver_features;
++      int dev_priv_size;
++      struct drm_ioctl_desc *ioctls;
++      int num_ioctls;
++      struct file_operations fops;
++      struct pci_driver pci_driver;
++};
++
++#define DRM_MINOR_UNASSIGNED 0
++#define DRM_MINOR_LEGACY 1
++
++/**
++ * DRM minor structure. This structure represents a drm minor number.
++ */
++struct drm_minor {
++      int index;                      /**< Minor device number */
++      int type;                       /**< Control or render */
++      dev_t device;                   /**< Device number for mknod */
++      struct device kdev;             /**< Linux device */
++      struct drm_device *dev;
++      struct proc_dir_entry *dev_root;  /**< proc directory entry */
++      struct class_device *dev_class;
++};
++
++
++/**
++ * DRM device structure. This structure represent a complete card that
++ * may contain multiple heads.
++ */
++struct drm_device {
++      char *unique;                   /**< Unique identifier: e.g., busid */
++      int unique_len;                 /**< Length of unique field */
++      char *devname;                  /**< For /proc/interrupts */
++      int if_version;                 /**< Highest interface version set */
++
++      int blocked;                    /**< Blocked due to VC switch? */
++
++      /** \name Locks */
++      /*@{ */
++      spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
++      struct mutex struct_mutex;      /**< For others */
++      /*@} */
++
++      /** \name Usage Counters */
++      /*@{ */
++      int open_count;                 /**< Outstanding files open */
++      atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
++      atomic_t vma_count;             /**< Outstanding vma areas open */
++      int buf_use;                    /**< Buffers in use -- cannot alloc */
++      atomic_t buf_alloc;             /**< Buffer allocation in progress */
++      /*@} */
++
++      /** \name Performance counters */
++      /*@{ */
++      unsigned long counters;
++      enum drm_stat_type types[15];
++      atomic_t counts[15];
++      /*@} */
++
++      /** \name Authentication */
++      /*@{ */
++      struct list_head filelist;
++      struct drm_open_hash magiclist;
++      struct list_head magicfree;
++      /*@} */
++
++      /** \name Memory management */
++      /*@{ */
++      struct list_head maplist;       /**< Linked list of regions */
++      int map_count;                  /**< Number of mappable regions */
++      struct drm_open_hash map_hash;       /**< User token hash table for maps */
++      struct drm_mm offset_manager;        /**< User token manager */
++      struct drm_open_hash object_hash;    /**< User token hash table for objects */
++      struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
++      struct page *ttm_dummy_page;
++
++      /** \name Context handle management */
++      /*@{ */
++      struct list_head ctxlist;       /**< Linked list of context handles */
++      int ctx_count;                  /**< Number of context handles */
++      struct mutex ctxlist_mutex;     /**< For ctxlist */
++
++      struct idr ctx_idr;
++
++      struct list_head vmalist;       /**< List of vmas (for debugging) */
++      struct drm_lock_data lock;              /**< Information on hardware lock */
++      /*@} */
++
++      /** \name DMA queues (contexts) */
++      /*@{ */
++      int queue_count;                /**< Number of active DMA queues */
++      int queue_reserved;             /**< Number of reserved DMA queues */
++      int queue_slots;                /**< Actual length of queuelist */
++      struct drm_queue **queuelist;   /**< Vector of pointers to DMA queues */
++      struct drm_device_dma *dma;             /**< Optional pointer for DMA support */
++      /*@} */
++
++      /** \name Context support */
++      /*@{ */
++      int irq;                        /**< Interrupt used by board */
++      int irq_enabled;                /**< True if irq handler is enabled */
++      __volatile__ long context_flag; /**< Context swapping flag */
++      __volatile__ long interrupt_flag; /**< Interruption handler flag */
++      __volatile__ long dma_flag;     /**< DMA dispatch flag */
++      struct timer_list timer;        /**< Timer for delaying ctx switch */
++      wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
++      int last_checked;               /**< Last context checked for DMA */
++      int last_context;               /**< Last current context */
++      unsigned long last_switch;      /**< jiffies at last context switch */
++      /*@} */
++
++      struct work_struct work;
++
++      /** \name VBLANK IRQ support */
++      /*@{ */
++
++      /*
++       * At load time, disabling the vblank interrupt won't be allowed since
++       * old clients may not call the modeset ioctl and therefore misbehave.
++       * Once the modeset ioctl *has* been called though, we can safely
++       * disable them when unused.
++       */
++      int vblank_disable_allowed;
++
++      wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
++      atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
++      spinlock_t vbl_lock;
++      struct list_head *vbl_sigs;             /**< signal list to send on VBLANK */
++      atomic_t vbl_signal_pending;    /* number of signals pending on all crtcs*/
++      atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
++      u32 *last_vblank;               /* protected by dev->vbl_lock, used */
++                                      /* for wraparound handling */
++      int *vblank_enabled;            /* so we don't call enable more than
++                                         once per disable */
++      int *vblank_inmodeset;          /* Display driver is setting mode */
++      struct timer_list vblank_disable_timer;
++
++      u32 max_vblank_count;           /**< size of vblank counter register */
++      spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
++      void (*locked_tasklet_func)(struct drm_device *dev);
++
++      /*@} */
++      cycles_t ctx_start;
++      cycles_t lck_start;
++
++      struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
++      wait_queue_head_t buf_readers;  /**< Processes waiting to read */
++      wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */
++
++      struct drm_agp_head *agp;               /**< AGP data */
++
++      struct pci_dev *pdev;           /**< PCI device structure */
++      int pci_vendor;                 /**< PCI vendor id */
++      int pci_device;                 /**< PCI device id */
++#ifdef __alpha__
++      struct pci_controller *hose;
++#endif
++      int num_crtcs;                  /**< Number of CRTCs on this device */
++      struct drm_sg_mem *sg;          /**< Scatter gather memory */
++      void *dev_private;              /**< device private data */
++      struct drm_sigdata sigdata;             /**< For block_all_signals */
++      sigset_t sigmask;
++
++      struct drm_driver *driver;
++      drm_local_map_t *agp_buffer_map;
++      unsigned int agp_buffer_token;
++      struct drm_minor *primary;              /**< render type primary screen head */
++
++      struct drm_fence_manager fm;
++      struct drm_buffer_manager bm;
++
++      /** \name Drawable information */
++      /*@{ */
++      spinlock_t drw_lock;
++      struct idr drw_idr;
++      /*@} */
++
++      /** \name GEM information */
++      /*@{ */
++      spinlock_t object_name_lock;
++      struct idr object_name_idr;
++      atomic_t object_count;
++      atomic_t object_memory;
++      atomic_t pin_count;
++      atomic_t pin_memory;
++      atomic_t gtt_count;
++      atomic_t gtt_memory;
++      uint32_t gtt_total;
++      uint32_t invalidate_domains;    /* domains pending invalidation */
++      uint32_t flush_domains;         /* domains pending flush */
++      /*@} */
++};
++
++#if __OS_HAS_AGP
++struct drm_agp_ttm_backend {
++      struct drm_ttm_backend backend;
++      DRM_AGP_MEM *mem;
++      struct agp_bridge_data *bridge;
++      int populated;
++};
++#endif
++
++
++static __inline__ int drm_core_check_feature(struct drm_device *dev,
++                                           int feature)
++{
++      return ((dev->driver->driver_features & feature) ? 1 : 0);
++}
++
++#ifdef __alpha__
++#define drm_get_pci_domain(dev) dev->hose->index
++#else
++#define drm_get_pci_domain(dev) 0
++#endif
++
++#if __OS_HAS_AGP
++static inline int drm_core_has_AGP(struct drm_device *dev)
++{
++      return drm_core_check_feature(dev, DRIVER_USE_AGP);
++}
++#else
++#define drm_core_has_AGP(dev) (0)
++#endif
++
++#if __OS_HAS_MTRR
++static inline int drm_core_has_MTRR(struct drm_device *dev)
++{
++      return drm_core_check_feature(dev, DRIVER_USE_MTRR);
++}
++
++#define DRM_MTRR_WC           MTRR_TYPE_WRCOMB
++
++static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
++                             unsigned int flags)
++{
++      return mtrr_add(offset, size, flags, 1);
++}
++
++static inline int drm_mtrr_del(int handle, unsigned long offset,
++                             unsigned long size, unsigned int flags)
++{
++      return mtrr_del(handle, offset, size);
++}
++
++#else
++static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
++                             unsigned int flags)
++{
++      return -ENODEV;
++}
++
++static inline int drm_mtrr_del(int handle, unsigned long offset,
++                             unsigned long size, unsigned int flags)
++{
++      return -ENODEV;
++}
++
++#define drm_core_has_MTRR(dev) (0)
++#define DRM_MTRR_WC           0
++#endif
++
++
++/******************************************************************/
++/** \name Internal function definitions */
++/*@{*/
++
++                              /* Driver support (drm_drv.h) */
++extern int drm_fb_loaded;
++extern int drm_init(struct drm_driver *driver,
++                            struct pci_device_id *pciidlist);
++extern void drm_exit(struct drm_driver *driver);
++extern void drm_cleanup_pci(struct pci_dev *pdev);
++extern int drm_ioctl(struct inode *inode, struct file *filp,
++                   unsigned int cmd, unsigned long arg);
++extern long drm_unlocked_ioctl(struct file *filp,
++                             unsigned int cmd, unsigned long arg);
++extern long drm_compat_ioctl(struct file *filp,
++                           unsigned int cmd, unsigned long arg);
++
++extern int drm_lastclose(struct drm_device *dev);
++
++                              /* Device support (drm_fops.h) */
++extern int drm_open(struct inode *inode, struct file *filp);
++extern int drm_stub_open(struct inode *inode, struct file *filp);
++extern int drm_fasync(int fd, struct file *filp, int on);
++extern int drm_release(struct inode *inode, struct file *filp);
++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
++
++                              /* Mapping support (drm_vm.h) */
++extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
++extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
++extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
++extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
++
++                              /* Memory management support (drm_memory.h) */
++#include "drm_memory.h"
++extern void drm_mem_init(void);
++extern int drm_mem_info(char *buf, char **start, off_t offset,
++                      int request, int *eof, void *data);
++extern void *drm_calloc(size_t nmemb, size_t size, int area);
++extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
++extern unsigned long drm_alloc_pages(int order, int area);
++extern void drm_free_pages(unsigned long address, int order, int area);
++extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
++extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
++extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
++extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
++                                            struct page **pages,
++                                            unsigned long num_pages,
++                                            uint32_t gtt_offset);
++extern int drm_unbind_agp(DRM_AGP_MEM * handle);
++
++extern void drm_free_memctl(size_t size);
++extern int drm_alloc_memctl(size_t size);
++extern void drm_query_memctl(uint64_t *cur_used,
++                           uint64_t *emer_used,
++                           uint64_t *low_threshold,
++                           uint64_t *high_threshold,
++                           uint64_t *emer_threshold);
++extern void drm_init_memctl(size_t low_threshold,
++                          size_t high_threshold,
++                          size_t unit_size);
++
++                              /* Misc. IOCTL support (drm_ioctl.h) */
++extern int drm_irq_by_busid(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int drm_getunique(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_setunique(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_getmap(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_getclient(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_getstats(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_setversion(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++extern int drm_noop(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv);
++
++                              /* Context IOCTL support (drm_context.h) */
++extern int drm_resctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_addctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_modctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_getctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_switchctx(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_newctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_rmctx(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv);
++
++extern int drm_ctxbitmap_init(struct drm_device *dev);
++extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
++extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
++
++extern int drm_setsareactx(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int drm_getsareactx(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++
++                              /* Drawable IOCTL support (drm_drawable.h) */
++extern int drm_adddraw(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern int drm_rmdraw(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_update_drawable_info(struct drm_device *dev, void *data,
++                                  struct drm_file *file_priv);
++extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
++                                                     drm_drawable_t id);
++extern void drm_drawable_free_all(struct drm_device *dev);
++
++                              /* Authentication IOCTL support (drm_auth.h) */
++extern int drm_getmagic(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_authmagic(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++
++                              /* Locking IOCTL support (drm_lock.h) */
++extern int drm_lock(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv);
++extern int drm_unlock(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
++extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
++extern void drm_idlelock_take(struct drm_lock_data *lock_data);
++extern void drm_idlelock_release(struct drm_lock_data *lock_data);
++
++/*
++ * These are exported to drivers so that they can implement fencing using
++ * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
++ */
++
++extern int drm_i_have_hw_lock(struct drm_device *dev,
++                            struct drm_file *file_priv);
++
++                              /* Buffer management support (drm_bufs.h) */
++extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addmap(struct drm_device *dev, unsigned int offset,
++                    unsigned int size, enum drm_map_type type,
++                    enum drm_map_flags flags, drm_local_map_t ** map_ptr);
++extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
++extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
++extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int drm_addbufs(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern int drm_infobufs(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_markbufs(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_freebufs(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_mapbufs(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern int drm_order(unsigned long size);
++extern unsigned long drm_get_resource_start(struct drm_device *dev,
++                                          unsigned int resource);
++extern unsigned long drm_get_resource_len(struct drm_device *dev,
++                                        unsigned int resource);
++extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
++                                                drm_local_map_t *map);
++
++
++                              /* DMA support (drm_dma.h) */
++extern int drm_dma_setup(struct drm_device *dev);
++extern void drm_dma_takedown(struct drm_device *dev);
++extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
++extern void drm_core_reclaim_buffers(struct drm_device *dev,
++                                   struct drm_file *filp);
++
++                              /* IRQ support (drm_irq.h) */
++extern int drm_control(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
++extern int drm_irq_install(struct drm_device *dev);
++extern int drm_irq_uninstall(struct drm_device *dev);
++extern void drm_driver_irq_preinstall(struct drm_device *dev);
++extern void drm_driver_irq_postinstall(struct drm_device *dev);
++extern void drm_driver_irq_uninstall(struct drm_device *dev);
++
++extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
++extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
++extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
++extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
++extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
++extern void drm_handle_vblank(struct drm_device *dev, int crtc);
++extern int drm_vblank_get(struct drm_device *dev, int crtc);
++extern void drm_vblank_put(struct drm_device *dev, int crtc);
++
++                              /* Modesetting support */
++extern int drm_modeset_ctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++
++                              /* AGP/GART support (drm_agpsupport.h) */
++extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
++extern int drm_agp_acquire(struct drm_device *dev);
++extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern int drm_agp_release(struct drm_device *dev);
++extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
++extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
++extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
++extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
++extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
++extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
++extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type);
++#else
++extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
++#endif
++extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
++extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
++extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
++extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
++extern void drm_agp_chipset_flush(struct drm_device *dev);
++                              /* Stub support (drm_stub.h) */
++extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
++                   struct drm_driver *driver);
++extern int drm_put_dev(struct drm_device *dev);
++extern int drm_put_minor(struct drm_device *dev);
++extern unsigned int drm_debug; /* 1 to enable debug output */
++
++extern struct class *drm_class;
++extern struct proc_dir_entry *drm_proc_root;
++
++extern struct idr drm_minors_idr;
++
++extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
++
++                              /* Proc support (drm_proc.h) */
++int drm_proc_init(struct drm_minor *minor, int minor_id,
++                struct proc_dir_entry *root);
++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
++
++                              /* Scatter Gather Support (drm_scatter.h) */
++extern void drm_sg_cleanup(struct drm_sg_mem * entry);
++extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
++extern int drm_sg_free(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++
++                             /* ATI PCIGART support (ati_pcigart.h) */
++extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
++extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
++
++extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
++                         size_t align, dma_addr_t maxaddr);
++extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
++extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
++
++                             /* sysfs support (drm_sysfs.c) */
++struct drm_sysfs_class;
++extern struct class *drm_sysfs_create(struct module *owner, char *name);
++extern void drm_sysfs_destroy(void);
++extern int drm_sysfs_device_add(struct drm_minor *minor);
++extern void drm_sysfs_device_remove(struct drm_minor *minor);
++
++/*
++ * Basic memory manager support (drm_mm.c)
++ */
++
++extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
++                                             unsigned alignment);
++extern void drm_mm_put_block(struct drm_mm_node *cur);
++extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
++                                              unsigned alignment, int best_match);
++extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
++extern void drm_mm_takedown(struct drm_mm *mm);
++extern int drm_mm_clean(struct drm_mm *mm);
++extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
++extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
++extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
++
++static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
++{
++      return block->mm;
++}
++
++/* Graphics Execution Manager library functions (drm_gem.c) */
++int
++drm_gem_init (struct drm_device *dev);
++
++void
++drm_gem_object_free (struct kref *kref);
++
++struct drm_gem_object *
++drm_gem_object_alloc(struct drm_device *dev, size_t size);
++
++void
++drm_gem_object_handle_free (struct kref *kref);
++    
++static inline void drm_gem_object_reference(struct drm_gem_object *obj)
++{
++      kref_get(&obj->refcount);
++}
++
++static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
++{
++      if (obj == NULL)
++              return;
++
++      kref_put (&obj->refcount, drm_gem_object_free);
++}
++
++int
++drm_gem_handle_create(struct drm_file *file_priv,
++                    struct drm_gem_object *obj,
++                    int *handlep);
++
++static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
++{
++      drm_gem_object_reference (obj);
++      kref_get(&obj->handlecount);
++}
++
++static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
++{
++      if (obj == NULL)
++              return;
++      
++      /*
++       * Must bump handle count first as this may be the last
++       * ref, in which case the object would disappear before we
++       * checked for a name
++       */
++      kref_put (&obj->handlecount, drm_gem_object_handle_free);
++      drm_gem_object_unreference (obj);
++}
++
++struct drm_gem_object *
++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
++                    int handle);
++int drm_gem_close_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int drm_gem_open_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
++void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
++
++extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
++extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
++extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
++
++static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
++                                                 unsigned int token)
++{
++      struct drm_map_list *_entry;
++      list_for_each_entry(_entry, &dev->maplist, head)
++              if (_entry->user_token == token)
++                      return _entry->map;
++      return NULL;
++}
++
++static __inline__ int drm_device_is_agp(struct drm_device *dev)
++{
++      if ( dev->driver->device_is_agp != NULL ) {
++              int err = (*dev->driver->device_is_agp)(dev);
++
++              if (err != 2) {
++                      return err;
++              }
++      }
++
++      return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
++}
++
++static __inline__ int drm_device_is_pcie(struct drm_device *dev)
++{
++      return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
++}
++
++static __inline__ void drm_core_dropmap(struct drm_map *map)
++{
++}
++
++#ifndef DEBUG_MEMORY
++/** Wrapper around kmalloc() */
++static __inline__ void *drm_alloc(size_t size, int area)
++{
++      return kmalloc(size, GFP_KERNEL);
++}
++
++/** Wrapper around kfree() */
++static __inline__ void drm_free(void *pt, size_t size, int area)
++{
++      kfree(pt);
++}
++#else
++extern void *drm_alloc(size_t size, int area);
++extern void drm_free(void *pt, size_t size, int area);
++#endif
++
++/*
++ * Accounting variants of standard calls.
++ */
++
++static inline void *drm_ctl_alloc(size_t size, int area)
++{
++      void *ret;
++      if (drm_alloc_memctl(size))
++              return NULL;
++      ret = drm_alloc(size, area);
++      if (!ret)
++              drm_free_memctl(size);
++      return ret;
++}
++
++static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
++{
++      void *ret;
++
++      if (drm_alloc_memctl(nmemb*size))
++              return NULL;
++      ret = drm_calloc(nmemb, size, area);
++      if (!ret)
++              drm_free_memctl(nmemb*size);
++      return ret;
++}
++
++static inline void drm_ctl_free(void *pt, size_t size, int area)
++{
++      drm_free(pt, size, area);
++      drm_free_memctl(size);
++}
++
++/*@}*/
++
++#endif                                /* __KERNEL__ */
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_proc.c git-nokia/drivers/gpu/drm-tungsten/drm_proc.c
+--- git/drivers/gpu/drm-tungsten/drm_proc.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_proc.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,743 @@
++/**
++ * \file drm_proc.c
++ * /proc support for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ *
++ * \par Acknowledgements:
++ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
++ *    the problem with the proc files not outputting all their information.
++ */
++
++/*
++ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++static int drm_name_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data);
++static int drm_vm_info(char *buf, char **start, off_t offset,
++                     int request, int *eof, void *data);
++static int drm_clients_info(char *buf, char **start, off_t offset,
++                          int request, int *eof, void *data);
++static int drm_queues_info(char *buf, char **start, off_t offset,
++                         int request, int *eof, void *data);
++static int drm_bufs_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data);
++static int drm_objects_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data);
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++                           int request, int *eof, void *data);
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data);
++#if DRM_DEBUG_CODE
++static int drm_vma_info(char *buf, char **start, off_t offset,
++                      int request, int *eof, void *data);
++#endif
++
++/**
++ * Proc file list.
++ */
++static struct drm_proc_list {
++      const char *name;       /**< file name */
++      int (*f) (char *, char **, off_t, int, int *, void *);          /**< proc callback*/
++} drm_proc_list[] = {
++      {"name", drm_name_info},
++      {"mem", drm_mem_info},
++      {"vm", drm_vm_info},
++      {"clients", drm_clients_info},
++      {"queues", drm_queues_info},
++      {"bufs", drm_bufs_info},
++      {"objects", drm_objects_info},
++      {"gem_names", drm_gem_name_info},
++      {"gem_objects", drm_gem_object_info},
++#if DRM_DEBUG_CODE
++      {"vma", drm_vma_info},
++#endif
++};
++
++#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
++
++/**
++ * Initialize the DRI proc filesystem for a device.
++ *
++ * \param dev DRM device.
++ * \param minor device minor number.
++ * \param root DRI proc dir entry.
++ * \param dev_root resulting DRI device proc dir entry.
++ * \return root entry pointer on success, or NULL on failure.
++ *
++ * Create the DRI proc root entry "/proc/dri", the device proc root entry
++ * "/proc/dri/%minor%/", and each entry in proc_list as
++ * "/proc/dri/%minor%/%name%".
++ */
++int drm_proc_init(struct drm_minor *minor, int minor_id,
++                struct proc_dir_entry *root)
++{
++      struct proc_dir_entry *ent;
++      int i, j;
++      char name[64];
++
++      sprintf(name, "%d", minor_id);
++      minor->dev_root = proc_mkdir(name, root);
++      if (!minor->dev_root) {
++              DRM_ERROR("Cannot create /proc/dri/%s\n", name);
++              return -1;
++      }
++
++      for (i = 0; i < DRM_PROC_ENTRIES; i++) {
++              ent = create_proc_entry(drm_proc_list[i].name,
++                                      S_IFREG | S_IRUGO, minor->dev_root);
++              if (!ent) {
++                      DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
++                                name, drm_proc_list[i].name);
++                      for (j = 0; j < i; j++)
++                              remove_proc_entry(drm_proc_list[i].name,
++                                                minor->dev_root);
++                      remove_proc_entry(name, root);
++                      minor->dev_root = NULL;
++                      return -1;
++              }
++              ent->read_proc = drm_proc_list[i].f;
++              ent->data = minor;
++      }
++      return 0;
++}
++
++/**
++ * Cleanup the proc filesystem resources.
++ *
++ * \param minor device minor number.
++ * \param root DRI proc dir entry.
++ * \param dev_root DRI device proc dir entry.
++ * \return always zero.
++ *
++ * Remove all proc entries created by proc_init().
++ */
++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
++{
++      int i;
++      char name[64];
++
++      if (!root || !minor->dev_root)
++              return 0;
++
++      for (i = 0; i < DRM_PROC_ENTRIES; i++)
++              remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
++      sprintf(name, "%d", minor->index);
++      remove_proc_entry(name, root);
++
++      return 0;
++}
++
++/**
++ * Called when "/proc/dri/.../name" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * Prints the device name together with the bus id if available.
++ */
++static int drm_name_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      if (dev->unique) {
++              DRM_PROC_PRINT("%s %s %s\n",
++                             dev->driver->pci_driver.name,
++                             pci_name(dev->pdev), dev->unique);
++      } else {
++              DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
++                             pci_name(dev->pdev));
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Called when "/proc/dri/.../vm" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * Prints information about all mappings in drm_device::maplist.
++ */
++static int drm__vm_info(char *buf, char **start, off_t offset, int request,
++                      int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_map *map;
++      struct drm_map_list *r_list;
++
++      /* Hardcoded from _DRM_FRAME_BUFFER,
++         _DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
++         _DRM_SCATTER_GATHER, and _DRM_CONSISTENT. */
++      const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++      const char *type;
++      int i;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("slot     offset       size type flags    "
++                     "address mtrr\n\n");
++      i = 0;
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              map = r_list->map;
++              if (!map)
++                      continue;
++              if (map->type < 0 || map->type > 5)
++                      type = "??";
++              else
++                      type = types[map->type];
++              DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
++                             i,
++                             map->offset,
++                             map->size, type, map->flags,
++                             (unsigned long) r_list->user_token);
++
++              if (map->mtrr < 0) {
++                      DRM_PROC_PRINT("none\n");
++              } else {
++                      DRM_PROC_PRINT("%4d\n", map->mtrr);
++              }
++              i++;
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_vm_info(char *buf, char **start, off_t offset, int request,
++                     int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__vm_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../queues" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__queues_info(char *buf, char **start, off_t offset,
++                          int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      int i;
++      struct drm_queue *q;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("  ctx/flags   use   fin"
++                     "   blk/rw/rwf  wait    flushed     queued"
++                     "      locks\n\n");
++      for (i = 0; i < dev->queue_count; i++) {
++              q = dev->queuelist[i];
++              atomic_inc(&q->use_count);
++              DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
++                                 "%5d/0x%03x %5d %5d"
++                                 " %5d/%c%c/%c%c%c %5Zd\n",
++                                 i,
++                                 q->flags,
++                                 atomic_read(&q->use_count),
++                                 atomic_read(&q->finalization),
++                                 atomic_read(&q->block_count),
++                                 atomic_read(&q->block_read) ? 'r' : '-',
++                                 atomic_read(&q->block_write) ? 'w' : '-',
++                                 waitqueue_active(&q->read_queue) ? 'r' : '-',
++                                 waitqueue_active(&q->
++                                                  write_queue) ? 'w' : '-',
++                                 waitqueue_active(&q->
++                                                  flush_queue) ? 'f' : '-',
++                                 DRM_BUFCOUNT(&q->waitlist));
++              atomic_dec(&q->use_count);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_queues_info(char *buf, char **start, off_t offset, int request,
++                         int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__queues_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../bufs" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
++                        int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      if (!dma || offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT(" o     size count  free  segs pages    kB\n\n");
++      for (i = 0; i <= DRM_MAX_ORDER; i++) {
++              if (dma->bufs[i].buf_count)
++                      DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
++                                     i,
++                                     dma->bufs[i].buf_size,
++                                     dma->bufs[i].buf_count,
++                                     atomic_read(&dma->bufs[i]
++                                                 .freelist.count),
++                                     dma->bufs[i].seg_count,
++                                     dma->bufs[i].seg_count
++                                     * (1 << dma->bufs[i].page_order),
++                                     (dma->bufs[i].seg_count
++                                      * (1 << dma->bufs[i].page_order))
++                                     * PAGE_SIZE / 1024);
++      }
++      DRM_PROC_PRINT("\n");
++      for (i = 0; i < dma->buf_count; i++) {
++              if (i && !(i % 32))
++                      DRM_PROC_PRINT("\n");
++              DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
++      }
++      DRM_PROC_PRINT("\n");
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__bufs_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../objects" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__objects_info(char *buf, char **start, off_t offset, int request,
++                        int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_fence_manager *fm = &dev->fm;
++      uint64_t used_mem;
++      uint64_t used_emer;
++      uint64_t low_mem;
++      uint64_t high_mem;
++      uint64_t emer_mem;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("Object accounting:\n\n");
++      if (fm->initialized) {
++              DRM_PROC_PRINT("Number of active fence objects: %d.\n",
++                             atomic_read(&fm->count));
++      } else {
++              DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
++      }
++
++      if (bm->initialized) {
++              DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
++                             atomic_read(&bm->count));
++      }
++      DRM_PROC_PRINT("Memory accounting:\n\n");
++      if (bm->initialized) {
++              DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
++      } else {
++              DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
++      }
++
++      drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
++
++      if (used_mem > 16*PAGE_SIZE) {
++              DRM_PROC_PRINT("Used object memory is %lu pages.\n",
++                             (unsigned long) (used_mem >> PAGE_SHIFT));
++      } else {
++              DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
++                             (unsigned long) used_mem);
++      }
++      if (used_emer > 16*PAGE_SIZE) {
++              DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
++                             (unsigned long) (used_emer >> PAGE_SHIFT));
++      } else {
++              DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
++                             (unsigned long) used_emer);
++      }
++      DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
++                     (unsigned long) (low_mem >> PAGE_SHIFT));
++      DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
++                     (unsigned long) (high_mem >> PAGE_SHIFT));
++      DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
++                     (unsigned long) (emer_mem >> PAGE_SHIFT));
++
++      DRM_PROC_PRINT("\n");
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_objects_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__objects_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../clients" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__clients_info(char *buf, char **start, off_t offset,
++                           int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_file *priv;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("a dev   pid    uid      magic     ioctls\n\n");
++      list_for_each_entry(priv, &dev->filelist, lhead) {
++              DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
++                             priv->authenticated ? 'y' : 'n',
++                             priv->minor->index,
++                             priv->pid,
++                             priv->uid, priv->magic, priv->ioctl_count);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_clients_info(char *buf, char **start, off_t offset,
++                          int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__clients_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++struct drm_gem_name_info_data {
++      int                     len;
++      char                    *buf;
++      int                     eof;
++};
++
++static int drm_gem_one_name_info(int id, void *ptr, void *data)
++{
++      struct drm_gem_object *obj = ptr;
++      struct drm_gem_name_info_data   *nid = data;
++
++      DRM_INFO("name %d size %d\n", obj->name, obj->size);
++      if (nid->eof)
++              return 0;
++
++      nid->len += sprintf(&nid->buf[nid->len],
++                          "%6d%9d%8d%9d\n",
++                          obj->name, obj->size,
++                          atomic_read(&obj->handlecount.refcount),
++                          atomic_read(&obj->refcount.refcount));
++      if (nid->len > DRM_PROC_LIMIT) {
++              nid->eof = 1;
++              return 0;
++      }
++      return 0;
++}
++
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++                           int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      struct drm_gem_name_info_data nid;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      nid.len = sprintf(buf, "  name     size handles refcount\n");
++      nid.buf = buf;
++      nid.eof = 0;
++      idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
++
++      *start = &buf[offset];
++      *eof = 0;
++      if (nid.len > request + offset)
++              return request;
++      *eof = 1;
++      return nid.len - offset;
++}
++
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
++      DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
++      DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
++      DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
++      DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
++      DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++#if DRM_DEBUG_CODE
++
++static int drm__vma_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_vma_entry *pt;
++      struct vm_area_struct *vma;
++#if defined(__i386__)
++      unsigned int pgprot;
++#endif
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
++                     atomic_read(&dev->vma_count),
++                     high_memory, virt_to_phys(high_memory));
++      list_for_each_entry(pt, &dev->vmalist, head) {
++              if (!(vma = pt->vma))
++                      continue;
++              DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
++                             pt->pid,
++                             vma->vm_start,
++                             vma->vm_end,
++                             vma->vm_flags & VM_READ ? 'r' : '-',
++                             vma->vm_flags & VM_WRITE ? 'w' : '-',
++                             vma->vm_flags & VM_EXEC ? 'x' : '-',
++                             vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
++                             vma->vm_flags & VM_LOCKED ? 'l' : '-',
++                             vma->vm_flags & VM_IO ? 'i' : '-',
++                             vma->vm_pgoff);
++
++#if defined(__i386__)
++              pgprot = pgprot_val(vma->vm_page_prot);
++              DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
++                             pgprot & _PAGE_PRESENT ? 'p' : '-',
++                             pgprot & _PAGE_RW ? 'w' : 'r',
++                             pgprot & _PAGE_USER ? 'u' : 's',
++                             pgprot & _PAGE_PWT ? 't' : 'b',
++                             pgprot & _PAGE_PCD ? 'u' : 'c',
++                             pgprot & _PAGE_ACCESSED ? 'a' : '-',
++                             pgprot & _PAGE_DIRTY ? 'd' : '-',
++                             pgprot & _PAGE_PSE ? 'm' : 'k',
++                             pgprot & _PAGE_GLOBAL ? 'g' : 'l');
++#endif
++              DRM_PROC_PRINT("\n");
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int drm_vma_info(char *buf, char **start, off_t offset, int request,
++                      int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__vma_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_regman.c git-nokia/drivers/gpu/drm-tungsten/drm_regman.c
+--- git/drivers/gpu/drm-tungsten/drm_regman.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_regman.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,200 @@
++/**************************************************************************
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * An allocate-fence manager implementation intended for sets of base-registers
++ * or tiling-registers.
++ */
++
++#include "drmP.h"
++
++/*
++ * Allocate a compatible register and put it on the unfenced list.
++ */
++
++int drm_regs_alloc(struct drm_reg_manager *manager,
++                 const void *data,
++                 uint32_t fence_class,
++                 uint32_t fence_type,
++                 int interruptible, int no_wait, struct drm_reg **reg)
++{
++      struct drm_reg *entry, *next_entry;
++      int ret;
++
++      *reg = NULL;
++
++      /*
++       * Search the unfenced list.
++       */
++
++      list_for_each_entry(entry, &manager->unfenced, head) {
++              if (manager->reg_reusable(entry, data)) {
++                      entry->new_fence_type |= fence_type;
++                      goto out;
++              }
++      }
++
++      /*
++       * Search the lru list.
++       */
++
++      list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++              struct drm_fence_object *fence = entry->fence;
++              if (fence->fence_class == fence_class &&
++                  (entry->fence_type & fence_type) == entry->fence_type &&
++                  manager->reg_reusable(entry, data)) {
++                      list_del(&entry->head);
++                      entry->new_fence_type = fence_type;
++                      list_add_tail(&entry->head, &manager->unfenced);
++                      goto out;
++              }
++      }
++
++      /*
++       * Search the free list.
++       */
++
++      list_for_each_entry(entry, &manager->free, head) {
++              list_del(&entry->head);
++              entry->new_fence_type = fence_type;
++              list_add_tail(&entry->head, &manager->unfenced);
++              goto out;
++      }
++
++      if (no_wait)
++              return -EBUSY;
++
++      /*
++       * Go back to the lru list and try to expire fences.
++       */
++
++      list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++              BUG_ON(!entry->fence);
++              ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
++                                          entry->fence_type);
++              if (ret)
++                      return ret;
++
++              drm_fence_usage_deref_unlocked(&entry->fence);
++              list_del(&entry->head);
++              entry->new_fence_type = fence_type;
++              list_add_tail(&entry->head, &manager->unfenced);
++              goto out;
++      }
++
++      /*
++       * Oops. All registers are used up :(.
++       */
++
++      return -EBUSY;
++out:
++      *reg = entry;
++      return 0;
++}
++EXPORT_SYMBOL(drm_regs_alloc);
++
++void drm_regs_fence(struct drm_reg_manager *manager,
++                  struct drm_fence_object *fence)
++{
++      struct drm_reg *entry;
++      struct drm_reg *next_entry;
++
++      if (!fence) {
++
++              /*
++               * Old fence (if any) is still valid.
++               * Put back on free and lru lists.
++               */
++
++              list_for_each_entry_safe_reverse(entry, next_entry,
++                                               &manager->unfenced, head) {
++                      list_del(&entry->head);
++                      list_add(&entry->head, (entry->fence) ?
++                               &manager->lru : &manager->free);
++              }
++      } else {
++
++              /*
++               * Fence with a new fence and put on lru list.
++               */
++
++              list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
++                                       head) {
++                      list_del(&entry->head);
++                      if (entry->fence)
++                              drm_fence_usage_deref_unlocked(&entry->fence);
++                      drm_fence_reference_unlocked(&entry->fence, fence);
++
++                      entry->fence_type = entry->new_fence_type;
++                      BUG_ON((entry->fence_type & fence->type) !=
++                             entry->fence_type);
++
++                      list_add_tail(&entry->head, &manager->lru);
++              }
++      }
++}
++EXPORT_SYMBOL(drm_regs_fence);
++
++void drm_regs_free(struct drm_reg_manager *manager)
++{
++      struct drm_reg *entry;
++      struct drm_reg *next_entry;
++
++      drm_regs_fence(manager, NULL);
++
++      list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
++              list_del(&entry->head);
++              manager->reg_destroy(entry);
++      }
++
++      list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++
++              (void)drm_fence_object_wait(entry->fence, 1, 1,
++                                          entry->fence_type);
++              list_del(&entry->head);
++              drm_fence_usage_deref_unlocked(&entry->fence);
++              manager->reg_destroy(entry);
++      }
++}
++EXPORT_SYMBOL(drm_regs_free);
++
++void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
++{
++      reg->fence = NULL;
++      list_add_tail(&reg->head, &manager->free);
++}
++EXPORT_SYMBOL(drm_regs_add);
++
++void drm_regs_init(struct drm_reg_manager *manager,
++                 int (*reg_reusable) (const struct drm_reg *, const void *),
++                 void (*reg_destroy) (struct drm_reg *))
++{
++      INIT_LIST_HEAD(&manager->free);
++      INIT_LIST_HEAD(&manager->lru);
++      INIT_LIST_HEAD(&manager->unfenced);
++      manager->reg_reusable = reg_reusable;
++      manager->reg_destroy = reg_destroy;
++}
++EXPORT_SYMBOL(drm_regs_init);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sarea.h git-nokia/drivers/gpu/drm-tungsten/drm_sarea.h
+--- git/drivers/gpu/drm-tungsten/drm_sarea.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sarea.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,84 @@
++/**
++ * \file drm_sarea.h
++ * \brief SAREA definitions
++ *
++ * \author Michel Dänzer <michel@daenzer.net>
++ */
++
++/*
++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_SAREA_H_
++#define _DRM_SAREA_H_
++
++#include "drm.h"
++
++/* SAREA area needs to be at least a page */
++#if defined(__alpha__)
++#define SAREA_MAX                       0x2000
++#elif defined(__ia64__)
++#define SAREA_MAX                       0x10000       /* 64kB */
++#else
++/* Intel 830M driver needs at least 8k SAREA */
++#define SAREA_MAX                       0x2000UL
++#endif
++
++/** Maximum number of drawables in the SAREA */
++#define SAREA_MAX_DRAWABLES           256
++
++#define SAREA_DRAWABLE_CLAIMED_ENTRY    0x80000000
++
++/** SAREA drawable */
++struct drm_sarea_drawable {
++      unsigned int stamp;
++      unsigned int flags;
++};
++
++/** SAREA frame */
++struct drm_sarea_frame {
++      unsigned int x;
++      unsigned int y;
++      unsigned int width;
++      unsigned int height;
++      unsigned int fullscreen;
++};
++
++/** SAREA */
++struct drm_sarea {
++    /** first thing is always the DRM locking structure */
++      struct drm_hw_lock lock;
++    /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
++      struct drm_hw_lock drawable_lock;
++      struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES];   /**< drawables */
++      struct drm_sarea_frame frame;   /**< frame */
++      drm_context_t dummy_context;
++};
++
++#ifndef __KERNEL__
++typedef struct drm_sarea_drawable drm_sarea_drawable_t;
++typedef struct drm_sarea_frame drm_sarea_frame_t;
++typedef struct drm_sarea drm_sarea_t;
++#endif
++
++#endif                                /* _DRM_SAREA_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_scatter.c git-nokia/drivers/gpu/drm-tungsten/drm_scatter.c
+--- git/drivers/gpu/drm-tungsten/drm_scatter.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_scatter.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,228 @@
++/**
++ * \file drm_scatter.c
++ * IOCTLs to manage scatter/gather memory
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++#define DEBUG_SCATTER 0
++
++static inline void *drm_vmalloc_dma(unsigned long size)
++{
++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
++      return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
++#else
++      return vmalloc_32(size);
++#endif
++}
++
++void drm_sg_cleanup(struct drm_sg_mem *entry)
++{
++      struct page *page;
++      int i;
++
++      for (i = 0; i < entry->pages; i++) {
++              page = entry->pagelist[i];
++              if (page)
++                      ClearPageReserved(page);
++      }
++
++      vfree(entry->virtual);
++
++      drm_free(entry->busaddr,
++               entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
++      drm_free(entry->pagelist,
++               entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
++      drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++}
++EXPORT_SYMBOL(drm_sg_cleanup);
++
++#ifdef _LP64
++# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
++#else
++# define ScatterHandle(x) (unsigned int)(x)
++#endif
++
++int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
++{
++      struct drm_sg_mem *entry;
++      unsigned long pages, i, j;
++
++      DRM_DEBUG("\n");
++
++      if (!drm_core_check_feature(dev, DRIVER_SG))
++              return -EINVAL;
++
++      if (dev->sg)
++              return -EINVAL;
++
++      entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
++      if (!entry)
++              return -ENOMEM;
++
++      memset(entry, 0, sizeof(*entry));
++      pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
++      DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
++
++      entry->pages = pages;
++      entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
++                                  DRM_MEM_PAGES);
++      if (!entry->pagelist) {
++              drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++              return -ENOMEM;
++      }
++
++      memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
++
++      entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
++                                 DRM_MEM_PAGES);
++      if (!entry->busaddr) {
++              drm_free(entry->pagelist,
++                       entry->pages * sizeof(*entry->pagelist),
++                       DRM_MEM_PAGES);
++              drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++              return -ENOMEM;
++      }
++      memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
++
++      entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
++      if (!entry->virtual) {
++              drm_free(entry->busaddr,
++                       entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
++              drm_free(entry->pagelist,
++                       entry->pages * sizeof(*entry->pagelist),
++                       DRM_MEM_PAGES);
++              drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++              return -ENOMEM;
++      }
++
++      /* This also forces the mapping of COW pages, so our page list
++       * will be valid.  Please don't remove it...
++       */
++      memset(entry->virtual, 0, pages << PAGE_SHIFT);
++
++      entry->handle = ScatterHandle((unsigned long)entry->virtual);
++
++      DRM_DEBUG("handle  = %08lx\n", entry->handle);
++      DRM_DEBUG("virtual = %p\n", entry->virtual);
++
++      for (i = (unsigned long)entry->virtual, j = 0; j < pages;
++           i += PAGE_SIZE, j++) {
++              entry->pagelist[j] = vmalloc_to_page((void *)i);
++              if (!entry->pagelist[j])
++                      goto failed;
++              SetPageReserved(entry->pagelist[j]);
++      }
++
++      request->handle = entry->handle;
++
++      dev->sg = entry;
++
++#if DEBUG_SCATTER
++      /* Verify that each page points to its virtual address, and vice
++       * versa.
++       */
++      {
++              int error = 0;
++
++              for (i = 0; i < pages; i++) {
++                      unsigned long *tmp;
++
++                      tmp = page_address(entry->pagelist[i]);
++                      for (j = 0;
++                           j < PAGE_SIZE / sizeof(unsigned long);
++                           j++, tmp++) {
++                              *tmp = 0xcafebabe;
++                      }
++                      tmp = (unsigned long *)((u8 *) entry->virtual +
++                                              (PAGE_SIZE * i));
++                      for (j = 0;
++                           j < PAGE_SIZE / sizeof(unsigned long);
++                           j++, tmp++) {
++                              if (*tmp != 0xcafebabe && error == 0) {
++                                      error = 1;
++                                      DRM_ERROR("Scatter allocation error, "
++                                                "pagelist does not match "
++                                                "virtual mapping\n");
++                              }
++                      }
++                      tmp = page_address(entry->pagelist[i]);
++                      for (j = 0;
++                           j < PAGE_SIZE / sizeof(unsigned long);
++                           j++, tmp++) {
++                              *tmp = 0;
++                      }
++              }
++              if (error == 0)
++                      DRM_ERROR("Scatter allocation matches pagelist\n");
++      }
++#endif
++
++      return 0;
++
++      failed:
++      drm_sg_cleanup(entry);
++      return -ENOMEM;
++
++}
++EXPORT_SYMBOL(drm_sg_alloc);
++
++int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_scatter_gather *request = data;
++
++      return drm_sg_alloc(dev, request);
++
++}
++
++int drm_sg_free(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_scatter_gather *request = data;
++      struct drm_sg_mem *entry;
++
++      if (!drm_core_check_feature(dev, DRIVER_SG))
++              return -EINVAL;
++
++      entry = dev->sg;
++      dev->sg = NULL;
++
++      if (!entry || entry->handle != request->handle)
++              return -EINVAL;
++
++      DRM_DEBUG("virtual  = %p\n", entry->virtual);
++
++      drm_sg_cleanup(entry);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sman.c git-nokia/drivers/gpu/drm-tungsten/drm_sman.c
+--- git/drivers/gpu/drm-tungsten/drm_sman.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sman.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,353 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple memory manager interface that keeps track on allocate regions on a
++ * per "owner" basis. All regions associated with an "owner" can be released
++ * with a simple call. Typically if the "owner" exists. The owner is any
++ * "unsigned long" identifier. Can typically be a pointer to a file private
++ * struct or a context identifier.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drm_sman.h"
++
++struct drm_owner_item {
++      struct drm_hash_item owner_hash;
++      struct list_head sman_list;
++      struct list_head mem_blocks;
++};
++
++void drm_sman_takedown(struct drm_sman * sman)
++{
++      drm_ht_remove(&sman->user_hash_tab);
++      drm_ht_remove(&sman->owner_hash_tab);
++      if (sman->mm)
++              drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
++                       DRM_MEM_MM);
++}
++
++EXPORT_SYMBOL(drm_sman_takedown);
++
++int
++drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
++            unsigned int user_order, unsigned int owner_order)
++{
++      int ret = 0;
++
++      sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
++                                              DRM_MEM_MM);
++      if (!sman->mm) {
++              ret = -ENOMEM;
++              goto out;
++      }
++      sman->num_managers = num_managers;
++      INIT_LIST_HEAD(&sman->owner_items);
++      ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
++      if (ret)
++              goto out1;
++      ret = drm_ht_create(&sman->user_hash_tab, user_order);
++      if (!ret)
++              goto out;
++
++      drm_ht_remove(&sman->owner_hash_tab);
++out1:
++      drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
++out:
++      return ret;
++}
++
++EXPORT_SYMBOL(drm_sman_init);
++
++static void *drm_sman_mm_allocate(void *private, unsigned long size,
++                                unsigned alignment)
++{
++      struct drm_mm *mm = (struct drm_mm *) private;
++      struct drm_mm_node *tmp;
++
++      tmp = drm_mm_search_free(mm, size, alignment, 1);
++      if (!tmp) {
++              return NULL;
++      }
++      tmp = drm_mm_get_block(tmp, size, alignment);
++      return tmp;
++}
++
++static void drm_sman_mm_free(void *private, void *ref)
++{
++      struct drm_mm_node *node = (struct drm_mm_node *) ref;
++
++      drm_mm_put_block(node);
++}
++
++static void drm_sman_mm_destroy(void *private)
++{
++      struct drm_mm *mm = (struct drm_mm *) private;
++      drm_mm_takedown(mm);
++      drm_free(mm, sizeof(*mm), DRM_MEM_MM);
++}
++
++static unsigned long drm_sman_mm_offset(void *private, void *ref)
++{
++      struct drm_mm_node *node = (struct drm_mm_node *) ref;
++      return node->start;
++}
++
++int
++drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
++                 unsigned long start, unsigned long size)
++{
++      struct drm_sman_mm *sman_mm;
++      struct drm_mm *mm;
++      int ret;
++
++      BUG_ON(manager >= sman->num_managers);
++
++      sman_mm = &sman->mm[manager];
++      mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
++      if (!mm) {
++              return -ENOMEM;
++      }
++      sman_mm->private = mm;
++      ret = drm_mm_init(mm, start, size);
++
++      if (ret) {
++              drm_free(mm, sizeof(*mm), DRM_MEM_MM);
++              return ret;
++      }
++
++      sman_mm->allocate = drm_sman_mm_allocate;
++      sman_mm->free = drm_sman_mm_free;
++      sman_mm->destroy = drm_sman_mm_destroy;
++      sman_mm->offset = drm_sman_mm_offset;
++
++      return 0;
++}
++
++EXPORT_SYMBOL(drm_sman_set_range);
++
++int
++drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
++                   struct drm_sman_mm * allocator)
++{
++      BUG_ON(manager >= sman->num_managers);
++      sman->mm[manager] = *allocator;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_sman_set_manager);
++
++static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
++                                               unsigned long owner)
++{
++      int ret;
++      struct drm_hash_item *owner_hash_item;
++      struct drm_owner_item *owner_item;
++
++      ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
++      if (!ret) {
++              return drm_hash_entry(owner_hash_item, struct drm_owner_item,
++                                    owner_hash);
++      }
++
++      owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
++      if (!owner_item)
++              goto out;
++
++      INIT_LIST_HEAD(&owner_item->mem_blocks);
++      owner_item->owner_hash.key = owner;
++      if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
++              goto out1;
++
++      list_add_tail(&owner_item->sman_list, &sman->owner_items);
++      return owner_item;
++
++out1:
++      drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
++out:
++      return NULL;
++}
++
++struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
++                                  unsigned long size, unsigned alignment,
++                                  unsigned long owner)
++{
++      void *tmp;
++      struct drm_sman_mm *sman_mm;
++      struct drm_owner_item *owner_item;
++      struct drm_memblock_item *memblock;
++
++      BUG_ON(manager >= sman->num_managers);
++
++      sman_mm = &sman->mm[manager];
++      tmp = sman_mm->allocate(sman_mm->private, size, alignment);
++
++      if (!tmp) {
++              return NULL;
++      }
++
++      memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);
++
++      if (!memblock)
++              goto out;
++
++      memblock->mm_info = tmp;
++      memblock->mm = sman_mm;
++      memblock->sman = sman;
++
++      if (drm_ht_just_insert_please
++          (&sman->user_hash_tab, &memblock->user_hash,
++           (unsigned long)memblock, 32, 0, 0))
++              goto out1;
++
++      owner_item = drm_sman_get_owner_item(sman, owner);
++      if (!owner_item)
++              goto out2;
++
++      list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
++
++      return memblock;
++
++out2:
++      drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
++out1:
++      drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
++out:
++      sman_mm->free(sman_mm->private, tmp);
++
++      return NULL;
++}
++
++EXPORT_SYMBOL(drm_sman_alloc);
++
++static void drm_sman_free(struct drm_memblock_item *item)
++{
++      struct drm_sman *sman = item->sman;
++
++      list_del(&item->owner_list);
++      drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
++      item->mm->free(item->mm->private, item->mm_info);
++      drm_free(item, sizeof(*item), DRM_MEM_MM);
++}
++
++int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
++{
++      struct drm_hash_item *hash_item;
++      struct drm_memblock_item *memblock_item;
++
++      if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
++              return -EINVAL;
++
++      memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
++                                     user_hash);
++      drm_sman_free(memblock_item);
++      return 0;
++}
++
++EXPORT_SYMBOL(drm_sman_free_key);
++
++static void drm_sman_remove_owner(struct drm_sman *sman,
++                                struct drm_owner_item *owner_item)
++{
++      list_del(&owner_item->sman_list);
++      drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
++      drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
++}
++
++int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
++{
++
++      struct drm_hash_item *hash_item;
++      struct drm_owner_item *owner_item;
++
++      if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
++              return -1;
++      }
++
++      owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
++      if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
++              drm_sman_remove_owner(sman, owner_item);
++              return -1;
++      }
++
++      return 0;
++}
++
++EXPORT_SYMBOL(drm_sman_owner_clean);
++
++static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
++                                    struct drm_owner_item *owner_item)
++{
++      struct drm_memblock_item *entry, *next;
++
++      list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
++                               owner_list) {
++              drm_sman_free(entry);
++      }
++      drm_sman_remove_owner(sman, owner_item);
++}
++
++void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
++{
++
++      struct drm_hash_item *hash_item;
++      struct drm_owner_item *owner_item;
++
++      if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
++
++              return;
++      }
++
++      owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
++      drm_sman_do_owner_cleanup(sman, owner_item);
++}
++
++EXPORT_SYMBOL(drm_sman_owner_cleanup);
++
++void drm_sman_cleanup(struct drm_sman *sman)
++{
++      struct drm_owner_item *entry, *next;
++      unsigned int i;
++      struct drm_sman_mm *sman_mm;
++
++      list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
++              drm_sman_do_owner_cleanup(sman, entry);
++      }
++      if (sman->mm) {
++              for (i = 0; i < sman->num_managers; ++i) {
++                      sman_mm = &sman->mm[i];
++                      if (sman_mm->private) {
++                              sman_mm->destroy(sman_mm->private);
++                              sman_mm->private = NULL;
++                      }
++              }
++      }
++}
++
++EXPORT_SYMBOL(drm_sman_cleanup);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sman.h git-nokia/drivers/gpu/drm-tungsten/drm_sman.h
+--- git/drivers/gpu/drm-tungsten/drm_sman.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sman.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,176 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple memory MANager interface that keeps track on allocate regions on a
++ * per "owner" basis. All regions associated with an "owner" can be released
++ * with a simple call. Typically if the "owner" exists. The owner is any
++ * "unsigned long" identifier. Can typically be a pointer to a file private
++ * struct or a context identifier.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef DRM_SMAN_H
++#define DRM_SMAN_H
++
++#include "drmP.h"
++#include "drm_hashtab.h"
++
++/*
++ * A class that is an abstration of a simple memory allocator.
++ * The sman implementation provides a default such allocator
++ * using the drm_mm.c implementation. But the user can replace it.
++ * See the SiS implementation, which may use the SiS FB kernel module
++ * for memory management.
++ */
++
++struct drm_sman_mm {
++      /* private info. If allocated, needs to be destroyed by the destroy
++         function */
++      void *private;
++
++      /* Allocate a memory block with given size and alignment.
++         Return an opaque reference to the memory block */
++
++      void *(*allocate) (void *private, unsigned long size,
++                         unsigned alignment);
++
++      /* Free a memory block. "ref" is the opaque reference that we got from
++         the "alloc" function */
++
++      void (*free) (void *private, void *ref);
++
++      /* Free all resources associated with this allocator */
++
++      void (*destroy) (void *private);
++
++      /* Return a memory offset from the opaque reference returned from the
++         "alloc" function */
++
++      unsigned long (*offset) (void *private, void *ref);
++};
++
++struct drm_memblock_item {
++      struct list_head owner_list;
++      struct drm_hash_item user_hash;
++      void *mm_info;
++      struct drm_sman_mm *mm;
++      struct drm_sman *sman;
++};
++
++struct drm_sman {
++      struct drm_sman_mm *mm;
++      int num_managers;
++      struct drm_open_hash owner_hash_tab;
++      struct drm_open_hash user_hash_tab;
++      struct list_head owner_items;
++};
++
++/*
++ * Take down a memory manager. This function should only be called after a
++ * successful init and after a call to drm_sman_cleanup.
++ */
++
++extern void drm_sman_takedown(struct drm_sman * sman);
++
++/*
++ * Allocate structures for a manager.
++ * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
++ * user_order is the log2 of the number of buckets in the user hash table.
++ *        set this to approximately log2 of the max number of memory regions
++ *        that will be allocated for _all_ pools together.
++ * owner_order is the log2 of the number of buckets in the owner hash table.
++ *        set this to approximately log2 of
++ *        the number of client file connections that will
++ *        be using the manager.
++ *
++ */
++
++extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
++                       unsigned int user_order, unsigned int owner_order);
++
++/*
++ * Initialize a drm_mm.c allocator. Should be called only once for each
++ * manager unless a customized allogator is used.
++ */
++
++extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
++                            unsigned long start, unsigned long size);
++
++/*
++ * Initialize a customized allocator for one of the managers.
++ * (See the SiS module). The object pointed to by "allocator" is copied,
++ * so it can be destroyed after this call.
++ */
++
++extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
++                              struct drm_sman_mm * allocator);
++
++/*
++ * Allocate a memory block. Aligment is not implemented yet.
++ */
++
++extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
++                                              unsigned int manager,
++                                              unsigned long size,
++                                              unsigned alignment,
++                                              unsigned long owner);
++/*
++ * Free a memory block identified by its user hash key.
++ */
++
++extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
++
++/*
++ * returns 1 iff there are no stale memory blocks associated with this owner.
++ * Typically called to determine if we need to idle the hardware and call
++ * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
++ * resources associated with owner.
++ */
++
++extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
++
++/*
++ * Frees all stale memory blocks associated with this owner. Note that this
++ * requires that the hardware is finished with all blocks, so the graphics engine
++ * should be idled before this call is made. This function also frees
++ * any resources associated with "owner" and should be called when owner
++ * is not going to be referenced anymore.
++ */
++
++extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
++
++/*
++ * Frees all stale memory blocks associated with the memory manager.
++ * See idling above.
++ */
++
++extern void drm_sman_cleanup(struct drm_sman * sman);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_stub.c git-nokia/drivers/gpu/drm-tungsten/drm_stub.c
+--- git/drivers/gpu/drm-tungsten/drm_stub.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_stub.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,400 @@
++/**
++ * \file drm_stub.c
++ * Stub support
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ */
++
++/*
++ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
++ *
++ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++#include "drmP.h"
++#include "drm_core.h"
++
++unsigned int drm_debug = 0;           /* 1 to enable debug output */
++EXPORT_SYMBOL(drm_debug);
++
++MODULE_AUTHOR(CORE_AUTHOR);
++MODULE_DESCRIPTION(CORE_DESC);
++MODULE_LICENSE("GPL and additional rights");
++MODULE_PARM_DESC(debug, "Enable debug output");
++
++module_param_named(debug, drm_debug, int, 0600);
++
++struct idr drm_minors_idr;
++
++struct class *drm_class;
++struct proc_dir_entry *drm_proc_root;
++
++static int drm_minor_get_id(struct drm_device *dev, int type)
++{
++      int new_id;
++      int ret;
++      int base = 0, limit = 63;
++
++again:
++      if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
++              DRM_ERROR("Out of memory expanding drawable idr\n");
++              return -ENOMEM;
++      }
++      mutex_lock(&dev->struct_mutex);
++      ret = idr_get_new_above(&drm_minors_idr, NULL,
++                              base, &new_id);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret == -EAGAIN) {
++              goto again;
++      } else if (ret) {
++              return ret;
++      }
++
++      if (new_id >= limit) {
++              idr_remove(&drm_minors_idr, new_id);
++              return -EINVAL;
++      }
++      return new_id;
++}
++
++static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
++                         const struct pci_device_id *ent,
++                         struct drm_driver *driver)
++{
++      int retcode;
++
++      INIT_LIST_HEAD(&dev->filelist);
++      INIT_LIST_HEAD(&dev->ctxlist);
++      INIT_LIST_HEAD(&dev->vmalist);
++      INIT_LIST_HEAD(&dev->maplist);
++
++      spin_lock_init(&dev->count_lock);
++      spin_lock_init(&dev->drw_lock);
++      spin_lock_init(&dev->tasklet_lock);
++      spin_lock_init(&dev->lock.spinlock);
++      init_timer(&dev->timer);
++      mutex_init(&dev->struct_mutex);
++      mutex_init(&dev->ctxlist_mutex);
++      mutex_init(&dev->bm.evict_mutex);
++
++      idr_init(&dev->drw_idr);
++
++      dev->pdev = pdev;
++
++      if (pdev) {
++              dev->pci_device = pdev->device;
++              dev->pci_vendor = pdev->vendor;
++
++#ifdef __alpha__
++              dev->hose = pdev->sysdata;
++#endif
++
++              dev->irq = pdev->irq;
++      }
++
++      dev->irq_enabled = 0;
++
++      if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
++              return -ENOMEM;
++      }
++      if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
++                      DRM_FILE_PAGE_OFFSET_SIZE)) {
++              drm_ht_remove(&dev->map_hash);
++              return -ENOMEM;
++      }
++
++      if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
++              drm_ht_remove(&dev->map_hash);
++              drm_mm_takedown(&dev->offset_manager);
++              return -ENOMEM;
++      }
++
++      /* the DRM has 6 counters */
++      dev->counters = 6;
++      dev->types[0] = _DRM_STAT_LOCK;
++      dev->types[1] = _DRM_STAT_OPENS;
++      dev->types[2] = _DRM_STAT_CLOSES;
++      dev->types[3] = _DRM_STAT_IOCTLS;
++      dev->types[4] = _DRM_STAT_LOCKS;
++      dev->types[5] = _DRM_STAT_UNLOCKS;
++
++      dev->driver = driver;
++
++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
++      if (drm_core_has_AGP(dev)) {
++              if (drm_device_is_agp(dev))
++                      dev->agp = drm_agp_init(dev);
++              if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
++                  && (dev->agp == NULL)) {
++                      DRM_ERROR("Cannot initialize the agpgart module.\n");
++                      retcode = -EINVAL;
++                      goto error_out_unreg;
++              }
++
++              if (drm_core_has_MTRR(dev)) {
++                      if (dev->agp)
++                              dev->agp->agp_mtrr =
++                                  mtrr_add(dev->agp->agp_info.aper_base,
++                                           dev->agp->agp_info.aper_size *
++                                           1024 * 1024, MTRR_TYPE_WRCOMB, 1);
++              }
++      }
++#endif
++
++      retcode = drm_ctxbitmap_init(dev);
++      if (retcode) {
++              DRM_ERROR("Cannot allocate memory for context bitmap.\n");
++              goto error_out_unreg;
++      }
++
++      if (driver->driver_features & DRIVER_GEM) {
++              retcode = drm_gem_init (dev);
++              if (retcode) {
++                      DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
++                      goto error_out_unreg;
++              }
++      }
++
++      drm_fence_manager_init(dev);
++
++      return 0;
++
++error_out_unreg:
++      drm_lastclose(dev);
++      return retcode;
++}
++
++/**
++ * Get a secondary minor number.
++ *
++ * \param dev device data structure
++ * \param sec-minor structure to hold the assigned minor
++ * \return negative number on failure.
++ *
++ * Search an empty entry and initialize it to the given parameters, and
++ * create the proc init entry via proc_init(). This routines assigns
++ * minor numbers to secondary heads of multi-headed cards
++ */
++static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
++{
++      struct drm_minor *new_minor;
++      int ret;
++      int minor_id;
++
++      DRM_DEBUG("\n");
++
++      minor_id = drm_minor_get_id(dev, type);
++      if (minor_id < 0)
++              return minor_id;
++
++      new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
++      if (!new_minor) {
++              ret = -ENOMEM;
++              goto err_idr;
++      }
++
++      new_minor->type = type;
++      new_minor->device = MKDEV(DRM_MAJOR, minor_id);
++      new_minor->dev = dev;
++      new_minor->index = minor_id;
++
++      idr_replace(&drm_minors_idr, new_minor, minor_id);
++      
++      if (type == DRM_MINOR_LEGACY) {
++              ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
++              if (ret) {
++                      DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
++                      goto err_mem;
++              }
++              if (dev->driver->proc_init) {
++                      ret = dev->driver->proc_init(new_minor);
++                      if (ret) {
++                              DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
++                              goto err_mem;
++                      }
++              }
++      } else
++              new_minor->dev_root = NULL;
++
++      ret = drm_sysfs_device_add(new_minor);
++      if (ret) {
++              printk(KERN_ERR
++                     "DRM: Error sysfs_device_add.\n");
++              goto err_g2;
++      }
++      *minor = new_minor;
++      
++      DRM_DEBUG("new minor assigned %d\n", minor_id);
++      return 0;
++
++
++err_g2:
++      if (new_minor->type == DRM_MINOR_LEGACY) {
++              if (dev->driver->proc_cleanup)
++                      dev->driver->proc_cleanup(new_minor);
++              drm_proc_cleanup(new_minor, drm_proc_root);
++      }
++err_mem:
++      kfree(new_minor);
++err_idr:
++      idr_remove(&drm_minors_idr, minor_id);
++      *minor = NULL;
++      return ret;
++}
++
++/**
++ * Register.
++ *
++ * \param pdev - PCI device structure
++ * \param ent entry from the PCI ID table with device type flags
++ * \return zero on success or a negative number on failure.
++ *
++ * Attempt to gets inter module "drm" information. If we are first
++ * then register the character device and inter module information.
++ * Try and register, if we fail to register, backout previous work.
++ */
++int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
++              struct drm_driver *driver)
++{
++      struct drm_device *dev;
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
++      if (!dev)
++              return -ENOMEM;
++
++#ifdef CONFIG_PCI
++      if (!drm_fb_loaded) {
++              pci_set_drvdata(pdev, dev);
++              ret = pci_request_regions(pdev, driver->pci_driver.name);
++              if (ret)
++                      goto err_g1;
++      }
++
++      ret = pci_enable_device(pdev);
++      if (ret)
++              goto err_g2;
++      pci_set_master(pdev);
++#endif
++
++      if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
++              printk(KERN_ERR "DRM: fill_in_dev failed\n");
++              goto err_g3;
++      }
++
++      /* only add the control node on a modesetting platform */
++      if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
++              goto err_g3;
++
++      if (dev->driver->load)
++              if ((ret = dev->driver->load(dev, ent ? ent->driver_data : 0)))
++                      goto err_g4;
++
++      DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
++               driver->name, driver->major, driver->minor, driver->patchlevel,
++               driver->date, dev->primary->index);
++
++      return 0;
++err_g4:
++      drm_put_minor(dev);
++err_g3:
++#ifdef CONFIG_PCI
++      if (!drm_fb_loaded)
++              pci_disable_device(pdev);
++err_g2:
++      if (!drm_fb_loaded)
++              pci_release_regions(pdev);
++err_g1:
++      if (!drm_fb_loaded)
++              pci_set_drvdata(pdev, NULL);
++#endif
++
++      drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
++      printk(KERN_ERR "DRM: drm_get_dev failed.\n");
++      return ret;
++}
++EXPORT_SYMBOL(drm_get_dev);
++
++
++/**
++ * Put a device minor number.
++ *
++ * \param dev device data structure
++ * \return always zero
++ *
++ * Cleans up the proc resources. If it is the last minor then release the foreign
++ * "drm" data, otherwise unregisters the "drm" data, frees the dev list and
++ * unregisters the character device.
++ */
++int drm_put_dev(struct drm_device * dev)
++{
++      DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
++
++      if (dev->unique) {
++              drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++              dev->unique = NULL;
++              dev->unique_len = 0;
++      }
++      if (dev->devname) {
++              drm_free(dev->devname, strlen(dev->devname) + 1,
++                       DRM_MEM_DRIVER);
++              dev->devname = NULL;
++      }
++      drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
++      return 0;
++}
++
++/**
++ * Put a secondary minor number.
++ *
++ * \param sec_minor - structure to be released
++ * \return always zero
++ *
++ * Cleans up the proc resources. Not legal for this to be the
++ * last minor released.
++ *
++ */
++int drm_put_minor(struct drm_device *dev)
++{
++      struct drm_minor **minor_p = &dev->primary;
++      struct drm_minor *minor = *minor_p;
++      DRM_DEBUG("release secondary minor %d\n", minor->index);
++
++      if (minor->type == DRM_MINOR_LEGACY) {
++              if (dev->driver->proc_cleanup)
++                      dev->driver->proc_cleanup(minor);
++              drm_proc_cleanup(minor, drm_proc_root);
++      }
++      drm_sysfs_device_remove(minor);
++
++      idr_remove(&drm_minors_idr, minor->index);
++
++      kfree(minor);
++      *minor_p = NULL;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sysfs.c git-nokia/drivers/gpu/drm-tungsten/drm_sysfs.c
+--- git/drivers/gpu/drm-tungsten/drm_sysfs.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sysfs.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,212 @@
++
++/*
++ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
++ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
++ *               does not allow adding attributes.
++ *
++ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
++ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
++ * Copyright (c) 2003-2004 IBM Corp.
++ *
++ * This file is released under the GPLv2
++ *
++ */
++
++#include <linux/device.h>
++#include <linux/kdev_t.h>
++#include <linux/err.h>
++
++#include "drm_core.h"
++#include "drmP.h"
++
++#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
++
++/**
++ * drm_sysfs_suspend - DRM class suspend hook
++ * @dev: Linux device to suspend
++ * @state: power state to enter
++ *
++ * Just figures out what the actual struct drm_device associated with
++ * @dev is and calls its suspend hook, if present.
++ */
++static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
++{
++      struct drm_minor *drm_minor = to_drm_minor(dev);
++      struct drm_device *drm_dev = drm_minor->dev;
++
++      printk(KERN_ERR "%s\n", __FUNCTION__);
++
++      if (drm_dev->driver->suspend)
++              return drm_dev->driver->suspend(drm_dev, state);
++
++      return 0;
++}
++
++/**
++ * drm_sysfs_resume - DRM class resume hook
++ * @dev: Linux device to resume
++ *
++ * Just figures out what the actual struct drm_device associated with
++ * @dev is and calls its resume hook, if present.
++ */
++static int drm_sysfs_resume(struct device *dev)
++{
++      struct drm_minor *drm_minor = to_drm_minor(dev);
++      struct drm_device *drm_dev = drm_minor->dev;
++
++      if (drm_dev->driver->resume)
++              return drm_dev->driver->resume(drm_dev);
++
++      return 0;
++}
++
++/* Display the version of drm_core. This doesn't work right in current design */
++static ssize_t version_show(struct class *dev, char *buf)
++{
++      return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
++                     CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++}
++
++static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
++
++/**
++ * drm_sysfs_create - create a struct drm_sysfs_class structure
++ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
++ * @name: pointer to a string for the name of this class.
++ *
++ * This is used to create DRM class pointer that can then be used
++ * in calls to drm_sysfs_device_add().
++ *
++ * Note, the pointer created here is to be destroyed when finished by making a
++ * call to drm_sysfs_destroy().
++ */
++struct class *drm_sysfs_create(struct module *owner, char *name)
++{
++      struct class *class;
++      int err;
++
++      class = class_create(owner, name);
++      if (IS_ERR(class)) {
++              err = PTR_ERR(class);
++              goto err_out;
++      }
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++      class->suspend = drm_sysfs_suspend;
++      class->resume = drm_sysfs_resume;
++#endif
++
++      err = class_create_file(class, &class_attr_version);
++      if (err)
++              goto err_out_class;
++
++      return class;
++
++err_out_class:
++      class_destroy(class);
++err_out:
++      return ERR_PTR(err);
++}
++
++/**
++ * drm_sysfs_destroy - destroys DRM class
++ *
++ * Destroy the DRM device class.
++ */
++void drm_sysfs_destroy(void)
++{
++      if ((drm_class == NULL) || (IS_ERR(drm_class)))
++              return;
++      class_remove_file(drm_class, &class_attr_version);
++      class_destroy(drm_class);
++}
++
++static ssize_t show_dri(struct device *device, struct device_attribute *attr,
++                      char *buf)
++{
++      struct drm_minor *drm_minor = to_drm_minor(device);
++      struct drm_device *drm_dev = drm_minor->dev;
++      if (drm_dev->driver->dri_library_name)
++              return drm_dev->driver->dri_library_name(drm_dev, buf);
++      return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
++}
++
++static struct device_attribute device_attrs[] = {
++      __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
++};
++
++/**
++ * drm_sysfs_device_release - do nothing
++ * @dev: Linux device
++ *
++ * Normally, this would free the DRM device associated with @dev, along
++ * with cleaning up any other stuff.  But we do that in the DRM core, so
++ * this function can just return and hope that the core does its job.
++ */
++static void drm_sysfs_device_release(struct device *dev)
++{
++      return;
++}
++
++/**
++ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
++ * @dev: DRM device to be added
++ * @head: DRM head in question
++ *
++ * Add a DRM device to the DRM's device model class.  We use @dev's PCI device
++ * as the parent for the Linux device, and make sure it has a file containing
++ * the driver we're using (for userspace compatibility).
++ */
++int drm_sysfs_device_add(struct drm_minor *minor)
++{
++      int err;
++      int i, j;
++      char *minor_str;
++
++      minor->kdev.parent = minor->dev->pdev ? &minor->dev->pdev->dev : NULL;
++      minor->kdev.class = drm_class;
++      minor->kdev.release = drm_sysfs_device_release;
++      minor->kdev.devt = minor->device;
++      minor_str = "card%d";
++      
++      snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
++
++      err = device_register(&minor->kdev);
++      if (err) {
++              DRM_ERROR("device add failed: %d\n", err);
++              goto err_out;
++      }
++
++      for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
++              err = device_create_file(&minor->kdev, &device_attrs[i]);
++              if (err)
++                      goto err_out_files;
++      }
++
++      return 0;
++
++err_out_files:
++      if (i > 0)
++              for (j = 0; j < i; j++)
++                      device_remove_file(&minor->kdev, &device_attrs[j]);
++      device_unregister(&minor->kdev);
++err_out:
++
++      return err;
++}
++
++/**
++ * drm_sysfs_device_remove - remove DRM device
++ * @dev: DRM device to remove
++ *
++ * This call unregisters and cleans up a class device that was created with a
++ * call to drm_sysfs_device_add()
++ */
++void drm_sysfs_device_remove(struct drm_minor *minor)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
++              device_remove_file(&minor->kdev, &device_attrs[i]);
++      device_unregister(&minor->kdev);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_ttm.c git-nokia/drivers/gpu/drm-tungsten/drm_ttm.c
+--- git/drivers/gpu/drm-tungsten/drm_ttm.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_ttm.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,524 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++static void drm_clflush_page(struct page *page)
++{
++      uint8_t *page_virtual;
++      unsigned int i;
++
++      if (unlikely(page == NULL))
++              return;
++
++      page_virtual = kmap_atomic(page, KM_USER0);
++
++      for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++              clflush(page_virtual + i);
++
++      kunmap_atomic(page_virtual, KM_USER0);
++}
++
++static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
++{
++      unsigned long i;
++
++      mb();
++      for (i=0; i < num_pages; ++i)
++              drm_clflush_page(*pages++);
++      mb();
++}
++#endif
++
++static void drm_ttm_ipi_handler(void *null)
++{
++#ifdef CONFIG_AGP
++      flush_agp_cache();
++#endif
++}
++
++void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++      if (cpu_has_clflush) {
++              drm_ttm_cache_flush_clflush(pages, num_pages);
++              return;
++      }
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++      if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1))
++#else
++      if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
++#endif
++              DRM_ERROR("Timed out waiting for drm cache flush.\n");
++}
++EXPORT_SYMBOL(drm_ttm_cache_flush);
++
++/**
++ * Allocates storage for pointers to the pages that back the ttm.
++ *
++ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
++ */
++static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm)
++{
++      unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++      ttm->pages = NULL;
++
++      if (drm_alloc_memctl(size))
++              return;
++
++      if (size <= PAGE_SIZE)
++              ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
++
++      if (!ttm->pages) {
++              ttm->pages = vmalloc_user(size);
++              if (ttm->pages)
++                      ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC;
++      }
++      if (!ttm->pages)
++              drm_free_memctl(size);
++}
++
++static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
++{
++      unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++
++      if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) {
++              vfree(ttm->pages);
++              ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC;
++      } else {
++              drm_free(ttm->pages, size, DRM_MEM_TTM);
++      }
++      drm_free_memctl(size);
++      ttm->pages = NULL;
++}
++
++static struct page *drm_ttm_alloc_page(void)
++{
++      struct page *page;
++
++      if (drm_alloc_memctl(PAGE_SIZE))
++              return NULL;
++
++      page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
++      if (!page) {
++              drm_free_memctl(PAGE_SIZE);
++              return NULL;
++      }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      SetPageReserved(page);
++#endif
++      return page;
++}
++
++/*
++ * Change caching policy for the linear kernel map
++ * for range of pages in a ttm.
++ */
++
++static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
++{
++      int i;
++      struct page **cur_page;
++      int do_tlbflush = 0;
++
++      if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
++              return 0;
++
++      if (noncached)
++              drm_ttm_cache_flush(ttm->pages, ttm->num_pages);
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              cur_page = ttm->pages + i;
++              if (*cur_page) {
++                      if (!PageHighMem(*cur_page)) {
++#ifdef CONFIG_AGP
++                              if (noncached) {
++                                      map_page_into_agp(*cur_page);
++                              } else {
++                                      unmap_page_from_agp(*cur_page);
++                              }
++#endif
++                              do_tlbflush = 1;
++                      }
++              }
++      }
++#ifdef CONFIG_AGP
++      if (do_tlbflush)
++              flush_agp_mappings();
++#endif
++
++      DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
++
++      return 0;
++}
++
++
++static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
++{
++      int write;
++      int dirty;
++      struct page *page;
++      int i;
++
++      BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
++      write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
++      dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              page = ttm->pages[i];
++              if (page == NULL)
++                      continue;
++
++              if (page == ttm->dummy_read_page) {
++                      BUG_ON(write);
++                      continue;
++              }
++
++              if (write && dirty && !PageReserved(page))
++                      set_page_dirty_lock(page);
++
++              ttm->pages[i] = NULL;
++              put_page(page);
++      }
++}
++
++static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
++{
++      int i;
++      struct drm_buffer_manager *bm = &ttm->dev->bm;
++      struct page **cur_page;
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              cur_page = ttm->pages + i;
++              if (*cur_page) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++                      ClearPageReserved(*cur_page);
++#endif
++                      if (page_count(*cur_page) != 1)
++                              DRM_ERROR("Erroneous page count. Leaking pages.\n");
++                      if (page_mapped(*cur_page))
++                              DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
++                      __free_page(*cur_page);
++                      drm_free_memctl(PAGE_SIZE);
++                      --bm->cur_pages;
++              }
++      }
++}
++
++/*
++ * Free all resources associated with a ttm.
++ */
++
++int drm_ttm_destroy(struct drm_ttm *ttm)
++{
++      struct drm_ttm_backend *be;
++
++      if (!ttm)
++              return 0;
++
++      be = ttm->be;
++      if (be) {
++              be->func->destroy(be);
++              ttm->be = NULL;
++      }
++
++      if (ttm->pages) {
++              if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
++                      drm_ttm_set_caching(ttm, 0);
++
++              if (ttm->page_flags & DRM_TTM_PAGE_USER)
++                      drm_ttm_free_user_pages(ttm);
++              else
++                      drm_ttm_free_alloced_pages(ttm);
++
++              drm_ttm_free_page_directory(ttm);
++      }
++
++      drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
++      return 0;
++}
++
++struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
++{
++      struct page *p;
++      struct drm_buffer_manager *bm = &ttm->dev->bm;
++
++      while(NULL == (p = ttm->pages[index])) {
++              p = drm_ttm_alloc_page();
++              if (!p)
++                      return NULL;
++
++              if (PageHighMem(p))
++                      ttm->pages[--ttm->first_himem_page] = p;
++              else
++                      ttm->pages[++ttm->last_lomem_page] = p;
++
++              ++bm->cur_pages;
++      }
++      return p;
++}
++EXPORT_SYMBOL(drm_ttm_get_page);
++
++/**
++ * drm_ttm_set_user:
++ *
++ * @ttm: the ttm to map pages to. This must always be
++ * a freshly created ttm.
++ *
++ * @tsk: a pointer to the address space from which to map
++ * pages.
++ * 
++ * @write: a boolean indicating that write access is desired
++ *
++ * start: the starting address
++ *
++ * Map a range of user addresses to a new ttm object. This
++ * provides access to user memory from the graphics device.
++ */
++int drm_ttm_set_user(struct drm_ttm *ttm,
++                   struct task_struct *tsk,
++                   unsigned long start,
++                   unsigned long num_pages)
++{
++      struct mm_struct *mm = tsk->mm;
++      int ret;
++      int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
++
++      BUG_ON(num_pages != ttm->num_pages);
++      BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);
++
++      down_read(&mm->mmap_sem);
++      ret = get_user_pages(tsk, mm, start, num_pages,
++                           write, 0, ttm->pages, NULL);
++      up_read(&mm->mmap_sem);
++
++      if (ret != num_pages && write) {
++              drm_ttm_free_user_pages(ttm);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++/**
++ * drm_ttm_populate:
++ *
++ * @ttm: the object to allocate pages for
++ *
++ * Allocate pages for all unset page entries, then
++ * call the backend to create the hardware mappings
++ */
++int drm_ttm_populate(struct drm_ttm *ttm)
++{
++      struct page *page;
++      unsigned long i;
++      struct drm_ttm_backend *be;
++
++      if (ttm->state != ttm_unpopulated)
++              return 0;
++
++      be = ttm->be;
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              page = drm_ttm_get_page(ttm, i);
++              if (!page)
++                      return -ENOMEM;
++      }
++
++      be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
++      ttm->state = ttm_unbound;
++      return 0;
++}
++
++/**
++ * drm_ttm_create:
++ *
++ * @dev: the drm_device
++ *
++ * @size: The size (in bytes) of the desired object
++ *
++ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h.
++ *
++ * Allocate and initialize a ttm, leaving it unpopulated at this time
++ */
++
++struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
++                             uint32_t page_flags, struct page *dummy_read_page)
++{
++      struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
++      struct drm_ttm *ttm;
++
++      if (!bo_driver)
++              return NULL;
++
++      ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
++      if (!ttm)
++              return NULL;
++
++      ttm->dev = dev;
++      atomic_set(&ttm->vma_count, 0);
++
++      ttm->destroy = 0;
++      ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      ttm->first_himem_page = ttm->num_pages;
++      ttm->last_lomem_page = -1;
++
++      ttm->page_flags = page_flags;
++
++      ttm->dummy_read_page = dummy_read_page;
++
++      /*
++       * Account also for AGP module memory usage.
++       */
++
++      drm_ttm_alloc_page_directory(ttm);
++      if (!ttm->pages) {
++              drm_ttm_destroy(ttm);
++              DRM_ERROR("Failed allocating page table\n");
++              return NULL;
++      }
++      ttm->be = bo_driver->create_ttm_backend_entry(dev);
++      if (!ttm->be) {
++              drm_ttm_destroy(ttm);
++              DRM_ERROR("Failed creating ttm backend entry\n");
++              return NULL;
++      }
++      ttm->state = ttm_unpopulated;
++      return ttm;
++}
++
++/**
++ * drm_ttm_evict:
++ *
++ * @ttm: the object to be unbound from the aperture.
++ *
++ * Transition a ttm from bound to evicted, where it
++ * isn't present in the aperture, but various caches may
++ * not be consistent.
++ */
++void drm_ttm_evict(struct drm_ttm *ttm)
++{
++      struct drm_ttm_backend *be = ttm->be;
++      int ret;
++
++      if (ttm->state == ttm_bound) {
++              ret = be->func->unbind(be);
++              BUG_ON(ret);
++      }
++
++      ttm->state = ttm_evicted;
++}
++
++/**
++ * drm_ttm_fixup_caching:
++ *
++ * @ttm: the object to set unbound
++ *
++ * XXX this function is misnamed. Transition a ttm from evicted to
++ * unbound, flushing caches as appropriate.
++ */
++void drm_ttm_fixup_caching(struct drm_ttm *ttm)
++{
++
++      if (ttm->state == ttm_evicted) {
++              struct drm_ttm_backend *be = ttm->be;
++              if (be->func->needs_ub_cache_adjust(be))
++                      drm_ttm_set_caching(ttm, 0);
++              ttm->state = ttm_unbound;
++      }
++}
++
++/**
++ * drm_ttm_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void drm_ttm_unbind(struct drm_ttm *ttm)
++{
++      if (ttm->state == ttm_bound)
++              drm_ttm_evict(ttm);
++
++      drm_ttm_fixup_caching(ttm);
++}
++
++/**
++ * drm_ttm_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and marks the
++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
++ * modified by the GPU
++ */
++int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
++{
++      struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
++      int ret = 0;
++      struct drm_ttm_backend *be;
++
++      if (!ttm)
++              return -EINVAL;
++      if (ttm->state == ttm_bound)
++              return 0;
++
++      be = ttm->be;
++
++      ret = drm_ttm_populate(ttm);
++      if (ret)
++              return ret;
++
++      if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
++              drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
++      else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
++                 bo_driver->ttm_cache_flush)
++              bo_driver->ttm_cache_flush(ttm);
++
++      ret = be->func->bind(be, bo_mem);
++      if (ret) {
++              ttm->state = ttm_evicted;
++              DRM_ERROR("Couldn't bind backend.\n");
++              return ret;
++      }
++
++      ttm->state = ttm_bound;
++      if (ttm->page_flags & DRM_TTM_PAGE_USER)
++              ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
++      return 0;
++}
++EXPORT_SYMBOL(drm_ttm_bind);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_vm.c git-nokia/drivers/gpu/drm-tungsten/drm_vm.c
+--- git/drivers/gpu/drm-tungsten/drm_vm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_vm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,890 @@
++/**
++ * \file drm_vm.c
++ * Memory mapping for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#if defined(__ia64__)
++#include <linux/efi.h>
++#endif
++
++static void drm_vm_open(struct vm_area_struct *vma);
++static void drm_vm_close(struct vm_area_struct *vma);
++static int drm_bo_mmap_locked(struct vm_area_struct *vma,
++                            struct file *filp,
++                            drm_local_map_t *map);
++
++
++pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
++{
++      pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
++
++#if defined(__i386__) || defined(__x86_64__)
++      if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
++              pgprot_val(tmp) |= _PAGE_PCD;
++              pgprot_val(tmp) &= ~_PAGE_PWT;
++      }
++#elif defined(__powerpc__)
++      pgprot_val(tmp) |= _PAGE_NO_CACHE;
++      if (map_type == _DRM_REGISTERS)
++              pgprot_val(tmp) |= _PAGE_GUARDED;
++#elif defined(__ia64__)
++      if (efi_range_is_wc(vma->vm_start, vma->vm_end -
++                                  vma->vm_start))
++              tmp = pgprot_writecombine(tmp);
++      else
++              tmp = pgprot_noncached(tmp);
++#elif defined(__sparc__)
++      tmp = pgprot_noncached(tmp);
++#endif
++      return tmp;
++}
++
++static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
++{
++      pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
++
++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
++      tmp |= _PAGE_NO_CACHE;
++#endif
++      return tmp;
++}
++
++#ifndef DRM_VM_NOPAGE
++/**
++ * \c fault method for AGP virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Find the right map and if it's AGP memory find the real physical page to
++ * map, get the page, increment the use count and return it.
++ */
++#if __OS_HAS_AGP
++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_map *map = NULL;
++      struct drm_map_list *r_list;
++      struct drm_hash_item *hash;
++
++      /*
++       * Find the right map
++       */
++      if (!drm_core_has_AGP(dev))
++              goto vm_fault_error;
++
++      if (!dev->agp || !dev->agp->cant_use_aperture)
++              goto vm_fault_error;
++
++      if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
++              goto vm_fault_error;
++
++      r_list = drm_hash_entry(hash, struct drm_map_list, hash);
++      map = r_list->map;
++
++      if (map && map->type == _DRM_AGP) {
++              /*
++               * Using vm_pgoff as a selector forces us to use this unusual
++               * addressing scheme.
++               */
++              unsigned long offset = (unsigned long)vmf->virtual_address -
++                                                              vma->vm_start;
++              unsigned long baddr = map->offset + offset;
++              struct drm_agp_mem *agpmem;
++              struct page *page;
++
++#ifdef __alpha__
++              /*
++               * Adjust to a bus-relative address
++               */
++              baddr -= dev->hose->mem_space->start;
++#endif
++
++              /*
++               * It's AGP memory - find the real physical page to map
++               */
++              list_for_each_entry(agpmem, &dev->agp->memory, head) {
++                      if (agpmem->bound <= baddr &&
++                          agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
++                              break;
++              }
++
++              if (!agpmem)
++                      goto vm_fault_error;
++
++              /*
++               * Get the page, inc the use count, and return it
++               */
++              offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
++              page = virt_to_page(__va(agpmem->memory->memory[offset]));
++              get_page(page);
++              vmf->page = page;
++
++              DRM_DEBUG
++                  ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
++                   baddr, __va(agpmem->memory->memory[offset]), offset,
++                   page_count(page));
++              return 0;
++      }
++vm_fault_error:
++      return VM_FAULT_SIGBUS; /* Disallow mremap */
++}
++#else                         /* __OS_HAS_AGP */
++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      return VM_FAULT_SIGBUS;
++}
++#endif                                /* __OS_HAS_AGP */
++
++/**
++ * \c nopage method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Get the mapping, find the real physical page to map, get the page, and
++ * return it.
++ */
++static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      unsigned long offset;
++      unsigned long i;
++      struct page *page;
++
++      if (!map)
++              return VM_FAULT_SIGBUS; /* Nothing allocated */
++
++      offset = (unsigned long)vmf->virtual_address - vma->vm_start;
++      i = (unsigned long)map->handle + offset;
++      page = vmalloc_to_page((void *)i);
++      if (!page)
++              return VM_FAULT_SIGBUS;
++      get_page(page);
++      vmf->page = page;
++
++      DRM_DEBUG("shm_fault 0x%lx\n", offset);
++      return 0;
++}
++#endif
++
++/**
++ * \c close method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ *
++ * Deletes map information if we are the last
++ * person to close a mapping and it's not in the global maplist.
++ */
++static void drm_vm_shm_close(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_vma_entry *pt, *temp;
++      struct drm_map *map;
++      struct drm_map_list *r_list;
++      int found_maps = 0;
++
++      DRM_DEBUG("0x%08lx,0x%08lx\n",
++                vma->vm_start, vma->vm_end - vma->vm_start);
++      atomic_dec(&dev->vma_count);
++
++      map = vma->vm_private_data;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
++              if (pt->vma->vm_private_data == map)
++                      found_maps++;
++              if (pt->vma == vma) {
++                      list_del(&pt->head);
++                      drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
++              }
++      }
++      /* We were the only map that was found */
++      if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
++              /* Check to see if we are in the maplist, if we are not, then
++               * we delete this mappings information.
++               */
++              found_maps = 0;
++              list_for_each_entry(r_list, &dev->maplist, head) {
++                      if (r_list->map == map)
++                              found_maps++;
++              }
++
++              if (!found_maps) {
++                      drm_dma_handle_t dmah;
++
++                      switch (map->type) {
++                      case _DRM_REGISTERS:
++                      case _DRM_FRAME_BUFFER:
++                              if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
++                                      int retcode;
++                                      retcode = mtrr_del(map->mtrr,
++                                                         map->offset,
++                                                         map->size);
++                                      DRM_DEBUG("mtrr_del = %d\n", retcode);
++                              }
++                              iounmap(map->handle);
++                              break;
++                      case _DRM_SHM:
++                              vfree(map->handle);
++                              break;
++                      case _DRM_AGP:
++                      case _DRM_SCATTER_GATHER:
++                              break;
++                      case _DRM_CONSISTENT:
++                              dmah.vaddr = map->handle;
++                              dmah.busaddr = map->offset;
++                              dmah.size = map->size;
++                              __drm_pci_free(dev, &dmah);
++                              break;
++                      case _DRM_TTM:
++                              BUG_ON(1);
++                              break;
++                      }
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++
++#ifndef DRM_VM_NOPAGE
++/**
++ * \c fault method for DMA virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
++ */
++static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_device_dma *dma = dev->dma;
++      unsigned long offset;
++      unsigned long page_nr;
++      struct page *page;
++
++      if (!dma)
++              return VM_FAULT_SIGBUS; /* Error */
++      if (!dma->pagelist)
++              return VM_FAULT_SIGBUS; /* Nothing allocated */
++
++      offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
++      page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
++      page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
++
++      get_page(page);
++      vmf->page = page;
++
++      DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
++      return 0;
++}
++
++/**
++ * \c fault method for scatter-gather virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
++ */
++static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_sg_mem *entry = dev->sg;
++      unsigned long offset;
++      unsigned long map_offset;
++      unsigned long page_offset;
++      struct page *page;
++
++      if (!entry)
++              return VM_FAULT_SIGBUS; /* Error */
++      if (!entry->pagelist)
++              return VM_FAULT_SIGBUS; /* Nothing allocated */
++
++      offset = (unsigned long)vmf->virtual_address - vma->vm_start;
++      map_offset = map->offset - (unsigned long)dev->sg->virtual;
++      page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
++      page = entry->pagelist[page_offset];
++      get_page(page);
++      vmf->page = page;
++
++      return 0;
++}
++#endif
++
++/** AGP virtual memory operations */
++static struct vm_operations_struct drm_vm_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_nopage,
++#else
++      .fault = drm_do_vm_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_close,
++};
++
++/** Shared virtual memory operations */
++static struct vm_operations_struct drm_vm_shm_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_shm_nopage,
++#else
++      .fault = drm_do_vm_shm_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_shm_close,
++};
++
++/** DMA virtual memory operations */
++static struct vm_operations_struct drm_vm_dma_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_dma_nopage,
++#else
++      .fault = drm_do_vm_dma_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_close,
++};
++
++/** Scatter-gather virtual memory operations */
++static struct vm_operations_struct drm_vm_sg_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_sg_nopage,
++#else
++      .fault = drm_do_vm_sg_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_close,
++};
++
++/**
++ * \c open method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ *
++ * Create a new drm_vma_entry structure as the \p vma private data entry and
++ * add it to drm_device::vmalist.
++ */
++static void drm_vm_open_locked(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_vma_entry *vma_entry;
++
++      DRM_DEBUG("0x%08lx,0x%08lx\n",
++                vma->vm_start, vma->vm_end - vma->vm_start);
++      atomic_inc(&dev->vma_count);
++
++      vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
++      if (vma_entry) {
++              vma_entry->vma = vma;
++              vma_entry->pid = current->pid;
++              list_add(&vma_entry->head, &dev->vmalist);
++      }
++}
++
++static void drm_vm_open(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_vm_open_locked(vma);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * \c close method for all virtual memory types.
++ *
++ * \param vma virtual memory area.
++ *
++ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
++ * free it.
++ */
++static void drm_vm_close(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_vma_entry *pt, *temp;
++
++      DRM_DEBUG("0x%08lx,0x%08lx\n",
++                vma->vm_start, vma->vm_end - vma->vm_start);
++      atomic_dec(&dev->vma_count);
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
++              if (pt->vma == vma) {
++                      list_del(&pt->head);
++                      drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
++                      break;
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++
++
++/**
++ * mmap DMA memory.
++ *
++ * \param file_priv DRM file private.
++ * \param vma virtual memory area.
++ * \return zero on success or a negative number on failure.
++ *
++ * Sets the virtual memory area operations structure to vm_dma_ops, the file
++ * pointer, and calls vm_open().
++ */
++static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev;
++      struct drm_device_dma *dma;
++      unsigned long length = vma->vm_end - vma->vm_start;
++
++      dev = priv->minor->dev;
++      dma = dev->dma;
++      DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
++                vma->vm_start, vma->vm_end, vma->vm_pgoff);
++
++      /* Length must match exact page count */
++      if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
++              return -EINVAL;
++      }
++
++      if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
++              vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++#if defined(__i386__) || defined(__x86_64__)
++              pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
++#else
++              /* Ye gads this is ugly.  With more thought
++                 we could move this up higher and use
++                 `protection_map' instead.  */
++              vma->vm_page_prot =
++                  __pgprot(pte_val
++                           (pte_wrprotect
++                            (__pte(pgprot_val(vma->vm_page_prot)))));
++#endif
++      }
++
++      vma->vm_ops = &drm_vm_dma_ops;
++      vma->vm_flags |= VM_RESERVED;   /* Don't swap */
++
++      vma->vm_file = filp;    /* Needed for drm_vm_open() */
++      drm_vm_open_locked(vma);
++      return 0;
++}
++
++unsigned long drm_core_get_map_ofs(struct drm_map * map)
++{
++      return map->offset;
++}
++EXPORT_SYMBOL(drm_core_get_map_ofs);
++
++unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
++{
++#ifdef __alpha__
++      return dev->hose->dense_mem_base - dev->hose->mem_space->start;
++#else
++      return 0;
++#endif
++}
++EXPORT_SYMBOL(drm_core_get_reg_ofs);
++
++/**
++ * mmap DMA memory.
++ *
++ * \param file_priv DRM file private.
++ * \param vma virtual memory area.
++ * \return zero on success or a negative number on failure.
++ *
++ * If the virtual memory area has no offset associated with it then it's a DMA
++ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
++ * checks that the restricted flag is not set, sets the virtual memory operations
++ * according to the mapping type and remaps the pages. Finally sets the file
++ * pointer and calls vm_open().
++ */
++static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_map *map = NULL;
++      unsigned long offset = 0;
++      struct drm_hash_item *hash;
++
++      DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
++                vma->vm_start, vma->vm_end, vma->vm_pgoff);
++
++      if (!priv->authenticated)
++              return -EACCES;
++
++      /* We check for "dma". On Apple's UniNorth, it's valid to have
++       * the AGP mapped at physical address 0
++       * --BenH.
++       */
++
++      if (!vma->vm_pgoff
++#if __OS_HAS_AGP
++          && (!dev->agp
++              || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
++#endif
++          )
++              return drm_mmap_dma(filp, vma);
++
++      if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
++              DRM_ERROR("Could not find map\n");
++              return -EINVAL;
++      }
++
++      map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
++      if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
++              return -EPERM;
++
++      /* Check for valid size. */
++      if (map->size < vma->vm_end - vma->vm_start)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
++              vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++#if defined(__i386__) || defined(__x86_64__)
++              pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
++#else
++              /* Ye gads this is ugly.  With more thought
++                 we could move this up higher and use
++                 `protection_map' instead.  */
++              vma->vm_page_prot =
++                  __pgprot(pte_val
++                           (pte_wrprotect
++                            (__pte(pgprot_val(vma->vm_page_prot)))));
++#endif
++      }
++
++      switch (map->type) {
++      case _DRM_AGP:
++              if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
++                      /*
++                       * On some platforms we can't talk to bus dma address from the CPU, so for
++                       * memory of type DRM_AGP, we'll deal with sorting out the real physical
++                       * pages and mappings in nopage()
++                       */
++#if defined(__powerpc__)
++                      pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
++#endif
++                      vma->vm_ops = &drm_vm_ops;
++                      break;
++              }
++              /* fall through to _DRM_FRAME_BUFFER... */
++      case _DRM_FRAME_BUFFER:
++      case _DRM_REGISTERS:
++              offset = dev->driver->get_reg_ofs(dev);
++              vma->vm_flags |= VM_IO; /* not in core dump */
++              vma->vm_page_prot = drm_io_prot(map->type, vma);
++              if (io_remap_pfn_range(vma, vma->vm_start,
++                                     (map->offset + offset) >> PAGE_SHIFT,
++                                     vma->vm_end - vma->vm_start,
++                                     vma->vm_page_prot))
++                      return -EAGAIN;
++              DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
++                        " offset = 0x%lx\n",
++                        map->type,
++                        vma->vm_start, vma->vm_end, map->offset + offset);
++              vma->vm_ops = &drm_vm_ops;
++              break;
++      case _DRM_CONSISTENT:
++              /* Consistent memory is really like shared memory. But
++               * it's allocated in a different way, so avoid nopage */
++              if (remap_pfn_range(vma, vma->vm_start,
++                  page_to_pfn(virt_to_page(map->handle)),
++                  vma->vm_end - vma->vm_start, vma->vm_page_prot))
++                      return -EAGAIN;
++              vma->vm_page_prot = drm_dma_prot(map->type, vma);
++      /* fall through to _DRM_SHM */
++      case _DRM_SHM:
++              vma->vm_ops = &drm_vm_shm_ops;
++              vma->vm_private_data = (void *)map;
++              /* Don't let this area swap.  Change when
++                 DRM_KERNEL advisory is supported. */
++              vma->vm_flags |= VM_RESERVED;
++              break;
++      case _DRM_SCATTER_GATHER:
++              vma->vm_ops = &drm_vm_sg_ops;
++              vma->vm_private_data = (void *)map;
++              vma->vm_flags |= VM_RESERVED;
++              vma->vm_page_prot = drm_dma_prot(map->type, vma);
++              break;
++      case _DRM_TTM:
++              return drm_bo_mmap_locked(vma, filp, map);
++      default:
++              return -EINVAL; /* This should never happen. */
++      }
++      vma->vm_flags |= VM_RESERVED;   /* Don't swap */
++
++      vma->vm_file = filp;    /* Needed for drm_vm_open() */
++      drm_vm_open_locked(vma);
++      return 0;
++}
++
++int drm_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_mmap_locked(filp, vma);
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_mmap);
++
++/**
++ * buffer object vm functions.
++ */
++
++/**
++ * \c Pagefault method for buffer objects.
++ *
++ * \param vma Virtual memory area.
++ * \param vmf vm fault data
++ * \return Error or VM_FAULT_NOPAGE:. The pfn is manually inserted.
++ *
++ * It's important that pfns are inserted while holding the bo->mutex lock.
++ * otherwise we might race with unmap_mapping_range() which is always
++ * called with the bo->mutex lock held.
++ *
++ * We're modifying the page attribute bits of the vma->vm_page_prot field,
++ * without holding the mmap_sem in write mode. Only in read mode.
++ * These bits are not used by the mm subsystem code, and we consider them
++ * protected by the bo->mutex lock.
++ */
++
++#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT)
++static int drm_bo_vm_fault(struct vm_area_struct *vma,
++                                   struct vm_fault *vmf)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page = NULL;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++      unsigned long pfn;
++      int err;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long ret = VM_FAULT_NOPAGE;
++
++      dev = bo->dev;
++      err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (err)
++              return VM_FAULT_NOPAGE;
++
++      err = mutex_lock_interruptible(&bo->mutex);
++      if (err) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              return VM_FAULT_NOPAGE;
++      }
++
++      err = drm_bo_wait(bo, 0, 1, 0, 1);
++      if (err) {
++              ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++              goto out_unlock;
++      }
++
++      bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++      /*
++       * If buffer happens to be in a non-mappable location,
++       * move it to a mappable.
++       */
++
++      if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
++              uint32_t new_flags = bo->mem.proposed_flags |
++                      DRM_BO_FLAG_MAPPABLE |
++                      DRM_BO_FLAG_FORCE_MAPPABLE;
++              err = drm_bo_move_buffer(bo, new_flags, 0, 0);
++              if (err) {
++                      ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++                      goto out_unlock;
++              }
++      }
++
++      err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
++                              &bus_size);
++
++      if (err) {
++              ret = VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++      page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
++
++              pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
++              vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
++      } else {
++              ttm = bo->ttm;
++
++              drm_ttm_fixup_caching(ttm);
++              page = drm_ttm_get_page(ttm, page_offset);
++              if (!page) {
++                      ret = VM_FAULT_OOM;
++                      goto out_unlock;
++              }
++              pfn = page_to_pfn(page);
++              vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
++                      vm_get_page_prot(vma->vm_flags) :
++                      drm_io_prot(_DRM_TTM, vma);
++      }
++
++      err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
++      if (err) {
++              ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
++              goto out_unlock;
++      }
++out_unlock:
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++      mutex_unlock(&bo->mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return ret;
++}
++#endif
++
++static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++
++      drm_vm_open_locked(vma);
++      atomic_inc(&bo->usage);
++#ifdef DRM_ODD_MM_COMPAT
++      drm_bo_add_vma(bo, vma);
++#endif
++}
++
++/**
++ * \c vma open method for buffer objects.
++ *
++ * \param vma virtual memory area.
++ */
++
++static void drm_bo_vm_open(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      struct drm_device *dev = bo->dev;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_bo_vm_open_locked(vma);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * \c vma close method for buffer objects.
++ *
++ * \param vma virtual memory area.
++ */
++
++static void drm_bo_vm_close(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      struct drm_device *dev = bo->dev;
++
++      drm_vm_close(vma);
++      if (bo) {
++              mutex_lock(&dev->struct_mutex);
++#ifdef DRM_ODD_MM_COMPAT
++              drm_bo_delete_vma(bo, vma);
++#endif
++              drm_bo_usage_deref_locked((struct drm_buffer_object **)
++                                        &vma->vm_private_data);
++              mutex_unlock(&dev->struct_mutex);
++      }
++      return;
++}
++
++static struct vm_operations_struct drm_bo_vm_ops = {
++#ifdef DRM_FULL_MM_COMPAT
++#ifdef DRM_NO_FAULT
++      .nopfn = drm_bo_vm_nopfn,
++#else
++      .fault = drm_bo_vm_fault,
++#endif
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
++      .nopfn = drm_bo_vm_nopfn,
++#else
++      .nopage = drm_bo_vm_nopage,
++#endif
++#endif
++      .open = drm_bo_vm_open,
++      .close = drm_bo_vm_close,
++};
++
++/**
++ * mmap buffer object memory.
++ *
++ * \param vma virtual memory area.
++ * \param file_priv DRM file private.
++ * \param map The buffer object drm map.
++ * \return zero on success or a negative number on failure.
++ */
++
++int drm_bo_mmap_locked(struct vm_area_struct *vma,
++                     struct file *filp,
++                     drm_local_map_t *map)
++{
++      vma->vm_ops = &drm_bo_vm_ops;
++      vma->vm_private_data = map->handle;
++      vma->vm_file = filp;
++      vma->vm_flags |= VM_RESERVED | VM_IO;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
++      vma->vm_flags |= VM_PFNMAP;
++#endif
++      drm_bo_vm_open_locked(vma);
++#ifdef DRM_ODD_MM_COMPAT
++      drm_bo_map_bound(vma);
++#endif
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c git-nokia/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c
+--- git/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,267 @@
++/**
++ * \file drm_vm.c
++ * Memory mapping for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#ifdef DRM_VM_NOPAGE
++/**
++ * \c nopage method for AGP virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Find the right map and if it's AGP memory find the real physical page to
++ * map, get the page, increment the use count and return it.
++ */
++#if __OS_HAS_AGP
++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
++                                              unsigned long address)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_map *map = NULL;
++      struct drm_map_list *r_list;
++      struct drm_hash_item *hash;
++
++      /*
++       * Find the right map
++       */
++      if (!drm_core_has_AGP(dev))
++              goto vm_nopage_error;
++
++      if (!dev->agp || !dev->agp->cant_use_aperture)
++              goto vm_nopage_error;
++
++      if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
++              goto vm_nopage_error;
++
++      r_list = drm_hash_entry(hash, struct drm_map_list, hash);
++      map = r_list->map;
++
++      if (map && map->type == _DRM_AGP) {
++              unsigned long offset = address - vma->vm_start;
++              unsigned long baddr = map->offset + offset;
++              struct drm_agp_mem *agpmem;
++              struct page *page;
++
++#ifdef __alpha__
++              /*
++               * Adjust to a bus-relative address
++               */
++              baddr -= dev->hose->mem_space->start;
++#endif
++
++              /*
++               * It's AGP memory - find the real physical page to map
++               */
++              list_for_each_entry(agpmem, &dev->agp->memory, head) {
++                      if (agpmem->bound <= baddr &&
++                          agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
++                              break;
++              }
++
++              if (!agpmem)
++                      goto vm_nopage_error;
++
++              /*
++               * Get the page, inc the use count, and return it
++               */
++              offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
++              page = virt_to_page(__va(agpmem->memory->memory[offset]));
++              get_page(page);
++
++#if 0
++              /* page_count() not defined everywhere */
++              DRM_DEBUG
++                  ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
++                   baddr, __va(agpmem->memory->memory[offset]), offset,
++                   page_count(page));
++#endif
++
++              return page;
++      }
++      vm_nopage_error:
++      return NOPAGE_SIGBUS;   /* Disallow mremap */
++}
++#else                         /* __OS_HAS_AGP */
++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
++                                              unsigned long address)
++{
++      return NOPAGE_SIGBUS;
++}
++#endif                                /* __OS_HAS_AGP */
++
++/**
++ * \c nopage method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Get the mapping, find the real physical page to map, get the page, and
++ * return it.
++ */
++static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
++                                                  unsigned long address)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      unsigned long offset;
++      unsigned long i;
++      struct page *page;
++
++      if (address > vma->vm_end)
++              return NOPAGE_SIGBUS;   /* Disallow mremap */
++      if (!map)
++              return NOPAGE_SIGBUS;   /* Nothing allocated */
++
++      offset = address - vma->vm_start;
++      i = (unsigned long)map->handle + offset;
++      page = vmalloc_to_page((void *)i);
++      if (!page)
++              return NOPAGE_SIGBUS;
++      get_page(page);
++
++      DRM_DEBUG("0x%lx\n", address);
++      return page;
++}
++
++/**
++ * \c nopage method for DMA virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
++ */
++static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
++                                                  unsigned long address)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_device_dma *dma = dev->dma;
++      unsigned long offset;
++      unsigned long page_nr;
++      struct page *page;
++
++      if (!dma)
++              return NOPAGE_SIGBUS;   /* Error */
++      if (address > vma->vm_end)
++              return NOPAGE_SIGBUS;   /* Disallow mremap */
++      if (!dma->pagelist)
++              return NOPAGE_SIGBUS;   /* Nothing allocated */
++
++      offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
++      page_nr = offset >> PAGE_SHIFT;
++      page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
++
++      get_page(page);
++
++      DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
++      return page;
++}
++
++/**
++ * \c nopage method for scatter-gather virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
++ */
++static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
++                                                 unsigned long address)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_sg_mem *entry = dev->sg;
++      unsigned long offset;
++      unsigned long map_offset;
++      unsigned long page_offset;
++      struct page *page;
++
++      DRM_DEBUG("\n");
++      if (!entry)
++              return NOPAGE_SIGBUS;   /* Error */
++      if (address > vma->vm_end)
++              return NOPAGE_SIGBUS;   /* Disallow mremap */
++      if (!entry->pagelist)
++              return NOPAGE_SIGBUS;   /* Nothing allocated */
++
++      offset = address - vma->vm_start;
++      map_offset = map->offset - (unsigned long)dev->sg->virtual;
++      page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
++      page = entry->pagelist[page_offset];
++      get_page(page);
++
++      return page;
++}
++
++
++struct page *drm_vm_nopage(struct vm_area_struct *vma,
++                         unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_nopage(vma, address);
++}
++
++struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
++                             unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_shm_nopage(vma, address);
++}
++
++struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
++                             unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_dma_nopage(vma, address);
++}
++
++struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
++                            unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_sg_nopage(vma, address);
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_context.c git-nokia/drivers/gpu/drm-tungsten/ffb_context.c
+--- git/drivers/gpu/drm-tungsten/ffb_context.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_context.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,582 @@
++/* $Id$
++ * ffb_context.c: Creator/Creator3D DRI/DRM context switching.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ *
++ * Almost entirely stolen from tdfx_context.c, see there
++ * for authors.
++ */
++
++#include <linux/sched.h>
++#include <asm/upa.h>
++
++#include "drmP.h"
++#include "ffb_drv.h"
++
++static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) {
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      int i;
++
++      for (i = 0; i < FFB_MAX_CTXS; i++) {
++              if (fpriv->hw_state[i] == NULL)
++                      break;
++      }
++      if (i == FFB_MAX_CTXS)
++              return -1;
++
++      fpriv->hw_state[i] = kmalloc(sizeof(struct ffb_hw_context), GFP_KERNEL);
++      if (fpriv->hw_state[i] == NULL)
++              return -1;
++
++      fpriv->hw_state[i]->is_2d_only = is_2d_only;
++
++      /* Plus one because 0 is the special DRM_KERNEL_CONTEXT. */
++      return i + 1;
++}
++
++static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx)
++{
++      ffb_fbcPtr ffb = fpriv->regs;
++      struct ffb_hw_context *ctx;
++      int i;
++
++      ctx = fpriv->hw_state[idx - 1];
++      if (idx == 0 || ctx == NULL)
++              return;
++
++      if (ctx->is_2d_only) {
++              /* 2D applications only care about certain pieces
++               * of state.
++               */
++              ctx->drawop = upa_readl(&ffb->drawop);
++              ctx->ppc = upa_readl(&ffb->ppc);
++              ctx->wid = upa_readl(&ffb->wid);
++              ctx->fg = upa_readl(&ffb->fg);
++              ctx->bg = upa_readl(&ffb->bg);
++              ctx->xclip = upa_readl(&ffb->xclip);
++              ctx->fbc = upa_readl(&ffb->fbc);
++              ctx->rop = upa_readl(&ffb->rop);
++              ctx->cmp = upa_readl(&ffb->cmp);
++              ctx->matchab = upa_readl(&ffb->matchab);
++              ctx->magnab = upa_readl(&ffb->magnab);
++              ctx->pmask = upa_readl(&ffb->pmask);
++              ctx->xpmask = upa_readl(&ffb->xpmask);
++              ctx->lpat = upa_readl(&ffb->lpat);
++              ctx->fontxy = upa_readl(&ffb->fontxy);
++              ctx->fontw = upa_readl(&ffb->fontw);
++              ctx->fontinc = upa_readl(&ffb->fontinc);
++
++              /* stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      ctx->stencil = upa_readl(&ffb->stencil);
++                      ctx->stencilctl = upa_readl(&ffb->stencilctl);
++              }
++
++              for (i = 0; i < 32; i++)
++                      ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
++              ctx->ucsr = upa_readl(&ffb->ucsr);
++              return;
++      }
++
++      /* Fetch drawop. */
++      ctx->drawop = upa_readl(&ffb->drawop);
++
++      /* If we were saving the vertex registers, this is where
++       * we would do it.  We would save 32 32-bit words starting
++       * at ffb->suvtx.
++       */
++
++      /* Capture rendering attributes. */
++
++      ctx->ppc = upa_readl(&ffb->ppc);        /* Pixel Processor Control */
++      ctx->wid = upa_readl(&ffb->wid);        /* Current WID */
++      ctx->fg = upa_readl(&ffb->fg);  /* Constant FG color */
++      ctx->bg = upa_readl(&ffb->bg);  /* Constant BG color */
++      ctx->consty = upa_readl(&ffb->consty);  /* Constant Y */
++      ctx->constz = upa_readl(&ffb->constz);  /* Constant Z */
++      ctx->xclip = upa_readl(&ffb->xclip);    /* X plane clip */
++      ctx->dcss = upa_readl(&ffb->dcss);      /* Depth Cue Scale Slope */
++      ctx->vclipmin = upa_readl(&ffb->vclipmin);      /* Primary XY clip, minimum */
++      ctx->vclipmax = upa_readl(&ffb->vclipmax);      /* Primary XY clip, maximum */
++      ctx->vclipzmin = upa_readl(&ffb->vclipzmin);    /* Primary Z clip, minimum */
++      ctx->vclipzmax = upa_readl(&ffb->vclipzmax);    /* Primary Z clip, maximum */
++      ctx->dcsf = upa_readl(&ffb->dcsf);      /* Depth Cue Scale Front Bound */
++      ctx->dcsb = upa_readl(&ffb->dcsb);      /* Depth Cue Scale Back Bound */
++      ctx->dczf = upa_readl(&ffb->dczf);      /* Depth Cue Scale Z Front */
++      ctx->dczb = upa_readl(&ffb->dczb);      /* Depth Cue Scale Z Back */
++      ctx->blendc = upa_readl(&ffb->blendc);  /* Alpha Blend Control */
++      ctx->blendc1 = upa_readl(&ffb->blendc1);        /* Alpha Blend Color 1 */
++      ctx->blendc2 = upa_readl(&ffb->blendc2);        /* Alpha Blend Color 2 */
++      ctx->fbc = upa_readl(&ffb->fbc);        /* Frame Buffer Control */
++      ctx->rop = upa_readl(&ffb->rop);        /* Raster Operation */
++      ctx->cmp = upa_readl(&ffb->cmp);        /* Compare Controls */
++      ctx->matchab = upa_readl(&ffb->matchab);        /* Buffer A/B Match Ops */
++      ctx->matchc = upa_readl(&ffb->matchc);  /* Buffer C Match Ops */
++      ctx->magnab = upa_readl(&ffb->magnab);  /* Buffer A/B Magnitude Ops */
++      ctx->magnc = upa_readl(&ffb->magnc);    /* Buffer C Magnitude Ops */
++      ctx->pmask = upa_readl(&ffb->pmask);    /* RGB Plane Mask */
++      ctx->xpmask = upa_readl(&ffb->xpmask);  /* X Plane Mask */
++      ctx->ypmask = upa_readl(&ffb->ypmask);  /* Y Plane Mask */
++      ctx->zpmask = upa_readl(&ffb->zpmask);  /* Z Plane Mask */
++
++      /* Auxiliary Clips. */
++      ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min);
++      ctx->auxclip0max = upa_readl(&ffb->auxclip[0].max);
++      ctx->auxclip1min = upa_readl(&ffb->auxclip[1].min);
++      ctx->auxclip1max = upa_readl(&ffb->auxclip[1].max);
++      ctx->auxclip2min = upa_readl(&ffb->auxclip[2].min);
++      ctx->auxclip2max = upa_readl(&ffb->auxclip[2].max);
++      ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min);
++      ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max);
++
++      ctx->lpat = upa_readl(&ffb->lpat);      /* Line Pattern */
++      ctx->fontxy = upa_readl(&ffb->fontxy);  /* XY Font Coordinate */
++      ctx->fontw = upa_readl(&ffb->fontw);    /* Font Width */
++      ctx->fontinc = upa_readl(&ffb->fontinc);        /* Font X/Y Increment */
++
++      /* These registers/features only exist on FFB2 and later chips. */
++      if (fpriv->ffb_type >= ffb2_prototype) {
++              ctx->dcss1 = upa_readl(&ffb->dcss1);    /* Depth Cue Scale Slope 1 */
++              ctx->dcss2 = upa_readl(&ffb->dcss2);    /* Depth Cue Scale Slope 2 */
++              ctx->dcss3 = upa_readl(&ffb->dcss3);    /* Depth Cue Scale Slope 3 */
++              ctx->dcs2 = upa_readl(&ffb->dcs2);      /* Depth Cue Scale 2 */
++              ctx->dcs3 = upa_readl(&ffb->dcs3);      /* Depth Cue Scale 3 */
++              ctx->dcs4 = upa_readl(&ffb->dcs4);      /* Depth Cue Scale 4 */
++              ctx->dcd2 = upa_readl(&ffb->dcd2);      /* Depth Cue Depth 2 */
++              ctx->dcd3 = upa_readl(&ffb->dcd3);      /* Depth Cue Depth 3 */
++              ctx->dcd4 = upa_readl(&ffb->dcd4);      /* Depth Cue Depth 4 */
++
++              /* And stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      ctx->stencil = upa_readl(&ffb->stencil);
++                      ctx->stencilctl = upa_readl(&ffb->stencilctl);
++              }
++      }
++
++      /* Save the 32x32 area pattern. */
++      for (i = 0; i < 32; i++)
++              ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
++
++      /* Finally, stash away the User Control/Status Register. */
++      ctx->ucsr = upa_readl(&ffb->ucsr);
++}
++
++static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx)
++{
++      ffb_fbcPtr ffb = fpriv->regs;
++      struct ffb_hw_context *ctx;
++      int i;
++
++      ctx = fpriv->hw_state[idx - 1];
++      if (idx == 0 || ctx == NULL)
++              return;
++
++      if (ctx->is_2d_only) {
++              /* 2D applications only care about certain pieces
++               * of state.
++               */
++              upa_writel(ctx->drawop, &ffb->drawop);
++
++              /* If we were restoring the vertex registers, this is where
++               * we would do it.  We would restore 32 32-bit words starting
++               * at ffb->suvtx.
++               */
++
++              upa_writel(ctx->ppc, &ffb->ppc);
++              upa_writel(ctx->wid, &ffb->wid);
++              upa_writel(ctx->fg, &ffb->fg);
++              upa_writel(ctx->bg, &ffb->bg);
++              upa_writel(ctx->xclip, &ffb->xclip);
++              upa_writel(ctx->fbc, &ffb->fbc);
++              upa_writel(ctx->rop, &ffb->rop);
++              upa_writel(ctx->cmp, &ffb->cmp);
++              upa_writel(ctx->matchab, &ffb->matchab);
++              upa_writel(ctx->magnab, &ffb->magnab);
++              upa_writel(ctx->pmask, &ffb->pmask);
++              upa_writel(ctx->xpmask, &ffb->xpmask);
++              upa_writel(ctx->lpat, &ffb->lpat);
++              upa_writel(ctx->fontxy, &ffb->fontxy);
++              upa_writel(ctx->fontw, &ffb->fontw);
++              upa_writel(ctx->fontinc, &ffb->fontinc);
++
++              /* stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      upa_writel(ctx->stencil, &ffb->stencil);
++                      upa_writel(ctx->stencilctl, &ffb->stencilctl);
++                      upa_writel(0x80000000, &ffb->fbc);
++                      upa_writel((ctx->stencilctl | 0x80000),
++                                 &ffb->rawstencilctl);
++                      upa_writel(ctx->fbc, &ffb->fbc);
++              }
++
++              for (i = 0; i < 32; i++)
++                      upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
++              upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr);
++              return;
++      }
++
++      /* Restore drawop. */
++      upa_writel(ctx->drawop, &ffb->drawop);
++
++      /* If we were restoring the vertex registers, this is where
++       * we would do it.  We would restore 32 32-bit words starting
++       * at ffb->suvtx.
++       */
++
++      /* Restore rendering attributes. */
++
++      upa_writel(ctx->ppc, &ffb->ppc);        /* Pixel Processor Control */
++      upa_writel(ctx->wid, &ffb->wid);        /* Current WID */
++      upa_writel(ctx->fg, &ffb->fg);  /* Constant FG color */
++      upa_writel(ctx->bg, &ffb->bg);  /* Constant BG color */
++      upa_writel(ctx->consty, &ffb->consty);  /* Constant Y */
++      upa_writel(ctx->constz, &ffb->constz);  /* Constant Z */
++      upa_writel(ctx->xclip, &ffb->xclip);    /* X plane clip */
++      upa_writel(ctx->dcss, &ffb->dcss);      /* Depth Cue Scale Slope */
++      upa_writel(ctx->vclipmin, &ffb->vclipmin);      /* Primary XY clip, minimum */
++      upa_writel(ctx->vclipmax, &ffb->vclipmax);      /* Primary XY clip, maximum */
++      upa_writel(ctx->vclipzmin, &ffb->vclipzmin);    /* Primary Z clip, minimum */
++      upa_writel(ctx->vclipzmax, &ffb->vclipzmax);    /* Primary Z clip, maximum */
++      upa_writel(ctx->dcsf, &ffb->dcsf);      /* Depth Cue Scale Front Bound */
++      upa_writel(ctx->dcsb, &ffb->dcsb);      /* Depth Cue Scale Back Bound */
++      upa_writel(ctx->dczf, &ffb->dczf);      /* Depth Cue Scale Z Front */
++      upa_writel(ctx->dczb, &ffb->dczb);      /* Depth Cue Scale Z Back */
++      upa_writel(ctx->blendc, &ffb->blendc);  /* Alpha Blend Control */
++      upa_writel(ctx->blendc1, &ffb->blendc1);        /* Alpha Blend Color 1 */
++      upa_writel(ctx->blendc2, &ffb->blendc2);        /* Alpha Blend Color 2 */
++      upa_writel(ctx->fbc, &ffb->fbc);        /* Frame Buffer Control */
++      upa_writel(ctx->rop, &ffb->rop);        /* Raster Operation */
++      upa_writel(ctx->cmp, &ffb->cmp);        /* Compare Controls */
++      upa_writel(ctx->matchab, &ffb->matchab);        /* Buffer A/B Match Ops */
++      upa_writel(ctx->matchc, &ffb->matchc);  /* Buffer C Match Ops */
++      upa_writel(ctx->magnab, &ffb->magnab);  /* Buffer A/B Magnitude Ops */
++      upa_writel(ctx->magnc, &ffb->magnc);    /* Buffer C Magnitude Ops */
++      upa_writel(ctx->pmask, &ffb->pmask);    /* RGB Plane Mask */
++      upa_writel(ctx->xpmask, &ffb->xpmask);  /* X Plane Mask */
++      upa_writel(ctx->ypmask, &ffb->ypmask);  /* Y Plane Mask */
++      upa_writel(ctx->zpmask, &ffb->zpmask);  /* Z Plane Mask */
++
++      /* Auxiliary Clips. */
++      upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min);
++      upa_writel(ctx->auxclip0max, &ffb->auxclip[0].max);
++      upa_writel(ctx->auxclip1min, &ffb->auxclip[1].min);
++      upa_writel(ctx->auxclip1max, &ffb->auxclip[1].max);
++      upa_writel(ctx->auxclip2min, &ffb->auxclip[2].min);
++      upa_writel(ctx->auxclip2max, &ffb->auxclip[2].max);
++      upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min);
++      upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max);
++
++      upa_writel(ctx->lpat, &ffb->lpat);      /* Line Pattern */
++      upa_writel(ctx->fontxy, &ffb->fontxy);  /* XY Font Coordinate */
++      upa_writel(ctx->fontw, &ffb->fontw);    /* Font Width */
++      upa_writel(ctx->fontinc, &ffb->fontinc);        /* Font X/Y Increment */
++
++      /* These registers/features only exist on FFB2 and later chips. */
++      if (fpriv->ffb_type >= ffb2_prototype) {
++              upa_writel(ctx->dcss1, &ffb->dcss1);    /* Depth Cue Scale Slope 1 */
++              upa_writel(ctx->dcss2, &ffb->dcss2);    /* Depth Cue Scale Slope 2 */
++              upa_writel(ctx->dcss3, &ffb->dcss3);    /* Depth Cue Scale Slope 3 */
++              upa_writel(ctx->dcs2, &ffb->dcs2);      /* Depth Cue Scale 2 */
++              upa_writel(ctx->dcs3, &ffb->dcs3);      /* Depth Cue Scale 3 */
++              upa_writel(ctx->dcs4, &ffb->dcs4);      /* Depth Cue Scale 4 */
++              upa_writel(ctx->dcd2, &ffb->dcd2);      /* Depth Cue Depth 2 */
++              upa_writel(ctx->dcd3, &ffb->dcd3);      /* Depth Cue Depth 3 */
++              upa_writel(ctx->dcd4, &ffb->dcd4);      /* Depth Cue Depth 4 */
++
++              /* And stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      /* Unfortunately, there is a hardware bug on
++                       * the FFB2+ chips which prevents a normal write
++                       * to the stencil control register from working
++                       * as it should.
++                       *
++                       * The state controlled by the FFB stencilctl register
++                       * really gets transferred to the per-buffer instances
++                       * of the stencilctl register in the 3DRAM chips.
++                       *
++                       * The bug is that FFB does not update buffer C correctly,
++                       * so we have to do it by hand for them.
++                       */
++
++                      /* This will update buffers A and B. */
++                      upa_writel(ctx->stencil, &ffb->stencil);
++                      upa_writel(ctx->stencilctl, &ffb->stencilctl);
++
++                      /* Force FFB to use buffer C 3dram regs. */
++                      upa_writel(0x80000000, &ffb->fbc);
++                      upa_writel((ctx->stencilctl | 0x80000),
++                                 &ffb->rawstencilctl);
++
++                      /* Now restore the correct FBC controls. */
++                      upa_writel(ctx->fbc, &ffb->fbc);
++              }
++      }
++
++      /* Restore the 32x32 area pattern. */
++      for (i = 0; i < 32; i++)
++              upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
++
++      /* Finally, restore the User Control/Status Register.
++       * The only state we really preserve here is the picking
++       * control.
++       */
++      upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr);
++}
++
++#define FFB_UCSR_FB_BUSY       0x01000000
++#define FFB_UCSR_RP_BUSY       0x02000000
++#define FFB_UCSR_ALL_BUSY      (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY)
++
++static void FFBWait(ffb_fbcPtr ffb)
++{
++      int limit = 100000;
++
++      do {
++              u32 regval = upa_readl(&ffb->ucsr);
++
++              if ((regval & FFB_UCSR_ALL_BUSY) == 0)
++                      break;
++      } while (--limit);
++}
++
++int ffb_context_switch(struct drm_device * dev, int old, int new) {
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++
++#if DRM_DMA_HISTOGRAM
++      dev->ctx_start = get_cycles();
++#endif
++
++      DRM_DEBUG("Context switch from %d to %d\n", old, new);
++
++      if (new == dev->last_context || dev->last_context == 0) {
++              dev->last_context = new;
++              return 0;
++      }
++
++      FFBWait(fpriv->regs);
++      ffb_save_context(fpriv, old);
++      ffb_restore_context(fpriv, old, new);
++      FFBWait(fpriv->regs);
++
++      dev->last_context = new;
++
++      return 0;
++}
++
++int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_ctx_res_t res;
++      drm_ctx_t ctx;
++      int i;
++
++      DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
++      if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res)))
++              return -EFAULT;
++      if (res.count >= DRM_RESERVED_CONTEXTS) {
++              memset(&ctx, 0, sizeof(ctx));
++              for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
++                      ctx.handle = i;
++                      if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx)))
++                              return -EFAULT;
++              }
++      }
++      res.count = DRM_RESERVED_CONTEXTS;
++      if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res)))
++              return -EFAULT;
++      return 0;
++}
++
++int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      drm_ctx_t ctx;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      idx = ffb_alloc_queue(dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
++      if (idx < 0)
++              return -ENFILE;
++
++      DRM_DEBUG("%d\n", ctx.handle);
++      ctx.handle = idx;
++      if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
++              return -EFAULT;
++      return 0;
++}
++
++int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      struct ffb_hw_context *hwctx;
++      drm_ctx_t ctx;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++
++      idx = ctx.handle;
++      if (idx <= 0 || idx >= FFB_MAX_CTXS)
++              return -EINVAL;
++
++      hwctx = fpriv->hw_state[idx - 1];
++      if (hwctx == NULL)
++              return -EINVAL;
++
++      if ((ctx.flags & _DRM_CONTEXT_2DONLY) == 0)
++              hwctx->is_2d_only = 0;
++      else
++              hwctx->is_2d_only = 1;
++
++      return 0;
++}
++
++int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      struct ffb_hw_context *hwctx;
++      drm_ctx_t ctx;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++
++      idx = ctx.handle;
++      if (idx <= 0 || idx >= FFB_MAX_CTXS)
++              return -EINVAL;
++
++      hwctx = fpriv->hw_state[idx - 1];
++      if (hwctx == NULL)
++              return -EINVAL;
++
++      if (hwctx->is_2d_only != 0)
++              ctx.flags = _DRM_CONTEXT_2DONLY;
++      else
++              ctx.flags = 0;
++
++      if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
++              return -EFAULT;
++
++      return 0;
++}
++
++int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd,
++                  unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      drm_ctx_t ctx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      DRM_DEBUG("%d\n", ctx.handle);
++      return ffb_context_switch(dev, dev->last_context, ctx.handle);
++}
++
++int ffb_newctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_ctx_t ctx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      DRM_DEBUG("%d\n", ctx.handle);
++
++      return 0;
++}
++
++int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd,
++              unsigned long arg) {
++      drm_ctx_t ctx;
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      DRM_DEBUG("%d\n", ctx.handle);
++
++      idx = ctx.handle - 1;
++      if (idx < 0 || idx >= FFB_MAX_CTXS)
++              return -EINVAL;
++
++      if (fpriv->hw_state[idx] != NULL) {
++              kfree(fpriv->hw_state[idx]);
++              fpriv->hw_state[idx] = NULL;
++      }
++      return 0;
++}
++
++static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev)
++{
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
++      int idx;
++
++      idx = context - 1;
++      if (fpriv &&
++          context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) {
++              kfree(fpriv->hw_state[idx]);
++              fpriv->hw_state[idx] = NULL;
++      }
++}
++
++static void ffb_driver_lastclose(struct drm_device * dev)
++{
++      if (dev->dev_private)
++              kfree(dev->dev_private);
++}
++
++static void ffb_driver_unload(struct drm_device * dev)
++{
++      if (ffb_position != NULL)
++              kfree(ffb_position);
++}
++
++static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
++{
++      dev->lock.filp = 0;
++      {
++              __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
++              unsigned int old, new, prev, ctx;
++
++              ctx = lock.context;
++              do {
++                      old = *plock;
++                      new = ctx;
++                      prev = cmpxchg(plock, old, new);
++              } while (prev != old);
++      }
++      wake_up_interruptible(&dev->lock.lock_queue);
++}
++
++unsigned long ffb_driver_get_map_ofs(drm_map_t * map)
++{
++      return (map->offset & 0xffffffff);
++}
++
++unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev)
++{
++      ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private;
++
++      if (ffb_priv)
++              return ffb_priv->card_phys_base;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_drv.c git-nokia/drivers/gpu/drm-tungsten/ffb_drv.c
+--- git/drivers/gpu/drm-tungsten/ffb_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,329 @@
++/* $Id$
++ * ffb_drv.c: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <asm/shmparam.h>
++#include <asm/oplib.h>
++#include <asm/upa.h>
++
++#include "drmP.h"
++#include "ffb_drv.h"
++
++#define DRIVER_AUTHOR         "David S. Miller"
++
++#define DRIVER_NAME           "ffb"
++#define DRIVER_DESC           "Creator/Creator3D"
++#define DRIVER_DATE           "20000517"
++
++#define DRIVER_MAJOR          0
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     1
++
++typedef struct _ffb_position_t {
++      int node;
++      int root;
++} ffb_position_t;
++
++static ffb_position_t *ffb_position;
++
++static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance)
++{
++      volatile unsigned char *strap_bits;
++      unsigned char val;
++
++      strap_bits = (volatile unsigned char *)
++              (ffb_priv->card_phys_base + 0x00200000UL);
++
++      /* Don't ask, you have to read the value twice for whatever
++       * reason to get correct contents.
++       */
++      val = upa_readb(strap_bits);
++      val = upa_readb(strap_bits);
++      switch (val & 0x78) {
++      case (0x0 << 5) | (0x0 << 3):
++              ffb_priv->ffb_type = ffb1_prototype;
++              printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
++              break;
++      case (0x0 << 5) | (0x1 << 3):
++              ffb_priv->ffb_type = ffb1_standard;
++              printk("ffb%d: Detected FFB1\n", instance);
++              break;
++      case (0x0 << 5) | (0x3 << 3):
++              ffb_priv->ffb_type = ffb1_speedsort;
++              printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
++              break;
++      case (0x1 << 5) | (0x0 << 3):
++              ffb_priv->ffb_type = ffb2_prototype;
++              printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance);
++              break;
++      case (0x1 << 5) | (0x1 << 3):
++              ffb_priv->ffb_type = ffb2_vertical;
++              printk("ffb%d: Detected FFB2/vertical\n", instance);
++              break;
++      case (0x1 << 5) | (0x2 << 3):
++              ffb_priv->ffb_type = ffb2_vertical_plus;
++              printk("ffb%d: Detected FFB2+/vertical\n", instance);
++              break;
++      case (0x2 << 5) | (0x0 << 3):
++              ffb_priv->ffb_type = ffb2_horizontal;
++              printk("ffb%d: Detected FFB2/horizontal\n", instance);
++              break;
++      case (0x2 << 5) | (0x2 << 3):
++              ffb_priv->ffb_type = ffb2_horizontal;
++              printk("ffb%d: Detected FFB2+/horizontal\n", instance);
++              break;
++      default:
++              ffb_priv->ffb_type = ffb2_vertical;
++              printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val);
++              break;
++      };
++}
++
++static void ffb_apply_upa_parent_ranges(int parent,
++                                      struct linux_prom64_registers *regs)
++{
++      struct linux_prom64_ranges ranges[PROMREG_MAX];
++      char name[128];
++      int len, i;
++
++      prom_getproperty(parent, "name", name, sizeof(name));
++      if (strcmp(name, "upa") != 0)
++              return;
++
++      len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges));
++      if (len <= 0)
++              return;
++
++      len /= sizeof(struct linux_prom64_ranges);
++      for (i = 0; i < len; i++) {
++              struct linux_prom64_ranges *rng = &ranges[i];
++              u64 phys_addr = regs->phys_addr;
++
++              if (phys_addr >= rng->ot_child_base &&
++                  phys_addr < (rng->ot_child_base + rng->or_size)) {
++                      regs->phys_addr -= rng->ot_child_base;
++                      regs->phys_addr += rng->ot_parent_base;
++                      return;
++              }
++      }
++
++      return;
++}
++
++static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node,
++                      int instance)
++{
++      struct linux_prom64_registers regs[2*PROMREG_MAX];
++      ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *)dev->dev_private;
++      int i;
++
++      ffb_priv->prom_node = prom_node;
++      if (prom_getproperty(ffb_priv->prom_node, "reg",
++                           (void *)regs, sizeof(regs)) <= 0) {
++              return -EINVAL;
++      }
++      ffb_apply_upa_parent_ranges(parent_node, &regs[0]);
++      ffb_priv->card_phys_base = regs[0].phys_addr;
++      ffb_priv->regs = (ffb_fbcPtr)
++              (regs[0].phys_addr + 0x00600000UL);
++      get_ffb_type(ffb_priv, instance);
++      for (i = 0; i < FFB_MAX_CTXS; i++)
++              ffb_priv->hw_state[i] = NULL;
++
++      return 0;
++}
++
++static int __init ffb_count_siblings(int root)
++{
++      int node, child, count = 0;
++
++      child = prom_getchild(root);
++      for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
++           node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
++              count++;
++
++      return count;
++}
++
++static int __init ffb_scan_siblings(int root, int instance)
++{
++      int node, child;
++
++      child = prom_getchild(root);
++      for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
++           node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
++              ffb_position[instance].node = node;
++              ffb_position[instance].root = root;
++              instance++;
++      }
++
++      return instance;
++}
++
++static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
++{
++      drm_file_t      *priv   = filp->private_data;
++      struct drm_device       *dev;
++      drm_map_list_t  *r_list;
++      struct list_head *list;
++      drm_map_t       *map;
++
++      if (!priv || (dev = priv->dev) == NULL)
++              return NULL;
++
++      list_for_each(list, &dev->maplist->head) {
++              unsigned long uoff;
++
++              r_list = (drm_map_list_t *)list;
++              map = r_list->map;
++              if (!map)
++                      continue;
++              uoff = (map->offset & 0xffffffff);
++              if (uoff == off)
++                      return map;
++      }
++
++      return NULL;
++}
++
++unsigned long ffb_get_unmapped_area(struct file *filp,
++                                  unsigned long hint,
++                                  unsigned long len,
++                                  unsigned long pgoff,
++                                  unsigned long flags)
++{
++      drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
++      unsigned long addr = -ENOMEM;
++
++      if (!map)
++              return get_unmapped_area(NULL, hint, len, pgoff, flags);
++
++      if (map->type == _DRM_FRAME_BUFFER ||
++          map->type == _DRM_REGISTERS) {
++#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
++              addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
++#else
++              addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
++#endif
++      } else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
++              unsigned long slack = SHMLBA - PAGE_SIZE;
++
++              addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
++              if (!(addr & ~PAGE_MASK)) {
++                      unsigned long kvirt = (unsigned long) map->handle;
++
++                      if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
++                              unsigned long koff, aoff;
++
++                              koff = kvirt & (SHMLBA - 1);
++                              aoff = addr & (SHMLBA - 1);
++                              if (koff < aoff)
++                                      koff += SHMLBA;
++
++                              addr += (koff - aoff);
++                      }
++              }
++      } else {
++              addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
++      }
++
++      return addr;
++}
++
++/* This functions must be here since it references drm_numdevs)
++ * which drm_drv.h declares.
++ */
++static int ffb_driver_firstopen(struct drm_device *dev)
++{
++      ffb_dev_priv_t  *ffb_priv;
++      struct drm_device *temp_dev;
++      int ret = 0;
++      int i;
++
++      /* Check for the case where no device was found. */
++      if (ffb_position == NULL)
++              return -ENODEV;
++
++      /* Find our instance number by finding our device in dev structure */
++      for (i = 0; i < drm_numdevs; i++) {
++              temp_dev = &(drm_device[i]);
++              if(temp_dev == dev)
++                      break;
++      }
++
++      if (i == drm_numdevs)
++              return -ENODEV;
++
++      ffb_priv = kmalloc(sizeof(ffb_dev_priv_t), GFP_KERNEL);
++      if (!ffb_priv)
++              return -ENOMEM;
++      memset(ffb_priv, 0, sizeof(*ffb_priv));
++      dev->dev_private = ffb_priv;
++
++      ret = ffb_init_one(dev,
++                         ffb_position[i].node,
++                         ffb_position[i].root,
++                         i);
++      return ret;
++}
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      ffb_PCI_IDS
++};
++
++static struct drm_driver ffb_driver = {
++      .release = ffb_driver_reclaim_buffers_locked,
++      .firstopen = ffb_driver_firstopen,
++      .lastclose = ffb_driver_lastclose,
++      .unload = ffb_driver_unload,
++      .kernel_context_switch = ffb_context_switch,
++      .kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock,
++      .get_map_ofs = ffb_driver_get_map_ofs,
++      .get_reg_ofs = ffb_driver_get_reg_ofs,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .fops = {
++              .owner   = THIS_MODULE,
++              .open    = drm_open,
++              .release = drm_release,
++              .ioctl   = drm_ioctl,
++              .mmap    = drm_mmap,
++              .fasync  = drm_fasync,
++              .poll    = drm_poll,
++              .get_unmapped_area = ffb_get_unmapped_area,
++      },
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_probe(pdev, ent, &ffb_driver);
++}
++
++static struct pci_driver pci_driver = {
++      .name          = DRIVER_NAME,
++      .id_table      = pciidlist,
++      .probe         = probe,
++      .remove        = __devexit_p(drm_cleanup_pci),
++};
++
++static int __init ffb_init(void)
++{
++      return drm_init(&pci_driver, pciidlist, &ffb_driver);
++}
++
++static void __exit ffb_exit(void)
++{
++      drm_exit(&pci_driver);
++}
++
++module_init(ffb_init);
++module_exit(ffb_exit);
++
++MODULE_AUTHOR( DRIVER_AUTHOR );
++MODULE_DESCRIPTION( DRIVER_DESC );
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_drv.h git-nokia/drivers/gpu/drm-tungsten/ffb_drv.h
+--- git/drivers/gpu/drm-tungsten/ffb_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,284 @@
++/* $Id$
++ * ffb_drv.h: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++/* Auxilliary clips. */
++typedef struct  {
++      volatile unsigned int min;
++      volatile unsigned int max;
++} ffb_auxclip, *ffb_auxclipPtr;
++
++/* FFB register set. */
++typedef struct _ffb_fbc {
++      /* Next vertex registers, on the right we list which drawops
++       * use said register and the logical name the register has in
++       * that context.
++       */                                     /* DESCRIPTION          DRAWOP(NAME)    */
++/*0x00*/unsigned int          pad1[3];        /* Reserved                             */
++/*0x0c*/volatile unsigned int alpha;          /* ALPHA Transparency                   */
++/*0x10*/volatile unsigned int red;            /* RED                                  */
++/*0x14*/volatile unsigned int green;          /* GREEN                                */
++/*0x18*/volatile unsigned int blue;           /* BLUE                                 */
++/*0x1c*/volatile unsigned int z;              /* DEPTH                                */
++/*0x20*/volatile unsigned int y;              /* Y                    triangle(DOYF)  */
++                                              /*                      aadot(DYF)      */
++                                              /*                      ddline(DYF)     */
++                                              /*                      aaline(DYF)     */
++/*0x24*/volatile unsigned int x;              /* X                    triangle(DOXF)  */
++                                              /*                      aadot(DXF)      */
++                                              /*                      ddline(DXF)     */
++                                              /*                      aaline(DXF)     */
++/*0x28*/unsigned int          pad2[2];        /* Reserved                             */
++/*0x30*/volatile unsigned int ryf;            /* Y (alias to DOYF)    ddline(RYF)     */
++                                              /*                      aaline(RYF)     */
++                                              /*                      triangle(RYF)   */
++/*0x34*/volatile unsigned int rxf;            /* X                    ddline(RXF)     */
++                                              /*                      aaline(RXF)     */
++                                              /*                      triangle(RXF)   */
++/*0x38*/unsigned int          pad3[2];        /* Reserved                             */
++/*0x40*/volatile unsigned int dmyf;           /* Y (alias to DOYF)    triangle(DMYF)  */
++/*0x44*/volatile unsigned int dmxf;           /* X                    triangle(DMXF)  */
++/*0x48*/unsigned int          pad4[2];        /* Reserved                             */
++/*0x50*/volatile unsigned int ebyi;           /* Y (alias to RYI)     polygon(EBYI)   */
++/*0x54*/volatile unsigned int ebxi;           /* X                    polygon(EBXI)   */
++/*0x58*/unsigned int          pad5[2];        /* Reserved                             */
++/*0x60*/volatile unsigned int by;             /* Y                    brline(RYI)     */
++                                              /*                      fastfill(OP)    */
++                                              /*                      polygon(YI)     */
++                                              /*                      rectangle(YI)   */
++                                              /*                      bcopy(SRCY)     */
++                                              /*                      vscroll(SRCY)   */
++/*0x64*/volatile unsigned int bx;             /* X                    brline(RXI)     */
++                                              /*                      polygon(XI)     */
++                                              /*                      rectangle(XI)   */
++                                              /*                      bcopy(SRCX)     */
++                                              /*                      vscroll(SRCX)   */
++                                              /*                      fastfill(GO)    */
++/*0x68*/volatile unsigned int dy;             /* destination Y        fastfill(DSTY)  */
++                                              /*                      bcopy(DSRY)     */
++                                              /*                      vscroll(DSRY)   */
++/*0x6c*/volatile unsigned int dx;             /* destination X        fastfill(DSTX)  */
++                                              /*                      bcopy(DSTX)     */
++                                              /*                      vscroll(DSTX)   */
++/*0x70*/volatile unsigned int bh;             /* Y (alias to RYI)     brline(DYI)     */
++                                              /*                      dot(DYI)        */
++                                              /*                      polygon(ETYI)   */
++                                              /* Height               fastfill(H)     */
++                                              /*                      bcopy(H)        */
++                                              /*                      vscroll(H)      */
++                                              /* Y count              fastfill(NY)    */
++/*0x74*/volatile unsigned int bw;             /* X                    dot(DXI)        */
++                                              /*                      brline(DXI)     */
++                                              /*                      polygon(ETXI)   */
++                                              /*                      fastfill(W)     */
++                                              /*                      bcopy(W)        */
++                                              /*                      vscroll(W)      */
++                                              /*                      fastfill(NX)    */
++/*0x78*/unsigned int          pad6[2];        /* Reserved                             */
++/*0x80*/unsigned int          pad7[32];       /* Reserved                             */
++
++      /* Setup Unit's vertex state register */
++/*100*/       volatile unsigned int   suvtx;
++/*104*/       unsigned int            pad8[63];       /* Reserved                             */
++
++      /* Frame Buffer Control Registers */
++/*200*/       volatile unsigned int   ppc;            /* Pixel Processor Control              */
++/*204*/       volatile unsigned int   wid;            /* Current WID                          */
++/*208*/       volatile unsigned int   fg;             /* FG data                              */
++/*20c*/       volatile unsigned int   bg;             /* BG data                              */
++/*210*/       volatile unsigned int   consty;         /* Constant Y                           */
++/*214*/       volatile unsigned int   constz;         /* Constant Z                           */
++/*218*/       volatile unsigned int   xclip;          /* X Clip                               */
++/*21c*/       volatile unsigned int   dcss;           /* Depth Cue Scale Slope                */
++/*220*/       volatile unsigned int   vclipmin;       /* Viewclip XY Min Bounds               */
++/*224*/       volatile unsigned int   vclipmax;       /* Viewclip XY Max Bounds               */
++/*228*/       volatile unsigned int   vclipzmin;      /* Viewclip Z Min Bounds                */
++/*22c*/       volatile unsigned int   vclipzmax;      /* Viewclip Z Max Bounds                */
++/*230*/       volatile unsigned int   dcsf;           /* Depth Cue Scale Front Bound          */
++/*234*/       volatile unsigned int   dcsb;           /* Depth Cue Scale Back Bound           */
++/*238*/       volatile unsigned int   dczf;           /* Depth Cue Z Front                    */
++/*23c*/       volatile unsigned int   dczb;           /* Depth Cue Z Back                     */
++/*240*/       unsigned int            pad9;           /* Reserved                             */
++/*244*/       volatile unsigned int   blendc;         /* Alpha Blend Control                  */
++/*248*/       volatile unsigned int   blendc1;        /* Alpha Blend Color 1                  */
++/*24c*/       volatile unsigned int   blendc2;        /* Alpha Blend Color 2                  */
++/*250*/       volatile unsigned int   fbramitc;       /* FB RAM Interleave Test Control       */
++/*254*/       volatile unsigned int   fbc;            /* Frame Buffer Control                 */
++/*258*/       volatile unsigned int   rop;            /* Raster OPeration                     */
++/*25c*/       volatile unsigned int   cmp;            /* Frame Buffer Compare                 */
++/*260*/       volatile unsigned int   matchab;        /* Buffer AB Match Mask                 */
++/*264*/       volatile unsigned int   matchc;         /* Buffer C(YZ) Match Mask              */
++/*268*/       volatile unsigned int   magnab;         /* Buffer AB Magnitude Mask             */
++/*26c*/       volatile unsigned int   magnc;          /* Buffer C(YZ) Magnitude Mask          */
++/*270*/       volatile unsigned int   fbcfg0;         /* Frame Buffer Config 0                */
++/*274*/       volatile unsigned int   fbcfg1;         /* Frame Buffer Config 1                */
++/*278*/       volatile unsigned int   fbcfg2;         /* Frame Buffer Config 2                */
++/*27c*/       volatile unsigned int   fbcfg3;         /* Frame Buffer Config 3                */
++/*280*/       volatile unsigned int   ppcfg;          /* Pixel Processor Config               */
++/*284*/       volatile unsigned int   pick;           /* Picking Control                      */
++/*288*/       volatile unsigned int   fillmode;       /* FillMode                             */
++/*28c*/       volatile unsigned int   fbramwac;       /* FB RAM Write Address Control         */
++/*290*/       volatile unsigned int   pmask;          /* RGB PlaneMask                        */
++/*294*/       volatile unsigned int   xpmask;         /* X PlaneMask                          */
++/*298*/       volatile unsigned int   ypmask;         /* Y PlaneMask                          */
++/*29c*/       volatile unsigned int   zpmask;         /* Z PlaneMask                          */
++/*2a0*/       ffb_auxclip             auxclip[4];     /* Auxilliary Viewport Clip             */
++
++      /* New 3dRAM III support regs */
++/*2c0*/       volatile unsigned int   rawblend2;
++/*2c4*/       volatile unsigned int   rawpreblend;
++/*2c8*/       volatile unsigned int   rawstencil;
++/*2cc*/       volatile unsigned int   rawstencilctl;
++/*2d0*/       volatile unsigned int   threedram1;
++/*2d4*/       volatile unsigned int   threedram2;
++/*2d8*/       volatile unsigned int   passin;
++/*2dc*/       volatile unsigned int   rawclrdepth;
++/*2e0*/       volatile unsigned int   rawpmask;
++/*2e4*/       volatile unsigned int   rawcsrc;
++/*2e8*/       volatile unsigned int   rawmatch;
++/*2ec*/       volatile unsigned int   rawmagn;
++/*2f0*/       volatile unsigned int   rawropblend;
++/*2f4*/       volatile unsigned int   rawcmp;
++/*2f8*/       volatile unsigned int   rawwac;
++/*2fc*/       volatile unsigned int   fbramid;
++
++/*300*/       volatile unsigned int   drawop;         /* Draw OPeration                       */
++/*304*/       unsigned int            pad10[2];       /* Reserved                             */
++/*30c*/       volatile unsigned int   lpat;           /* Line Pattern control                 */
++/*310*/       unsigned int            pad11;          /* Reserved                             */
++/*314*/       volatile unsigned int   fontxy;         /* XY Font coordinate                   */
++/*318*/       volatile unsigned int   fontw;          /* Font Width                           */
++/*31c*/       volatile unsigned int   fontinc;        /* Font Increment                       */
++/*320*/       volatile unsigned int   font;           /* Font bits                            */
++/*324*/       unsigned int            pad12[3];       /* Reserved                             */
++/*330*/       volatile unsigned int   blend2;
++/*334*/       volatile unsigned int   preblend;
++/*338*/       volatile unsigned int   stencil;
++/*33c*/       volatile unsigned int   stencilctl;
++
++/*340*/       unsigned int            pad13[4];       /* Reserved                             */
++/*350*/       volatile unsigned int   dcss1;          /* Depth Cue Scale Slope 1              */
++/*354*/       volatile unsigned int   dcss2;          /* Depth Cue Scale Slope 2              */
++/*358*/       volatile unsigned int   dcss3;          /* Depth Cue Scale Slope 3              */
++/*35c*/       volatile unsigned int   widpmask;
++/*360*/       volatile unsigned int   dcs2;
++/*364*/       volatile unsigned int   dcs3;
++/*368*/       volatile unsigned int   dcs4;
++/*36c*/       unsigned int            pad14;          /* Reserved                             */
++/*370*/       volatile unsigned int   dcd2;
++/*374*/       volatile unsigned int   dcd3;
++/*378*/       volatile unsigned int   dcd4;
++/*37c*/       unsigned int            pad15;          /* Reserved                             */
++/*380*/       volatile unsigned int   pattern[32];    /* area Pattern                         */
++/*400*/       unsigned int            pad16[8];       /* Reserved                             */
++/*420*/       volatile unsigned int   reset;          /* chip RESET                           */
++/*424*/       unsigned int            pad17[247];     /* Reserved                             */
++/*800*/       volatile unsigned int   devid;          /* Device ID                            */
++/*804*/       unsigned int            pad18[63];      /* Reserved                             */
++/*900*/       volatile unsigned int   ucsr;           /* User Control & Status Register       */
++/*904*/       unsigned int            pad19[31];      /* Reserved                             */
++/*980*/       volatile unsigned int   mer;            /* Mode Enable Register                 */
++/*984*/       unsigned int            pad20[1439];    /* Reserved                             */
++} ffb_fbc, *ffb_fbcPtr;
++
++struct ffb_hw_context {
++      int is_2d_only;
++
++      unsigned int ppc;
++      unsigned int wid;
++      unsigned int fg;
++      unsigned int bg;
++      unsigned int consty;
++      unsigned int constz;
++      unsigned int xclip;
++      unsigned int dcss;
++      unsigned int vclipmin;
++      unsigned int vclipmax;
++      unsigned int vclipzmin;
++      unsigned int vclipzmax;
++      unsigned int dcsf;
++      unsigned int dcsb;
++      unsigned int dczf;
++      unsigned int dczb;
++      unsigned int blendc;
++      unsigned int blendc1;
++      unsigned int blendc2;
++      unsigned int fbc;
++      unsigned int rop;
++      unsigned int cmp;
++      unsigned int matchab;
++      unsigned int matchc;
++      unsigned int magnab;
++      unsigned int magnc;
++      unsigned int pmask;
++      unsigned int xpmask;
++      unsigned int ypmask;
++      unsigned int zpmask;
++      unsigned int auxclip0min;
++      unsigned int auxclip0max;
++      unsigned int auxclip1min;
++      unsigned int auxclip1max;
++      unsigned int auxclip2min;
++      unsigned int auxclip2max;
++      unsigned int auxclip3min;
++      unsigned int auxclip3max;
++      unsigned int drawop;
++      unsigned int lpat;
++      unsigned int fontxy;
++      unsigned int fontw;
++      unsigned int fontinc;
++      unsigned int area_pattern[32];
++      unsigned int ucsr;
++      unsigned int stencil;
++      unsigned int stencilctl;
++      unsigned int dcss1;
++      unsigned int dcss2;
++      unsigned int dcss3;
++      unsigned int dcs2;
++      unsigned int dcs3;
++      unsigned int dcs4;
++      unsigned int dcd2;
++      unsigned int dcd3;
++      unsigned int dcd4;
++      unsigned int mer;
++};
++
++#define FFB_MAX_CTXS  32
++
++enum ffb_chip_type {
++      ffb1_prototype = 0,     /* Early pre-FCS FFB */
++      ffb1_standard,          /* First FCS FFB, 100Mhz UPA, 66MHz gclk */
++      ffb1_speedsort,         /* Second FCS FFB, 100Mhz UPA, 75MHz gclk */
++      ffb2_prototype,         /* Early pre-FCS vertical FFB2 */
++      ffb2_vertical,          /* First FCS FFB2/vertical, 100Mhz UPA, 100MHZ gclk,
++                                 75(SingleBuffer)/83(DoubleBuffer) MHz fclk */
++      ffb2_vertical_plus,     /* Second FCS FFB2/vertical, same timings */
++      ffb2_horizontal,        /* First FCS FFB2/horizontal, same timings as FFB2/vert */
++      ffb2_horizontal_plus,   /* Second FCS FFB2/horizontal, same timings */
++      afb_m3,                 /* FCS Elite3D, 3 float chips */
++      afb_m6                  /* FCS Elite3D, 6 float chips */
++};
++
++typedef struct ffb_dev_priv {
++      /* Misc software state. */
++      int                     prom_node;
++      enum ffb_chip_type      ffb_type;
++      u64                     card_phys_base;
++      struct miscdevice       miscdev;
++
++      /* Controller registers. */
++      ffb_fbcPtr              regs;
++
++      /* Context table. */
++      struct ffb_hw_context   *hw_state[FFB_MAX_CTXS];
++} ffb_dev_priv_t;
++
++extern unsigned long ffb_get_unmapped_area(struct file *filp,
++                                         unsigned long hint,
++                                         unsigned long len,
++                                         unsigned long pgoff,
++                                         unsigned long flags);
++extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map);
++extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev);
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_dma.c git-nokia/drivers/gpu/drm-tungsten/i810_dma.c
+--- git/drivers/gpu/drm-tungsten/i810_dma.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_dma.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1301 @@
++/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
++ *        Jeff Hartmann <jhartmann@valinux.com>
++ *          Keith Whitwell <keith@tungstengraphics.com>
++ *
++ */
++
++#include <linux/interrupt.h>  /* For task queue support */
++#include <linux/delay.h>
++#include <linux/pagemap.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "i810_drm.h"
++#include "i810_drv.h"
++
++#define I810_BUF_FREE         2
++#define I810_BUF_CLIENT               1
++#define I810_BUF_HARDWARE     0
++
++#define I810_BUF_UNMAPPED 0
++#define I810_BUF_MAPPED   1
++
++static inline void i810_print_status_page(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      u32 *temp = dev_priv->hw_status_page;
++      int i;
++
++      DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]);
++      DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]);
++      DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]);
++      DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]);
++      DRM_DEBUG("hw_status: Last Render: %x\n", temp[4]);
++      DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]);
++      for (i = 6; i < dma->buf_count + 6; i++) {
++              DRM_DEBUG("buffer status idx : %d used: %d\n", i - 6, temp[i]);
++      }
++}
++
++static struct drm_buf *i810_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++      int used;
++
++      /* Linear search might not be the best solution */
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++              /* In use is already a pointer */
++              used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
++                             I810_BUF_CLIENT);
++              if (used == I810_BUF_FREE) {
++                      return buf;
++              }
++      }
++      return NULL;
++}
++
++/* This should only be called if the buffer is not sent to the hardware
++ * yet, the hardware updates in use for us once its on the ring buffer.
++ */
++
++static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      int used;
++
++      /* In use is already a pointer */
++      used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
++      if (used != I810_BUF_CLIENT) {
++              DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev;
++      drm_i810_private_t *dev_priv;
++      struct drm_buf *buf;
++      drm_i810_buf_priv_t *buf_priv;
++
++      lock_kernel();
++      dev = priv->minor->dev;
++      dev_priv = dev->dev_private;
++      buf = dev_priv->mmap_buffer;
++      buf_priv = buf->dev_private;
++
++      vma->vm_flags |= (VM_IO | VM_DONTCOPY);
++      vma->vm_file = filp;
++
++      buf_priv->currently_mapped = I810_BUF_MAPPED;
++      unlock_kernel();
++
++      if (io_remap_pfn_range(vma, vma->vm_start,
++                             vma->vm_pgoff,
++                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
++              return -EAGAIN;
++      return 0;
++}
++
++static const struct file_operations i810_buffer_fops = {
++      .open = drm_open,
++      .release = drm_release,
++      .ioctl = drm_ioctl,
++      .mmap = i810_mmap_buffers,
++      .fasync = drm_fasync,
++};
++
++static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      const struct file_operations *old_fops;
++      int retcode = 0;
++
++      if (buf_priv->currently_mapped == I810_BUF_MAPPED)
++              return -EINVAL;
++
++      down_write(&current->mm->mmap_sem);
++      old_fops = file_priv->filp->f_op;
++      file_priv->filp->f_op = &i810_buffer_fops;
++      dev_priv->mmap_buffer = buf;
++      buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
++                                          PROT_READ | PROT_WRITE,
++                                          MAP_SHARED, buf->bus_address);
++      dev_priv->mmap_buffer = NULL;
++      file_priv->filp->f_op = old_fops;
++      if (IS_ERR(buf_priv->virtual)) {
++              /* Real error */
++              DRM_ERROR("mmap error\n");
++              retcode = PTR_ERR(buf_priv->virtual);
++              buf_priv->virtual = NULL;
++      }
++      up_write(&current->mm->mmap_sem);
++
++      return retcode;
++}
++
++static int i810_unmap_buffer(struct drm_buf * buf)
++{
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      int retcode = 0;
++
++      if (buf_priv->currently_mapped != I810_BUF_MAPPED)
++              return -EINVAL;
++
++      down_write(&current->mm->mmap_sem);
++      retcode = do_munmap(current->mm,
++                          (unsigned long)buf_priv->virtual,
++                          (size_t) buf->total);
++      up_write(&current->mm->mmap_sem);
++
++      buf_priv->currently_mapped = I810_BUF_UNMAPPED;
++      buf_priv->virtual = NULL;
++
++      return retcode;
++}
++
++static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
++                             struct drm_file *file_priv)
++{
++      struct drm_buf *buf;
++      drm_i810_buf_priv_t *buf_priv;
++      int retcode = 0;
++
++      buf = i810_freelist_get(dev);
++      if (!buf) {
++              retcode = -ENOMEM;
++              DRM_DEBUG("retcode=%d\n", retcode);
++              return retcode;
++      }
++
++      retcode = i810_map_buffer(buf, file_priv);
++      if (retcode) {
++              i810_freelist_put(dev, buf);
++              DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
++              return retcode;
++      }
++      buf->file_priv = file_priv;
++      buf_priv = buf->dev_private;
++      d->granted = 1;
++      d->request_idx = buf->idx;
++      d->request_size = buf->total;
++      d->virtual = buf_priv->virtual;
++
++      return retcode;
++}
++
++static int i810_dma_cleanup(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              int i;
++              drm_i810_private_t *dev_priv =
++                  (drm_i810_private_t *) dev->dev_private;
++
++              if (dev_priv->ring.virtual_start) {
++                      drm_core_ioremapfree(&dev_priv->ring.map, dev);
++              }
++              if (dev_priv->hw_status_page) {
++                      pci_free_consistent(dev->pdev, PAGE_SIZE,
++                                          dev_priv->hw_status_page,
++                                          dev_priv->dma_status_page);
++                      /* Need to rewrite hardware status page */
++                      I810_WRITE(0x02080, 0x1ffff000);
++              }
++              drm_free(dev->dev_private, sizeof(drm_i810_private_t),
++                       DRM_MEM_DRIVER);
++              dev->dev_private = NULL;
++
++              for (i = 0; i < dma->buf_count; i++) {
++                      struct drm_buf *buf = dma->buflist[i];
++                      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++                      if (buf_priv->kernel_virtual && buf->total)
++                              drm_core_ioremapfree(&buf_priv->map, dev);
++              }
++      }
++      return 0;
++}
++
++static int i810_wait_ring(struct drm_device * dev, int n)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
++      int iters = 0;
++      unsigned long end;
++      unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++
++      end = jiffies + (HZ * 3);
++      while (ring->space < n) {
++              ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++              ring->space = ring->head - (ring->tail + 8);
++              if (ring->space < 0)
++                      ring->space += ring->Size;
++
++              if (ring->head != last_head) {
++                      end = jiffies + (HZ * 3);
++                      last_head = ring->head;
++              }
++
++              iters++;
++              if (time_before(end, jiffies)) {
++                      DRM_ERROR("space: %d wanted %d\n", ring->space, n);
++                      DRM_ERROR("lockup\n");
++                      goto out_wait_ring;
++              }
++              udelay(1);
++      }
++
++      out_wait_ring:
++      return iters;
++}
++
++static void i810_kernel_lost_context(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
++
++      ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++      ring->tail = I810_READ(LP_RING + RING_TAIL);
++      ring->space = ring->head - (ring->tail + 8);
++      if (ring->space < 0)
++              ring->space += ring->Size;
++}
++
++static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int my_idx = 24;
++      u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
++      int i;
++
++      if (dma->buf_count > 1019) {
++              /* Not enough space in the status page for the freelist */
++              return -EINVAL;
++      }
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++              buf_priv->in_use = hw_status++;
++              buf_priv->my_use_idx = my_idx;
++              my_idx += 4;
++
++              *buf_priv->in_use = I810_BUF_FREE;
++
++              buf_priv->map.offset = buf->bus_address;
++              buf_priv->map.size = buf->total;
++              buf_priv->map.type = _DRM_AGP;
++              buf_priv->map.flags = 0;
++              buf_priv->map.mtrr = 0;
++
++              drm_core_ioremap(&buf_priv->map, dev);
++              buf_priv->kernel_virtual = buf_priv->map.handle;
++
++      }
++      return 0;
++}
++
++static int i810_dma_initialize(struct drm_device * dev,
++                             drm_i810_private_t * dev_priv,
++                             drm_i810_init_t * init)
++{
++      struct drm_map_list *r_list;
++      memset(dev_priv, 0, sizeof(drm_i810_private_t));
++
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              if (r_list->map &&
++                  r_list->map->type == _DRM_SHM &&
++                  r_list->map->flags & _DRM_CONTAINS_LOCK) {
++                      dev_priv->sarea_map = r_list->map;
++                      break;
++              }
++      }
++      if (!dev_priv->sarea_map) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not find sarea!\n");
++              return -EINVAL;
++      }
++      dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio_map) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not find mmio map!\n");
++              return -EINVAL;
++      }
++      dev->agp_buffer_token = init->buffers_offset;
++      dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
++      if (!dev->agp_buffer_map) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not find dma buffer map!\n");
++              return -EINVAL;
++      }
++
++      dev_priv->sarea_priv = (drm_i810_sarea_t *)
++          ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
++
++      dev_priv->ring.Start = init->ring_start;
++      dev_priv->ring.End = init->ring_end;
++      dev_priv->ring.Size = init->ring_size;
++
++      dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
++      dev_priv->ring.map.size = init->ring_size;
++      dev_priv->ring.map.type = _DRM_AGP;
++      dev_priv->ring.map.flags = 0;
++      dev_priv->ring.map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->ring.map, dev);
++
++      if (dev_priv->ring.map.handle == NULL) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not ioremap virtual address for"
++                        " ring buffer\n");
++              return -ENOMEM;
++      }
++
++      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++      dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
++
++      dev_priv->w = init->w;
++      dev_priv->h = init->h;
++      dev_priv->pitch = init->pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->front_offset = init->front_offset;
++
++      dev_priv->overlay_offset = init->overlay_offset;
++      dev_priv->overlay_physical = init->overlay_physical;
++
++      dev_priv->front_di1 = init->front_offset | init->pitch_bits;
++      dev_priv->back_di1 = init->back_offset | init->pitch_bits;
++      dev_priv->zi1 = init->depth_offset | init->pitch_bits;
++
++      /* Program Hardware Status Page */
++      dev_priv->hw_status_page =
++          pci_alloc_consistent(dev->pdev, PAGE_SIZE,
++                               &dev_priv->dma_status_page);
++      if (!dev_priv->hw_status_page) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("Can not allocate hardware status page\n");
++              return -ENOMEM;
++      }
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++      DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
++
++      I810_WRITE(0x02080, dev_priv->dma_status_page);
++      DRM_DEBUG("Enabled hardware status page\n");
++
++      /* Now we need to init our freelist */
++      if (i810_freelist_init(dev, dev_priv) != 0) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("Not enough space in the status page for"
++                        " the freelist\n");
++              return -ENOMEM;
++      }
++      dev->dev_private = (void *)dev_priv;
++
++      return 0;
++}
++
++static int i810_dma_init(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv;
++      drm_i810_init_t *init = data;
++      int retcode = 0;
++
++      switch (init->func) {
++      case I810_INIT_DMA_1_4:
++              DRM_INFO("Using v1.4 init.\n");
++              dev_priv = drm_alloc(sizeof(drm_i810_private_t),
++                                   DRM_MEM_DRIVER);
++              if (dev_priv == NULL)
++                      return -ENOMEM;
++              retcode = i810_dma_initialize(dev, dev_priv, init);
++              break;
++
++      case I810_CLEANUP_DMA:
++              DRM_INFO("DMA Cleanup\n");
++              retcode = i810_dma_cleanup(dev);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return retcode;
++}
++
++/* Most efficient way to verify state for the i810 is as it is
++ * emitted.  Non-conformant state is silently dropped.
++ *
++ * Use 'volatile' & local var tmp to force the emitted values to be
++ * identical to the verified ones.
++ */
++static void i810EmitContextVerified(struct drm_device * dev,
++                                  volatile unsigned int *code)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      int i, j = 0;
++      unsigned int tmp;
++      RING_LOCALS;
++
++      BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
++
++      OUT_RING(GFX_OP_COLOR_FACTOR);
++      OUT_RING(code[I810_CTXREG_CF1]);
++
++      OUT_RING(GFX_OP_STIPPLE);
++      OUT_RING(code[I810_CTXREG_ST1]);
++
++      for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
++              tmp = code[i];
++
++              if ((tmp & (7 << 29)) == (3 << 29) &&
++                  (tmp & (0x1f << 24)) < (0x1d << 24)) {
++                      OUT_RING(tmp);
++                      j++;
++              } else
++                      printk("constext state dropped!!!\n");
++      }
++
++      if (j & 1)
++              OUT_RING(0);
++
++      ADVANCE_LP_RING();
++}
++
++static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      int i, j = 0;
++      unsigned int tmp;
++      RING_LOCALS;
++
++      BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
++
++      OUT_RING(GFX_OP_MAP_INFO);
++      OUT_RING(code[I810_TEXREG_MI1]);
++      OUT_RING(code[I810_TEXREG_MI2]);
++      OUT_RING(code[I810_TEXREG_MI3]);
++
++      for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
++              tmp = code[i];
++
++              if ((tmp & (7 << 29)) == (3 << 29) &&
++                  (tmp & (0x1f << 24)) < (0x1d << 24)) {
++                      OUT_RING(tmp);
++                      j++;
++              } else
++                      printk("texture state dropped!!!\n");
++      }
++
++      if (j & 1)
++              OUT_RING(0);
++
++      ADVANCE_LP_RING();
++}
++
++/* Need to do some additional checking when setting the dest buffer.
++ */
++static void i810EmitDestVerified(struct drm_device * dev,
++                               volatile unsigned int *code)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      unsigned int tmp;
++      RING_LOCALS;
++
++      BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
++
++      tmp = code[I810_DESTREG_DI1];
++      if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
++              OUT_RING(CMD_OP_DESTBUFFER_INFO);
++              OUT_RING(tmp);
++      } else
++              DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
++                        tmp, dev_priv->front_di1, dev_priv->back_di1);
++
++      /* invarient:
++       */
++      OUT_RING(CMD_OP_Z_BUFFER_INFO);
++      OUT_RING(dev_priv->zi1);
++
++      OUT_RING(GFX_OP_DESTBUFFER_VARS);
++      OUT_RING(code[I810_DESTREG_DV1]);
++
++      OUT_RING(GFX_OP_DRAWRECT_INFO);
++      OUT_RING(code[I810_DESTREG_DR1]);
++      OUT_RING(code[I810_DESTREG_DR2]);
++      OUT_RING(code[I810_DESTREG_DR3]);
++      OUT_RING(code[I810_DESTREG_DR4]);
++      OUT_RING(0);
++
++      ADVANCE_LP_RING();
++}
++
++static void i810EmitState(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++
++      DRM_DEBUG("%x\n", dirty);
++
++      if (dirty & I810_UPLOAD_BUFFERS) {
++              i810EmitDestVerified(dev, sarea_priv->BufferState);
++              sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
++      }
++
++      if (dirty & I810_UPLOAD_CTX) {
++              i810EmitContextVerified(dev, sarea_priv->ContextState);
++              sarea_priv->dirty &= ~I810_UPLOAD_CTX;
++      }
++
++      if (dirty & I810_UPLOAD_TEX0) {
++              i810EmitTexVerified(dev, sarea_priv->TexState[0]);
++              sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
++      }
++
++      if (dirty & I810_UPLOAD_TEX1) {
++              i810EmitTexVerified(dev, sarea_priv->TexState[1]);
++              sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
++      }
++}
++
++/* need to verify
++ */
++static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
++                                  unsigned int clear_color,
++                                  unsigned int clear_zval)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int pitch = dev_priv->pitch;
++      int cpp = 2;
++      int i;
++      RING_LOCALS;
++
++      if (dev_priv->current_page == 1) {
++              unsigned int tmp = flags;
++
++              flags &= ~(I810_FRONT | I810_BACK);
++              if (tmp & I810_FRONT)
++                      flags |= I810_BACK;
++              if (tmp & I810_BACK)
++                      flags |= I810_FRONT;
++      }
++
++      i810_kernel_lost_context(dev);
++
++      if (nbox > I810_NR_SAREA_CLIPRECTS)
++              nbox = I810_NR_SAREA_CLIPRECTS;
++
++      for (i = 0; i < nbox; i++, pbox++) {
++              unsigned int x = pbox->x1;
++              unsigned int y = pbox->y1;
++              unsigned int width = (pbox->x2 - x) * cpp;
++              unsigned int height = pbox->y2 - y;
++              unsigned int start = y * pitch + x * cpp;
++
++              if (pbox->x1 > pbox->x2 ||
++                  pbox->y1 > pbox->y2 ||
++                  pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
++                      continue;
++
++              if (flags & I810_FRONT) {
++                      BEGIN_LP_RING(6);
++                      OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
++                      OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
++                      OUT_RING((height << 16) | width);
++                      OUT_RING(start);
++                      OUT_RING(clear_color);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              }
++
++              if (flags & I810_BACK) {
++                      BEGIN_LP_RING(6);
++                      OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
++                      OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
++                      OUT_RING((height << 16) | width);
++                      OUT_RING(dev_priv->back_offset + start);
++                      OUT_RING(clear_color);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              }
++
++              if (flags & I810_DEPTH) {
++                      BEGIN_LP_RING(6);
++                      OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
++                      OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
++                      OUT_RING((height << 16) | width);
++                      OUT_RING(dev_priv->depth_offset + start);
++                      OUT_RING(clear_zval);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              }
++      }
++}
++
++static void i810_dma_dispatch_swap(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int pitch = dev_priv->pitch;
++      int cpp = 2;
++      int i;
++      RING_LOCALS;
++
++      DRM_DEBUG("swapbuffers\n");
++
++      i810_kernel_lost_context(dev);
++
++      if (nbox > I810_NR_SAREA_CLIPRECTS)
++              nbox = I810_NR_SAREA_CLIPRECTS;
++
++      for (i = 0; i < nbox; i++, pbox++) {
++              unsigned int w = pbox->x2 - pbox->x1;
++              unsigned int h = pbox->y2 - pbox->y1;
++              unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
++              unsigned int start = dst;
++
++              if (pbox->x1 > pbox->x2 ||
++                  pbox->y1 > pbox->y2 ||
++                  pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
++                      continue;
++
++              BEGIN_LP_RING(6);
++              OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
++              OUT_RING(pitch | (0xCC << 16));
++              OUT_RING((h << 16) | (w * cpp));
++              if (dev_priv->current_page == 0)
++                      OUT_RING(dev_priv->front_offset + start);
++              else
++                      OUT_RING(dev_priv->back_offset + start);
++              OUT_RING(pitch);
++              if (dev_priv->current_page == 0)
++                      OUT_RING(dev_priv->back_offset + start);
++              else
++                      OUT_RING(dev_priv->front_offset + start);
++              ADVANCE_LP_RING();
++      }
++}
++
++static void i810_dma_dispatch_vertex(struct drm_device * dev,
++                                   struct drm_buf * buf, int discard, int used)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      struct drm_clip_rect *box = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      unsigned long address = (unsigned long)buf->bus_address;
++      unsigned long start = address - dev->agp->base;
++      int i = 0;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      if (nbox > I810_NR_SAREA_CLIPRECTS)
++              nbox = I810_NR_SAREA_CLIPRECTS;
++
++      if (used > 4 * 1024)
++              used = 0;
++
++      if (sarea_priv->dirty)
++              i810EmitState(dev);
++
++      if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
++              unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
++
++              *(u32 *) buf_priv->kernel_virtual =
++                  ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
++
++              if (used & 4) {
++                      *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
++                      used += 4;
++              }
++
++              i810_unmap_buffer(buf);
++      }
++
++      if (used) {
++              do {
++                      if (i < nbox) {
++                              BEGIN_LP_RING(4);
++                              OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
++                                       SC_ENABLE);
++                              OUT_RING(GFX_OP_SCISSOR_INFO);
++                              OUT_RING(box[i].x1 | (box[i].y1 << 16));
++                              OUT_RING((box[i].x2 -
++                                        1) | ((box[i].y2 - 1) << 16));
++                              ADVANCE_LP_RING();
++                      }
++
++                      BEGIN_LP_RING(4);
++                      OUT_RING(CMD_OP_BATCH_BUFFER);
++                      OUT_RING(start | BB1_PROTECTED);
++                      OUT_RING(start + used - 4);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++
++              } while (++i < nbox);
++      }
++
++      if (discard) {
++              dev_priv->counter++;
++
++              (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
++                            I810_BUF_HARDWARE);
++
++              BEGIN_LP_RING(8);
++              OUT_RING(CMD_STORE_DWORD_IDX);
++              OUT_RING(20);
++              OUT_RING(dev_priv->counter);
++              OUT_RING(CMD_STORE_DWORD_IDX);
++              OUT_RING(buf_priv->my_use_idx);
++              OUT_RING(I810_BUF_FREE);
++              OUT_RING(CMD_REPORT_HEAD);
++              OUT_RING(0);
++              ADVANCE_LP_RING();
++      }
++}
++
++static void i810_dma_dispatch_flip(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      int pitch = dev_priv->pitch;
++      RING_LOCALS;
++
++      DRM_DEBUG("page=%d pfCurrentPage=%d\n",
++                dev_priv->current_page,
++                dev_priv->sarea_priv->pf_current_page);
++
++      i810_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(2);
++      OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
++      /* On i815 at least ASYNC is buggy */
++      /* pitch<<5 is from 11.2.8 p158,
++         its the pitch / 8 then left shifted 8,
++         so (pitch >> 3) << 8 */
++      OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
++      if (dev_priv->current_page == 0) {
++              OUT_RING(dev_priv->back_offset);
++              dev_priv->current_page = 1;
++      } else {
++              OUT_RING(dev_priv->front_offset);
++              dev_priv->current_page = 0;
++      }
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      BEGIN_LP_RING(2);
++      OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
++
++}
++
++static void i810_dma_quiescent(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(4);
++      OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
++      OUT_RING(CMD_REPORT_HEAD);
++      OUT_RING(0);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      i810_wait_ring(dev, dev_priv->ring.Size - 8);
++}
++
++static int i810_flush_queue(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      int i, ret = 0;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(2);
++      OUT_RING(CMD_REPORT_HEAD);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      i810_wait_ring(dev, dev_priv->ring.Size - 8);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++              int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
++                                 I810_BUF_FREE);
++
++              if (used == I810_BUF_HARDWARE)
++                      DRM_DEBUG("reclaimed from HARDWARE\n");
++              if (used == I810_BUF_CLIENT)
++                      DRM_DEBUG("still on client\n");
++      }
++
++      return ret;
++}
++
++/* Must be called with the lock held */
++static void i810_reclaim_buffers(struct drm_device *dev,
++                               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      if (!dma)
++              return;
++      if (!dev->dev_private)
++              return;
++      if (!dma->buflist)
++              return;
++
++      i810_flush_queue(dev);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++              if (buf->file_priv == file_priv && buf_priv) {
++                      int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
++                                         I810_BUF_FREE);
++
++                      if (used == I810_BUF_CLIENT)
++                              DRM_DEBUG("reclaimed from client\n");
++                      if (buf_priv->currently_mapped == I810_BUF_MAPPED)
++                              buf_priv->currently_mapped = I810_BUF_UNMAPPED;
++              }
++      }
++}
++
++static int i810_flush_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv)
++{
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      i810_flush_queue(dev);
++      return 0;
++}
++
++static int i810_dma_vertex(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i810_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("idx %d used %d discard %d\n",
++                vertex->idx, vertex->used, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx > dma->buf_count)
++              return -EINVAL;
++
++      i810_dma_dispatch_vertex(dev,
++                               dma->buflist[vertex->idx],
++                               vertex->discard, vertex->used);
++
++      atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++      atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++      sarea_priv->last_enqueue = dev_priv->counter - 1;
++      sarea_priv->last_dispatch = (int)hw_status[5];
++
++      return 0;
++}
++
++static int i810_clear_bufs(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv)
++{
++      drm_i810_clear_t *clear = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* GH: Someone's doing nasty things... */
++      if (!dev->dev_private) {
++              return -EINVAL;
++      }
++
++      i810_dma_dispatch_clear(dev, clear->flags,
++                              clear->clear_color, clear->clear_depth);
++      return 0;
++}
++
++static int i810_swap_bufs(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      i810_dma_dispatch_swap(dev);
++      return 0;
++}
++
++static int i810_getage(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++
++      sarea_priv->last_dispatch = (int)hw_status[5];
++      return 0;
++}
++
++static int i810_getbuf(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      int retcode = 0;
++      drm_i810_dma_t *d = data;
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      d->granted = 0;
++
++      retcode = i810_dma_get_buffer(dev, d, file_priv);
++
++      DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
++                current->pid, retcode, d->granted);
++
++      sarea_priv->last_dispatch = (int)hw_status[5];
++
++      return retcode;
++}
++
++static int i810_copybuf(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      /* Never copy - 2.4.x doesn't need it */
++      return 0;
++}
++
++static int i810_docopy(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      /* Never copy - 2.4.x doesn't need it */
++      return 0;
++}
++
++static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
++                               unsigned int last_render)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned long address = (unsigned long)buf->bus_address;
++      unsigned long start = address - dev->agp->base;
++      int u;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
++      if (u != I810_BUF_CLIENT) {
++              DRM_DEBUG("MC found buffer that isn't mine!\n");
++      }
++
++      if (used > 4 * 1024)
++              used = 0;
++
++      sarea_priv->dirty = 0x7f;
++
++      DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
++
++      dev_priv->counter++;
++      DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
++      DRM_DEBUG("start : %lx\n", start);
++      DRM_DEBUG("used : %d\n", used);
++      DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
++
++      if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
++              if (used & 4) {
++                      *(u32 *) ((char *) buf_priv->virtual + used) = 0;
++                      used += 4;
++              }
++
++              i810_unmap_buffer(buf);
++      }
++      BEGIN_LP_RING(4);
++      OUT_RING(CMD_OP_BATCH_BUFFER);
++      OUT_RING(start | BB1_PROTECTED);
++      OUT_RING(start + used - 4);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      BEGIN_LP_RING(8);
++      OUT_RING(CMD_STORE_DWORD_IDX);
++      OUT_RING(buf_priv->my_use_idx);
++      OUT_RING(I810_BUF_FREE);
++      OUT_RING(0);
++
++      OUT_RING(CMD_STORE_DWORD_IDX);
++      OUT_RING(16);
++      OUT_RING(last_render);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++}
++
++static int i810_dma_mc(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i810_mc_t *mc = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (mc->idx >= dma->buf_count || mc->idx < 0)
++              return -EINVAL;
++
++      i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
++                           mc->last_render);
++
++      atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++      atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++      sarea_priv->last_enqueue = dev_priv->counter - 1;
++      sarea_priv->last_dispatch = (int)hw_status[5];
++
++      return 0;
++}
++
++static int i810_rstatus(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++
++      return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
++}
++
++static int i810_ov0_info(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      drm_i810_overlay_t *ov = data;
++
++      ov->offset = dev_priv->overlay_offset;
++      ov->physical = dev_priv->overlay_physical;
++
++      return 0;
++}
++
++static int i810_fstatus(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      return I810_READ(0x30008);
++}
++
++static int i810_ov0_flip(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      //Tell the overlay to update
++      I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
++
++      return 0;
++}
++
++/* Not sure why this isn't set all the time:
++ */
++static void i810_do_init_pageflip(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      dev_priv->page_flipping = 1;
++      dev_priv->current_page = 0;
++      dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
++}
++
++static int i810_do_cleanup_pageflip(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      if (dev_priv->current_page != 0)
++              i810_dma_dispatch_flip(dev);
++
++      dev_priv->page_flipping = 0;
++      return 0;
++}
++
++static int i810_flip_bufs(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv->page_flipping)
++              i810_do_init_pageflip(dev);
++
++      i810_dma_dispatch_flip(dev);
++      return 0;
++}
++
++int i810_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      /* i810 has 4 more counters */
++      dev->counters += 4;
++      dev->types[6] = _DRM_STAT_IRQ;
++      dev->types[7] = _DRM_STAT_PRIMARY;
++      dev->types[8] = _DRM_STAT_SECONDARY;
++      dev->types[9] = _DRM_STAT_DMA;
++
++      return 0;
++}
++
++void i810_driver_lastclose(struct drm_device * dev)
++{
++      i810_dma_cleanup(dev);
++}
++
++void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
++{
++      if (dev->dev_private) {
++              drm_i810_private_t *dev_priv = dev->dev_private;
++              if (dev_priv->page_flipping) {
++                      i810_do_cleanup_pageflip(dev);
++              }
++      }
++}
++
++void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
++                                      struct drm_file *file_priv)
++{
++      i810_reclaim_buffers(dev, file_priv);
++}
++
++int i810_driver_dma_quiescent(struct drm_device * dev)
++{
++      i810_dma_quiescent(dev);
++      return 0;
++}
++
++struct drm_ioctl_desc i810_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
++};
++
++int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * All Intel graphics chipsets are treated as AGP, even if they are really
++ * PCI-e.
++ *
++ * \param dev   The device to be tested.
++ *
++ * \returns
++ * A value of 1 is always retured to indictate every i810 is AGP.
++ */
++int i810_driver_device_is_agp(struct drm_device * dev)
++{
++      return 1;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drm.h git-nokia/drivers/gpu/drm-tungsten/i810_drm.h
+--- git/drivers/gpu/drm-tungsten/i810_drm.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drm.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,263 @@
++#ifndef _I810_DRM_H_
++#define _I810_DRM_H_
++
++/* WARNING: These defines must be the same as what the Xserver uses.
++ * if you change them, you must change the defines in the Xserver.
++ */
++
++#ifndef _I810_DEFINES_
++#define _I810_DEFINES_
++
++#define I810_DMA_BUF_ORDER            12
++#define I810_DMA_BUF_SZ               (1<<I810_DMA_BUF_ORDER)
++#define I810_DMA_BUF_NR               256
++#define I810_NR_SAREA_CLIPRECTS       8
++
++/* Each region is a minimum of 64k, and there are at most 64 of them.
++ */
++#define I810_NR_TEX_REGIONS 64
++#define I810_LOG_MIN_TEX_REGION_SIZE 16
++#endif
++
++#define I810_UPLOAD_TEX0IMAGE  0x1    /* handled clientside */
++#define I810_UPLOAD_TEX1IMAGE  0x2    /* handled clientside */
++#define I810_UPLOAD_CTX        0x4
++#define I810_UPLOAD_BUFFERS    0x8
++#define I810_UPLOAD_TEX0       0x10
++#define I810_UPLOAD_TEX1       0x20
++#define I810_UPLOAD_CLIPRECTS  0x40
++
++/* Indices into buf.Setup where various bits of state are mirrored per
++ * context and per buffer.  These can be fired at the card as a unit,
++ * or in a piecewise fashion as required.
++ */
++
++/* Destbuffer state
++ *    - backbuffer linear offset and pitch -- invarient in the current dri
++ *    - zbuffer linear offset and pitch -- also invarient
++ *    - drawing origin in back and depth buffers.
++ *
++ * Keep the depth/back buffer state here to accommodate private buffers
++ * in the future.
++ */
++#define I810_DESTREG_DI0  0   /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
++#define I810_DESTREG_DI1  1
++#define I810_DESTREG_DV0  2   /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
++#define I810_DESTREG_DV1  3
++#define I810_DESTREG_DR0  4   /* GFX_OP_DRAWRECT_INFO (4 dwords) */
++#define I810_DESTREG_DR1  5
++#define I810_DESTREG_DR2  6
++#define I810_DESTREG_DR3  7
++#define I810_DESTREG_DR4  8
++#define I810_DEST_SETUP_SIZE 10
++
++/* Context state
++ */
++#define I810_CTXREG_CF0   0   /* GFX_OP_COLOR_FACTOR */
++#define I810_CTXREG_CF1   1
++#define I810_CTXREG_ST0   2   /* GFX_OP_STIPPLE */
++#define I810_CTXREG_ST1   3
++#define I810_CTXREG_VF    4   /* GFX_OP_VERTEX_FMT */
++#define I810_CTXREG_MT    5   /* GFX_OP_MAP_TEXELS */
++#define I810_CTXREG_MC0   6   /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
++#define I810_CTXREG_MC1   7   /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
++#define I810_CTXREG_MC2   8   /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
++#define I810_CTXREG_MA0   9   /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
++#define I810_CTXREG_MA1   10  /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
++#define I810_CTXREG_MA2   11  /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
++#define I810_CTXREG_SDM   12  /* GFX_OP_SRC_DEST_MONO */
++#define I810_CTXREG_FOG   13  /* GFX_OP_FOG_COLOR */
++#define I810_CTXREG_B1    14  /* GFX_OP_BOOL_1 */
++#define I810_CTXREG_B2    15  /* GFX_OP_BOOL_2 */
++#define I810_CTXREG_LCS   16  /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
++#define I810_CTXREG_PV    17  /* GFX_OP_PV_RULE -- Invarient! */
++#define I810_CTXREG_ZA    18  /* GFX_OP_ZBIAS_ALPHAFUNC */
++#define I810_CTXREG_AA    19  /* GFX_OP_ANTIALIAS */
++#define I810_CTX_SETUP_SIZE 20
++
++/* Texture state (per tex unit)
++ */
++#define I810_TEXREG_MI0  0    /* GFX_OP_MAP_INFO (4 dwords) */
++#define I810_TEXREG_MI1  1
++#define I810_TEXREG_MI2  2
++#define I810_TEXREG_MI3  3
++#define I810_TEXREG_MF   4    /* GFX_OP_MAP_FILTER */
++#define I810_TEXREG_MLC  5    /* GFX_OP_MAP_LOD_CTL */
++#define I810_TEXREG_MLL  6    /* GFX_OP_MAP_LOD_LIMITS */
++#define I810_TEXREG_MCS  7    /* GFX_OP_MAP_COORD_SETS ??? */
++#define I810_TEX_SETUP_SIZE 8
++
++/* Flags for clear ioctl
++ */
++#define I810_FRONT   0x1
++#define I810_BACK    0x2
++#define I810_DEPTH   0x4
++
++typedef enum _drm_i810_init_func {
++      I810_INIT_DMA = 0x01,
++      I810_CLEANUP_DMA = 0x02,
++      I810_INIT_DMA_1_4 = 0x03
++} drm_i810_init_func_t;
++
++/* This is the init structure after v1.2 */
++typedef struct _drm_i810_init {
++      drm_i810_init_func_t func;
++      unsigned int mmio_offset;
++      unsigned int buffers_offset;
++      int sarea_priv_offset;
++      unsigned int ring_start;
++      unsigned int ring_end;
++      unsigned int ring_size;
++      unsigned int front_offset;
++      unsigned int back_offset;
++      unsigned int depth_offset;
++      unsigned int overlay_offset;
++      unsigned int overlay_physical;
++      unsigned int w;
++      unsigned int h;
++      unsigned int pitch;
++      unsigned int pitch_bits;
++} drm_i810_init_t;
++
++/* Warning: If you change the SAREA structure you must change the Xserver
++ * structure as well */
++
++typedef struct _drm_i810_tex_region {
++      unsigned char next, prev;       /* indices to form a circular LRU  */
++      unsigned char in_use;   /* owned by a client, or free? */
++      int age;                /* tracked by clients to update local LRU's */
++} drm_i810_tex_region_t;
++
++typedef struct _drm_i810_sarea {
++      unsigned int ContextState[I810_CTX_SETUP_SIZE];
++      unsigned int BufferState[I810_DEST_SETUP_SIZE];
++      unsigned int TexState[2][I810_TEX_SETUP_SIZE];
++      unsigned int dirty;
++
++      unsigned int nbox;
++      struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
++
++      /* Maintain an LRU of contiguous regions of texture space.  If
++       * you think you own a region of texture memory, and it has an
++       * age different to the one you set, then you are mistaken and
++       * it has been stolen by another client.  If global texAge
++       * hasn't changed, there is no need to walk the list.
++       *
++       * These regions can be used as a proxy for the fine-grained
++       * texture information of other clients - by maintaining them
++       * in the same lru which is used to age their own textures,
++       * clients have an approximate lru for the whole of global
++       * texture space, and can make informed decisions as to which
++       * areas to kick out.  There is no need to choose whether to
++       * kick out your own texture or someone else's - simply eject
++       * them all in LRU order.
++       */
++
++      drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
++      /* Last elt is sentinal */
++      int texAge;             /* last time texture was uploaded */
++      int last_enqueue;       /* last time a buffer was enqueued */
++      int last_dispatch;      /* age of the most recently dispatched buffer */
++      int last_quiescent;     /*  */
++      int ctxOwner;           /* last context to upload state */
++
++      int vertex_prim;
++
++      int pf_enabled;         /* is pageflipping allowed? */
++      int pf_active;
++      int pf_current_page;    /* which buffer is being displayed? */
++} drm_i810_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (xf86drmMga.h)
++ */
++
++/* i810 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_I810_INIT         0x00
++#define DRM_I810_VERTEX               0x01
++#define DRM_I810_CLEAR                0x02
++#define DRM_I810_FLUSH                0x03
++#define DRM_I810_GETAGE               0x04
++#define DRM_I810_GETBUF               0x05
++#define DRM_I810_SWAP         0x06
++#define DRM_I810_COPY         0x07
++#define DRM_I810_DOCOPY               0x08
++#define DRM_I810_OV0INFO      0x09
++#define DRM_I810_FSTATUS      0x0a
++#define DRM_I810_OV0FLIP      0x0b
++#define DRM_I810_MC           0x0c
++#define DRM_I810_RSTATUS      0x0d
++#define DRM_I810_FLIP         0x0e
++
++#define DRM_IOCTL_I810_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
++#define DRM_IOCTL_I810_VERTEX         DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
++#define DRM_IOCTL_I810_CLEAR          DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
++#define DRM_IOCTL_I810_FLUSH          DRM_IO(  DRM_COMMAND_BASE + DRM_I810_FLUSH)
++#define DRM_IOCTL_I810_GETAGE         DRM_IO(  DRM_COMMAND_BASE + DRM_I810_GETAGE)
++#define DRM_IOCTL_I810_GETBUF         DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
++#define DRM_IOCTL_I810_SWAP           DRM_IO(  DRM_COMMAND_BASE + DRM_I810_SWAP)
++#define DRM_IOCTL_I810_COPY           DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
++#define DRM_IOCTL_I810_DOCOPY         DRM_IO(  DRM_COMMAND_BASE + DRM_I810_DOCOPY)
++#define DRM_IOCTL_I810_OV0INFO                DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
++#define DRM_IOCTL_I810_FSTATUS                DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
++#define DRM_IOCTL_I810_OV0FLIP                DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
++#define DRM_IOCTL_I810_MC             DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
++#define DRM_IOCTL_I810_RSTATUS                DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
++#define DRM_IOCTL_I810_FLIP             DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
++
++typedef struct _drm_i810_clear {
++      int clear_color;
++      int clear_depth;
++      int flags;
++} drm_i810_clear_t;
++
++/* These may be placeholders if we have more cliprects than
++ * I810_NR_SAREA_CLIPRECTS.  In that case, the client sets discard to
++ * false, indicating that the buffer will be dispatched again with a
++ * new set of cliprects.
++ */
++typedef struct _drm_i810_vertex {
++      int idx;                /* buffer index */
++      int used;               /* nr bytes in use */
++      int discard;            /* client is finished with the buffer? */
++} drm_i810_vertex_t;
++
++typedef struct _drm_i810_copy_t {
++      int idx;                /* buffer index */
++      int used;               /* nr bytes in use */
++      void *address;          /* Address to copy from */
++} drm_i810_copy_t;
++
++#define PR_TRIANGLES         (0x0<<18)
++#define PR_TRISTRIP_0        (0x1<<18)
++#define PR_TRISTRIP_1        (0x2<<18)
++#define PR_TRIFAN            (0x3<<18)
++#define PR_POLYGON           (0x4<<18)
++#define PR_LINES             (0x5<<18)
++#define PR_LINESTRIP         (0x6<<18)
++#define PR_RECTS             (0x7<<18)
++#define PR_MASK              (0x7<<18)
++
++typedef struct drm_i810_dma {
++      void *virtual;
++      int request_idx;
++      int request_size;
++      int granted;
++} drm_i810_dma_t;
++
++typedef struct _drm_i810_overlay_t {
++      unsigned int offset;    /* Address of the Overlay Regs */
++      unsigned int physical;
++} drm_i810_overlay_t;
++
++typedef struct _drm_i810_mc {
++      int idx;                /* buffer index */
++      int used;               /* nr bytes in use */
++      int num_blocks;         /* number of GFXBlocks */
++      int *length;            /* List of lengths for GFXBlocks (FUTURE) */
++      unsigned int last_render;       /* Last Render Request */
++} drm_i810_mc_t;
++
++#endif                                /* _I810_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drv.c git-nokia/drivers/gpu/drm-tungsten/i810_drv.c
+--- git/drivers/gpu/drm-tungsten/i810_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,104 @@
++/* i810_drv.c -- I810 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Jeff Hartmann <jhartmann@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i810_drm.h"
++#include "i810_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      i810_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
++          DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
++      .dev_priv_size = sizeof(drm_i810_buf_priv_t),
++      .load = i810_driver_load,
++      .lastclose = i810_driver_lastclose,
++      .preclose = i810_driver_preclose,
++      .device_is_agp = i810_driver_device_is_agp,
++      .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
++      .dma_quiescent = i810_driver_dma_quiescent,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = i810_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++              },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init i810_init(void)
++{
++      driver.num_ioctls = i810_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit i810_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(i810_init);
++module_exit(i810_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drv.h git-nokia/drivers/gpu/drm-tungsten/i810_drv.h
+--- git/drivers/gpu/drm-tungsten/i810_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,242 @@
++/* i810_drv.h -- Private header for the Matrox g200/g400 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
++ *        Jeff Hartmann <jhartmann@valinux.com>
++ *
++ */
++
++#ifndef _I810_DRV_H_
++#define _I810_DRV_H_
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "VA Linux Systems Inc."
++
++#define DRIVER_NAME           "i810"
++#define DRIVER_DESC           "Intel i810"
++#define DRIVER_DATE           "20030605"
++
++/* Interface history
++ *
++ * 1.1   - XFree86 4.1
++ * 1.2   - XvMC interfaces
++ *       - XFree86 4.2
++ * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
++ *       - Remove requirement for interrupt (leave stubs again)
++ * 1.3   - Add page flipping.
++ * 1.4   - fix DRM interface
++ */
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          4
++#define DRIVER_PATCHLEVEL     0
++
++typedef struct drm_i810_buf_priv {
++      u32 *in_use;
++      int my_use_idx;
++      int currently_mapped;
++      void *virtual;
++      void *kernel_virtual;
++      drm_local_map_t map;
++} drm_i810_buf_priv_t;
++
++typedef struct _drm_i810_ring_buffer {
++      int tail_mask;
++      unsigned long Start;
++      unsigned long End;
++      unsigned long Size;
++      u8 *virtual_start;
++      int head;
++      int tail;
++      int space;
++      drm_local_map_t map;
++} drm_i810_ring_buffer_t;
++
++typedef struct drm_i810_private {
++      struct drm_map *sarea_map;
++      struct drm_map *mmio_map;
++
++      drm_i810_sarea_t *sarea_priv;
++      drm_i810_ring_buffer_t ring;
++
++      void *hw_status_page;
++      unsigned long counter;
++
++      dma_addr_t dma_status_page;
++
++      struct drm_buf *mmap_buffer;
++
++      u32 front_di1, back_di1, zi1;
++
++      int back_offset;
++      int depth_offset;
++      int overlay_offset;
++      int overlay_physical;
++      int w, h;
++      int pitch;
++      int back_pitch;
++      int depth_pitch;
++
++      int do_boxes;
++      int dma_used;
++
++      int current_page;
++      int page_flipping;
++
++      wait_queue_head_t irq_queue;
++      atomic_t irq_received;
++      atomic_t irq_emitted;
++
++      int front_offset;
++} drm_i810_private_t;
++
++                              /* i810_dma.c */
++extern int i810_driver_dma_quiescent(struct drm_device * dev);
++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
++                                             struct drm_file *file_priv);
++extern int i810_driver_load(struct drm_device *, unsigned long flags);
++extern void i810_driver_lastclose(struct drm_device * dev);
++extern void i810_driver_preclose(struct drm_device * dev,
++                               struct drm_file *file_priv);
++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
++                                             struct drm_file *file_priv);
++extern int i810_driver_device_is_agp(struct drm_device * dev);
++
++extern struct drm_ioctl_desc i810_ioctls[];
++extern int i810_max_ioctl;
++
++#define I810_BASE(reg)                ((unsigned long) \
++                              dev_priv->mmio_map->handle)
++#define I810_ADDR(reg)                (I810_BASE(reg) + reg)
++#define I810_DEREF(reg)               *(__volatile__ int *)I810_ADDR(reg)
++#define I810_READ(reg)                I810_DEREF(reg)
++#define I810_WRITE(reg,val)   do { I810_DEREF(reg) = val; } while (0)
++#define I810_DEREF16(reg)     *(__volatile__ u16 *)I810_ADDR(reg)
++#define I810_READ16(reg)      I810_DEREF16(reg)
++#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
++
++#define I810_VERBOSE 0
++#define RING_LOCALS   unsigned int outring, ringmask; \
++                        volatile char *virt;
++
++#define BEGIN_LP_RING(n) do {                                         \
++      if (I810_VERBOSE)                                               \
++              DRM_DEBUG("BEGIN_LP_RING(%d)\n", n);                    \
++      if (dev_priv->ring.space < n*4)                                 \
++              i810_wait_ring(dev, n*4);                               \
++      dev_priv->ring.space -= n*4;                                    \
++      outring = dev_priv->ring.tail;                                  \
++      ringmask = dev_priv->ring.tail_mask;                            \
++      virt = dev_priv->ring.virtual_start;                            \
++} while (0)
++
++#define ADVANCE_LP_RING() do {                                        \
++      if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");       \
++      dev_priv->ring.tail = outring;                          \
++      I810_WRITE(LP_RING + RING_TAIL, outring);               \
++} while(0)
++
++#define OUT_RING(n) do {                                              \
++      if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
++      *(volatile unsigned int *)(virt + outring) = n;                 \
++      outring += 4;                                                   \
++      outring &= ringmask;                                            \
++} while (0)
++
++#define GFX_OP_USER_INTERRUPT         ((0<<29)|(2<<23))
++#define GFX_OP_BREAKPOINT_INTERRUPT   ((0<<29)|(1<<23))
++#define CMD_REPORT_HEAD                       (7<<23)
++#define CMD_STORE_DWORD_IDX           ((0x21<<23) | 0x1)
++#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
++
++#define INST_PARSER_CLIENT   0x00000000
++#define INST_OP_FLUSH        0x02000000
++#define INST_FLUSH_MAP_CACHE 0x00000001
++
++#define BB1_START_ADDR_MASK   (~0x7)
++#define BB1_PROTECTED         (1<<0)
++#define BB1_UNPROTECTED       (0<<0)
++#define BB2_END_ADDR_MASK     (~0x7)
++
++#define I810REG_HWSTAM                0x02098
++#define I810REG_INT_IDENTITY_R        0x020a4
++#define I810REG_INT_MASK_R    0x020a8
++#define I810REG_INT_ENABLE_R  0x020a0
++
++#define LP_RING                       0x2030
++#define HP_RING                       0x2040
++#define RING_TAIL             0x00
++#define TAIL_ADDR             0x000FFFF8
++#define RING_HEAD             0x04
++#define HEAD_WRAP_COUNT               0xFFE00000
++#define HEAD_WRAP_ONE         0x00200000
++#define HEAD_ADDR             0x001FFFFC
++#define RING_START            0x08
++#define START_ADDR            0x00FFFFF8
++#define RING_LEN              0x0C
++#define RING_NR_PAGES         0x000FF000
++#define RING_REPORT_MASK      0x00000006
++#define RING_REPORT_64K               0x00000002
++#define RING_REPORT_128K      0x00000004
++#define RING_NO_REPORT                0x00000000
++#define RING_VALID_MASK               0x00000001
++#define RING_VALID            0x00000001
++#define RING_INVALID          0x00000000
++
++#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define SC_UPDATE_SCISSOR       (0x1<<1)
++#define SC_ENABLE_MASK          (0x1<<0)
++#define SC_ENABLE               (0x1<<0)
++
++#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
++#define SCI_YMIN_MASK      (0xffff<<16)
++#define SCI_XMIN_MASK      (0xffff<<0)
++#define SCI_YMAX_MASK      (0xffff<<16)
++#define SCI_XMAX_MASK      (0xffff<<0)
++
++#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
++#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
++#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x2)
++#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
++#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
++#define GFX_OP_PRIMITIVE         ((0x3<<29)|(0x1f<<24))
++
++#define CMD_OP_Z_BUFFER_INFO     ((0x0<<29)|(0x16<<23))
++#define CMD_OP_DESTBUFFER_INFO   ((0x0<<29)|(0x15<<23))
++#define CMD_OP_FRONTBUFFER_INFO  ((0x0<<29)|(0x14<<23))
++#define CMD_OP_WAIT_FOR_EVENT    ((0x0<<29)|(0x03<<23))
++
++#define BR00_BITBLT_CLIENT   0x40000000
++#define BR00_OP_COLOR_BLT    0x10000000
++#define BR00_OP_SRC_COPY_BLT 0x10C00000
++#define BR13_SOLID_PATTERN   0x80000000
++
++#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
++#define WAIT_FOR_PLANE_A_FLIP      (1<<2)
++#define WAIT_FOR_VBLANK (1<<3)
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_buffer.c git-nokia/drivers/gpu/drm-tungsten/i915_buffer.c
+--- git/drivers/gpu/drm-tungsten/i915_buffer.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_buffer.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,303 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
++{
++      return drm_agp_init_ttm(dev);
++}
++
++int i915_fence_type(struct drm_buffer_object *bo,
++                   uint32_t *fclass,
++                   uint32_t *type)
++{
++      if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
++              *type = 3;
++      else
++              *type = 1;
++      return 0;
++}
++
++int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
++{
++      /*
++       * FIXME: Only emit once per batchbuffer submission.
++       */
++
++      uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
++
++      if (flags & DRM_BO_FLAG_READ)
++              flush_cmd |= MI_READ_FLUSH;
++      if (flags & DRM_BO_FLAG_EXE)
++              flush_cmd |= MI_EXE_FLUSH;
++
++      return i915_emit_mi_flush(dev, flush_cmd);
++}
++
++int i915_init_mem_type(struct drm_device *dev, uint32_t type,
++                     struct drm_mem_type_manager *man)
++{
++      switch (type) {
++      case DRM_BO_MEM_LOCAL:
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_CACHED;
++              man->drm_bus_maptype = 0;
++              man->gpu_offset = 0;
++              break;
++      case DRM_BO_MEM_TT:
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_AGP;
++              man->gpu_offset = 0;
++              break;
++      case DRM_BO_MEM_PRIV0:
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_AGP;
++              man->gpu_offset = 0;
++              break;
++      default:
++              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++/*
++ * i915_evict_flags:
++ *
++ * @bo: the buffer object to be evicted
++ *
++ * Return the bo flags for a buffer which is not mapped to the hardware.
++ * These will be placed in proposed_flags so that when the move is
++ * finished, they'll end up in bo->mem.flags
++ */
++uint64_t i915_evict_flags(struct drm_buffer_object *bo)
++{
++      switch (bo->mem.mem_type) {
++      case DRM_BO_MEM_LOCAL:
++      case DRM_BO_MEM_TT:
++              return DRM_BO_FLAG_MEM_LOCAL;
++      default:
++              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
++      }
++}
++
++#if 0 /* See comment below */
++
++static void i915_emit_copy_blit(struct drm_device * dev,
++                              uint32_t src_offset,
++                              uint32_t dst_offset,
++                              uint32_t pages, int direction)
++{
++      uint32_t cur_pages;
++      uint32_t stride = PAGE_SIZE;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      if (!dev_priv)
++              return;
++
++      i915_kernel_lost_context(dev);
++      while (pages > 0) {
++              cur_pages = pages;
++              if (cur_pages > 2048)
++                      cur_pages = 2048;
++              pages -= cur_pages;
++
++              BEGIN_LP_RING(6);
++              OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
++                       XY_SRC_COPY_BLT_WRITE_RGB);
++              OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
++                       (1 << 25) | (direction ? (1 << 30) : 0));
++              OUT_RING((cur_pages << 16) | PAGE_SIZE);
++              OUT_RING(dst_offset);
++              OUT_RING(stride & 0xffff);
++              OUT_RING(src_offset);
++              ADVANCE_LP_RING();
++      }
++      return;
++}
++
++static int i915_move_blit(struct drm_buffer_object * bo,
++                        int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      int dir = 0;
++
++      if ((old_mem->mem_type == new_mem->mem_type) &&
++          (new_mem->mm_node->start <
++           old_mem->mm_node->start + old_mem->mm_node->size)) {
++              dir = 1;
++      }
++
++      i915_emit_copy_blit(bo->dev,
++                          old_mem->mm_node->start << PAGE_SHIFT,
++                          new_mem->mm_node->start << PAGE_SHIFT,
++                          new_mem->num_pages, dir);
++
++      i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
++
++      return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
++                                       DRM_FENCE_TYPE_EXE |
++                                       DRM_I915_FENCE_TYPE_RW,
++                                       DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
++}
++
++/*
++ * Flip destination ttm into cached-coherent AGP,
++ * then blit and subsequently move out again.
++ */
++
++static int i915_move_flip(struct drm_buffer_object * bo,
++                        int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg tmp_mem;
++      int ret;
++
++      tmp_mem = *new_mem;
++      tmp_mem.mm_node = NULL;
++      tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
++          DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
++
++      ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
++      if (ret)
++              return ret;
++
++      ret = drm_bind_ttm(bo->ttm, &tmp_mem);
++      if (ret)
++              goto out_cleanup;
++
++      ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
++      if (ret)
++              goto out_cleanup;
++
++      ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out_cleanup:
++      if (tmp_mem.mm_node) {
++              mutex_lock(&dev->struct_mutex);
++              if (tmp_mem.mm_node != bo->pinned_node)
++                      drm_mm_put_block(tmp_mem.mm_node);
++              tmp_mem.mm_node = NULL;
++              mutex_unlock(&dev->struct_mutex);
++      }
++      return ret;
++}
++
++#endif
++
++/*
++ * Disable i915_move_flip for now, since we can't guarantee that the hardware
++ * lock is held here. To re-enable we need to make sure either
++ * a) The X server is using DRM to submit commands to the ring, or
++ * b) DRM can use the HP ring for these blits. This means i915 needs to
++ *    implement a new ring submission mechanism and fence class.
++ */
++int i915_move(struct drm_buffer_object *bo,
++            int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      } else {
++              if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++      return 0;
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
++static inline void clflush(volatile void *__p)
++{
++      asm volatile("clflush %0" : "+m" (*(char __force *)__p));
++}
++#endif
++
++static inline void drm_cache_flush_addr(void *virt)
++{
++      int i;
++
++      for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++              clflush(virt+i);
++}
++
++static inline void drm_cache_flush_page(struct page *p)
++{
++      drm_cache_flush_addr(page_address(p));
++}
++
++void i915_flush_ttm(struct drm_ttm *ttm)
++{
++      int i;
++
++      if (!ttm)
++              return;
++
++      DRM_MEMORYBARRIER();
++
++#ifdef CONFIG_X86_32
++      /* Hopefully nobody has built an x86-64 processor without clflush */
++      if (!cpu_has_clflush) {
++              wbinvd();
++              DRM_MEMORYBARRIER();
++              return;
++      }
++#endif
++
++      for (i = ttm->num_pages - 1; i >= 0; i--)
++              drm_cache_flush_page(drm_ttm_get_page(ttm, i));
++
++      DRM_MEMORYBARRIER();
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_compat.c git-nokia/drivers/gpu/drm-tungsten/i915_compat.c
+--- git/drivers/gpu/drm-tungsten/i915_compat.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_compat.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,215 @@
++#include "drmP.h"
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
++#define PCI_DEVICE_ID_INTEL_82965G_1_HB     0x2980
++#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
++#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
++#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
++#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
++#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
++#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
++#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
++#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
++
++#define I915_IFPADDR    0x60
++#define I965_IFPADDR    0x70
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
++#define upper_32_bits(_val) (((u64)(_val)) >> 32)
++#endif
++
++static struct _i9xx_private_compat {
++      void __iomem *flush_page;
++      int resource_valid;
++      struct resource ifp_resource;
++} i9xx_private;
++
++static struct _i8xx_private_compat {
++      void *flush_page;
++      struct page *page;
++} i8xx_private;
++
++static void
++intel_compat_align_resource(void *data, struct resource *res,
++                        resource_size_t size, resource_size_t align)
++{
++      return;
++}
++
++
++static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev)
++{
++      int ret;
++      ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE,
++                                   PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
++                                   intel_compat_align_resource, pdev);
++      if (ret != 0)
++              return ret;
++
++      return 0;
++}
++
++static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
++{
++      int ret;
++      u32 temp;
++
++      pci_read_config_dword(pdev, I915_IFPADDR, &temp);
++      if (!(temp & 0x1)) {
++              intel_alloc_chipset_flush_resource(pdev);
++              i9xx_private.resource_valid = 1;
++              pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
++      } else {
++              temp &= ~1;
++
++              i9xx_private.resource_valid = 1;
++              i9xx_private.ifp_resource.start = temp;
++              i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
++              ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
++              if (ret) {
++                      i9xx_private.resource_valid = 0;
++                      printk("Failed inserting resource into tree\n");
++              }
++      }
++}
++
++static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
++{
++      u32 temp_hi, temp_lo;
++      int ret;
++
++      pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
++      pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
++
++      if (!(temp_lo & 0x1)) {
++
++              intel_alloc_chipset_flush_resource(pdev);
++
++              i9xx_private.resource_valid = 1;
++              pci_write_config_dword(pdev, I965_IFPADDR + 4,
++                      upper_32_bits(i9xx_private.ifp_resource.start));
++              pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
++      } else {
++              u64 l64;
++
++              temp_lo &= ~0x1;
++              l64 = ((u64)temp_hi << 32) | temp_lo;
++
++              i9xx_private.resource_valid = 1;
++              i9xx_private.ifp_resource.start = l64;
++              i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
++              ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
++              if (ret) {
++                      i9xx_private.resource_valid = 0;
++                      printk("Failed inserting resource into tree\n");
++              }
++      }
++}
++
++static void intel_i8xx_fini_flush(struct drm_device *dev)
++{
++      kunmap(i8xx_private.page);
++      i8xx_private.flush_page = NULL;
++      unmap_page_from_agp(i8xx_private.page);
++      flush_agp_mappings();
++
++      __free_page(i8xx_private.page);
++}
++
++static void intel_i8xx_setup_flush(struct drm_device *dev)
++{
++
++      i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
++      if (!i8xx_private.page) {
++              return;
++      }
++
++      /* make page uncached */
++      map_page_into_agp(i8xx_private.page);
++      flush_agp_mappings();
++
++      i8xx_private.flush_page = kmap(i8xx_private.page);
++      if (!i8xx_private.flush_page)
++              intel_i8xx_fini_flush(dev);
++}
++
++
++static void intel_i8xx_flush_page(struct drm_device *dev)
++{
++      unsigned int *pg = i8xx_private.flush_page;
++      int i;
++
++      /* HAI NUT CAN I HAZ HAMMER?? */
++      for (i = 0; i < 256; i++)
++              *(pg + i) = i;
++      
++      DRM_MEMORYBARRIER();
++}
++
++static void intel_i9xx_setup_flush(struct drm_device *dev)
++{
++      struct pci_dev *agp_dev = dev->agp->agp_info.device;
++
++      i9xx_private.ifp_resource.name = "GMCH IFPBAR";
++      i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
++
++      /* Setup chipset flush for 915 */
++      if (IS_I965G(dev) || IS_G33(dev)) {
++              intel_i965_g33_setup_chipset_flush(agp_dev);
++      } else {
++              intel_i915_setup_chipset_flush(agp_dev);
++      }
++
++      if (i9xx_private.ifp_resource.start) {
++              i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
++              if (!i9xx_private.flush_page)
++                      printk("unable to ioremap flush  page - no chipset flushing");
++      }
++}
++
++static void intel_i9xx_fini_flush(struct drm_device *dev)
++{
++      iounmap(i9xx_private.flush_page);
++      if (i9xx_private.resource_valid)
++              release_resource(&i9xx_private.ifp_resource);
++      i9xx_private.resource_valid = 0;
++}
++
++static void intel_i9xx_flush_page(struct drm_device *dev)
++{
++      if (i9xx_private.flush_page)
++              writel(1, i9xx_private.flush_page);
++}
++
++void intel_init_chipset_flush_compat(struct drm_device *dev)
++{
++      /* not flush on i8xx */
++      if (IS_I9XX(dev))       
++              intel_i9xx_setup_flush(dev);
++      else
++              intel_i8xx_setup_flush(dev);
++      
++}
++
++void intel_fini_chipset_flush_compat(struct drm_device *dev)
++{
++      /* not flush on i8xx */
++      if (IS_I9XX(dev))
++              intel_i9xx_fini_flush(dev);
++      else
++              intel_i8xx_fini_flush(dev);
++}
++
++void drm_agp_chipset_flush(struct drm_device *dev)
++{
++      if (IS_I9XX(dev))
++              intel_i9xx_flush_page(dev);
++      else
++              intel_i8xx_flush_page(dev);
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_dma.c git-nokia/drivers/gpu/drm-tungsten/i915_dma.c
+--- git/drivers/gpu/drm-tungsten/i915_dma.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_dma.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1276 @@
++/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
++ */
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/* Really want an OS-independent resettable timer.  Would like to have
++ * this loop run for (eg) 3 sec, but have the timer reset every time
++ * the head pointer changes, so that EBUSY only happens if the ring
++ * actually stalls for (eg) 3 seconds.
++ */
++int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++      u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++      u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
++      u32 last_acthd = I915_READ(acthd_reg);
++      u32 acthd;
++      int i;
++
++      for (i = 0; i < 100000; i++) {
++              ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++              acthd = I915_READ(acthd_reg);
++              ring->space = ring->head - (ring->tail + 8);
++              if (ring->space < 0)
++                      ring->space += ring->Size;
++              if (ring->space >= n)
++                      return 0;
++
++              if (ring->head != last_head)
++                      i = 0;
++
++              if (acthd != last_acthd)
++                      i = 0;
++
++              last_head = ring->head;
++              last_acthd = acthd;
++              DRM_UDELAY(10 * 1000);
++      }
++
++      return -EBUSY;
++}
++
++int i915_init_hardware_status(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_dma_handle_t *dmah;
++
++      /* Program Hardware Status Page */
++#ifdef __FreeBSD__
++      DRM_UNLOCK();
++#endif
++      dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
++#ifdef __FreeBSD__
++      DRM_LOCK();
++#endif
++      if (!dmah) {
++              DRM_ERROR("Can not allocate hardware status page\n");
++              return -ENOMEM;
++      }
++
++      dev_priv->status_page_dmah = dmah;
++      dev_priv->hw_status_page = dmah->vaddr;
++      dev_priv->dma_status_page = dmah->busaddr;
++
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++
++      I915_WRITE(0x02080, dev_priv->dma_status_page);
++      DRM_DEBUG("Enabled hardware status page\n");
++      return 0;
++}
++
++void i915_free_hardware_status(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      if (dev_priv->status_page_dmah) {
++              drm_pci_free(dev, dev_priv->status_page_dmah);
++              dev_priv->status_page_dmah = NULL;
++              /* Need to rewrite hardware status page */
++              I915_WRITE(0x02080, 0x1ffff000);
++      }
++
++      if (dev_priv->status_gfx_addr) {
++              dev_priv->status_gfx_addr = 0;
++              drm_core_ioremapfree(&dev_priv->hws_map, dev);
++              I915_WRITE(0x02080, 0x1ffff000);
++      }
++}
++
++#if I915_RING_VALIDATE
++/**
++ * Validate the cached ring tail value
++ *
++ * If the X server writes to the ring and DRM doesn't
++ * reload the head and tail pointers, it will end up writing
++ * data to the wrong place in the ring, causing havoc.
++ */
++void i915_ring_validate(struct drm_device *dev, const char *func, int line)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++      u32     tail = I915_READ(PRB0_TAIL) & HEAD_ADDR;
++      u32     head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++
++      if (tail != ring->tail) {
++              DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
++                        func, line,
++                        ring->head, head, ring->tail, tail);
++#ifdef __linux__
++              BUG_ON(1);
++#endif
++      }
++}
++#endif
++
++void i915_kernel_lost_context(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++
++      ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++      ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
++      ring->space = ring->head - (ring->tail + 8);
++      if (ring->space < 0)
++              ring->space += ring->Size;
++}
++
++static int i915_dma_cleanup(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev_priv->ring.virtual_start) {
++              drm_core_ioremapfree(&dev_priv->ring.map, dev);
++              dev_priv->ring.virtual_start = 0;
++              dev_priv->ring.map.handle = 0;
++              dev_priv->ring.map.size = 0;
++      }
++
++      if (I915_NEED_GFX_HWS(dev))
++              i915_free_hardware_status(dev);
++
++      return 0;
++}
++
++#if defined(I915_HAVE_BUFFER)
++#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
++#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
++#define DRI2_SAREA_BLOCK_NEXT(p)                              \
++      ((void *) ((unsigned char *) (p) +                      \
++                 DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))
++
++#define DRI2_SAREA_BLOCK_END          0x0000
++#define DRI2_SAREA_BLOCK_LOCK         0x0001
++#define DRI2_SAREA_BLOCK_EVENT_BUFFER 0x0002
++
++static int
++setup_dri2_sarea(struct drm_device * dev,
++               struct drm_file *file_priv,
++               drm_i915_init_t * init)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret;
++      unsigned int *p, *end, *next;
++
++      mutex_lock(&dev->struct_mutex);
++      dev_priv->sarea_bo =
++              drm_lookup_buffer_object(file_priv,
++                                       init->sarea_handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!dev_priv->sarea_bo) {
++              DRM_ERROR("did not find sarea bo\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
++                        dev_priv->sarea_bo->num_pages,
++                        &dev_priv->sarea_kmap);
++      if (ret) {
++              DRM_ERROR("could not map sarea bo\n");
++              return ret;
++      }
++
++      p = dev_priv->sarea_kmap.virtual;
++      end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
++      while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
++              switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
++              case DRI2_SAREA_BLOCK_LOCK:
++                      dev->lock.hw_lock = (void *) (p + 1);
++                      dev->sigdata.lock = dev->lock.hw_lock;
++                      break;
++              }
++              next = DRI2_SAREA_BLOCK_NEXT(p);
++              if (next <= p || end < next) {
++                      DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
++                                next, p, end);
++                      return -EINVAL;
++              }
++              p = next;
++      }
++
++      return 0;
++}
++#endif
++
++static int i915_initialize(struct drm_device * dev,
++                         struct drm_file *file_priv,
++                         drm_i915_init_t * init)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++#if defined(I915_HAVE_BUFFER)
++      int ret;
++#endif
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("can not find sarea!\n");
++              i915_dma_cleanup(dev);
++              return -EINVAL;
++      }
++
++#ifdef I915_HAVE_BUFFER
++      dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
++#endif
++
++      if (init->sarea_priv_offset)
++              dev_priv->sarea_priv = (drm_i915_sarea_t *)
++                      ((u8 *) dev_priv->sarea->handle +
++                       init->sarea_priv_offset);
++      else {
++              /* No sarea_priv for you! */
++              dev_priv->sarea_priv = NULL;
++      }
++
++      if (init->ring_size != 0) {
++              dev_priv->ring.Size = init->ring_size;
++              dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
++
++              dev_priv->ring.map.offset = init->ring_start;
++              dev_priv->ring.map.size = init->ring_size;
++              dev_priv->ring.map.type = 0;
++              dev_priv->ring.map.flags = 0;
++              dev_priv->ring.map.mtrr = 0;
++
++              drm_core_ioremap(&dev_priv->ring.map, dev);
++
++              if (dev_priv->ring.map.handle == NULL) {
++                      i915_dma_cleanup(dev);
++                      DRM_ERROR("can not ioremap virtual address for"
++                                " ring buffer\n");
++                      return -ENOMEM;
++              }
++
++              dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++      }
++
++      dev_priv->cpp = init->cpp;
++
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv->pf_current_page = 0;
++
++      /* We are using separate values as placeholders for mechanisms for
++       * private backbuffer/depthbuffer usage.
++       */
++
++      /* Allow hardware batchbuffers unless told otherwise.
++       */
++      dev_priv->allow_batchbuffer = 1;
++
++      /* Enable vblank on pipe A for older X servers
++       */
++      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
++
++#ifdef I915_HAVE_BUFFER
++      mutex_init(&dev_priv->cmdbuf_mutex);
++#endif
++#if defined(I915_HAVE_BUFFER)
++      if (init->func == I915_INIT_DMA2) {
++              ret = setup_dri2_sarea(dev, file_priv, init);
++              if (ret) {
++                      i915_dma_cleanup(dev);
++                      DRM_ERROR("could not set up dri2 sarea\n");
++                      return ret;
++              }
++      }
++#endif
++
++      return 0;
++}
++
++static int i915_dma_resume(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      if (!dev_priv->sarea) {
++              DRM_ERROR("can not find sarea!\n");
++              return -EINVAL;
++      }
++
++      if (dev_priv->ring.map.handle == NULL) {
++              DRM_ERROR("can not ioremap virtual address for"
++                        " ring buffer\n");
++              return -ENOMEM;
++      }
++
++      /* Program Hardware Status Page */
++      if (!dev_priv->hw_status_page) {
++              DRM_ERROR("Can not find hardware status page\n");
++              return -EINVAL;
++      }
++      DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
++
++      if (dev_priv->status_gfx_addr != 0)
++              I915_WRITE(0x02080, dev_priv->status_gfx_addr);
++      else
++              I915_WRITE(0x02080, dev_priv->dma_status_page);
++      DRM_DEBUG("Enabled hardware status page\n");
++
++      return 0;
++}
++
++static int i915_dma_init(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_init_t *init = data;
++      int retcode = 0;
++
++      switch (init->func) {
++      case I915_INIT_DMA:
++      case I915_INIT_DMA2:
++              retcode = i915_initialize(dev, file_priv, init);
++              break;
++      case I915_CLEANUP_DMA:
++              retcode = i915_dma_cleanup(dev);
++              break;
++      case I915_RESUME_DMA:
++              retcode = i915_dma_resume(dev);
++              break;
++      default:
++              retcode = -EINVAL;
++              break;
++      }
++
++      return retcode;
++}
++
++/* Implement basically the same security restrictions as hardware does
++ * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
++ *
++ * Most of the calculations below involve calculating the size of a
++ * particular instruction.  It's important to get the size right as
++ * that tells us where the next instruction to check is.  Any illegal
++ * instruction detected will be given a size of zero, which is a
++ * signal to abort the rest of the buffer.
++ */
++static int do_validate_cmd(int cmd)
++{
++      switch (((cmd >> 29) & 0x7)) {
++      case 0x0:
++              switch ((cmd >> 23) & 0x3f) {
++              case 0x0:
++                      return 1;       /* MI_NOOP */
++              case 0x4:
++                      return 1;       /* MI_FLUSH */
++              default:
++                      return 0;       /* disallow everything else */
++              }
++              break;
++      case 0x1:
++              return 0;       /* reserved */
++      case 0x2:
++              return (cmd & 0xff) + 2;        /* 2d commands */
++      case 0x3:
++              if (((cmd >> 24) & 0x1f) <= 0x18)
++                      return 1;
++
++              switch ((cmd >> 24) & 0x1f) {
++              case 0x1c:
++                      return 1;
++              case 0x1d:
++                      switch ((cmd >> 16) & 0xff) {
++                      case 0x3:
++                              return (cmd & 0x1f) + 2;
++                      case 0x4:
++                              return (cmd & 0xf) + 2;
++                      default:
++                              return (cmd & 0xffff) + 2;
++                      }
++              case 0x1e:
++                      if (cmd & (1 << 23))
++                              return (cmd & 0xffff) + 1;
++                      else
++                              return 1;
++              case 0x1f:
++                      if ((cmd & (1 << 23)) == 0)     /* inline vertices */
++                              return (cmd & 0x1ffff) + 2;
++                      else if (cmd & (1 << 17))       /* indirect random */
++                              if ((cmd & 0xffff) == 0)
++                                      return 0;       /* unknown length, too hard */
++                              else
++                                      return (((cmd & 0xffff) + 1) / 2) + 1;
++                      else
++                              return 2;       /* indirect sequential */
++              default:
++                      return 0;
++              }
++      default:
++              return 0;
++      }
++
++      return 0;
++}
++
++static int validate_cmd(int cmd)
++{
++      int ret = do_validate_cmd(cmd);
++
++/*    printk("validate_cmd( %x ): %d\n", cmd, ret); */
++
++      return ret;
++}
++
++static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
++                        int dwords)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int i;
++      RING_LOCALS;
++
++      if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
++              return -EINVAL;
++
++      BEGIN_LP_RING((dwords+1)&~1);
++
++      for (i = 0; i < dwords;) {
++              int cmd, sz;
++
++              if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
++                      return -EINVAL;
++
++              if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
++                      return -EINVAL;
++
++              OUT_RING(cmd);
++
++              while (++i, --sz) {
++                      if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
++                                                       sizeof(cmd))) {
++                              return -EINVAL;
++                      }
++                      OUT_RING(cmd);
++              }
++      }
++
++      if (dwords & 1)
++              OUT_RING(0);
++
++      ADVANCE_LP_RING();
++
++      return 0;
++}
++
++int i915_emit_box(struct drm_device * dev,
++                struct drm_clip_rect __user * boxes,
++                int i, int DR1, int DR4)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect box;
++      RING_LOCALS;
++
++      if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
++              return -EFAULT;
++      }
++
++      if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
++              DRM_ERROR("Bad box %d,%d..%d,%d\n",
++                        box.x1, box.y1, box.x2, box.y2);
++              return -EINVAL;
++      }
++
++      if (IS_I965G(dev)) {
++              BEGIN_LP_RING(4);
++              OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
++              OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
++              OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++              OUT_RING(DR4);
++              ADVANCE_LP_RING();
++      } else {
++              BEGIN_LP_RING(6);
++              OUT_RING(GFX_OP_DRAWRECT_INFO);
++              OUT_RING(DR1);
++              OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
++              OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++              OUT_RING(DR4);
++              OUT_RING(0);
++              ADVANCE_LP_RING();
++      }
++
++      return 0;
++}
++
++/* XXX: Emitting the counter should really be moved to part of the IRQ
++ * emit. For now, do it in both places:
++ */
++
++void i915_emit_breadcrumb(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      if (++dev_priv->counter > BREADCRUMB_MASK) {
++               dev_priv->counter = 1;
++               DRM_DEBUG("Breadcrumb counter wrapped around\n");
++      }
++
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
++
++      BEGIN_LP_RING(4);
++      OUT_RING(MI_STORE_DWORD_INDEX);
++      OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
++      OUT_RING(dev_priv->counter);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++}
++
++
++int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t flush_cmd = MI_FLUSH;
++      RING_LOCALS;
++
++      flush_cmd |= flush;
++
++      i915_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(4);
++      OUT_RING(flush_cmd);
++      OUT_RING(0);
++      OUT_RING(0);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      return 0;
++}
++
++
++static int i915_dispatch_cmdbuffer(struct drm_device * dev,
++                                 drm_i915_cmdbuffer_t * cmd)
++{
++#ifdef I915_HAVE_FENCE
++      drm_i915_private_t *dev_priv = dev->dev_private;
++#endif
++      int nbox = cmd->num_cliprects;
++      int i = 0, count, ret;
++
++      if (cmd->sz & 0x3) {
++              DRM_ERROR("alignment\n");
++              return -EINVAL;
++      }
++
++      i915_kernel_lost_context(dev);
++
++      count = nbox ? nbox : 1;
++
++      for (i = 0; i < count; i++) {
++              if (i < nbox) {
++                      ret = i915_emit_box(dev, cmd->cliprects, i,
++                                          cmd->DR1, cmd->DR4);
++                      if (ret)
++                              return ret;
++              }
++
++              ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
++              if (ret)
++                      return ret;
++      }
++
++      i915_emit_breadcrumb(dev);
++#ifdef I915_HAVE_FENCE
++      if (unlikely((dev_priv->counter & 0xFF) == 0))
++              drm_fence_flush_old(dev, 0, dev_priv->counter);
++#endif
++      return 0;
++}
++
++int i915_dispatch_batchbuffer(struct drm_device * dev,
++                            drm_i915_batchbuffer_t * batch)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect __user *boxes = batch->cliprects;
++      int nbox = batch->num_cliprects;
++      int i = 0, count;
++      RING_LOCALS;
++
++      if ((batch->start | batch->used) & 0x7) {
++              DRM_ERROR("alignment\n");
++              return -EINVAL;
++      }
++
++      i915_kernel_lost_context(dev);
++
++      count = nbox ? nbox : 1;
++
++      for (i = 0; i < count; i++) {
++              if (i < nbox) {
++                      int ret = i915_emit_box(dev, boxes, i,
++                                              batch->DR1, batch->DR4);
++                      if (ret)
++                              return ret;
++              }
++
++              if (IS_I830(dev) || IS_845G(dev)) {
++                      BEGIN_LP_RING(4);
++                      OUT_RING(MI_BATCH_BUFFER);
++                      OUT_RING(batch->start | MI_BATCH_NON_SECURE);
++                      OUT_RING(batch->start + batch->used - 4);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              } else {
++                      BEGIN_LP_RING(2);
++                      if (IS_I965G(dev)) {
++                              OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
++                              OUT_RING(batch->start);
++                      } else {
++                              OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
++                              OUT_RING(batch->start | MI_BATCH_NON_SECURE);
++                      }
++                      ADVANCE_LP_RING();
++              }
++      }
++
++      i915_emit_breadcrumb(dev);
++#ifdef I915_HAVE_FENCE
++      if (unlikely((dev_priv->counter & 0xFF) == 0))
++              drm_fence_flush_old(dev, 0, dev_priv->counter);
++#endif
++      return 0;
++}
++
++static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      u32 num_pages, current_page, next_page, dspbase;
++      int shift = 2 * plane, x, y;
++      RING_LOCALS;
++
++      /* Calculate display base offset */
++      num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
++      current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
++      next_page = (current_page + 1) % num_pages;
++
++      switch (next_page) {
++      default:
++      case 0:
++              dspbase = dev_priv->sarea_priv->front_offset;
++              break;
++      case 1:
++              dspbase = dev_priv->sarea_priv->back_offset;
++              break;
++      case 2:
++              dspbase = dev_priv->sarea_priv->third_offset;
++              break;
++      }
++
++      if (plane == 0) {
++              x = dev_priv->sarea_priv->planeA_x;
++              y = dev_priv->sarea_priv->planeA_y;
++      } else {
++              x = dev_priv->sarea_priv->planeB_x;
++              y = dev_priv->sarea_priv->planeB_y;
++      }
++
++      dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
++
++      DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
++                dspbase);
++
++      BEGIN_LP_RING(4);
++      OUT_RING(sync ? 0 :
++               (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
++                                     MI_WAIT_FOR_PLANE_A_FLIP)));
++      OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
++               (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
++      OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
++      OUT_RING(dspbase);
++      ADVANCE_LP_RING();
++
++      dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
++      dev_priv->sarea_priv->pf_current_page |= next_page << shift;
++}
++
++void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
++                planes, dev_priv->sarea_priv->pf_current_page);
++
++      i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
++
++      for (i = 0; i < 2; i++)
++              if (planes & (1 << i))
++                      i915_do_dispatch_flip(dev, i, sync);
++
++      i915_emit_breadcrumb(dev);
++#ifdef I915_HAVE_FENCE
++      if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
++              drm_fence_flush_old(dev, 0, dev_priv->counter);
++#endif
++}
++
++int i915_quiescent(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret;
++
++      i915_kernel_lost_context(dev);
++      ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
++      if (ret)
++      {
++              i915_kernel_lost_context (dev);
++              DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
++                         dev_priv->ring.head,
++                         dev_priv->ring.tail,
++                         dev_priv->ring.space);
++      }
++      return ret;
++}
++
++static int i915_flush_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv)
++{
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return i915_quiescent(dev);
++}
++
++static int i915_batchbuffer(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i915_batchbuffer_t *batch = data;
++      int ret;
++
++      if (!dev_priv->allow_batchbuffer) {
++              DRM_ERROR("Batchbuffer ioctl disabled\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
++                batch->start, batch->used, batch->num_cliprects);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
++                                                      batch->num_cliprects *
++                                                      sizeof(struct drm_clip_rect)))
++              return -EFAULT;
++
++      ret = i915_dispatch_batchbuffer(dev, batch);
++
++      sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++      return ret;
++}
++
++static int i915_cmdbuffer(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i915_cmdbuffer_t *cmdbuf = data;
++      int ret;
++
++      DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
++                cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (cmdbuf->num_cliprects &&
++          DRM_VERIFYAREA_READ(cmdbuf->cliprects,
++                              cmdbuf->num_cliprects *
++                              sizeof(struct drm_clip_rect))) {
++              DRM_ERROR("Fault accessing cliprects\n");
++              return -EFAULT;
++      }
++
++      ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
++      if (ret) {
++              DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
++              return ret;
++      }
++
++      sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++      return 0;
++}
++
++#if defined(DRM_DEBUG_CODE)
++#define DRM_DEBUG_RELOCATION  (drm_debug != 0)
++#else
++#define DRM_DEBUG_RELOCATION  0
++#endif
++
++static int i915_do_cleanup_pageflip(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0, planes = 0; i < 2; i++)
++              if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
++                      dev_priv->sarea_priv->pf_current_page =
++                              (dev_priv->sarea_priv->pf_current_page &
++                               ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
++
++                      planes |= 1 << i;
++              }
++
++      if (planes)
++              i915_dispatch_flip(dev, planes, 0);
++
++      return 0;
++}
++
++static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_i915_flip_t *param = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* This is really planes */
++      if (param->pipes & ~0x3) {
++              DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
++                        param->pipes);
++              return -EINVAL;
++      }
++
++      i915_dispatch_flip(dev, param->pipes, 0);
++
++      return 0;
++}
++
++
++static int i915_getparam(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (param->param) {
++      case I915_PARAM_IRQ_ACTIVE:
++              value = dev->irq_enabled ? 1 : 0;
++              break;
++      case I915_PARAM_ALLOW_BATCHBUFFER:
++              value = dev_priv->allow_batchbuffer ? 1 : 0;
++              break;
++      case I915_PARAM_LAST_DISPATCH:
++              value = READ_BREADCRUMB(dev_priv);
++              break;
++      case I915_PARAM_CHIPSET_ID:
++              value = dev->pci_device;
++              break;
++      case I915_PARAM_HAS_GEM:
++              value = 1;
++              break;
++      default:
++              DRM_ERROR("Unknown parameter %d\n", param->param);
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("DRM_COPY_TO_USER failed\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int i915_setparam(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_setparam_t *param = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (param->param) {
++      case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
++              break;
++      case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
++              dev_priv->tex_lru_log_granularity = param->value;
++              break;
++      case I915_SETPARAM_ALLOW_BATCHBUFFER:
++              dev_priv->allow_batchbuffer = param->value;
++              break;
++      default:
++              DRM_ERROR("unknown parameter %d\n", param->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++drm_i915_mmio_entry_t mmio_table[] = {
++      [MMIO_REGS_PS_DEPTH_COUNT] = {
++              I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
++              0x2350,
++              8
++      }
++};
++
++static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
++
++static int i915_mmio(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      uint32_t buf[8];
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mmio_entry_t *e;
++      drm_i915_mmio_t *mmio = data;
++      void __iomem *base;
++      int i;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (mmio->reg >= mmio_table_size)
++              return -EINVAL;
++
++      e = &mmio_table[mmio->reg];
++      base = (u8 *) dev_priv->mmio_map->handle + e->offset;
++
++      switch (mmio->read_write) {
++      case I915_MMIO_READ:
++              if (!(e->flag & I915_MMIO_MAY_READ))
++                      return -EINVAL;
++              for (i = 0; i < e->size / 4; i++)
++                      buf[i] = I915_READ(e->offset + i * 4);
++              if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
++                      DRM_ERROR("DRM_COPY_TO_USER failed\n");
++                      return -EFAULT;
++              }
++              break;
++              
++      case I915_MMIO_WRITE:
++              if (!(e->flag & I915_MMIO_MAY_WRITE))
++                      return -EINVAL;
++              if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
++                      DRM_ERROR("DRM_COPY_TO_USER failed\n");
++                      return -EFAULT;
++              }
++              for (i = 0; i < e->size / 4; i++)
++                      I915_WRITE(e->offset + i * 4, buf[i]);
++              break;
++      }
++      return 0;
++}
++
++static int i915_set_status_page(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_hws_addr_t *hws = data;
++
++      if (!I915_NEED_GFX_HWS(dev))
++              return -EINVAL;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++      DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
++
++      dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
++
++      dev_priv->hws_map.offset = dev->agp->base + hws->addr;
++      dev_priv->hws_map.size = 4*1024;
++      dev_priv->hws_map.type = 0;
++      dev_priv->hws_map.flags = 0;
++      dev_priv->hws_map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->hws_map, dev);
++      if (dev_priv->hws_map.handle == NULL) {
++              i915_dma_cleanup(dev);
++              dev_priv->status_gfx_addr = 0;
++              DRM_ERROR("can not ioremap virtual address for"
++                              " G33 hw status page\n");
++              return -ENOMEM;
++      }
++      dev_priv->hw_status_page = dev_priv->hws_map.handle;
++
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++      DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
++                      dev_priv->status_gfx_addr);
++      DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
++      return 0;
++}
++
++int i915_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      struct drm_i915_private *dev_priv;
++      unsigned long base, size;
++      int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
++
++      /* i915 has 4 more counters */
++      dev->counters += 4;
++      dev->types[6] = _DRM_STAT_IRQ;
++      dev->types[7] = _DRM_STAT_PRIMARY;
++      dev->types[8] = _DRM_STAT_SECONDARY;
++      dev->types[9] = _DRM_STAT_DMA;
++
++      dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_i915_private_t));
++
++      dev->dev_private = (void *)dev_priv;
++      dev_priv->dev = dev;
++
++      /* Add register map (needed for suspend/resume) */
++      base = drm_get_resource_start(dev, mmio_bar);
++      size = drm_get_resource_len(dev, mmio_bar);
++
++      ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
++              _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
++#ifdef I915_HAVE_GEM
++      i915_gem_load(dev);
++#endif
++      DRM_SPININIT(&dev_priv->swaps_lock, "swap");
++      DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++      intel_init_chipset_flush_compat(dev);
++#endif
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_init(dev);
++#endif
++#endif
++
++      /* Init HWS */
++      if (!I915_NEED_GFX_HWS(dev)) {
++              ret = i915_init_hardware_status(dev);
++              if(ret)
++                      return ret;
++      }
++
++      return ret;
++}
++
++int i915_driver_unload(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      i915_free_hardware_status(dev);
++
++      drm_rmmap(dev, dev_priv->mmio_map);
++
++      DRM_SPINUNINIT(&dev_priv->swaps_lock);
++      DRM_SPINUNINIT(&dev_priv->user_irq_lock);
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_free(dev);
++#endif
++#endif
++
++      drm_free(dev->dev_private, sizeof(drm_i915_private_t),
++               DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++      intel_fini_chipset_flush_compat(dev);
++#endif
++#endif
++      return 0;
++}
++
++void i915_driver_lastclose(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      /* agp off can use this to get called before dev_priv */
++      if (!dev_priv)
++              return;
++
++#ifdef I915_HAVE_BUFFER
++      if (dev_priv->val_bufs) {
++              vfree(dev_priv->val_bufs);
++              dev_priv->val_bufs = NULL;
++      }
++#endif
++#ifdef I915_HAVE_GEM
++      i915_gem_lastclose(dev);
++#endif
++      if (drm_getsarea(dev) && dev_priv->sarea_priv)
++              i915_do_cleanup_pageflip(dev);
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv = NULL;
++      if (dev_priv->agp_heap)
++              i915_mem_takedown(&(dev_priv->agp_heap));
++#if defined(I915_HAVE_BUFFER)
++      if (dev_priv->sarea_kmap.virtual) {
++              drm_bo_kunmap(&dev_priv->sarea_kmap);
++              dev_priv->sarea_kmap.virtual = NULL;
++              dev->lock.hw_lock = NULL;
++              dev->sigdata.lock = NULL;
++      }
++
++      if (dev_priv->sarea_bo) {
++              mutex_lock(&dev->struct_mutex);
++              drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
++              mutex_unlock(&dev->struct_mutex);
++              dev_priv->sarea_bo = NULL;
++      }
++#endif
++      i915_dma_cleanup(dev);
++}
++
++int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_i915_file_private *i915_file_priv;
++
++      DRM_DEBUG("\n");
++      i915_file_priv = (struct drm_i915_file_private *)
++          drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
++
++      if (!i915_file_priv)
++              return -ENOMEM;
++
++      file_priv->driver_priv = i915_file_priv;
++
++      i915_file_priv->mm.last_gem_seqno = 0;
++      i915_file_priv->mm.last_gem_throttle_seqno = 0;
++
++      return 0;
++}
++
++void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      i915_mem_release(dev, file_priv, dev_priv->agp_heap);
++}
++
++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++
++      drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
++}
++
++struct drm_ioctl_desc i915_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
++      DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
++      DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
++      DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
++#ifdef I915_HAVE_BUFFER
++      DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
++#endif
++#ifdef I915_HAVE_GEM
++      DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
++#endif
++};
++
++int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * All Intel graphics chipsets are treated as AGP, even if they are really
++ * PCI-e.
++ *
++ * \param dev   The device to be tested.
++ *
++ * \returns
++ * A value of 1 is always returned to indicate every i9x5 is AGP.
++ */
++int i915_driver_device_is_agp(struct drm_device * dev)
++{
++      return 1;
++}
++
++int i915_driver_firstopen(struct drm_device *dev)
++{
++#ifdef I915_HAVE_BUFFER
++      drm_bo_driver_init(dev);
++#endif
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_drm.h git-nokia/drivers/gpu/drm-tungsten/i915_drm.h
+--- git/drivers/gpu/drm-tungsten/i915_drm.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_drm.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,719 @@
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _I915_DRM_H_
++#define _I915_DRM_H_
++
++/* Please note that modifications to all structs defined here are
++ * subject to backwards-compatibility constraints.
++ */
++
++#include "drm.h"
++
++/* Each region is a minimum of 16k, and there are at most 255 of them.
++ */
++#define I915_NR_TEX_REGIONS 255       /* table size 2k - maximum due to use
++                               * of chars for next/prev indices */
++#define I915_LOG_MIN_TEX_REGION_SIZE 14
++
++typedef struct _drm_i915_init {
++      enum {
++              I915_INIT_DMA = 0x01,
++              I915_CLEANUP_DMA = 0x02,
++              I915_RESUME_DMA = 0x03,
++
++              /* Since this struct isn't versioned, just used a new
++               * 'func' code to indicate the presence of dri2 sarea
++               * info. */
++              I915_INIT_DMA2 = 0x04
++      } func;
++      unsigned int mmio_offset;
++      int sarea_priv_offset;
++      unsigned int ring_start;
++      unsigned int ring_end;
++      unsigned int ring_size;
++      unsigned int front_offset;
++      unsigned int back_offset;
++      unsigned int depth_offset;
++      unsigned int w;
++      unsigned int h;
++      unsigned int pitch;
++      unsigned int pitch_bits;
++      unsigned int back_pitch;
++      unsigned int depth_pitch;
++      unsigned int cpp;
++      unsigned int chipset;
++      unsigned int sarea_handle;
++} drm_i915_init_t;
++
++typedef struct drm_i915_sarea {
++      struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
++      int last_upload;        /* last time texture was uploaded */
++      int last_enqueue;       /* last time a buffer was enqueued */
++      int last_dispatch;      /* age of the most recently dispatched buffer */
++      int ctxOwner;           /* last context to upload state */
++      int texAge;
++      int pf_enabled;         /* is pageflipping allowed? */
++      int pf_active;
++      int pf_current_page;    /* which buffer is being displayed? */
++      int perf_boxes;         /* performance boxes to be displayed */
++      int width, height;      /* screen size in pixels */
++
++      drm_handle_t front_handle;
++      int front_offset;
++      int front_size;
++
++      drm_handle_t back_handle;
++      int back_offset;
++      int back_size;
++
++      drm_handle_t depth_handle;
++      int depth_offset;
++      int depth_size;
++
++      drm_handle_t tex_handle;
++      int tex_offset;
++      int tex_size;
++      int log_tex_granularity;
++      int pitch;
++      int rotation;           /* 0, 90, 180 or 270 */
++      int rotated_offset;
++      int rotated_size;
++      int rotated_pitch;
++      int virtualX, virtualY;
++
++      unsigned int front_tiled;
++      unsigned int back_tiled;
++      unsigned int depth_tiled;
++      unsigned int rotated_tiled;
++      unsigned int rotated2_tiled;
++
++      int planeA_x;
++      int planeA_y;
++      int planeA_w;
++      int planeA_h;
++      int planeB_x;
++      int planeB_y;
++      int planeB_w;
++      int planeB_h;
++
++      /* Triple buffering */
++      drm_handle_t third_handle;
++      int third_offset;
++      int third_size;
++      unsigned int third_tiled;
++
++      /* buffer object handles for the static buffers.  May change
++       * over the lifetime of the client, though it doesn't in our current
++       * implementation.
++       */
++      unsigned int front_bo_handle;
++      unsigned int back_bo_handle;
++      unsigned int third_bo_handle;
++      unsigned int depth_bo_handle;
++} drm_i915_sarea_t;
++
++/* Driver specific fence types and classes.
++ */
++
++/* The only fence class we support */
++#define DRM_I915_FENCE_CLASS_ACCEL 0
++/* Fence type that guarantees read-write flush */
++#define DRM_I915_FENCE_TYPE_RW 2
++/* MI_FLUSH programmed just before the fence */
++#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
++
++/* Flags for perf_boxes
++ */
++#define I915_BOX_RING_EMPTY    0x1
++#define I915_BOX_FLIP          0x2
++#define I915_BOX_WAIT          0x4
++#define I915_BOX_TEXTURE_LOAD  0x8
++#define I915_BOX_LOST_CONTEXT  0x10
++
++/* I915 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_I915_INIT         0x00
++#define DRM_I915_FLUSH                0x01
++#define DRM_I915_FLIP         0x02
++#define DRM_I915_BATCHBUFFER  0x03
++#define DRM_I915_IRQ_EMIT     0x04
++#define DRM_I915_IRQ_WAIT     0x05
++#define DRM_I915_GETPARAM     0x06
++#define DRM_I915_SETPARAM     0x07
++#define DRM_I915_ALLOC                0x08
++#define DRM_I915_FREE         0x09
++#define DRM_I915_INIT_HEAP    0x0a
++#define DRM_I915_CMDBUFFER    0x0b
++#define DRM_I915_DESTROY_HEAP 0x0c
++#define DRM_I915_SET_VBLANK_PIPE      0x0d
++#define DRM_I915_GET_VBLANK_PIPE      0x0e
++#define DRM_I915_VBLANK_SWAP  0x0f
++#define DRM_I915_MMIO         0x10
++#define DRM_I915_HWS_ADDR     0x11
++#define DRM_I915_EXECBUFFER   0x12
++#define DRM_I915_GEM_INIT     0x13
++#define DRM_I915_GEM_EXECBUFFER       0x14
++#define DRM_I915_GEM_PIN      0x15
++#define DRM_I915_GEM_UNPIN    0x16
++#define DRM_I915_GEM_BUSY     0x17
++#define DRM_I915_GEM_THROTTLE 0x18
++#define DRM_I915_GEM_ENTERVT  0x19
++#define DRM_I915_GEM_LEAVEVT  0x1a
++#define DRM_I915_GEM_CREATE   0x1b
++#define DRM_I915_GEM_PREAD    0x1c
++#define DRM_I915_GEM_PWRITE   0x1d
++#define DRM_I915_GEM_MMAP     0x1e
++#define DRM_I915_GEM_SET_DOMAIN       0x1f
++#define DRM_I915_GEM_SW_FINISH        0x20
++#define DRM_I915_GEM_SET_TILING       0x21
++#define DRM_I915_GEM_GET_TILING       0x22
++
++#define DRM_IOCTL_I915_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
++#define DRM_IOCTL_I915_FLUSH          DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
++#define DRM_IOCTL_I915_FLIP           DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
++#define DRM_IOCTL_I915_BATCHBUFFER    DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
++#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
++#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
++#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
++#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
++#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
++#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
++#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
++#define DRM_IOCTL_I915_CMDBUFFER      DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
++#define DRM_IOCTL_I915_DESTROY_HEAP   DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
++#define DRM_IOCTL_I915_SET_VBLANK_PIPE        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
++#define DRM_IOCTL_I915_GET_VBLANK_PIPE        DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
++#define DRM_IOCTL_I915_VBLANK_SWAP    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
++#define DRM_IOCTL_I915_MMIO             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
++#define DRM_IOCTL_I915_EXECBUFFER     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
++#define DRM_IOCTL_I915_GEM_INIT               DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
++#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
++#define DRM_IOCTL_I915_GEM_PIN                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
++#define DRM_IOCTL_I915_GEM_UNPIN      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
++#define DRM_IOCTL_I915_GEM_BUSY               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
++#define DRM_IOCTL_I915_GEM_THROTTLE   DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
++#define DRM_IOCTL_I915_GEM_ENTERVT    DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
++#define DRM_IOCTL_I915_GEM_LEAVEVT    DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
++#define DRM_IOCTL_I915_GEM_CREATE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
++#define DRM_IOCTL_I915_GEM_PREAD      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
++#define DRM_IOCTL_I915_GEM_PWRITE     DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
++#define DRM_IOCTL_I915_GEM_MMAP               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
++#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
++#define DRM_IOCTL_I915_GEM_SW_FINISH  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
++#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
++#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
++
++/* Asynchronous page flipping:
++ */
++typedef struct drm_i915_flip {
++      /*
++       * This is really talking about planes, and we could rename it
++       * except for the fact that some of the duplicated i915_drm.h files
++       * out there check for HAVE_I915_FLIP and so might pick up this
++       * version.
++       */
++      int pipes;
++} drm_i915_flip_t;
++
++/* Allow drivers to submit batchbuffers directly to hardware, relying
++ * on the security mechanisms provided by hardware.
++ */
++typedef struct drm_i915_batchbuffer {
++      int start;              /* agp offset */
++      int used;               /* nr bytes in use */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* multipass with multiple cliprects? */
++      struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
++} drm_i915_batchbuffer_t;
++
++/* As above, but pass a pointer to userspace buffer which can be
++ * validated by the kernel prior to sending to hardware.
++ */
++typedef struct _drm_i915_cmdbuffer {
++      char __user *buf;       /* pointer to userspace command buffer */
++      int sz;                 /* nr bytes in buf */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* multipass with multiple cliprects? */
++      struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
++} drm_i915_cmdbuffer_t;
++
++/* Userspace can request & wait on irq's:
++ */
++typedef struct drm_i915_irq_emit {
++      int __user *irq_seq;
++} drm_i915_irq_emit_t;
++
++typedef struct drm_i915_irq_wait {
++      int irq_seq;
++} drm_i915_irq_wait_t;
++
++/* Ioctl to query kernel params:
++ */
++#define I915_PARAM_IRQ_ACTIVE            1
++#define I915_PARAM_ALLOW_BATCHBUFFER     2
++#define I915_PARAM_LAST_DISPATCH         3
++#define I915_PARAM_CHIPSET_ID            4
++#define I915_PARAM_HAS_GEM               5
++
++typedef struct drm_i915_getparam {
++      int param;
++      int __user *value;
++} drm_i915_getparam_t;
++
++/* Ioctl to set kernel params:
++ */
++#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
++#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
++#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
++
++typedef struct drm_i915_setparam {
++      int param;
++      int value;
++} drm_i915_setparam_t;
++
++/* A memory manager for regions of shared memory:
++ */
++#define I915_MEM_REGION_AGP 1
++
++typedef struct drm_i915_mem_alloc {
++      int region;
++      int alignment;
++      int size;
++      int __user *region_offset;      /* offset from start of fb or agp */
++} drm_i915_mem_alloc_t;
++
++typedef struct drm_i915_mem_free {
++      int region;
++      int region_offset;
++} drm_i915_mem_free_t;
++
++typedef struct drm_i915_mem_init_heap {
++      int region;
++      int size;
++      int start;
++} drm_i915_mem_init_heap_t;
++
++/* Allow memory manager to be torn down and re-initialized (eg on
++ * rotate):
++ */
++typedef struct drm_i915_mem_destroy_heap {
++      int region;
++} drm_i915_mem_destroy_heap_t;
++
++/* Allow X server to configure which pipes to monitor for vblank signals
++ */
++#define       DRM_I915_VBLANK_PIPE_A  1
++#define       DRM_I915_VBLANK_PIPE_B  2
++
++typedef struct drm_i915_vblank_pipe {
++      int pipe;
++} drm_i915_vblank_pipe_t;
++
++/* Schedule buffer swap at given vertical blank:
++ */
++typedef struct drm_i915_vblank_swap {
++      drm_drawable_t drawable;
++      enum drm_vblank_seq_type seqtype;
++      unsigned int sequence;
++} drm_i915_vblank_swap_t;
++
++#define I915_MMIO_READ        0
++#define I915_MMIO_WRITE 1
++
++#define I915_MMIO_MAY_READ    0x1
++#define I915_MMIO_MAY_WRITE   0x2
++
++#define MMIO_REGS_IA_PRIMATIVES_COUNT         0
++#define MMIO_REGS_IA_VERTICES_COUNT           1
++#define MMIO_REGS_VS_INVOCATION_COUNT         2
++#define MMIO_REGS_GS_PRIMITIVES_COUNT         3
++#define MMIO_REGS_GS_INVOCATION_COUNT         4
++#define MMIO_REGS_CL_PRIMITIVES_COUNT         5
++#define MMIO_REGS_CL_INVOCATION_COUNT         6
++#define MMIO_REGS_PS_INVOCATION_COUNT         7
++#define MMIO_REGS_PS_DEPTH_COUNT              8
++
++typedef struct drm_i915_mmio_entry {
++      unsigned int flag;
++      unsigned int offset;
++      unsigned int size;
++} drm_i915_mmio_entry_t;
++
++typedef struct drm_i915_mmio {
++      unsigned int read_write:1;
++      unsigned int reg:31;
++      void __user *data;
++} drm_i915_mmio_t;
++
++typedef struct drm_i915_hws_addr {
++      uint64_t addr;
++} drm_i915_hws_addr_t;
++
++/*
++ * Relocation header is 4 uint32_ts
++ * 0 - 32 bit reloc count
++ * 1 - 32-bit relocation type
++ * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
++ */
++#define I915_RELOC_HEADER 4
++
++/*
++ * type 0 relocation has 4-uint32_t stride
++ * 0 - offset into buffer
++ * 1 - delta to add in
++ * 2 - buffer handle
++ * 3 - reserved (for optimisations later).
++ */
++/*
++ * type 1 relocation has 4-uint32_t stride.
++ * Hangs off the first item in the op list.
++ * Performed after all validations are done.
++ * Try to group relocs into the same relocatee together for
++ * performance reasons.
++ * 0 - offset into buffer
++ * 1 - delta to add in
++ * 2 - buffer index in op list.
++ * 3 - relocatee index in op list.
++ */
++#define I915_RELOC_TYPE_0 0
++#define I915_RELOC0_STRIDE 4
++#define I915_RELOC_TYPE_1 1
++#define I915_RELOC1_STRIDE 4
++
++
++struct drm_i915_op_arg {
++      uint64_t next;
++      uint64_t reloc_ptr;
++      int handled;
++      unsigned int pad64;
++      union {
++              struct drm_bo_op_req req;
++              struct drm_bo_arg_rep rep;
++      } d;
++
++};
++
++struct drm_i915_execbuffer {
++      uint64_t ops_list;
++      uint32_t num_buffers;
++      struct drm_i915_batchbuffer batch;
++      drm_context_t context; /* for lockless use in the future */
++      struct drm_fence_arg fence_arg;
++};
++
++struct drm_i915_gem_init {
++      /**
++       * Beginning offset in the GTT to be managed by the DRM memory
++       * manager.
++       */
++      uint64_t gtt_start;
++      /**
++       * Ending offset in the GTT to be managed by the DRM memory
++       * manager.
++       */
++      uint64_t gtt_end;
++};
++
++struct drm_i915_gem_create {
++      /**
++       * Requested size for the object.
++       *
++       * The (page-aligned) allocated size for the object will be returned.
++       */
++      uint64_t size;
++      /**
++       * Returned handle for the object.
++       *
++       * Object handles are nonzero.
++       */
++      uint32_t handle;
++      uint32_t pad;
++};
++
++struct drm_i915_gem_pread {
++      /** Handle for the object being read. */
++      uint32_t handle;
++      uint32_t pad;
++      /** Offset into the object to read from */
++      uint64_t offset;
++      /** Length of data to read */
++      uint64_t size;
++      /** Pointer to write the data into. */
++      uint64_t data_ptr;      /* void *, but pointers are not 32/64 compatible */
++};
++
++struct drm_i915_gem_pwrite {
++      /** Handle for the object being written to. */
++      uint32_t handle;
++      uint32_t pad;
++      /** Offset into the object to write to */
++      uint64_t offset;
++      /** Length of data to write */
++      uint64_t size;
++      /** Pointer to read the data from. */
++      uint64_t data_ptr;      /* void *, but pointers are not 32/64 compatible */
++};
++
++struct drm_i915_gem_mmap {
++      /** Handle for the object being mapped. */
++      uint32_t handle;
++      uint32_t pad;
++      /** Offset in the object to map. */
++      uint64_t offset;
++      /**
++       * Length of data to map.
++       *
++       * The value will be page-aligned.
++       */
++      uint64_t size;
++      /** Returned pointer the data was mapped at */
++      uint64_t addr_ptr;      /* void *, but pointers are not 32/64 compatible */
++};
++
++struct drm_i915_gem_set_domain {
++      /** Handle for the object */
++      uint32_t handle;
++
++      /** New read domains */
++      uint32_t read_domains;
++
++      /** New write domain */
++      uint32_t write_domain;
++};
++
++struct drm_i915_gem_sw_finish {
++      /** Handle for the object */
++      uint32_t handle;
++};
++
++struct drm_i915_gem_relocation_entry {
++      /**
++       * Handle of the buffer being pointed to by this relocation entry.
++       *
++       * It's appealing to make this be an index into the mm_validate_entry
++       * list to refer to the buffer, but this allows the driver to create
++       * a relocation list for state buffers and not re-write it per
++       * exec using the buffer.
++       */
++      uint32_t target_handle;
++
++      /**
++       * Value to be added to the offset of the target buffer to make up
++       * the relocation entry.
++       */
++      uint32_t delta;
++
++      /** Offset in the buffer the relocation entry will be written into */
++      uint64_t offset;
++
++      /**
++       * Offset value of the target buffer that the relocation entry was last
++       * written as.
++       *
++       * If the buffer has the same offset as last time, we can skip syncing
++       * and writing the relocation.  This value is written back out by
++       * the execbuffer ioctl when the relocation is written.
++       */
++      uint64_t presumed_offset;
++
++      /**
++       * Target memory domains read by this operation.
++       */
++      uint32_t read_domains;
++
++      /**
++       * Target memory domains written by this operation.
++       *
++       * Note that only one domain may be written by the whole
++       * execbuffer operation, so that where there are conflicts,
++       * the application will get -EINVAL back.
++       */
++      uint32_t write_domain;
++};
++
++/** @{
++ * Intel memory domains
++ *
++ * Most of these just align with the various caches in
++ * the system and are used to flush and invalidate as
++ * objects end up cached in different domains.
++ */
++/** CPU cache */
++#define I915_GEM_DOMAIN_CPU           0x00000001
++/** Render cache, used by 2D and 3D drawing */
++#define I915_GEM_DOMAIN_RENDER                0x00000002
++/** Sampler cache, used by texture engine */
++#define I915_GEM_DOMAIN_SAMPLER               0x00000004
++/** Command queue, used to load batch buffers */
++#define I915_GEM_DOMAIN_COMMAND               0x00000008
++/** Instruction cache, used by shader programs */
++#define I915_GEM_DOMAIN_INSTRUCTION   0x00000010
++/** Vertex address cache */
++#define I915_GEM_DOMAIN_VERTEX                0x00000020
++/** GTT domain - aperture and scanout */
++#define I915_GEM_DOMAIN_GTT           0x00000040
++/** @} */
++
++struct drm_i915_gem_exec_object {
++      /**
++       * User's handle for a buffer to be bound into the GTT for this
++       * operation.
++       */
++      uint32_t handle;
++
++      /** Number of relocations to be performed on this buffer */
++      uint32_t relocation_count;
++      /**
++       * Pointer to array of struct drm_i915_gem_relocation_entry containing
++       * the relocations to be performed in this buffer.
++       */
++      uint64_t relocs_ptr;
++
++      /** Required alignment in graphics aperture */
++      uint64_t alignment;
++
++      /**
++       * Returned value of the updated offset of the object, for future
++       * presumed_offset writes.
++       */
++      uint64_t offset;
++};
++
++struct drm_i915_gem_execbuffer {
++      /**
++       * List of buffers to be validated with their relocations to be
++       * performed on them.
++       *
++       * This is a pointer to an array of struct drm_i915_gem_validate_entry.
++       *
++       * These buffers must be listed in an order such that all relocations
++       * a buffer is performing refer to buffers that have already appeared
++       * in the validate list.
++       */
++      uint64_t buffers_ptr;
++      uint32_t buffer_count;
++
++      /** Offset in the batchbuffer to start execution from. */
++      uint32_t batch_start_offset;
++      /** Bytes used in batchbuffer from batch_start_offset */
++      uint32_t batch_len;
++      uint32_t DR1;
++      uint32_t DR4;
++      uint32_t num_cliprects;
++      uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
++};
++
++struct drm_i915_gem_pin {
++      /** Handle of the buffer to be pinned. */
++      uint32_t handle;
++      uint32_t pad;
++
++      /** alignment required within the aperture */
++      uint64_t alignment;
++
++      /** Returned GTT offset of the buffer. */
++      uint64_t offset;
++};
++
++struct drm_i915_gem_unpin {
++      /** Handle of the buffer to be unpinned. */
++      uint32_t handle;
++      uint32_t pad;
++};
++
++struct drm_i915_gem_busy {
++      /** Handle of the buffer to check for busy */
++      uint32_t handle;
++
++      /** Return busy status (1 if busy, 0 if idle) */
++      uint32_t busy;
++};
++
++#define I915_TILING_NONE      0
++#define I915_TILING_X         1
++#define I915_TILING_Y         2
++
++#define I915_BIT_6_SWIZZLE_NONE               0
++#define I915_BIT_6_SWIZZLE_9          1
++#define I915_BIT_6_SWIZZLE_9_10               2
++#define I915_BIT_6_SWIZZLE_9_11               3
++#define I915_BIT_6_SWIZZLE_9_10_11    4
++/* Not seen by userland */
++#define I915_BIT_6_SWIZZLE_UNKNOWN    5
++
++struct drm_i915_gem_set_tiling {
++      /** Handle of the buffer to have its tiling state updated */
++      uint32_t handle;
++
++      /**
++       * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
++       * I915_TILING_Y).
++       *
++       * This value is to be set on request, and will be updated by the
++       * kernel on successful return with the actual chosen tiling layout.
++       *
++       * The tiling mode may be demoted to I915_TILING_NONE when the system
++       * has bit 6 swizzling that can't be managed correctly by GEM.
++       *
++       * Buffer contents become undefined when changing tiling_mode.
++       */
++      uint32_t tiling_mode;
++
++      /**
++       * Stride in bytes for the object when in I915_TILING_X or
++       * I915_TILING_Y.
++       */
++      uint32_t stride;
++
++      /**
++       * Returned address bit 6 swizzling required for CPU access through
++       * mmap mapping.
++       */
++      uint32_t swizzle_mode;
++};
++
++struct drm_i915_gem_get_tiling {
++      /** Handle of the buffer to get tiling state for. */
++      uint32_t handle;
++
++      /**
++       * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
++       * I915_TILING_Y).
++       */
++      uint32_t tiling_mode;
++
++      /**
++       * Returned address bit 6 swizzling required for CPU access through
++       * mmap mapping.
++       */
++      uint32_t swizzle_mode;
++};
++
++#endif                                /* _I915_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_drv.c git-nokia/drivers/gpu/drm-tungsten/i915_drv.c
+--- git/drivers/gpu/drm-tungsten/i915_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,222 @@
++/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
++ */
++/*
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      i915_PCI_IDS
++};
++
++#ifdef I915_HAVE_FENCE
++extern struct drm_fence_driver i915_fence_driver;
++#endif
++
++#ifdef I915_HAVE_BUFFER
++
++static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
++static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
++
++static struct drm_bo_driver i915_bo_driver = {
++      .mem_type_prio = i915_mem_prios,
++      .mem_busy_prio = i915_busy_prios,
++      .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
++      .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
++      .create_ttm_backend_entry = i915_create_ttm_backend_entry,
++      .fence_type = i915_fence_type,
++      .invalidate_caches = i915_invalidate_caches,
++      .init_mem_type = i915_init_mem_type,
++      .evict_flags = i915_evict_flags,
++      .move = i915_move,
++      .ttm_cache_flush = i915_flush_ttm,
++      .command_stream_barrier = NULL,
++};
++#endif
++
++static int i915_suspend(struct drm_device *dev, pm_message_t state)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      if (!dev || !dev_priv) {
++              printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
++              printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
++              return -ENODEV;
++      }
++
++      if (state.event == PM_EVENT_PRETHAW)
++              return 0;
++
++      pci_save_state(dev->pdev);
++
++      i915_save_state(dev);
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_free(dev);
++#endif
++
++      if (state.event == PM_EVENT_SUSPEND) {
++              /* Shut down the device */
++              pci_disable_device(dev->pdev);
++              pci_set_power_state(dev->pdev, PCI_D3hot);
++      }
++
++      return 0;
++}
++
++static int i915_resume(struct drm_device *dev)
++{
++      pci_set_power_state(dev->pdev, PCI_D0);
++      pci_restore_state(dev->pdev);
++      if (pci_enable_device(dev->pdev))
++              return -1;
++      pci_set_master(dev->pdev);
++
++      i915_restore_state(dev);
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_init(dev);
++#endif
++
++      return 0;
++}
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static void remove(struct pci_dev *pdev);
++
++static struct drm_driver driver = {
++      /* don't use mtrr's here, the Xserver or user space app should
++       * deal with them for intel hardware.
++       */
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
++          DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
++      .load = i915_driver_load,
++      .unload = i915_driver_unload,
++      .firstopen = i915_driver_firstopen,
++      .open = i915_driver_open,
++      .lastclose = i915_driver_lastclose,
++      .preclose = i915_driver_preclose,
++      .postclose = i915_driver_postclose,
++      .suspend = i915_suspend,
++      .resume = i915_resume,
++      .device_is_agp = i915_driver_device_is_agp,
++      .get_vblank_counter = i915_get_vblank_counter,
++      .enable_vblank = i915_enable_vblank,
++      .disable_vblank = i915_disable_vblank,
++      .irq_preinstall = i915_driver_irq_preinstall,
++      .irq_postinstall = i915_driver_irq_postinstall,
++      .irq_uninstall = i915_driver_irq_uninstall,
++      .irq_handler = i915_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .proc_init = i915_gem_proc_init,
++      .proc_cleanup = i915_gem_proc_cleanup,
++      .ioctls = i915_ioctls,
++      .gem_init_object = i915_gem_init_object,
++      .gem_free_object = i915_gem_free_object,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = i915_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = remove,
++              },
++#ifdef I915_HAVE_FENCE
++      .fence_driver = &i915_fence_driver,
++#endif
++#ifdef I915_HAVE_BUFFER
++      .bo_driver = &i915_bo_driver,
++#endif
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      int ret;
++
++      /* On the 945G/GM, the chipset reports the MSI capability on the
++       * integrated graphics even though the support isn't actually there
++       * according to the published specs.  It doesn't appear to function
++       * correctly in testing on 945G.
++       * This may be a side effect of MSI having been made available for PEG
++       * and the registers being closely associated.
++       */
++      if (pdev->device != 0x2772 && pdev->device != 0x27A2)
++              (void )pci_enable_msi(pdev);
++
++      ret = drm_get_dev(pdev, ent, &driver);
++      if (ret && pdev->msi_enabled)
++              pci_disable_msi(pdev);
++      return ret;
++}
++static void remove(struct pci_dev *pdev)
++{
++      if (pdev->msi_enabled)
++              pci_disable_msi(pdev);
++      drm_cleanup_pci(pdev);
++}
++
++static int __init i915_init(void)
++{
++      driver.num_ioctls = i915_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit i915_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(i915_init);
++module_exit(i915_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_drv.h git-nokia/drivers/gpu/drm-tungsten/i915_drv.h
+--- git/drivers/gpu/drm-tungsten/i915_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2123 @@
++/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
++ */
++/*
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _I915_DRV_H_
++#define _I915_DRV_H_
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Tungsten Graphics, Inc."
++
++#define DRIVER_NAME           "i915"
++#define DRIVER_DESC           "Intel Graphics"
++#define DRIVER_DATE           "20080730"
++
++#if defined(__linux__)
++#define I915_HAVE_FENCE
++#define I915_HAVE_BUFFER
++#define I915_HAVE_GEM
++#endif
++
++/* Interface history:
++ *
++ * 1.1: Original.
++ * 1.2: Add Power Management
++ * 1.3: Add vblank support
++ * 1.4: Fix cmdbuffer path, add heap destroy
++ * 1.5: Add vblank pipe configuration
++ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
++ *      - Support vertical blank on secondary display pipe
++ * 1.8: New ioctl for ARB_Occlusion_Query
++ * 1.9: Usable page flipping and triple buffering
++ * 1.10: Plane/pipe disentangling
++ * 1.11: TTM superioctl
++ * 1.12: TTM relocation optimization
++ */
++#define DRIVER_MAJOR          1
++#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
++#define DRIVER_MINOR          13
++#else
++#define DRIVER_MINOR          6
++#endif
++#define DRIVER_PATCHLEVEL     0
++
++enum pipe {
++    PIPE_A = 0,
++    PIPE_B,
++};
++
++#ifdef I915_HAVE_BUFFER
++#define I915_MAX_VALIDATE_BUFFERS 4096
++struct drm_i915_validate_buffer;
++#endif
++
++#define WATCH_COHERENCY       0
++#define WATCH_BUF     0
++#define WATCH_EXEC    0
++#define WATCH_LRU     0
++#define WATCH_RELOC   0
++#define WATCH_INACTIVE        0
++#define WATCH_PWRITE  0
++
++typedef struct _drm_i915_ring_buffer {
++      int tail_mask;
++      unsigned long Size;
++      u8 *virtual_start;
++      int head;
++      int tail;
++      int space;
++      drm_local_map_t map;
++      struct drm_gem_object *ring_obj;
++} drm_i915_ring_buffer_t;
++
++struct mem_block {
++      struct mem_block *next;
++      struct mem_block *prev;
++      int start;
++      int size;
++      struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++};
++
++typedef struct _drm_i915_vbl_swap {
++      struct list_head head;
++      drm_drawable_t drw_id;
++      unsigned int plane;
++      unsigned int sequence;
++      int flip;
++} drm_i915_vbl_swap_t;
++
++#ifdef __linux__
++struct opregion_header;
++struct opregion_acpi;
++struct opregion_swsci;
++struct opregion_asle;
++
++struct intel_opregion {
++      struct opregion_header *header;
++      struct opregion_acpi *acpi;
++      struct opregion_swsci *swsci;
++      struct opregion_asle *asle;
++
++      int enabled;
++};
++#endif
++
++typedef struct drm_i915_private {
++      struct drm_device *dev;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio_map;
++
++      drm_i915_sarea_t *sarea_priv;
++      drm_i915_ring_buffer_t ring;
++
++      drm_dma_handle_t *status_page_dmah;
++      void *hw_status_page;
++      dma_addr_t dma_status_page;
++      uint32_t counter;
++      unsigned int status_gfx_addr;
++      drm_local_map_t hws_map;
++      struct drm_gem_object *hws_obj;
++
++      unsigned int cpp;
++
++      wait_queue_head_t irq_queue;
++      atomic_t irq_received;
++
++      int tex_lru_log_granularity;
++      int allow_batchbuffer;
++      struct mem_block *agp_heap;
++      unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
++      int vblank_pipe;
++      DRM_SPINTYPE user_irq_lock;
++      int user_irq_refcount;
++      int fence_irq_on;
++      uint32_t irq_mask_reg;
++      int irq_enabled;
++
++#ifdef I915_HAVE_FENCE
++      uint32_t flush_sequence;
++      uint32_t flush_flags;
++      uint32_t flush_pending;
++      uint32_t saved_flush_status;
++#endif
++#ifdef I915_HAVE_BUFFER
++      void *agp_iomap;
++      unsigned int max_validate_buffers;
++      struct mutex cmdbuf_mutex;
++      struct drm_i915_validate_buffer *val_bufs;
++#endif
++
++      DRM_SPINTYPE swaps_lock;
++      drm_i915_vbl_swap_t vbl_swaps;
++      unsigned int swaps_pending;
++#if defined(I915_HAVE_BUFFER)
++      /* DRI2 sarea */
++      struct drm_buffer_object *sarea_bo;
++      struct drm_bo_kmap_obj sarea_kmap;
++#endif
++
++#ifdef __linux__
++      struct intel_opregion opregion;
++#endif
++
++      /* Register state */
++      u8 saveLBB;
++      u32 saveDSPACNTR;
++      u32 saveDSPBCNTR;
++      u32 saveDSPARB;
++      u32 savePIPEACONF;
++      u32 savePIPEBCONF;
++      u32 savePIPEASRC;
++      u32 savePIPEBSRC;
++      u32 saveFPA0;
++      u32 saveFPA1;
++      u32 saveDPLL_A;
++      u32 saveDPLL_A_MD;
++      u32 saveHTOTAL_A;
++      u32 saveHBLANK_A;
++      u32 saveHSYNC_A;
++      u32 saveVTOTAL_A;
++      u32 saveVBLANK_A;
++      u32 saveVSYNC_A;
++      u32 saveBCLRPAT_A;
++      u32 savePIPEASTAT;
++      u32 saveDSPASTRIDE;
++      u32 saveDSPASIZE;
++      u32 saveDSPAPOS;
++      u32 saveDSPAADDR;
++      u32 saveDSPASURF;
++      u32 saveDSPATILEOFF;
++      u32 savePFIT_PGM_RATIOS;
++      u32 saveBLC_PWM_CTL;
++      u32 saveBLC_PWM_CTL2;
++      u32 saveFPB0;
++      u32 saveFPB1;
++      u32 saveDPLL_B;
++      u32 saveDPLL_B_MD;
++      u32 saveHTOTAL_B;
++      u32 saveHBLANK_B;
++      u32 saveHSYNC_B;
++      u32 saveVTOTAL_B;
++      u32 saveVBLANK_B;
++      u32 saveVSYNC_B;
++      u32 saveBCLRPAT_B;
++      u32 savePIPEBSTAT;
++      u32 saveDSPBSTRIDE;
++      u32 saveDSPBSIZE;
++      u32 saveDSPBPOS;
++      u32 saveDSPBADDR;
++      u32 saveDSPBSURF;
++      u32 saveDSPBTILEOFF;
++      u32 saveVGA0;
++      u32 saveVGA1;
++      u32 saveVGA_PD;
++      u32 saveVGACNTRL;
++      u32 saveADPA;
++      u32 saveLVDS;
++      u32 savePP_ON_DELAYS;
++      u32 savePP_OFF_DELAYS;
++      u32 saveDVOA;
++      u32 saveDVOB;
++      u32 saveDVOC;
++      u32 savePP_ON;
++      u32 savePP_OFF;
++      u32 savePP_CONTROL;
++      u32 savePP_DIVISOR;
++      u32 savePFIT_CONTROL;
++      u32 save_palette_a[256];
++      u32 save_palette_b[256];
++      u32 saveFBC_CFB_BASE;
++      u32 saveFBC_LL_BASE;
++      u32 saveFBC_CONTROL;
++      u32 saveFBC_CONTROL2;
++      u32 saveIER;
++      u32 saveIIR;
++      u32 saveIMR;
++      u32 saveCACHE_MODE_0;
++      u32 saveD_STATE;
++      u32 saveCG_2D_DIS;
++      u32 saveMI_ARB_STATE;
++      u32 saveSWF0[16];
++      u32 saveSWF1[16];
++      u32 saveSWF2[3];
++      u8 saveMSR;
++      u8 saveSR[8];
++      u8 saveGR[25];
++      u8 saveAR_INDEX;
++      u8 saveAR[21];
++      u8 saveDACMASK;
++      u8 saveDACDATA[256*3]; /* 256 3-byte colors */
++      u8 saveCR[37];
++
++      struct {
++#ifdef __linux__
++              struct drm_mm gtt_space;
++#endif
++              /**
++               * List of objects currently involved in rendering from the
++               * ringbuffer.
++               *
++               * A reference is held on the buffer while on this list.
++               */
++              struct list_head active_list;
++
++              /**
++               * List of objects which are not in the ringbuffer but which
++               * still have a write_domain which needs to be flushed before
++               * unbinding.
++               *
++               * A reference is held on the buffer while on this list.
++               */
++              struct list_head flushing_list;
++
++              /**
++               * LRU list of objects which are not in the ringbuffer and
++               * are ready to unbind, but are still in the GTT.
++               *
++               * A reference is not held on the buffer while on this list,
++               * as merely being GTT-bound shouldn't prevent its being
++               * freed, and we'll pull it off the list in the free path.
++               */
++              struct list_head inactive_list;
++
++              /**
++               * List of breadcrumbs associated with GPU requests currently
++               * outstanding.
++               */
++              struct list_head request_list;
++#ifdef __linux__
++              /**
++               * We leave the user IRQ off as much as possible,
++               * but this means that requests will finish and never
++               * be retired once the system goes idle. Set a timer to
++               * fire periodically while the ring is running. When it
++               * fires, go retire requests.
++               */
++              struct delayed_work retire_work;
++#endif
++              uint32_t next_gem_seqno;
++
++              /**
++               * Waiting sequence number, if any
++               */
++              uint32_t waiting_gem_seqno;
++
++              /**
++               * Last seq seen at irq time
++               */
++              uint32_t irq_gem_seqno;
++
++              /**
++               * Flag if the X Server, and thus DRM, is not currently in
++               * control of the device.
++               *
++               * This is set between LeaveVT and EnterVT.  It needs to be
++               * replaced with a semaphore.  It also needs to be
++               * transitioned away from for kernel modesetting.
++               */
++              int suspended;
++
++              /**
++               * Flag if the hardware appears to be wedged.
++               *
++               * This is set when attempts to idle the device timeout.
++               * It prevents command submission from occurring and makes
++               * every pending request fail
++               */
++              int wedged;
++
++              /** Bit 6 swizzling required for X tiling */
++              uint32_t bit_6_swizzle_x;
++              /** Bit 6 swizzling required for Y tiling */
++              uint32_t bit_6_swizzle_y;
++      } mm;
++} drm_i915_private_t;
++
++struct drm_i915_file_private {
++      struct {
++              uint32_t last_gem_seqno;
++              uint32_t last_gem_throttle_seqno;
++      } mm;
++};
++
++enum intel_chip_family {
++      CHIP_I8XX = 0x01,
++      CHIP_I9XX = 0x02,
++      CHIP_I915 = 0x04,
++      CHIP_I965 = 0x08,
++};
++
++/** driver private structure attached to each drm_gem_object */
++struct drm_i915_gem_object {
++      struct drm_gem_object *obj;
++
++      /** Current space allocated to this object in the GTT, if any. */
++      struct drm_mm_node *gtt_space;
++
++      /** This object's place on the active/flushing/inactive lists */
++      struct list_head list;
++
++      /**
++       * This is set if the object is on the active or flushing lists
++       * (has pending rendering), and is not set if it's on inactive (ready
++       * to be unbound).
++       */
++      int active;
++
++      /**
++       * This is set if the object has been written to since last bound
++       * to the GTT
++       */
++      int dirty;
++
++      /** AGP memory structure for our GTT binding. */
++      DRM_AGP_MEM *agp_mem;
++
++      struct page **page_list;
++
++      /**
++       * Current offset of the object in GTT space.
++       *
++       * This is the same as gtt_space->start
++       */
++      uint32_t gtt_offset;
++
++      /** Boolean whether this object has a valid gtt offset. */
++      int gtt_bound;
++
++      /** How many users have pinned this object in GTT space */
++      int pin_count;
++
++      /** Breadcrumb of last rendering to the buffer. */
++      uint32_t last_rendering_seqno;
++
++      /** Current tiling mode for the object. */
++      uint32_t tiling_mode;
++
++      /**
++       * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
++       * GEM_DOMAIN_CPU is not in the object's read domain.
++       */
++      uint8_t *page_cpu_valid;
++};
++
++/**
++ * Request queue structure.
++ *
++ * The request queue allows us to note sequence numbers that have been emitted
++ * and may be associated with active buffers to be retired.
++ *
++ * By keeping this list, we can avoid having to do questionable
++ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
++ * an emission time with seqnos for tracking how far ahead of the GPU we are.
++ */
++struct drm_i915_gem_request {
++      /** GEM sequence number associated with this request. */
++      uint32_t seqno;
++
++      /** Time at which this request was emitted, in jiffies. */
++      unsigned long emitted_jiffies;
++
++      /** Cache domains that were flushed at the start of the request. */
++      uint32_t flush_domains;
++
++      struct list_head list;
++};
++
++extern struct drm_ioctl_desc i915_ioctls[];
++extern int i915_max_ioctl;
++
++                              /* i915_dma.c */
++extern void i915_kernel_lost_context(struct drm_device * dev);
++extern int i915_driver_load(struct drm_device *, unsigned long flags);
++extern int i915_driver_unload(struct drm_device *);
++extern void i915_driver_lastclose(struct drm_device * dev);
++extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
++extern void i915_driver_preclose(struct drm_device *dev,
++                               struct drm_file *file_priv);
++extern void i915_driver_postclose(struct drm_device *dev,
++                                struct drm_file *file_priv);
++extern int i915_driver_device_is_agp(struct drm_device * dev);
++extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
++                            unsigned long arg);
++extern void i915_emit_breadcrumb(struct drm_device *dev);
++extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
++extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
++extern int i915_driver_firstopen(struct drm_device *dev);
++extern int i915_dispatch_batchbuffer(struct drm_device * dev,
++                                   drm_i915_batchbuffer_t * batch);
++extern int i915_quiescent(struct drm_device *dev);
++extern int i915_init_hardware_status(struct drm_device *dev);
++extern void i915_free_hardware_status(struct drm_device *dev);
++
++int i915_emit_box(struct drm_device * dev,
++                struct drm_clip_rect __user * boxes,
++                int i, int DR1, int DR4);
++
++/* i915_irq.c */
++extern int i915_irq_emit(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int i915_irq_wait(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++
++extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
++extern void i915_driver_irq_preinstall(struct drm_device * dev);
++extern int i915_driver_irq_postinstall(struct drm_device * dev);
++extern void i915_driver_irq_uninstall(struct drm_device * dev);
++extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int i915_emit_irq(struct drm_device * dev);
++extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
++extern int i915_enable_vblank(struct drm_device *dev, int crtc);
++extern void i915_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int i915_vblank_swap(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
++extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
++
++/* i915_mem.c */
++extern int i915_mem_alloc(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++extern int i915_mem_free(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int i915_mem_init_heap(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv);
++extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern void i915_mem_takedown(struct mem_block **heap);
++extern void i915_mem_release(struct drm_device * dev,
++                           struct drm_file *file_priv,
++                           struct mem_block *heap);
++
++/* i915_suspend.c */
++extern int i915_save_state(struct drm_device *dev);
++extern int i915_restore_state(struct drm_device *dev);
++
++#ifdef I915_HAVE_FENCE
++/* i915_fence.c */
++extern void i915_fence_handler(struct drm_device *dev);
++extern void i915_invalidate_reported_sequence(struct drm_device *dev);
++
++#endif
++
++#ifdef I915_HAVE_BUFFER
++/* i915_buffer.c */
++extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
++extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
++                         uint32_t *type);
++extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
++extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
++                             struct drm_mem_type_manager *man);
++extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
++extern int i915_move(struct drm_buffer_object *bo, int evict,
++              int no_wait, struct drm_bo_mem_reg *new_mem);
++void i915_flush_ttm(struct drm_ttm *ttm);
++/* i915_execbuf.c */
++int i915_execbuffer(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv);
++/* i915_gem.c */
++int i915_gem_init_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_create_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv);
++int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv);
++int i915_gem_execbuffer(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++int i915_gem_set_tiling(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_get_tiling(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++void i915_gem_load(struct drm_device *dev);
++int i915_gem_proc_init(struct drm_minor *minor);
++void i915_gem_proc_cleanup(struct drm_minor *minor);
++int i915_gem_init_object(struct drm_gem_object *obj);
++void i915_gem_free_object(struct drm_gem_object *obj);
++int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
++void i915_gem_object_unpin(struct drm_gem_object *obj);
++void i915_gem_lastclose(struct drm_device *dev);
++uint32_t i915_get_gem_seqno(struct drm_device *dev);
++void i915_gem_retire_requests(struct drm_device *dev);
++void i915_gem_retire_work_handler(struct work_struct *work);
++void i915_gem_clflush_object(struct drm_gem_object *obj);
++#endif
++
++/* i915_gem_tiling.c */
++void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
++
++/* i915_gem_debug.c */
++#if WATCH_INACTIVE
++void i915_verify_inactive(struct drm_device *dev, char *file, int line);
++#else
++#define i915_verify_inactive(dev,file,line)
++#endif
++void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
++void i915_gem_dump_object(struct drm_gem_object *obj, int len,
++                        const char *where, uint32_t mark);
++void i915_dump_lru(struct drm_device *dev, const char *where);
++
++#ifdef __linux__
++/* i915_opregion.c */
++extern int intel_opregion_init(struct drm_device *dev);
++extern void intel_opregion_free(struct drm_device *dev);
++extern void opregion_asle_intr(struct drm_device *dev);
++extern void opregion_enable_asle(struct drm_device *dev);
++#endif
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++extern void intel_init_chipset_flush_compat(struct drm_device *dev);
++extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
++#endif
++#endif
++
++#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
++#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
++#define I915_READ16(reg)      DRM_READ16(dev_priv->mmio_map, (reg))
++#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
++#define I915_READ8(reg)               DRM_READ8(dev_priv->mmio_map, (reg))
++#define I915_WRITE8(reg,val)  DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
++
++#if defined(__FreeBSD__)
++typedef boolean_t bool;
++#endif
++
++#define I915_VERBOSE 0
++#define I915_RING_VALIDATE 0
++
++#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
++
++#define RING_LOCALS   unsigned int outring, ringmask, outcount; \
++                      volatile char *virt;
++
++#if I915_RING_VALIDATE
++void i915_ring_validate(struct drm_device *dev, const char *func, int line);
++#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
++#else
++#define I915_RING_DO_VALIDATE(dev)
++#endif
++
++#define BEGIN_LP_RING(n) do {                         \
++      if (I915_VERBOSE)                               \
++              DRM_DEBUG("BEGIN_LP_RING(%d)\n",        \
++                               (n));                  \
++      I915_RING_DO_VALIDATE(dev);                     \
++      if (dev_priv->ring.space < (n)*4)                      \
++              i915_wait_ring(dev, (n)*4, __FUNCTION__);      \
++      outcount = 0;                                   \
++      outring = dev_priv->ring.tail;                  \
++      ringmask = dev_priv->ring.tail_mask;            \
++      virt = dev_priv->ring.virtual_start;            \
++} while (0)
++
++#define OUT_RING(n) do {                                      \
++      if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
++      *(volatile unsigned int *)(virt + outring) = (n);               \
++      outcount++;                                             \
++      outring += 4;                                           \
++      outring &= ringmask;                                    \
++} while (0)
++
++#define ADVANCE_LP_RING() do {                                                \
++      if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);   \
++      I915_RING_DO_VALIDATE(dev);                                     \
++      dev_priv->ring.tail = outring;                                  \
++      dev_priv->ring.space -= outcount * 4;                           \
++      I915_WRITE(PRB0_TAIL, outring);                 \
++} while(0)
++
++extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
++
++#define BREADCRUMB_BITS 31
++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
++
++#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
++/**
++ * Reads a dword out of the status page, which is written to from the command
++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
++ * MI_STORE_DATA_IMM.
++ *
++ * The following dwords have a reserved meaning:
++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
++ * 4: ring 0 head pointer
++ * 5: ring 1 head pointer (915-class)
++ * 6: ring 2 head pointer (915-class)
++ *
++ * The area from dword 0x10 to 0x3ff is available for driver usage.
++ */
++#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
++#define I915_GEM_HWS_INDEX            0x10
++
++/* MCH MMIO space */
++/** 915-945 and GM965 MCH register controlling DRAM channel access */
++#define DCC           0x200
++#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL            (0 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC   (1 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED  (2 << 0)
++#define DCC_ADDRESSING_MODE_MASK                      (3 << 0)
++#define DCC_CHANNEL_XOR_DISABLE                               (1 << 10)
++
++/** 965 MCH register controlling DRAM channel configuration */
++#define CHDECMISC             0x111
++#define CHDECMISC_FLEXMEMORY          (1 << 1)
++
++/*
++ * The Bridge device's PCI config space has information about the
++ * fb aperture size and the amount of pre-reserved memory.
++ */
++#define INTEL_GMCH_CTRL               0x52
++#define INTEL_GMCH_ENABLED    0x4
++#define INTEL_GMCH_MEM_MASK   0x1
++#define INTEL_GMCH_MEM_64M    0x1
++#define INTEL_GMCH_MEM_128M   0
++
++#define INTEL_855_GMCH_GMS_MASK               (0x7 << 4)
++#define INTEL_855_GMCH_GMS_DISABLED   (0x0 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_1M  (0x1 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_4M  (0x2 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_8M  (0x3 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
++
++#define INTEL_915G_GMCH_GMS_STOLEN_48M        (0x6 << 4)
++#define INTEL_915G_GMCH_GMS_STOLEN_64M        (0x7 << 4)
++
++/* PCI config space */
++
++#define HPLLCC        0xc0 /* 855 only */
++#define   GC_CLOCK_CONTROL_MASK               (3 << 0)
++#define   GC_CLOCK_133_200            (0 << 0)
++#define   GC_CLOCK_100_200            (1 << 0)
++#define   GC_CLOCK_100_133            (2 << 0)
++#define   GC_CLOCK_166_250            (3 << 0)
++#define GCFGC 0xf0 /* 915+ only */
++#define   GC_LOW_FREQUENCY_ENABLE     (1 << 7)
++#define   GC_DISPLAY_CLOCK_190_200_MHZ        (0 << 4)
++#define   GC_DISPLAY_CLOCK_333_MHZ    (4 << 4)
++#define   GC_DISPLAY_CLOCK_MASK               (7 << 4)
++#define LBB   0xf4
++
++/* VGA stuff */
++
++#define VGA_ST01_MDA 0x3ba
++#define VGA_ST01_CGA 0x3da
++
++#define VGA_MSR_WRITE 0x3c2
++#define VGA_MSR_READ 0x3cc
++#define   VGA_MSR_MEM_EN (1<<1)
++#define   VGA_MSR_CGA_MODE (1<<0)
++
++#define VGA_SR_INDEX 0x3c4
++#define VGA_SR_DATA 0x3c5
++
++#define VGA_AR_INDEX 0x3c0
++#define   VGA_AR_VID_EN (1<<5)
++#define VGA_AR_DATA_WRITE 0x3c0
++#define VGA_AR_DATA_READ 0x3c1
++
++#define VGA_GR_INDEX 0x3ce
++#define VGA_GR_DATA 0x3cf
++/* GR05 */
++#define   VGA_GR_MEM_READ_MODE_SHIFT 3
++#define     VGA_GR_MEM_READ_MODE_PLANE 1
++/* GR06 */
++#define   VGA_GR_MEM_MODE_MASK 0xc
++#define   VGA_GR_MEM_MODE_SHIFT 2
++#define   VGA_GR_MEM_A0000_AFFFF 0
++#define   VGA_GR_MEM_A0000_BFFFF 1
++#define   VGA_GR_MEM_B0000_B7FFF 2
++#define   VGA_GR_MEM_B0000_BFFFF 3
++
++#define VGA_DACMASK 0x3c6
++#define VGA_DACRX 0x3c7
++#define VGA_DACWX 0x3c8
++#define VGA_DACDATA 0x3c9
++
++#define VGA_CR_INDEX_MDA 0x3b4
++#define VGA_CR_DATA_MDA 0x3b5
++#define VGA_CR_INDEX_CGA 0x3d4
++#define VGA_CR_DATA_CGA 0x3d5
++
++/*
++ * Memory interface instructions used by the kernel
++ */
++#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
++
++#define MI_NOOP                       MI_INSTR(0, 0)
++#define MI_USER_INTERRUPT     MI_INSTR(0x02, 0)
++#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
++#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
++#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
++#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
++#define MI_FLUSH              MI_INSTR(0x04, 0)
++#define   MI_READ_FLUSH               (1 << 0)
++#define   MI_EXE_FLUSH                (1 << 1)
++#define   MI_NO_WRITE_FLUSH   (1 << 2)
++#define   MI_SCENE_COUNT      (1 << 3) /* just increment scene count */
++#define   MI_END_SCENE                (1 << 4) /* flush binner and incr scene count */
++#define MI_BATCH_BUFFER_END   MI_INSTR(0x0a, 0)
++#define MI_REPORT_HEAD                MI_INSTR(0x07, 0)
++#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
++#define MI_STORE_DWORD_IMM    MI_INSTR(0x20, 1)
++#define   MI_MEM_VIRTUAL      (1 << 22) /* 965+ only */
++#define MI_STORE_DWORD_INDEX  MI_INSTR(0x21, 1)
++#define   MI_STORE_DWORD_INDEX_SHIFT 2
++#define MI_LOAD_REGISTER_IMM  MI_INSTR(0x22, 1)
++#define MI_BATCH_BUFFER               MI_INSTR(0x30, 1)
++#define   MI_BATCH_NON_SECURE (1)
++#define   MI_BATCH_NON_SECURE_I965 (1<<8)
++#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
++
++/*
++ * 3D instructions used by the kernel
++ */
++#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
++
++#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
++#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define   SC_UPDATE_SCISSOR       (0x1<<1)
++#define   SC_ENABLE_MASK          (0x1<<0)
++#define   SC_ENABLE               (0x1<<0)
++#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
++#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
++#define   SCI_YMIN_MASK      (0xffff<<16)
++#define   SCI_XMIN_MASK      (0xffff<<0)
++#define   SCI_YMAX_MASK      (0xffff<<16)
++#define   SCI_XMAX_MASK      (0xffff<<0)
++#define GFX_OP_SCISSOR_ENABLE  ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define GFX_OP_SCISSOR_RECT    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
++#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
++#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
++#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
++#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
++#define GFX_OP_DESTBUFFER_INFO         ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
++#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
++#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
++#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
++#define XY_SRC_COPY_BLT_CMD           ((2<<29)|(0x53<<22)|6)
++#define XY_MONO_SRC_COPY_IMM_BLT      ((2<<29)|(0x71<<22)|5)
++#define XY_SRC_COPY_BLT_WRITE_ALPHA   (1<<21)
++#define XY_SRC_COPY_BLT_WRITE_RGB     (1<<20)
++#define   BLT_DEPTH_8                 (0<<24)
++#define   BLT_DEPTH_16_565            (1<<24)
++#define   BLT_DEPTH_16_1555           (2<<24)
++#define   BLT_DEPTH_32                        (3<<24)
++#define   BLT_ROP_GXCOPY              (0xcc<<16)
++#define XY_SRC_COPY_BLT_SRC_TILED     (1<<15) /* 965+ only */
++#define XY_SRC_COPY_BLT_DST_TILED     (1<<11) /* 965+ only */
++#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
++#define   ASYNC_FLIP                (1<<22)
++#define   DISPLAY_PLANE_A           (0<<20)
++#define   DISPLAY_PLANE_B           (1<<20)
++
++/*
++ * Instruction and interrupt control regs
++ */
++
++#define PRB0_TAIL     0x02030
++#define PRB0_HEAD     0x02034
++#define PRB0_START    0x02038
++#define PRB0_CTL      0x0203c
++#define   TAIL_ADDR           0x001FFFF8
++#define   HEAD_WRAP_COUNT     0xFFE00000
++#define   HEAD_WRAP_ONE               0x00200000
++#define   HEAD_ADDR           0x001FFFFC
++#define   RING_NR_PAGES               0x001FF000
++#define   RING_REPORT_MASK    0x00000006
++#define   RING_REPORT_64K     0x00000002
++#define   RING_REPORT_128K    0x00000004
++#define   RING_NO_REPORT      0x00000000
++#define   RING_VALID_MASK     0x00000001
++#define   RING_VALID          0x00000001
++#define   RING_INVALID                0x00000000
++#define PRB1_TAIL     0x02040 /* 915+ only */
++#define PRB1_HEAD     0x02044 /* 915+ only */
++#define PRB1_START    0x02048 /* 915+ only */
++#define PRB1_CTL      0x0204c /* 915+ only */
++#define ACTHD_I965    0x02074
++#define HWS_PGA               0x02080
++#define HWS_ADDRESS_MASK      0xfffff000
++#define HWS_START_ADDRESS_SHIFT       4
++#define IPEIR         0x02088
++#define NOPID         0x02094
++#define HWSTAM                0x02098
++#define SCPD0         0x0209c /* 915+ only */
++#define IER           0x020a0
++#define IIR           0x020a4
++#define IMR           0x020a8
++#define ISR           0x020ac
++#define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT          (1<<18)
++#define   I915_DISPLAY_PORT_INTERRUPT                 (1<<17)
++#define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT  (1<<15)
++#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT    (1<<14)
++#define   I915_HWB_OOM_INTERRUPT                      (1<<13)
++#define   I915_SYNC_STATUS_INTERRUPT                  (1<<12)
++#define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
++#define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
++#define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT   (1<<9)
++#define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
++#define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT                (1<<7)
++#define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT         (1<<6)
++#define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT                (1<<5)
++#define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT         (1<<4)
++#define   I915_DEBUG_INTERRUPT                                (1<<2)
++#define   I915_USER_INTERRUPT                         (1<<1)
++#define   I915_ASLE_INTERRUPT                         (1<<0)
++#define EIR           0x020b0
++#define EMR           0x020b4
++#define ESR           0x020b8
++#define INSTPM                0x020c0
++#define ACTHD         0x020c8
++#define FW_BLC                0x020d8
++#define FW_BLC_SELF   0x020e0 /* 915+ only */
++#define MI_ARB_STATE  0x020e4 /* 915+ only */
++#define CACHE_MODE_0  0x02120 /* 915+ only */
++#define   CM0_MASK_SHIFT          16
++#define   CM0_IZ_OPT_DISABLE      (1<<6)
++#define   CM0_ZR_OPT_DISABLE      (1<<5)
++#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
++#define   CM0_COLOR_EVICT_DISABLE (1<<3)
++#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
++#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
++#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
++
++/*
++ * Framebuffer compression (915+ only)
++ */
++
++#define FBC_CFB_BASE          0x03200 /* 4k page aligned */
++#define FBC_LL_BASE           0x03204 /* 4k page aligned */
++#define FBC_CONTROL           0x03208
++#define   FBC_CTL_EN          (1<<31)
++#define   FBC_CTL_PERIODIC    (1<<30)
++#define   FBC_CTL_INTERVAL_SHIFT (16)
++#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
++#define   FBC_CTL_STRIDE_SHIFT        (5)
++#define   FBC_CTL_FENCENO     (1<<0)
++#define FBC_COMMAND           0x0320c
++#define   FBC_CMD_COMPRESS    (1<<0)
++#define FBC_STATUS            0x03210
++#define   FBC_STAT_COMPRESSING        (1<<31)
++#define   FBC_STAT_COMPRESSED (1<<30)
++#define   FBC_STAT_MODIFIED   (1<<29)
++#define   FBC_STAT_CURRENT_LINE       (1<<0)
++#define FBC_CONTROL2          0x03214
++#define   FBC_CTL_FENCE_DBL   (0<<4)
++#define   FBC_CTL_IDLE_IMM    (0<<2)
++#define   FBC_CTL_IDLE_FULL   (1<<2)
++#define   FBC_CTL_IDLE_LINE   (2<<2)
++#define   FBC_CTL_IDLE_DEBUG  (3<<2)
++#define   FBC_CTL_CPU_FENCE   (1<<1)
++#define   FBC_CTL_PLANEA      (0<<0)
++#define   FBC_CTL_PLANEB      (1<<0)
++#define FBC_FENCE_OFF         0x0321b
++
++#define FBC_LL_SIZE           (1536)
++
++/*
++ * GPIO regs
++ */
++#define GPIOA                 0x5010
++#define GPIOB                 0x5014
++#define GPIOC                 0x5018
++#define GPIOD                 0x501c
++#define GPIOE                 0x5020
++#define GPIOF                 0x5024
++#define GPIOG                 0x5028
++#define GPIOH                 0x502c
++# define GPIO_CLOCK_DIR_MASK          (1 << 0)
++# define GPIO_CLOCK_DIR_IN            (0 << 1)
++# define GPIO_CLOCK_DIR_OUT           (1 << 1)
++# define GPIO_CLOCK_VAL_MASK          (1 << 2)
++# define GPIO_CLOCK_VAL_OUT           (1 << 3)
++# define GPIO_CLOCK_VAL_IN            (1 << 4)
++# define GPIO_CLOCK_PULLUP_DISABLE    (1 << 5)
++# define GPIO_DATA_DIR_MASK           (1 << 8)
++# define GPIO_DATA_DIR_IN             (0 << 9)
++# define GPIO_DATA_DIR_OUT            (1 << 9)
++# define GPIO_DATA_VAL_MASK           (1 << 10)
++# define GPIO_DATA_VAL_OUT            (1 << 11)
++# define GPIO_DATA_VAL_IN             (1 << 12)
++# define GPIO_DATA_PULLUP_DISABLE     (1 << 13)
++
++/*
++ * Clock control & power management
++ */
++
++#define VGA0  0x6000
++#define VGA1  0x6004
++#define VGA_PD        0x6010
++#define   VGA0_PD_P2_DIV_4    (1 << 7)
++#define   VGA0_PD_P1_DIV_2    (1 << 5)
++#define   VGA0_PD_P1_SHIFT    0
++#define   VGA0_PD_P1_MASK     (0x1f << 0)
++#define   VGA1_PD_P2_DIV_4    (1 << 15)
++#define   VGA1_PD_P1_DIV_2    (1 << 13)
++#define   VGA1_PD_P1_SHIFT    8
++#define   VGA1_PD_P1_MASK     (0x1f << 8)
++#define DPLL_A        0x06014
++#define DPLL_B        0x06018
++#define   DPLL_VCO_ENABLE             (1 << 31)
++#define   DPLL_DVO_HIGH_SPEED         (1 << 30)
++#define   DPLL_SYNCLOCK_ENABLE                (1 << 29)
++#define   DPLL_VGA_MODE_DIS           (1 << 28)
++#define   DPLLB_MODE_DAC_SERIAL               (1 << 26) /* i915 */
++#define   DPLLB_MODE_LVDS             (2 << 26) /* i915 */
++#define   DPLL_MODE_MASK              (3 << 26)
++#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++#define   DPLLB_LVDS_P2_CLOCK_DIV_14  (0 << 24) /* i915 */
++#define   DPLLB_LVDS_P2_CLOCK_DIV_7   (1 << 24) /* i915 */
++#define   DPLL_P2_CLOCK_DIV_MASK      0x03000000 /* i915 */
++#define   DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++
++#define I915_FIFO_UNDERRUN_STATUS             (1UL<<31)
++#define I915_CRC_ERROR_ENABLE                 (1UL<<29)
++#define I915_CRC_DONE_ENABLE                  (1UL<<28)
++#define I915_GMBUS_EVENT_ENABLE                       (1UL<<27)
++#define I915_VSYNC_INTERRUPT_ENABLE           (1UL<<25)
++#define I915_DISPLAY_LINE_COMPARE_ENABLE      (1UL<<24)
++#define I915_DPST_EVENT_ENABLE                        (1UL<<23)
++#define I915_LEGACY_BLC_EVENT_ENABLE          (1UL<<22)
++#define I915_ODD_FIELD_INTERRUPT_ENABLE               (1UL<<21)
++#define I915_EVEN_FIELD_INTERRUPT_ENABLE      (1UL<<20)
++#define I915_START_VBLANK_INTERRUPT_ENABLE    (1UL<<18)       /* 965 or later */
++#define I915_VBLANK_INTERRUPT_ENABLE          (1UL<<17)
++#define I915_OVERLAY_UPDATED_ENABLE           (1UL<<16)
++#define I915_CRC_ERROR_INTERRUPT_STATUS               (1UL<<13)
++#define I915_CRC_DONE_INTERRUPT_STATUS                (1UL<<12)
++#define I915_GMBUS_INTERRUPT_STATUS           (1UL<<11)
++#define I915_VSYNC_INTERRUPT_STATUS           (1UL<<9)
++#define I915_DISPLAY_LINE_COMPARE_STATUS      (1UL<<8)
++#define I915_DPST_EVENT_STATUS                        (1UL<<7)
++#define I915_LEGACY_BLC_EVENT_STATUS          (1UL<<6)
++#define I915_ODD_FIELD_INTERRUPT_STATUS               (1UL<<5)
++#define I915_EVEN_FIELD_INTERRUPT_STATUS      (1UL<<4)
++#define I915_START_VBLANK_INTERRUPT_STATUS    (1UL<<2)        /* 965 or later */
++#define I915_VBLANK_INTERRUPT_STATUS          (1UL<<1)
++#define I915_OVERLAY_UPDATED_STATUS           (1UL<<0)
++
++#define SRX_INDEX             0x3c4
++#define SRX_DATA              0x3c5
++#define SR01                  1
++#define SR01_SCREEN_OFF               (1<<5)
++
++#define PPCR                  0x61204
++#define PPCR_ON                       (1<<0)
++
++#define DVOB                  0x61140
++#define DVOB_ON                       (1<<31)
++#define DVOC                  0x61160
++#define DVOC_ON                       (1<<31)
++#define LVDS                  0x61180
++#define LVDS_ON                       (1<<31)
++
++#define ADPA                  0x61100
++#define ADPA_DPMS_MASK                (~(3<<10))
++#define ADPA_DPMS_ON          (0<<10)
++#define ADPA_DPMS_SUSPEND     (1<<10)
++#define ADPA_DPMS_STANDBY     (2<<10)
++#define ADPA_DPMS_OFF         (3<<10)
++
++#define RING_TAIL             0x00
++#define TAIL_ADDR             0x001FFFF8
++#define RING_HEAD             0x04
++#define HEAD_WRAP_COUNT               0xFFE00000
++#define HEAD_WRAP_ONE         0x00200000
++#define HEAD_ADDR             0x001FFFFC
++#define RING_START            0x08
++#define START_ADDR            0xFFFFF000
++#define RING_LEN              0x0C
++#define RING_NR_PAGES         0x001FF000
++#define RING_REPORT_MASK      0x00000006
++#define RING_REPORT_64K               0x00000002
++#define RING_REPORT_128K      0x00000004
++#define RING_NO_REPORT                0x00000000
++#define RING_VALID_MASK               0x00000001
++#define RING_VALID            0x00000001
++#define RING_INVALID          0x00000000
++
++/* Scratch pad debug 0 reg:
++ */
++#define   DPLL_FPA01_P1_POST_DIV_MASK_I830    0x001f0000
++/*
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++#define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS       0x003f0000
++#define   DPLL_FPA01_P1_POST_DIV_SHIFT        16
++/* i830, required in DVO non-gang */
++#define   PLL_P2_DIVIDE_BY_4          (1 << 23)
++#define   PLL_P1_DIVIDE_BY_TWO                (1 << 21) /* i830 */
++#define   PLL_REF_INPUT_DREFCLK               (0 << 13)
++#define   PLL_REF_INPUT_TVCLKINA      (1 << 13) /* i830 */
++#define   PLL_REF_INPUT_TVCLKINBC     (2 << 13) /* SDVO TVCLKIN */
++#define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++#define   PLL_REF_INPUT_MASK          (3 << 13)
++#define   PLL_LOAD_PULSE_PHASE_SHIFT          9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++#define   PLL_LOAD_PULSE_PHASE_MASK           (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++#define   DISPLAY_RATE_SELECT_FPA1            (1 << 8)
++/*
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ */
++#define   SDVO_MULTIPLIER_MASK                        0x000000ff
++#define   SDVO_MULTIPLIER_SHIFT_HIRES         4
++#define   SDVO_MULTIPLIER_SHIFT_VGA           0
++#define DPLL_A_MD 0x0601c /* 965+ only */
++/*
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
++ */
++#define   DPLL_MD_UDI_DIVIDER_MASK            0x3f000000
++#define   DPLL_MD_UDI_DIVIDER_SHIFT           24
++/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++#define   DPLL_MD_VGA_UDI_DIVIDER_MASK                0x003f0000
++#define   DPLL_MD_VGA_UDI_DIVIDER_SHIFT               16
++/*
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++#define   DPLL_MD_UDI_MULTIPLIER_MASK         0x00003f00
++#define   DPLL_MD_UDI_MULTIPLIER_SHIFT                8
++/*
++ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This best be set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK     0x0000003f
++#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT    0
++#define DPLL_B_MD 0x06020 /* 965+ only */
++#define FPA0  0x06040
++#define FPA1  0x06044
++#define FPB0  0x06048
++#define FPB1  0x0604c
++#define   FP_N_DIV_MASK               0x003f0000
++#define   FP_N_DIV_SHIFT              16
++#define   FP_M1_DIV_MASK      0x00003f00
++#define   FP_M1_DIV_SHIFT              8
++#define   FP_M2_DIV_MASK      0x0000003f
++#define   FP_M2_DIV_SHIFT              0
++#define DPLL_TEST     0x606c
++#define   DPLLB_TEST_SDVO_DIV_1               (0 << 22)
++#define   DPLLB_TEST_SDVO_DIV_2               (1 << 22)
++#define   DPLLB_TEST_SDVO_DIV_4               (2 << 22)
++#define   DPLLB_TEST_SDVO_DIV_MASK    (3 << 22)
++#define   DPLLB_TEST_N_BYPASS         (1 << 19)
++#define   DPLLB_TEST_M_BYPASS         (1 << 18)
++#define   DPLLB_INPUT_BUFFER_ENABLE   (1 << 16)
++#define   DPLLA_TEST_N_BYPASS         (1 << 3)
++#define   DPLLA_TEST_M_BYPASS         (1 << 2)
++#define   DPLLA_INPUT_BUFFER_ENABLE   (1 << 0)
++#define D_STATE               0x6104
++#define CG_2D_DIS     0x6200
++#define CG_3D_DIS     0x6204
++
++/*
++ * Palette regs
++ */
++
++#define PALETTE_A             0x0a000
++#define PALETTE_B             0x0a800
++
++/*
++ * Overlay regs
++ */
++
++#define OVADD                 0x30000
++#define DOVSTA                        0x30008
++#define OC_BUF                        (0x3<<20)
++#define OGAMC5                        0x30010
++#define OGAMC4                        0x30014
++#define OGAMC3                        0x30018
++#define OGAMC2                        0x3001c
++#define OGAMC1                        0x30020
++#define OGAMC0                        0x30024
++
++/*
++ * Display engine regs
++ */
++
++/* Pipe A timing regs */
++#define HTOTAL_A      0x60000
++#define HBLANK_A      0x60004
++#define HSYNC_A               0x60008
++#define VTOTAL_A      0x6000c
++#define VBLANK_A      0x60010
++#define VSYNC_A               0x60014
++#define PIPEASRC      0x6001c
++#define BCLRPAT_A     0x60020
++
++/* Pipe B timing regs */
++#define HTOTAL_B      0x61000
++#define HBLANK_B      0x61004
++#define HSYNC_B               0x61008
++#define VTOTAL_B      0x6100c
++#define VBLANK_B      0x61010
++#define VSYNC_B               0x61014
++#define PIPEBSRC      0x6101c
++#define BCLRPAT_B     0x61020
++
++/* VGA port control */
++#define ADPA                  0x61100
++#define   ADPA_DAC_ENABLE     (1<<31)
++#define   ADPA_DAC_DISABLE    0
++#define   ADPA_PIPE_SELECT_MASK       (1<<30)
++#define   ADPA_PIPE_A_SELECT  0
++#define   ADPA_PIPE_B_SELECT  (1<<30)
++#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define   ADPA_SETS_HVPOLARITY        0
++#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define   ADPA_VSYNC_CNTL_ENABLE 0
++#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define   ADPA_HSYNC_CNTL_ENABLE 0
++#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define   ADPA_VSYNC_ACTIVE_LOW       0
++#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define   ADPA_HSYNC_ACTIVE_LOW       0
++#define   ADPA_DPMS_MASK      (~(3<<10))
++#define   ADPA_DPMS_ON                (0<<10)
++#define   ADPA_DPMS_SUSPEND   (1<<10)
++#define   ADPA_DPMS_STANDBY   (2<<10)
++#define   ADPA_DPMS_OFF               (3<<10)
++
++/* Hotplug control (945+ only) */
++#define PORT_HOTPLUG_EN               0x61110
++#define   SDVOB_HOTPLUG_INT_EN                        (1 << 26)
++#define   SDVOC_HOTPLUG_INT_EN                        (1 << 25)
++#define   TV_HOTPLUG_INT_EN                   (1 << 18)
++#define   CRT_HOTPLUG_INT_EN                  (1 << 9)
++#define   CRT_HOTPLUG_FORCE_DETECT            (1 << 3)
++
++#define PORT_HOTPLUG_STAT     0x61114
++#define   CRT_HOTPLUG_INT_STATUS              (1 << 11)
++#define   TV_HOTPLUG_INT_STATUS                       (1 << 10)
++#define   CRT_HOTPLUG_MONITOR_MASK            (3 << 8)
++#define   CRT_HOTPLUG_MONITOR_COLOR           (3 << 8)
++#define   CRT_HOTPLUG_MONITOR_MONO            (2 << 8)
++#define   CRT_HOTPLUG_MONITOR_NONE            (0 << 8)
++#define   SDVOC_HOTPLUG_INT_STATUS            (1 << 7)
++#define   SDVOB_HOTPLUG_INT_STATUS            (1 << 6)
++
++/* SDVO port control */
++#define SDVOB                 0x61140
++#define SDVOC                 0x61160
++#define   SDVO_ENABLE         (1 << 31)
++#define   SDVO_PIPE_B_SELECT  (1 << 30)
++#define   SDVO_STALL_SELECT   (1 << 29)
++#define   SDVO_INTERRUPT_ENABLE       (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define   SDVO_PORT_MULTIPLY_MASK     (7 << 23)
++#define   SDVO_PORT_MULTIPLY_SHIFT            23
++#define   SDVO_PHASE_SELECT_MASK      (15 << 19)
++#define   SDVO_PHASE_SELECT_DEFAULT   (6 << 19)
++#define   SDVO_CLOCK_OUTPUT_INVERT    (1 << 18)
++#define   SDVOC_GANG_MODE             (1 << 16)
++#define   SDVO_BORDER_ENABLE          (1 << 7)
++#define   SDVOB_PCIE_CONCURRENCY      (1 << 3)
++#define   SDVO_DETECTED                       (1 << 2)
++/* Bits to be preserved when writing */
++#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
++#define   SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
++
++/* DVO port control */
++#define DVOA                  0x61120
++#define DVOB                  0x61140
++#define DVOC                  0x61160
++#define   DVO_ENABLE                  (1 << 31)
++#define   DVO_PIPE_B_SELECT           (1 << 30)
++#define   DVO_PIPE_STALL_UNUSED               (0 << 28)
++#define   DVO_PIPE_STALL              (1 << 28)
++#define   DVO_PIPE_STALL_TV           (2 << 28)
++#define   DVO_PIPE_STALL_MASK         (3 << 28)
++#define   DVO_USE_VGA_SYNC            (1 << 15)
++#define   DVO_DATA_ORDER_I740         (0 << 14)
++#define   DVO_DATA_ORDER_FP           (1 << 14)
++#define   DVO_VSYNC_DISABLE           (1 << 11)
++#define   DVO_HSYNC_DISABLE           (1 << 10)
++#define   DVO_VSYNC_TRISTATE          (1 << 9)
++#define   DVO_HSYNC_TRISTATE          (1 << 8)
++#define   DVO_BORDER_ENABLE           (1 << 7)
++#define   DVO_DATA_ORDER_GBRG         (1 << 6)
++#define   DVO_DATA_ORDER_RGGB         (0 << 6)
++#define   DVO_DATA_ORDER_GBRG_ERRATA  (0 << 6)
++#define   DVO_DATA_ORDER_RGGB_ERRATA  (1 << 6)
++#define   DVO_VSYNC_ACTIVE_HIGH               (1 << 4)
++#define   DVO_HSYNC_ACTIVE_HIGH               (1 << 3)
++#define   DVO_BLANK_ACTIVE_HIGH               (1 << 2)
++#define   DVO_OUTPUT_CSTATE_PIXELS    (1 << 1)        /* SDG only */
++#define   DVO_OUTPUT_SOURCE_SIZE_PIXELS       (1 << 0)        /* SDG only */
++#define   DVO_PRESERVE_MASK           (0x7<<24)
++#define DVOA_SRCDIM           0x61124
++#define DVOB_SRCDIM           0x61144
++#define DVOC_SRCDIM           0x61164
++#define   DVO_SRCDIM_HORIZONTAL_SHIFT 12
++#define   DVO_SRCDIM_VERTICAL_SHIFT   0
++
++/* LVDS port control */
++#define LVDS                  0x61180
++/*
++ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++#define   LVDS_PORT_EN                        (1 << 31)
++/* Selects pipe B for LVDS data.  Must be set on pre-965. */
++#define   LVDS_PIPEB_SELECT           (1 << 30)
++/*
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++#define   LVDS_A0A2_CLKA_POWER_MASK   (3 << 8)
++#define   LVDS_A0A2_CLKA_POWER_DOWN   (0 << 8)
++#define   LVDS_A0A2_CLKA_POWER_UP     (3 << 8)
++/*
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++#define   LVDS_A3_POWER_MASK          (3 << 6)
++#define   LVDS_A3_POWER_DOWN          (0 << 6)
++#define   LVDS_A3_POWER_UP            (3 << 6)
++/*
++ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++#define   LVDS_CLKB_POWER_MASK                (3 << 4)
++#define   LVDS_CLKB_POWER_DOWN                (0 << 4)
++#define   LVDS_CLKB_POWER_UP          (3 << 4)
++/*
++ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode.  The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++#define   LVDS_B0B3_POWER_MASK                (3 << 2)
++#define   LVDS_B0B3_POWER_DOWN                (0 << 2)
++#define   LVDS_B0B3_POWER_UP          (3 << 2)
++
++/* Panel power sequencing */
++#define PP_STATUS     0x61200
++#define   PP_ON               (1 << 31)
++/*
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++#define   PP_READY            (1 << 30)
++#define   PP_SEQUENCE_NONE    (0 << 28)
++#define   PP_SEQUENCE_ON      (1 << 28)
++#define   PP_SEQUENCE_OFF     (2 << 28)
++#define   PP_SEQUENCE_MASK    0x30000000
++#define PP_CONTROL    0x61204
++#define   POWER_TARGET_ON     (1 << 0)
++#define PP_ON_DELAYS  0x61208
++#define PP_OFF_DELAYS 0x6120c
++#define PP_DIVISOR    0x61210
++
++/* Panel fitting */
++#define PFIT_CONTROL  0x61230
++#define   PFIT_ENABLE         (1 << 31)
++#define   PFIT_PIPE_MASK      (3 << 29)
++#define   PFIT_PIPE_SHIFT     29
++#define   VERT_INTERP_DISABLE (0 << 10)
++#define   VERT_INTERP_BILINEAR        (1 << 10)
++#define   VERT_INTERP_MASK    (3 << 10)
++#define   VERT_AUTO_SCALE     (1 << 9)
++#define   HORIZ_INTERP_DISABLE        (0 << 6)
++#define   HORIZ_INTERP_BILINEAR       (1 << 6)
++#define   HORIZ_INTERP_MASK   (3 << 6)
++#define   HORIZ_AUTO_SCALE    (1 << 5)
++#define   PANEL_8TO6_DITHER_ENABLE (1 << 3)
++#define PFIT_PGM_RATIOS       0x61234
++#define   PFIT_VERT_SCALE_MASK                        0xfff00000
++#define   PFIT_HORIZ_SCALE_MASK                       0x0000fff0
++#define PFIT_AUTO_RATIOS 0x61238
++
++/* Backlight control */
++#define BLC_PWM_CTL           0x61254
++#define   BACKLIGHT_MODULATION_FREQ_SHIFT             (17)
++#define BLC_PWM_CTL2          0x61250 /* 965+ only */
++/*
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define   BACKLIGHT_MODULATION_FREQ_MASK              (0x7fff << 17)
++#define   BLM_LEGACY_MODE                             (1 << 16)
++/*
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define   BACKLIGHT_DUTY_CYCLE_SHIFT          (0)
++#define   BACKLIGHT_DUTY_CYCLE_MASK           (0xffff)
++
++/* TV port control */
++#define TV_CTL                        0x68000
++/** Enables the TV encoder */
++# define TV_ENC_ENABLE                        (1 << 31)
++/** Sources the TV encoder input from pipe B instead of A. */
++# define TV_ENC_PIPEB_SELECT          (1 << 30)
++/** Outputs composite video (DAC A only) */
++# define TV_ENC_OUTPUT_COMPOSITE      (0 << 28)
++/** Outputs SVideo video (DAC B/C) */
++# define TV_ENC_OUTPUT_SVIDEO         (1 << 28)
++/** Outputs Component video (DAC A/B/C) */
++# define TV_ENC_OUTPUT_COMPONENT      (2 << 28)
++/** Outputs Composite and SVideo (DAC A/B/C) */
++# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE       (3 << 28)
++# define TV_TRILEVEL_SYNC             (1 << 21)
++/** Enables slow sync generation (945GM only) */
++# define TV_SLOW_SYNC                 (1 << 20)
++/** Selects 4x oversampling for 480i and 576p */
++# define TV_OVERSAMPLE_4X             (0 << 18)
++/** Selects 2x oversampling for 720p and 1080i */
++# define TV_OVERSAMPLE_2X             (1 << 18)
++/** Selects no oversampling for 1080p */
++# define TV_OVERSAMPLE_NONE           (2 << 18)
++/** Selects 8x oversampling */
++# define TV_OVERSAMPLE_8X             (3 << 18)
++/** Selects progressive mode rather than interlaced */
++# define TV_PROGRESSIVE                       (1 << 17)
++/** Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
++# define TV_PAL_BURST                 (1 << 16)
++/** Field for setting delay of Y compared to C */
++# define TV_YC_SKEW_MASK              (7 << 12)
++/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
++# define TV_ENC_SDP_FIX                       (1 << 11)
++/**
++ * Enables a fix for the 915GM only.
++ *
++ * Not sure what it does.
++ */
++# define TV_ENC_C0_FIX                        (1 << 10)
++/** Bits that must be preserved by software */
++# define TV_CTL_SAVE                  ((3 << 8) | (3 << 6))
++# define TV_FUSE_STATE_MASK           (3 << 4)
++/** Read-only state that reports all features enabled */
++# define TV_FUSE_STATE_ENABLED                (0 << 4)
++/** Read-only state that reports that Macrovision is disabled in hardware*/
++# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
++/** Read-only state that reports that TV-out is disabled in hardware. */
++# define TV_FUSE_STATE_DISABLED               (2 << 4)
++/** Normal operation */
++# define TV_TEST_MODE_NORMAL          (0 << 0)
++/** Encoder test pattern 1 - combo pattern */
++# define TV_TEST_MODE_PATTERN_1               (1 << 0)
++/** Encoder test pattern 2 - full screen vertical 75% color bars */
++# define TV_TEST_MODE_PATTERN_2               (2 << 0)
++/** Encoder test pattern 3 - full screen horizontal 75% color bars */
++# define TV_TEST_MODE_PATTERN_3               (3 << 0)
++/** Encoder test pattern 4 - random noise */
++# define TV_TEST_MODE_PATTERN_4               (4 << 0)
++/** Encoder test pattern 5 - linear color ramps */
++# define TV_TEST_MODE_PATTERN_5               (5 << 0)
++/**
++ * This test mode forces the DACs to 50% of full output.
++ *
++ * This is used for load detection in combination with TVDAC_SENSE_MASK
++ */
++# define TV_TEST_MODE_MONITOR_DETECT  (7 << 0)
++# define TV_TEST_MODE_MASK            (7 << 0)
++
++#define TV_DAC                        0x68004
++/**
++ * Reports that DAC state change logic has reported change (RO).
++ *
++ * This gets cleared when TV_DAC_STATE_EN is cleared
++*/
++# define TVDAC_STATE_CHG              (1 << 31)
++# define TVDAC_SENSE_MASK             (7 << 28)
++/** Reports that DAC A voltage is above the detect threshold */
++# define TVDAC_A_SENSE                        (1 << 30)
++/** Reports that DAC B voltage is above the detect threshold */
++# define TVDAC_B_SENSE                        (1 << 29)
++/** Reports that DAC C voltage is above the detect threshold */
++# define TVDAC_C_SENSE                        (1 << 28)
++/**
++ * Enables DAC state detection logic, for load-based TV detection.
++ *
++ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
++ * to off, for load detection to work.
++ */
++# define TVDAC_STATE_CHG_EN           (1 << 27)
++/** Sets the DAC A sense value to high */
++# define TVDAC_A_SENSE_CTL            (1 << 26)
++/** Sets the DAC B sense value to high */
++# define TVDAC_B_SENSE_CTL            (1 << 25)
++/** Sets the DAC C sense value to high */
++# define TVDAC_C_SENSE_CTL            (1 << 24)
++/** Overrides the ENC_ENABLE and DAC voltage levels */
++# define DAC_CTL_OVERRIDE             (1 << 7)
++/** Sets the slew rate.  Must be preserved in software */
++# define ENC_TVDAC_SLEW_FAST          (1 << 6)
++# define DAC_A_1_3_V                  (0 << 4)
++# define DAC_A_1_1_V                  (1 << 4)
++# define DAC_A_0_7_V                  (2 << 4)
++# define DAC_A_OFF                    (3 << 4)
++# define DAC_B_1_3_V                  (0 << 2)
++# define DAC_B_1_1_V                  (1 << 2)
++# define DAC_B_0_7_V                  (2 << 2)
++# define DAC_B_OFF                    (3 << 2)
++# define DAC_C_1_3_V                  (0 << 0)
++# define DAC_C_1_1_V                  (1 << 0)
++# define DAC_C_0_7_V                  (2 << 0)
++# define DAC_C_OFF                    (3 << 0)
++
++/**
++ * CSC coefficients are stored in a floating point format with 9 bits of
++ * mantissa and 2 or 3 bits of exponent.  The exponent is represented as 2**-n,
++ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
++ * -1 (0x3) being the only legal negative value.
++ */
++#define TV_CSC_Y              0x68010
++# define TV_RY_MASK                   0x07ff0000
++# define TV_RY_SHIFT                  16
++# define TV_GY_MASK                   0x00000fff
++# define TV_GY_SHIFT                  0
++
++#define TV_CSC_Y2             0x68014
++# define TV_BY_MASK                   0x07ff0000
++# define TV_BY_SHIFT                  16
++/**
++ * Y attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AY_MASK                   0x000003ff
++# define TV_AY_SHIFT                  0
++
++#define TV_CSC_U              0x68018
++# define TV_RU_MASK                   0x07ff0000
++# define TV_RU_SHIFT                  16
++# define TV_GU_MASK                   0x000007ff
++# define TV_GU_SHIFT                  0
++
++#define TV_CSC_U2             0x6801c
++# define TV_BU_MASK                   0x07ff0000
++# define TV_BU_SHIFT                  16
++/**
++ * U attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AU_MASK                   0x000003ff
++# define TV_AU_SHIFT                  0
++
++#define TV_CSC_V              0x68020
++# define TV_RV_MASK                   0x0fff0000
++# define TV_RV_SHIFT                  16
++# define TV_GV_MASK                   0x000007ff
++# define TV_GV_SHIFT                  0
++
++#define TV_CSC_V2             0x68024
++# define TV_BV_MASK                   0x07ff0000
++# define TV_BV_SHIFT                  16
++/**
++ * V attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AV_MASK                   0x000007ff
++# define TV_AV_SHIFT                  0
++
++#define TV_CLR_KNOBS          0x68028
++/** 2s-complement brightness adjustment */
++# define TV_BRIGHTNESS_MASK           0xff000000
++# define TV_BRIGHTNESS_SHIFT          24
++/** Contrast adjustment, as a 2.6 unsigned floating point number */
++# define TV_CONTRAST_MASK             0x00ff0000
++# define TV_CONTRAST_SHIFT            16
++/** Saturation adjustment, as a 2.6 unsigned floating point number */
++# define TV_SATURATION_MASK           0x0000ff00
++# define TV_SATURATION_SHIFT          8
++/** Hue adjustment, as an integer phase angle in degrees */
++# define TV_HUE_MASK                  0x000000ff
++# define TV_HUE_SHIFT                 0
++
++#define TV_CLR_LEVEL          0x6802c
++/** Controls the DAC level for black */
++# define TV_BLACK_LEVEL_MASK          0x01ff0000
++# define TV_BLACK_LEVEL_SHIFT         16
++/** Controls the DAC level for blanking */
++# define TV_BLANK_LEVEL_MASK          0x000001ff
++# define TV_BLANK_LEVEL_SHIFT         0
++
++#define TV_H_CTL_1            0x68030
++/** Number of pixels in the hsync. */
++# define TV_HSYNC_END_MASK            0x1fff0000
++# define TV_HSYNC_END_SHIFT           16
++/** Total number of pixels minus one in the line (display and blanking). */
++# define TV_HTOTAL_MASK                       0x00001fff
++# define TV_HTOTAL_SHIFT              0
++
++#define TV_H_CTL_2            0x68034
++/** Enables the colorburst (needed for non-component color) */
++# define TV_BURST_ENA                 (1 << 31)
++/** Offset of the colorburst from the start of hsync, in pixels minus one. */
++# define TV_HBURST_START_SHIFT                16
++# define TV_HBURST_START_MASK         0x1fff0000
++/** Length of the colorburst */
++# define TV_HBURST_LEN_SHIFT          0
++# define TV_HBURST_LEN_MASK           0x0001fff
++
++#define TV_H_CTL_3            0x68038
++/** End of hblank, measured in pixels minus one from start of hsync */
++# define TV_HBLANK_END_SHIFT          16
++# define TV_HBLANK_END_MASK           0x1fff0000
++/** Start of hblank, measured in pixels minus one from start of hsync */
++# define TV_HBLANK_START_SHIFT                0
++# define TV_HBLANK_START_MASK         0x0001fff
++
++#define TV_V_CTL_1            0x6803c
++/** XXX */
++# define TV_NBR_END_SHIFT             16
++# define TV_NBR_END_MASK              0x07ff0000
++/** XXX */
++# define TV_VI_END_F1_SHIFT           8
++# define TV_VI_END_F1_MASK            0x00003f00
++/** XXX */
++# define TV_VI_END_F2_SHIFT           0
++# define TV_VI_END_F2_MASK            0x0000003f
++
++#define TV_V_CTL_2            0x68040
++/** Length of vsync, in half lines */
++# define TV_VSYNC_LEN_MASK            0x07ff0000
++# define TV_VSYNC_LEN_SHIFT           16
++/** Offset of the start of vsync in field 1, measured in one less than the
++ * number of half lines.
++ */
++# define TV_VSYNC_START_F1_MASK               0x00007f00
++# define TV_VSYNC_START_F1_SHIFT      8
++/**
++ * Offset of the start of vsync in field 2, measured in one less than the
++ * number of half lines.
++ */
++# define TV_VSYNC_START_F2_MASK               0x0000007f
++# define TV_VSYNC_START_F2_SHIFT      0
++
++#define TV_V_CTL_3            0x68044
++/** Enables generation of the equalization signal */
++# define TV_EQUAL_ENA                 (1 << 31)
++/** Length of vsync, in half lines */
++# define TV_VEQ_LEN_MASK              0x007f0000
++# define TV_VEQ_LEN_SHIFT             16
++/** Offset of the start of equalization in field 1, measured in one less than
++ * the number of half lines.
++ */
++# define TV_VEQ_START_F1_MASK         0x0007f00
++# define TV_VEQ_START_F1_SHIFT                8
++/**
++ * Offset of the start of equalization in field 2, measured in one less than
++ * the number of half lines.
++ */
++# define TV_VEQ_START_F2_MASK         0x000007f
++# define TV_VEQ_START_F2_SHIFT                0
++
++#define TV_V_CTL_4            0x68048
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F1_MASK      0x003f0000
++# define TV_VBURST_START_F1_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F1_MASK                0x000000ff
++# define TV_VBURST_END_F1_SHIFT               0
++
++#define TV_V_CTL_5            0x6804c
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F2_MASK      0x003f0000
++# define TV_VBURST_START_F2_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F2_MASK                0x000000ff
++# define TV_VBURST_END_F2_SHIFT               0
++
++#define TV_V_CTL_6            0x68050
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F3_MASK      0x003f0000
++# define TV_VBURST_START_F3_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F3_MASK                0x000000ff
++# define TV_VBURST_END_F3_SHIFT               0
++
++#define TV_V_CTL_7            0x68054
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F4_MASK      0x003f0000
++# define TV_VBURST_START_F4_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F4_MASK                0x000000ff
++# define TV_VBURST_END_F4_SHIFT               0
++
++#define TV_SC_CTL_1           0x68060
++/** Turns on the first subcarrier phase generation DDA */
++# define TV_SC_DDA1_EN                        (1 << 31)
++/** Turns on the second subcarrier phase generation DDA */
++# define TV_SC_DDA2_EN                        (1 << 30)
++/** Turns on the third subcarrier phase generation DDA */
++# define TV_SC_DDA3_EN                        (1 << 29)
++/** Sets the subcarrier DDA to reset frequency every other field */
++# define TV_SC_RESET_EVERY_2          (0 << 24)
++/** Sets the subcarrier DDA to reset frequency every fourth field */
++# define TV_SC_RESET_EVERY_4          (1 << 24)
++/** Sets the subcarrier DDA to reset frequency every eighth field */
++# define TV_SC_RESET_EVERY_8          (2 << 24)
++/** Sets the subcarrier DDA to never reset the frequency */
++# define TV_SC_RESET_NEVER            (3 << 24)
++/** Sets the peak amplitude of the colorburst.*/
++# define TV_BURST_LEVEL_MASK          0x00ff0000
++# define TV_BURST_LEVEL_SHIFT         16
++/** Sets the increment of the first subcarrier phase generation DDA */
++# define TV_SCDDA1_INC_MASK           0x00000fff
++# define TV_SCDDA1_INC_SHIFT          0
++
++#define TV_SC_CTL_2           0x68064
++/** Sets the rollover for the second subcarrier phase generation DDA */
++# define TV_SCDDA2_SIZE_MASK          0x7fff0000
++# define TV_SCDDA2_SIZE_SHIFT         16
++/** Sets the increment of the second subcarrier phase generation DDA */
++# define TV_SCDDA2_INC_MASK           0x00007fff
++# define TV_SCDDA2_INC_SHIFT          0
++
++#define TV_SC_CTL_3           0x68068
++/** Sets the rollover for the third subcarrier phase generation DDA */
++# define TV_SCDDA3_SIZE_MASK          0x7fff0000
++# define TV_SCDDA3_SIZE_SHIFT         16
++/** Sets the increment of the third subcarrier phase generation DDA */
++# define TV_SCDDA3_INC_MASK           0x00007fff
++# define TV_SCDDA3_INC_SHIFT          0
++
++#define TV_WIN_POS            0x68070
++/** X coordinate of the display from the start of horizontal active */
++# define TV_XPOS_MASK                 0x1fff0000
++# define TV_XPOS_SHIFT                        16
++/** Y coordinate of the display from the start of vertical active (NBR) */
++# define TV_YPOS_MASK                 0x00000fff
++# define TV_YPOS_SHIFT                        0
++
++#define TV_WIN_SIZE           0x68074
++/** Horizontal size of the display window, measured in pixels*/
++# define TV_XSIZE_MASK                        0x1fff0000
++# define TV_XSIZE_SHIFT                       16
++/**
++ * Vertical size of the display window, measured in pixels.
++ *
++ * Must be even for interlaced modes.
++ */
++# define TV_YSIZE_MASK                        0x00000fff
++# define TV_YSIZE_SHIFT                       0
++
++#define TV_FILTER_CTL_1               0x68080
++/**
++ * Enables automatic scaling calculation.
++ *
++ * If set, the rest of the registers are ignored, and the calculated values can
++ * be read back from the register.
++ */
++# define TV_AUTO_SCALE                        (1 << 31)
++/**
++ * Disables the vertical filter.
++ *
++ * This is required on modes more than 1024 pixels wide */
++# define TV_V_FILTER_BYPASS           (1 << 29)
++/** Enables adaptive vertical filtering */
++# define TV_VADAPT                    (1 << 28)
++# define TV_VADAPT_MODE_MASK          (3 << 26)
++/** Selects the least adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_LEAST         (0 << 26)
++/** Selects the moderately adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MODERATE      (1 << 26)
++/** Selects the most adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MOST          (3 << 26)
++/**
++ * Sets the horizontal scaling factor.
++ *
++ * This should be the fractional part of the horizontal scaling factor divided
++ * by the oversampling rate.  TV_HSCALE should be less than 1, and set to:
++ *
++ * (src width - 1) / ((oversample * dest width) - 1)
++ */
++# define TV_HSCALE_FRAC_MASK          0x00003fff
++# define TV_HSCALE_FRAC_SHIFT         0
++
++#define TV_FILTER_CTL_2               0x68084
++/**
++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
++ */
++# define TV_VSCALE_INT_MASK           0x00038000
++# define TV_VSCALE_INT_SHIFT          15
++/**
++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * \sa TV_VSCALE_INT_MASK
++ */
++# define TV_VSCALE_FRAC_MASK          0x00007fff
++# define TV_VSCALE_FRAC_SHIFT         0
++
++#define TV_FILTER_CTL_3               0x68088
++/**
++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
++ *
++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
++ */
++# define TV_VSCALE_IP_INT_MASK                0x00038000
++# define TV_VSCALE_IP_INT_SHIFT               15
++/**
++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
++ *
++ * \sa TV_VSCALE_IP_INT_MASK
++ */
++# define TV_VSCALE_IP_FRAC_MASK               0x00007fff
++# define TV_VSCALE_IP_FRAC_SHIFT              0
++
++#define TV_CC_CONTROL         0x68090
++# define TV_CC_ENABLE                 (1 << 31)
++/**
++ * Specifies which field to send the CC data in.
++ *
++ * CC data is usually sent in field 0.
++ */
++# define TV_CC_FID_MASK                       (1 << 27)
++# define TV_CC_FID_SHIFT              27
++/** Sets the horizontal position of the CC data.  Usually 135. */
++# define TV_CC_HOFF_MASK              0x03ff0000
++# define TV_CC_HOFF_SHIFT             16
++/** Sets the vertical position of the CC data.  Usually 21 */
++# define TV_CC_LINE_MASK              0x0000003f
++# define TV_CC_LINE_SHIFT             0
++
++#define TV_CC_DATA            0x68094
++# define TV_CC_RDY                    (1 << 31)
++/** Second word of CC data to be transmitted. */
++# define TV_CC_DATA_2_MASK            0x007f0000
++# define TV_CC_DATA_2_SHIFT           16
++/** First word of CC data to be transmitted. */
++# define TV_CC_DATA_1_MASK            0x0000007f
++# define TV_CC_DATA_1_SHIFT           0
++
++#define TV_H_LUMA_0           0x68100
++#define TV_H_LUMA_59          0x681ec
++#define TV_H_CHROMA_0         0x68200
++#define TV_H_CHROMA_59                0x682ec
++#define TV_V_LUMA_0           0x68300
++#define TV_V_LUMA_42          0x683a8
++#define TV_V_CHROMA_0         0x68400
++#define TV_V_CHROMA_42                0x684a8
++
++/* Display & cursor control */
++
++/* Pipe A */
++#define PIPEADSL              0x70000
++#define PIPEACONF              0x70008
++#define   PIPEACONF_ENABLE    (1<<31)
++#define   PIPEACONF_DISABLE   0
++#define   PIPEACONF_DOUBLE_WIDE       (1<<30)
++#define   I965_PIPECONF_ACTIVE        (1<<30)
++#define   PIPEACONF_SINGLE_WIDE       0
++#define   PIPEACONF_PIPE_UNLOCKED 0
++#define   PIPEACONF_PIPE_LOCKED       (1<<25)
++#define   PIPEACONF_PALETTE   0
++#define   PIPEACONF_GAMMA             (1<<24)
++#define   PIPECONF_FORCE_BORDER       (1<<25)
++#define   PIPECONF_PROGRESSIVE        (0 << 21)
++#define   PIPECONF_INTERLACE_W_FIELD_INDICATION       (6 << 21)
++#define   PIPECONF_INTERLACE_FIELD_0_ONLY             (7 << 21)
++#define PIPEASTAT             0x70024
++#define   PIPE_FIFO_UNDERRUN_STATUS           (1UL<<31)
++#define   PIPE_CRC_ERROR_ENABLE                       (1UL<<29)
++#define   PIPE_CRC_DONE_ENABLE                        (1UL<<28)
++#define   PIPE_GMBUS_EVENT_ENABLE             (1UL<<27)
++#define   PIPE_HOTPLUG_INTERRUPT_ENABLE               (1UL<<26)
++#define   PIPE_VSYNC_INTERRUPT_ENABLE         (1UL<<25)
++#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE    (1UL<<24)
++#define   PIPE_DPST_EVENT_ENABLE              (1UL<<23)
++#define   PIPE_LEGACY_BLC_EVENT_ENABLE                (1UL<<22)
++#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE     (1UL<<21)
++#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE    (1UL<<20)
++#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE    (1UL<<18) /* pre-965 */
++#define   PIPE_START_VBLANK_INTERRUPT_ENABLE  (1UL<<18) /* 965 or later */
++#define   PIPE_VBLANK_INTERRUPT_ENABLE                (1UL<<17)
++#define   PIPE_OVERLAY_UPDATED_ENABLE         (1UL<<16)
++#define   PIPE_CRC_ERROR_INTERRUPT_STATUS     (1UL<<13)
++#define   PIPE_CRC_DONE_INTERRUPT_STATUS      (1UL<<12)
++#define   PIPE_GMBUS_INTERRUPT_STATUS         (1UL<<11)
++#define   PIPE_HOTPLUG_INTERRUPT_STATUS               (1UL<<10)
++#define   PIPE_VSYNC_INTERRUPT_STATUS         (1UL<<9)
++#define   PIPE_DISPLAY_LINE_COMPARE_STATUS    (1UL<<8)
++#define   PIPE_DPST_EVENT_STATUS              (1UL<<7)
++#define   PIPE_LEGACY_BLC_EVENT_STATUS                (1UL<<6)
++#define   PIPE_ODD_FIELD_INTERRUPT_STATUS     (1UL<<5)
++#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS    (1UL<<4)
++#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS    (1UL<<2) /* pre-965 */
++#define   PIPE_START_VBLANK_INTERRUPT_STATUS  (1UL<<2) /* 965 or later */
++#define   PIPE_VBLANK_INTERRUPT_STATUS                (1UL<<1)
++#define   PIPE_OVERLAY_UPDATED_STATUS         (1UL<<0)
++
++#define DSPARB                        0x70030
++#define   DSPARB_CSTART_MASK  (0x7f << 7)
++#define   DSPARB_CSTART_SHIFT 7
++#define   DSPARB_BSTART_MASK  (0x7f)           
++#define   DSPARB_BSTART_SHIFT 0
++/*
++ * The two pipe frame counter registers are not synchronized, so
++ * reading a stable value is somewhat tricky. The following code 
++ * should work:
++ *
++ *  do {
++ *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *             PIPE_FRAME_HIGH_SHIFT;
++ *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
++ *             PIPE_FRAME_LOW_SHIFT);
++ *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *             PIPE_FRAME_HIGH_SHIFT);
++ *  } while (high1 != high2);
++ *  frame = (high1 << 8) | low1;
++ */
++#define PIPEAFRAMEHIGH          0x70040
++#define   PIPE_FRAME_HIGH_MASK    0x0000ffff
++#define   PIPE_FRAME_HIGH_SHIFT   0
++#define PIPEAFRAMEPIXEL         0x70044
++#define   PIPE_FRAME_LOW_MASK     0xff000000
++#define   PIPE_FRAME_LOW_SHIFT    24
++#define   PIPE_PIXEL_MASK         0x00ffffff
++#define   PIPE_PIXEL_SHIFT        0
++
++/* Cursor A & B regs */
++#define CURACNTR              0x70080
++#define   CURSOR_MODE_DISABLE   0x00
++#define   CURSOR_MODE_64_32B_AX 0x07
++#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
++#define CURABASE              0x70084
++#define CURAPOS                       0x70088
++#define   CURSOR_POS_MASK       0x007FF
++#define   CURSOR_POS_SIGN       0x8000
++#define   CURSOR_X_SHIFT        0
++#define   CURSOR_Y_SHIFT        16
++#define CURBCNTR              0x700c0
++#define CURBBASE              0x700c4
++#define CURBPOS                       0x700c8
++
++/* Display A control */
++#define DSPACNTR                0x70180
++#define   DISPLAY_PLANE_ENABLE                        (1<<31)
++#define   DISPLAY_PLANE_DISABLE                       0
++#define   DISPPLANE_GAMMA_ENABLE              (1<<30)
++#define   DISPPLANE_GAMMA_DISABLE             0
++#define   DISPPLANE_PIXFORMAT_MASK            (0xf<<26)
++#define   DISPPLANE_8BPP                      (0x2<<26)
++#define   DISPPLANE_15_16BPP                  (0x4<<26)
++#define   DISPPLANE_16BPP                     (0x5<<26)
++#define   DISPPLANE_32BPP_NO_ALPHA            (0x6<<26)
++#define   DISPPLANE_32BPP                     (0x7<<26)
++#define   DISPPLANE_STEREO_ENABLE             (1<<25)
++#define   DISPPLANE_STEREO_DISABLE            0
++#define   DISPPLANE_SEL_PIPE_MASK             (1<<24)
++#define   DISPPLANE_SEL_PIPE_A                        0
++#define   DISPPLANE_SEL_PIPE_B                        (1<<24)
++#define   DISPPLANE_SRC_KEY_ENABLE            (1<<22)
++#define   DISPPLANE_SRC_KEY_DISABLE           0
++#define   DISPPLANE_LINE_DOUBLE                       (1<<20)
++#define   DISPPLANE_NO_LINE_DOUBLE            0
++#define   DISPPLANE_STEREO_POLARITY_FIRST     0
++#define   DISPPLANE_STEREO_POLARITY_SECOND    (1<<18)
++#define DSPAADDR              0x70184
++#define DSPASTRIDE            0x70188
++#define DSPAPOS                       0x7018C /* reserved */
++#define DSPASIZE              0x70190
++#define DSPASURF              0x7019C /* 965+ only */
++#define DSPATILEOFF           0x701A4 /* 965+ only */
++
++/* VBIOS flags */
++#define SWF00                 0x71410
++#define SWF01                 0x71414
++#define SWF02                 0x71418
++#define SWF03                 0x7141c
++#define SWF04                 0x71420
++#define SWF05                 0x71424
++#define SWF06                 0x71428
++#define SWF10                 0x70410
++#define SWF11                 0x70414
++#define SWF14                 0x71420
++#define SWF30                 0x72414
++#define SWF31                 0x72418
++#define SWF32                 0x7241c
++
++/* Pipe B */
++#define PIPEBDSL              0x71000
++#define PIPEBCONF             0x71008
++#define PIPEBSTAT             0x71024
++#define PIPEBFRAMEHIGH                0x71040
++#define PIPEBFRAMEPIXEL               0x71044
++
++/* Display B control */
++#define DSPBCNTR              0x71180
++#define   DISPPLANE_ALPHA_TRANS_ENABLE                (1<<15)
++#define   DISPPLANE_ALPHA_TRANS_DISABLE               0
++#define   DISPPLANE_SPRITE_ABOVE_DISPLAY      0
++#define   DISPPLANE_SPRITE_ABOVE_OVERLAY      (1)
++#define DSPBADDR              0x71184
++#define DSPBSTRIDE            0x71188
++#define DSPBPOS                       0x7118C
++#define DSPBSIZE              0x71190
++#define DSPBSURF              0x7119C
++#define DSPBTILEOFF           0x711A4
++
++/* VBIOS regs */
++#define VGACNTRL              0x71400
++# define VGA_DISP_DISABLE                     (1 << 31)
++# define VGA_2X_MODE                          (1 << 30)
++# define VGA_PIPE_B_SELECT                    (1 << 29)
++
++/* Chipset type macros */
++
++#define IS_I830(dev) ((dev)->pci_device == 0x3577)
++#define IS_845G(dev) ((dev)->pci_device == 0x2562)
++#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
++#define IS_I855(dev) ((dev)->pci_device == 0x3582)
++#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
++
++#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
++#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
++#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
++#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
++                      (dev)->pci_device == 0x27AE)
++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
++                     (dev)->pci_device == 0x2982 || \
++                     (dev)->pci_device == 0x2992 || \
++                     (dev)->pci_device == 0x29A2 || \
++                     (dev)->pci_device == 0x2A02 || \
++                     (dev)->pci_device == 0x2A12 || \
++                     (dev)->pci_device == 0x2A42 || \
++                     (dev)->pci_device == 0x2E02 || \
++                     (dev)->pci_device == 0x2E12 || \
++                     (dev)->pci_device == 0x2E22)
++
++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
++
++#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
++
++#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
++                   (dev)->pci_device == 0x2E12 || \
++                   (dev)->pci_device == 0x2E22)
++
++#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||        \
++                      (dev)->pci_device == 0x29B2 ||  \
++                      (dev)->pci_device == 0x29D2)
++
++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
++                    IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
++
++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
++                      IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
++
++#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_execbuf.c git-nokia/drivers/gpu/drm-tungsten/i915_execbuf.c
+--- git/drivers/gpu/drm-tungsten/i915_execbuf.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_execbuf.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,917 @@
++/*
++ * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ *     Dave Airlie
++ *     Keith Packard
++ *     ... ?
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if DRM_DEBUG_CODE
++#define DRM_DEBUG_RELOCATION  (drm_debug != 0)
++#else
++#define DRM_DEBUG_RELOCATION  0
++#endif
++
++enum i915_buf_idle {
++      I915_RELOC_UNCHECKED,
++      I915_RELOC_IDLE,
++      I915_RELOC_BUSY
++};
++
++struct i915_relocatee_info {
++      struct drm_buffer_object *buf;
++      unsigned long offset;
++      uint32_t *data_page;
++      unsigned page_offset;
++      struct drm_bo_kmap_obj kmap;
++      int is_iomem;
++      int dst;
++      int idle;
++      int performed_ring_relocs;
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++      unsigned long pfn;
++      pgprot_t pg_prot;
++#endif
++};
++
++struct drm_i915_validate_buffer {
++      struct drm_buffer_object *buffer;
++      int presumed_offset_correct;
++      void __user *data;
++      int ret;
++      enum i915_buf_idle idle;
++};
++
++/*
++ * I'd like to use MI_STORE_DATA_IMM here, but I can't make
++ * it work. Seems like GART writes are broken with that
++ * instruction. Also I'm not sure that MI_FLUSH will
++ * act as a memory barrier for that instruction. It will
++ * for this single dword 2D blit.
++ */
++
++static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
++                               uint32_t value)
++{
++      struct drm_i915_private *dev_priv =
++          (struct drm_i915_private *)dev->dev_private;
++
++      RING_LOCALS;
++      i915_kernel_lost_context(dev);
++      BEGIN_LP_RING(6);
++      OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
++      OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
++      OUT_RING((0x1 << 16) | (0x4));
++      OUT_RING(offset);
++      OUT_RING(value);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++}
++
++static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
++                                          *buffers, unsigned num_buffers)
++{
++      while (num_buffers--)
++              drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
++}
++
++int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
++                   struct drm_i915_validate_buffer *buffers,
++                   struct i915_relocatee_info *relocatee, uint32_t * reloc)
++{
++      unsigned index;
++      unsigned long new_cmd_offset;
++      u32 val;
++      int ret, i;
++      int buf_index = -1;
++
++      /*
++       * FIXME: O(relocs * buffers) complexity.
++       */
++
++      for (i = 0; i <= num_buffers; i++)
++              if (buffers[i].buffer)
++                      if (reloc[2] == buffers[i].buffer->base.hash.key)
++                              buf_index = i;
++
++      if (buf_index == -1) {
++              DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
++              return -EINVAL;
++      }
++
++      /*
++       * Short-circuit relocations that were correctly
++       * guessed by the client
++       */
++      if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
++              return 0;
++
++      new_cmd_offset = reloc[0];
++      if (!relocatee->data_page ||
++          !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
++              struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
++
++              drm_bo_kunmap(&relocatee->kmap);
++              relocatee->data_page = NULL;
++              relocatee->offset = new_cmd_offset;
++
++              if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
++                ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
++                      if (ret)
++                              return ret;
++                      relocatee->idle = I915_RELOC_IDLE;
++              }
++
++              if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
++                           (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
++                      drm_bo_evict_cached(relocatee->buf);
++
++              ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
++                                1, &relocatee->kmap);
++              if (ret) {
++                      DRM_ERROR
++                          ("Could not map command buffer to apply relocs\n %08lx",
++                           new_cmd_offset);
++                      return ret;
++              }
++              relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
++                                                     &relocatee->is_iomem);
++              relocatee->page_offset = (relocatee->offset & PAGE_MASK);
++      }
++
++      val = buffers[buf_index].buffer->offset;
++      index = (reloc[0] - relocatee->page_offset) >> 2;
++
++      /* add in validate */
++      val = val + reloc[1];
++
++      if (DRM_DEBUG_RELOCATION) {
++              if (buffers[buf_index].presumed_offset_correct &&
++                  relocatee->data_page[index] != val) {
++                      DRM_DEBUG
++                          ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
++                           reloc[0], reloc[1], buf_index,
++                           relocatee->data_page[index], val);
++              }
++      }
++
++      if (relocatee->is_iomem)
++              iowrite32(val, relocatee->data_page + index);
++      else
++              relocatee->data_page[index] = val;
++      return 0;
++}
++
++int i915_process_relocs(struct drm_file *file_priv,
++                      uint32_t buf_handle,
++                      uint32_t __user ** reloc_user_ptr,
++                      struct i915_relocatee_info *relocatee,
++                      struct drm_i915_validate_buffer *buffers,
++                      uint32_t num_buffers)
++{
++      int ret, reloc_stride;
++      uint32_t cur_offset;
++      uint32_t reloc_count;
++      uint32_t reloc_type;
++      uint32_t reloc_buf_size;
++      uint32_t *reloc_buf = NULL;
++      int i;
++
++      /* do a copy from user from the user ptr */
++      ret = get_user(reloc_count, *reloc_user_ptr);
++      if (ret) {
++              DRM_ERROR("Could not map relocation buffer.\n");
++              goto out;
++      }
++
++      ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
++      if (ret) {
++              DRM_ERROR("Could not map relocation buffer.\n");
++              goto out;
++      }
++
++      if (reloc_type != 0) {
++              DRM_ERROR("Unsupported relocation type requested\n");
++              ret = -EINVAL;
++              goto out;
++      }
++
++      reloc_buf_size =
++          (I915_RELOC_HEADER +
++           (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
++      reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
++      if (!reloc_buf) {
++              DRM_ERROR("Out of memory for reloc buffer\n");
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
++              ret = -EFAULT;
++              goto out;
++      }
++
++      /* get next relocate buffer handle */
++      *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
++
++      reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);   /* may be different for other types of relocs */
++
++      DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
++                *reloc_user_ptr);
++
++      for (i = 0; i < reloc_count; i++) {
++              cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
++
++              ret = i915_apply_reloc(file_priv, num_buffers, buffers,
++                                     relocatee, reloc_buf + cur_offset);
++              if (ret)
++                      goto out;
++      }
++
++      out:
++      if (reloc_buf)
++              kfree(reloc_buf);
++
++      if (relocatee->data_page) {
++              drm_bo_kunmap(&relocatee->kmap);
++              relocatee->data_page = NULL;
++      }
++
++      return ret;
++}
++
++static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
++                         uint32_t __user * reloc_user_ptr,
++                         struct drm_i915_validate_buffer *buffers,
++                         uint32_t buf_count)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct i915_relocatee_info relocatee;
++      int ret = 0;
++      int b;
++
++      /*
++       * Short circuit relocations when all previous
++       * buffers offsets were correctly guessed by
++       * the client
++       */
++      if (!DRM_DEBUG_RELOCATION) {
++              for (b = 0; b < buf_count; b++)
++                      if (!buffers[b].presumed_offset_correct)
++                              break;
++
++              if (b == buf_count)
++                      return 0;
++      }
++
++      memset(&relocatee, 0, sizeof(relocatee));
++      relocatee.idle = I915_RELOC_UNCHECKED;
++
++      mutex_lock(&dev->struct_mutex);
++      relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++      if (!relocatee.buf) {
++              DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
++              ret = -EINVAL;
++              goto out_err;
++      }
++
++      mutex_lock(&relocatee.buf->mutex);
++      while (reloc_user_ptr) {
++              ret =
++                  i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
++                                      &relocatee, buffers, buf_count);
++              if (ret) {
++                      DRM_ERROR("process relocs failed\n");
++                      goto out_err1;
++              }
++      }
++
++      out_err1:
++      mutex_unlock(&relocatee.buf->mutex);
++      drm_bo_usage_deref_unlocked(&relocatee.buf);
++      out_err:
++      return ret;
++}
++
++static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
++{
++      if (relocatee->data_page) {
++#ifndef DRM_KMAP_ATOMIC_PROT_PFN
++              drm_bo_kunmap(&relocatee->kmap);
++#else
++              kunmap_atomic(relocatee->data_page, KM_USER0);
++#endif
++              relocatee->data_page = NULL;
++      }
++      relocatee->buf = NULL;
++      relocatee->dst = ~0;
++}
++
++static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
++                               struct drm_i915_validate_buffer *buffers,
++                               unsigned int dst, unsigned long dst_offset)
++{
++      int ret;
++
++      if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
++              i915_clear_relocatee(relocatee);
++              relocatee->dst = dst;
++              relocatee->buf = buffers[dst].buffer;
++              relocatee->idle = buffers[dst].idle;
++
++              /*
++               * Check for buffer idle. If the buffer is busy, revert to
++               * ring relocations.
++               */
++
++              if (relocatee->idle == I915_RELOC_UNCHECKED) {
++                      preempt_enable();
++                      mutex_lock(&relocatee->buf->mutex);
++
++                      ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
++                      if (ret == 0)
++                              relocatee->idle = I915_RELOC_IDLE;
++                      else {
++                              relocatee->idle = I915_RELOC_BUSY;
++                              relocatee->performed_ring_relocs = 1;
++                      }
++                      mutex_unlock(&relocatee->buf->mutex);
++                      preempt_disable();
++                      buffers[dst].idle = relocatee->idle;
++              }
++      }
++
++      if (relocatee->idle == I915_RELOC_BUSY)
++              return 0;
++
++      if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
++              DRM_ERROR("Relocation destination out of bounds.\n");
++              return -EINVAL;
++      }
++      if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
++                   NULL == relocatee->data_page)) {
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++              if (NULL != relocatee->data_page) {
++                      kunmap_atomic(relocatee->data_page, KM_USER0);
++                      relocatee->data_page = NULL;
++              }
++              ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
++                                    &relocatee->pfn, &relocatee->pg_prot);
++              if (ret) {
++                      DRM_ERROR("Can't map relocation destination.\n");
++                      return -EINVAL;
++              }
++              relocatee->data_page =
++                  kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
++                                       relocatee->pg_prot);
++#else
++              if (NULL != relocatee->data_page) {
++                      drm_bo_kunmap(&relocatee->kmap);
++                      relocatee->data_page = NULL;
++              }
++
++              ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
++                                1, &relocatee->kmap);
++              if (ret) {
++                      DRM_ERROR("Can't map relocation destination.\n");
++                      return ret;
++              }
++
++              relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
++                                                     &relocatee->is_iomem);
++#endif
++              relocatee->page_offset = dst_offset & PAGE_MASK;
++      }
++      return 0;
++}
++
++static int i915_apply_post_reloc(uint32_t reloc[],
++                               struct drm_i915_validate_buffer *buffers,
++                               uint32_t num_buffers,
++                               struct i915_relocatee_info *relocatee)
++{
++      uint32_t reloc_buffer = reloc[2];
++      uint32_t dst_buffer = reloc[3];
++      uint32_t val;
++      uint32_t index;
++      int ret;
++
++      if (likely(buffers[reloc_buffer].presumed_offset_correct))
++              return 0;
++      if (unlikely(reloc_buffer >= num_buffers)) {
++              DRM_ERROR("Invalid reloc buffer index.\n");
++              return -EINVAL;
++      }
++      if (unlikely(dst_buffer >= num_buffers)) {
++              DRM_ERROR("Invalid dest buffer index.\n");
++              return -EINVAL;
++      }
++
++      ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
++      if (unlikely(ret))
++              return ret;
++
++      val = buffers[reloc_buffer].buffer->offset;
++      index = (reloc[0] - relocatee->page_offset) >> 2;
++      val = val + reloc[1];
++
++      if (relocatee->idle == I915_RELOC_BUSY) {
++              i915_emit_ring_reloc(relocatee->buf->dev,
++                                   relocatee->buf->offset + reloc[0], val);
++              return 0;
++      }
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++      relocatee->data_page[index] = val;
++#else
++      if (likely(relocatee->is_iomem))
++              iowrite32(val, relocatee->data_page + index);
++      else
++              relocatee->data_page[index] = val;
++#endif
++
++      return 0;
++}
++
++static int i915_post_relocs(struct drm_file *file_priv,
++                          uint32_t __user * new_reloc_ptr,
++                          struct drm_i915_validate_buffer *buffers,
++                          unsigned int num_buffers)
++{
++      uint32_t *reloc;
++      uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
++      uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
++      struct i915_relocatee_info relocatee;
++      uint32_t reloc_type;
++      uint32_t num_relocs;
++      uint32_t count;
++      int ret = 0;
++      int i;
++      int short_circuit = 1;
++      uint32_t __user *reloc_ptr;
++      uint64_t new_reloc_data;
++      uint32_t reloc_buf_size;
++      uint32_t *reloc_buf;
++
++      for (i = 0; i < num_buffers; ++i) {
++              if (unlikely(!buffers[i].presumed_offset_correct)) {
++                      short_circuit = 0;
++                      break;
++              }
++      }
++
++      if (likely(short_circuit))
++              return 0;
++
++      memset(&relocatee, 0, sizeof(relocatee));
++
++      while (new_reloc_ptr) {
++              reloc_ptr = new_reloc_ptr;
++
++              ret = get_user(num_relocs, reloc_ptr);
++              if (unlikely(ret))
++                      goto out;
++              if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
++                                      header_size +
++                                      num_relocs * reloc_stride)))
++                      return -EFAULT;
++
++              ret = __get_user(reloc_type, reloc_ptr + 1);
++              if (unlikely(ret))
++                      goto out;
++
++              if (unlikely(reloc_type != 1)) {
++                      DRM_ERROR("Unsupported relocation type requested.\n");
++                      ret = -EINVAL;
++                      goto out;
++              }
++
++              ret = __get_user(new_reloc_data, reloc_ptr + 2);
++              new_reloc_ptr = (uint32_t __user *) (unsigned long)
++                  new_reloc_data;
++
++              reloc_ptr += I915_RELOC_HEADER;
++
++              if (num_relocs == 0)
++                      goto out;
++
++              reloc_buf_size =
++                  (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
++              reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
++              if (!reloc_buf) {
++                      DRM_ERROR("Out of memory for reloc buffer\n");
++                      ret = -ENOMEM;
++                      goto out;
++              }
++
++              if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
++                      ret = -EFAULT;
++                      goto out;
++              }
++              reloc = reloc_buf;
++              preempt_disable();
++              for (count = 0; count < num_relocs; ++count) {
++                      ret = i915_apply_post_reloc(reloc, buffers,
++                                                  num_buffers, &relocatee);
++                      if (unlikely(ret)) {
++                              preempt_enable();
++                              goto out;
++                      }
++                      reloc += I915_RELOC0_STRIDE;
++              }
++              preempt_enable();
++
++              if (reloc_buf) {
++                      kfree(reloc_buf);
++                      reloc_buf = NULL;
++              }
++              i915_clear_relocatee(&relocatee);
++      }
++
++      out:
++      /*
++       * Flush ring relocs so the command parser will pick them up.
++       */
++
++      if (relocatee.performed_ring_relocs)
++              (void)i915_emit_mi_flush(file_priv->minor->dev, 0);
++
++      i915_clear_relocatee(&relocatee);
++      if (reloc_buf) {
++              kfree(reloc_buf);
++              reloc_buf = NULL;
++      }
++
++      return ret;
++}
++
++static int i915_check_presumed(struct drm_i915_op_arg *arg,
++                             struct drm_buffer_object *bo,
++                             uint32_t __user * data, int *presumed_ok)
++{
++      struct drm_bo_op_req *req = &arg->d.req;
++      uint32_t hint_offset;
++      uint32_t hint = req->bo_req.hint;
++
++      *presumed_ok = 0;
++
++      if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
++              return 0;
++      if (bo->offset == req->bo_req.presumed_offset) {
++              *presumed_ok = 1;
++              return 0;
++      }
++
++      /*
++       * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
++       * the user-space IOCTL argument list, since the buffer has moved,
++       * we're about to apply relocations and we might subsequently
++       * hit an -EAGAIN. In that case the argument list will be reused by
++       * user-space, but the presumed offset is no longer valid.
++       *
++       * Needless to say, this is a bit ugly.
++       */
++
++      hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
++      hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
++      return __put_user(hint, data + hint_offset);
++}
++
++/*
++ * Validate, add fence and relocate a block of bos from a userspace list
++ */
++int i915_validate_buffer_list(struct drm_file *file_priv,
++                            unsigned int fence_class, uint64_t data,
++                            struct drm_i915_validate_buffer *buffers,
++                            uint32_t * num_buffers,
++                            uint32_t __user ** post_relocs)
++{
++      struct drm_i915_op_arg arg;
++      struct drm_bo_op_req *req = &arg.d.req;
++      int ret = 0;
++      unsigned buf_count = 0;
++      uint32_t buf_handle;
++      uint32_t __user *reloc_user_ptr;
++      struct drm_i915_validate_buffer *item = buffers;
++      *post_relocs = NULL;
++
++      do {
++              if (buf_count >= *num_buffers) {
++                      DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
++                      ret = -EINVAL;
++                      goto out_err;
++              }
++              item = buffers + buf_count;
++              item->buffer = NULL;
++              item->presumed_offset_correct = 0;
++              item->idle = I915_RELOC_UNCHECKED;
++
++              if (copy_from_user
++                  (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
++                      ret = -EFAULT;
++                      goto out_err;
++              }
++
++              ret = 0;
++              if (req->op != drm_bo_validate) {
++                      DRM_ERROR
++                          ("Buffer object operation wasn't \"validate\".\n");
++                      ret = -EINVAL;
++                      goto out_err;
++              }
++              item->ret = 0;
++              item->data = (void __user *)(unsigned long)data;
++
++              buf_handle = req->bo_req.handle;
++              reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
++
++              /*
++               * Switch mode to post-validation relocations?
++               */
++
++              if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
++                           (reloc_user_ptr != NULL))) {
++                      uint32_t reloc_type;
++
++                      ret = get_user(reloc_type, reloc_user_ptr + 1);
++                      if (ret)
++                              goto out_err;
++
++                      if (reloc_type == 1)
++                              *post_relocs = reloc_user_ptr;
++
++              }
++
++              if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
++                      ret =
++                          i915_exec_reloc(file_priv, buf_handle,
++                                          reloc_user_ptr, buffers, buf_count);
++                      if (ret)
++                              goto out_err;
++                      DRM_MEMORYBARRIER();
++              }
++
++              ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
++                                           req->bo_req.flags,
++                                           req->bo_req.mask, req->bo_req.hint,
++                                           req->bo_req.fence_class,
++                                           NULL, &item->buffer);
++              if (ret) {
++                      DRM_ERROR("error on handle validate %d\n", ret);
++                      goto out_err;
++              }
++
++              buf_count++;
++
++              ret = i915_check_presumed(&arg, item->buffer,
++                                        (uint32_t __user *)
++                                        (unsigned long)data,
++                                        &item->presumed_offset_correct);
++              if (ret)
++                      goto out_err;
++
++              data = arg.next;
++      } while (data != 0);
++      out_err:
++      *num_buffers = buf_count;
++      item->ret = (ret != -EAGAIN) ? ret : 0;
++      return ret;
++}
++
++/*
++ * Remove all buffers from the unfenced list.
++ * If the execbuffer operation was aborted, for example due to a signal,
++ * this also make sure that buffers retain their original state and
++ * fence pointers.
++ * Copy back buffer information to user-space unless we were interrupted
++ * by a signal. In which case the IOCTL must be rerun.
++ */
++
++static int i915_handle_copyback(struct drm_device *dev,
++                              struct drm_i915_validate_buffer *buffers,
++                              unsigned int num_buffers, int ret)
++{
++      int err = ret;
++      int i;
++      struct drm_i915_op_arg arg;
++      struct drm_buffer_object *bo;
++
++      if (ret)
++              drm_putback_buffer_objects(dev);
++
++      if (ret != -EAGAIN) {
++              for (i = 0; i < num_buffers; ++i) {
++                      arg.handled = 1;
++                      arg.d.rep.ret = buffers->ret;
++                      bo = buffers->buffer;
++                      mutex_lock(&bo->mutex);
++                      drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
++                      mutex_unlock(&bo->mutex);
++                      if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
++                              err = -EFAULT;
++                      buffers++;
++              }
++      }
++
++      return err;
++}
++
++/*
++ * Create a fence object, and if that fails, pretend that everything is
++ * OK and just idle the GPU.
++ */
++
++void i915_fence_or_sync(struct drm_file *file_priv,
++                      uint32_t fence_flags,
++                      struct drm_fence_arg *fence_arg,
++                      struct drm_fence_object **fence_p)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      int ret;
++      struct drm_fence_object *fence;
++
++      ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
++
++      if (ret) {
++
++              /*
++               * Fence creation failed.
++               * Fall back to synchronous operation and idle the engine.
++               */
++
++              (void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
++              (void)i915_quiescent(dev);
++
++              if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
++
++                      /*
++                       * Communicate to user-space that
++                       * fence creation has failed and that
++                       * the engine is idle.
++                       */
++
++                      fence_arg->handle = ~0;
++                      fence_arg->error = ret;
++              }
++              drm_putback_buffer_objects(dev);
++              if (fence_p)
++                      *fence_p = NULL;
++              return;
++      }
++
++      if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
++
++              ret = drm_fence_add_user_object(file_priv, fence,
++                                              fence_flags &
++                                              DRM_FENCE_FLAG_SHAREABLE);
++              if (!ret)
++                      drm_fence_fill_arg(fence, fence_arg);
++              else {
++                      /*
++                       * Fence user object creation failed.
++                       * We must idle the engine here as well, as user-
++                       * space expects a fence object to wait on. Since we
++                       * have a fence object we wait for it to signal
++                       * to indicate engine "sufficiently" idle.
++                       */
++
++                      (void)drm_fence_object_wait(fence, 0, 1, fence->type);
++                      drm_fence_usage_deref_unlocked(&fence);
++                      fence_arg->handle = ~0;
++                      fence_arg->error = ret;
++              }
++      }
++
++      if (fence_p)
++              *fence_p = fence;
++      else if (fence)
++              drm_fence_usage_deref_unlocked(&fence);
++}
++
++int i915_execbuffer(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
++          dev_priv->sarea_priv;
++      struct drm_i915_execbuffer *exec_buf = data;
++      struct drm_i915_batchbuffer *batch = &exec_buf->batch;
++      struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
++      int num_buffers;
++      int ret;
++      uint32_t __user *post_relocs;
++
++      if (!dev_priv->allow_batchbuffer) {
++              DRM_ERROR("Batchbuffer ioctl disabled\n");
++              return -EINVAL;
++      }
++
++      if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
++                                                      batch->num_cliprects *
++                                                      sizeof(struct
++                                                             drm_clip_rect)))
++              return -EFAULT;
++
++      if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
++              return -EINVAL;
++
++      ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (ret)
++              return ret;
++
++      /*
++       * The cmdbuf_mutex makes sure the validate-submit-fence
++       * operation is atomic.
++       */
++
++      ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++      if (ret) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              return -EAGAIN;
++      }
++
++      num_buffers = exec_buf->num_buffers;
++
++      if (!dev_priv->val_bufs) {
++              dev_priv->val_bufs =
++                  vmalloc(sizeof(struct drm_i915_validate_buffer) *
++                          dev_priv->max_validate_buffers);
++      }
++      if (!dev_priv->val_bufs) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              mutex_unlock(&dev_priv->cmdbuf_mutex);
++              return -ENOMEM;
++      }
++
++      /* validate buffer list + fixup relocations */
++      ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
++                                      dev_priv->val_bufs, &num_buffers,
++                                      &post_relocs);
++      if (ret)
++              goto out_err0;
++
++      if (post_relocs) {
++              ret = i915_post_relocs(file_priv, post_relocs,
++                                     dev_priv->val_bufs, num_buffers);
++              if (ret)
++                      goto out_err0;
++      }
++
++      /* make sure all previous memory operations have passed */
++      DRM_MEMORYBARRIER();
++
++      if (!post_relocs) {
++              drm_agp_chipset_flush(dev);
++              batch->start =
++                  dev_priv->val_bufs[num_buffers - 1].buffer->offset;
++      } else {
++              batch->start += dev_priv->val_bufs[0].buffer->offset;
++      }
++
++      DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
++                batch->start, batch->used, batch->num_cliprects);
++
++      ret = i915_dispatch_batchbuffer(dev, batch);
++      if (ret)
++              goto out_err0;
++      if (sarea_priv)
++              sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++      i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
++
++      out_err0:
++      ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
++      mutex_lock(&dev->struct_mutex);
++      i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
++      mutex_unlock(&dev->struct_mutex);
++      mutex_unlock(&dev_priv->cmdbuf_mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_fence.c git-nokia/drivers/gpu/drm-tungsten/i915_fence.c
+--- git/drivers/gpu/drm-tungsten/i915_fence.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_fence.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,273 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/*
++ * Initiate a sync flush if it's not already pending.
++ */
++
++static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
++                                       struct drm_fence_class_manager *fc)
++{
++      if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
++          !dev_priv->flush_pending) {
++              dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
++              dev_priv->flush_flags = fc->pending_flush;
++              dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
++              I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
++              dev_priv->flush_pending = 1;
++              fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
++      }
++}
++
++static inline void i915_report_rwflush(struct drm_device *dev,
++                                     struct drm_i915_private *dev_priv)
++{
++      if (unlikely(dev_priv->flush_pending)) {
++
++              uint32_t flush_flags;
++              uint32_t i_status;
++              uint32_t flush_sequence;
++
++              i_status = READ_HWSP(dev_priv, 0);
++              if ((i_status & (1 << 12)) !=
++                  (dev_priv->saved_flush_status & (1 << 12))) {
++                      flush_flags = dev_priv->flush_flags;
++                      flush_sequence = dev_priv->flush_sequence;
++                      dev_priv->flush_pending = 0;
++                      drm_fence_handler(dev, 0, flush_sequence,
++                                        flush_flags, 0);
++              }
++      }
++}
++
++static void i915_fence_flush(struct drm_device *dev,
++                           uint32_t fence_class)
++{
++      struct drm_i915_private *dev_priv = 
++              (struct drm_i915_private *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++      unsigned long irq_flags;
++
++      if (unlikely(!dev_priv))
++              return;
++
++      write_lock_irqsave(&fm->lock, irq_flags);
++      i915_initiate_rwflush(dev_priv, fc);
++      write_unlock_irqrestore(&fm->lock, irq_flags);
++}
++
++
++static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
++                          uint32_t waiting_types)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++      uint32_t sequence;
++
++      if (unlikely(!dev_priv))
++              return;
++
++      /*
++       * First, report any executed sync flush:
++       */
++
++      i915_report_rwflush(dev, dev_priv);
++
++      /*
++       * Report A new breadcrumb, and adjust IRQs.
++       */
++
++      if (waiting_types & DRM_FENCE_TYPE_EXE) {
++
++              sequence = READ_BREADCRUMB(dev_priv);
++              drm_fence_handler(dev, 0, sequence,
++                                DRM_FENCE_TYPE_EXE, 0);
++
++              if (dev_priv->fence_irq_on &&
++                  !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
++                      i915_user_irq_off(dev_priv);
++                      dev_priv->fence_irq_on = 0;
++              } else if (!dev_priv->fence_irq_on &&
++                         (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
++                      i915_user_irq_on(dev_priv);
++                      dev_priv->fence_irq_on = 1;
++              }
++      }
++
++      /*
++       * There may be new RW flushes pending. Start them.
++       */
++      
++      i915_initiate_rwflush(dev_priv, fc); 
++
++      /*
++       * And possibly, but unlikely, they finish immediately.
++       */
++
++      i915_report_rwflush(dev, dev_priv);
++
++}
++
++static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
++                           uint32_t flags, uint32_t *sequence,
++                           uint32_t *native_type)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      if (unlikely(!dev_priv))
++              return -EINVAL;
++
++      i915_emit_irq(dev);
++      *sequence = (uint32_t) dev_priv->counter;
++      *native_type = DRM_FENCE_TYPE_EXE;
++      if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
++              *native_type |= DRM_I915_FENCE_TYPE_RW;
++
++      return 0;
++}
++
++void i915_fence_handler(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++
++      write_lock(&fm->lock);
++      if (likely(dev_priv->fence_irq_on))
++              i915_fence_poll(dev, 0, fc->waiting_types);
++      write_unlock(&fm->lock);
++}
++
++/*
++ * We need a separate wait function since we need to poll for
++ * sync flushes.
++ */
++
++static int i915_fence_wait(struct drm_fence_object *fence,
++                         int lazy, int interruptible, uint32_t mask)
++{
++      struct drm_device *dev = fence->dev;
++      drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++      int ret;
++      unsigned long  _end = jiffies + 3 * DRM_HZ;
++
++      drm_fence_object_flush(fence, mask);
++      if (likely(interruptible))
++              ret = wait_event_interruptible_timeout
++                      (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
++                       3 * DRM_HZ);
++      else 
++              ret = wait_event_timeout
++                      (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
++                       3 * DRM_HZ);
++
++      if (unlikely(ret == -ERESTARTSYS))
++              return -EAGAIN;
++
++      if (unlikely(ret == 0))
++              return -EBUSY;
++
++      if (likely(mask == DRM_FENCE_TYPE_EXE || 
++                 drm_fence_object_signaled(fence, mask))) 
++              return 0;
++
++      /*
++       * Remove this code snippet when fixed. HWSTAM doesn't let
++       * flush info through...
++       */
++
++      if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
++              unsigned long irq_flags;
++
++              DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
++              msleep(100);
++              dev_priv->flush_pending = 0;
++              write_lock_irqsave(&fm->lock, irq_flags);
++              drm_fence_handler(dev, fence->fence_class, 
++                                fence->sequence, fence->type, 0);
++              write_unlock_irqrestore(&fm->lock, irq_flags);
++      }
++
++      /*
++       * Poll for sync flush completion.
++       */
++
++      return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
++}
++
++static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
++{
++      uint32_t flush_flags = fence->waiting_types & 
++              ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
++
++      if (likely(flush_flags == 0 || 
++                 ((flush_flags & ~fence->native_types) == 0) || 
++                 (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
++              return 0;
++      else {
++              struct drm_device *dev = fence->dev;
++              struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
++              struct drm_fence_driver *driver = dev->driver->fence_driver;
++              
++              if (unlikely(!dev_priv))
++                      return 0;
++
++              if (dev_priv->flush_pending) {
++                      uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & 
++                              driver->sequence_mask;
++
++                      if (diff < driver->wrap_diff)
++                              return 0;
++              }
++      }
++      return flush_flags;
++}
++
++struct drm_fence_driver i915_fence_driver = {
++      .num_classes = 1,
++      .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
++      .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
++      .sequence_mask = BREADCRUMB_MASK,
++      .has_irq = NULL,
++      .emit = i915_fence_emit_sequence,
++      .flush = i915_fence_flush,
++      .poll = i915_fence_poll,
++      .needed_flush = i915_fence_needed_flush,
++      .wait = i915_fence_wait,
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem.c git-nokia/drivers/gpu/drm-tungsten/i915_gem.c
+--- git/drivers/gpu/drm-tungsten/i915_gem.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2502 @@
++/*
++ * Copyright © 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include <linux/swap.h>
++
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++                          uint32_t read_domains,
++                          uint32_t write_domain);
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++                               uint64_t offset,
++                               uint64_t size,
++                               uint32_t read_domains,
++                               uint32_t write_domain);
++int
++i915_gem_set_domain(struct drm_gem_object *obj,
++                  struct drm_file *file_priv,
++                  uint32_t read_domains,
++                  uint32_t write_domain);
++static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
++static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
++
++int
++i915_gem_init_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_init *args = data;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (args->gtt_start >= args->gtt_end ||
++          (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
++          (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
++          args->gtt_end - args->gtt_start);
++
++      dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++
++/**
++ * Creates a new mm object and returns a handle to it.
++ */
++int
++i915_gem_create_ioctl(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      struct drm_i915_gem_create *args = data;
++      struct drm_gem_object *obj;
++      int handle, ret;
++
++      args->size = roundup(args->size, PAGE_SIZE);
++
++      /* Allocate the new object */
++      obj = drm_gem_object_alloc(dev, args->size);
++      if (obj == NULL)
++              return -ENOMEM;
++
++      ret = drm_gem_handle_create(file_priv, obj, &handle);
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_handle_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (ret)
++              return ret;
++
++      args->handle = handle;
++
++      return 0;
++}
++
++/**
++ * Reads data from the object referenced by handle.
++ *
++ * On error, the contents of *data are undefined.
++ */
++int
++i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pread *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      ssize_t read;
++      loff_t offset;
++      int ret;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++      obj_priv = obj->driver_private;
++
++      /* Bounds check source.
++       *
++       * XXX: This could use review for overflow issues...
++       */
++      if (args->offset > obj->size || args->size > obj->size ||
++          args->offset + args->size > obj->size) {
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
++                                             I915_GEM_DOMAIN_CPU, 0);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              return ret; /* was missing: fell through to vfs_read() */
++      }
++      offset = args->offset;
++
++      read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
++                      args->size, &offset);
++      if (read != args->size) {
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              if (read < 0)
++                      return read;
++              else
++                      return -EINVAL;
++      }
++
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++#include "drm_compat.h"
++
++static int
++i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++                  struct drm_i915_gem_pwrite *args,
++                  struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      ssize_t remain;
++      loff_t offset;
++      char __user *user_data;
++      char *vaddr;
++      int i, o, l;
++      int ret = 0;
++      unsigned long pfn;
++      unsigned long unwritten;
++
++      user_data = (char __user *) (uintptr_t) args->data_ptr;
++      remain = args->size;
++      if (!access_ok(VERIFY_READ, user_data, remain))
++              return -EFAULT;
++
++
++      mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_object_pin(obj, 0);
++      if (ret) {
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++      ret = i915_gem_set_domain(obj, file_priv,
++                                I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
++      if (ret)
++              goto fail;
++
++      obj_priv = obj->driver_private;
++      offset = obj_priv->gtt_offset + args->offset;
++      obj_priv->dirty = 1;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * i = page number
++               * o = offset within page
++               * l = bytes to copy
++               */
++              i = offset >> PAGE_SHIFT;
++              o = offset & (PAGE_SIZE-1);
++              l = remain;
++              if ((o + l) > PAGE_SIZE)
++                      l = PAGE_SIZE - o;
++
++              pfn = (dev->agp->base >> PAGE_SHIFT) + i;
++
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++              /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
++               */
++              vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
++                                           __pgprot(__PAGE_KERNEL));
++#if WATCH_PWRITE
++              DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
++                       i, o, l, pfn, vaddr);
++#endif
++              unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
++                                                            user_data, l);
++              kunmap_atomic(vaddr, KM_USER0);
++
++              if (unwritten)
++#endif
++              {
++                      vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++#if WATCH_PWRITE
++                      DRM_INFO("pwrite slow i %d o %d l %d "
++                               "pfn %ld vaddr %p\n",
++                               i, o, l, pfn, vaddr);
++#endif
++                      if (vaddr == NULL) {
++                              ret = -EFAULT;
++                              goto fail;
++                      }
++                      unwritten = __copy_from_user(vaddr + o, user_data, l);
++#if WATCH_PWRITE
++                      DRM_INFO("unwritten %ld\n", unwritten);
++#endif
++                      iounmap(vaddr);
++                      if (unwritten) {
++                              ret = -EFAULT;
++                              goto fail;
++                      }
++              }
++
++              remain -= l;
++              user_data += l;
++              offset += l;
++      }
++#if WATCH_PWRITE && 1
++      i915_gem_clflush_object(obj);
++      i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
++      i915_gem_clflush_object(obj);
++#endif
++
++fail:
++      i915_gem_object_unpin(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++int
++i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++                    struct drm_i915_gem_pwrite *args,
++                    struct drm_file *file_priv)
++{
++      int ret;
++      loff_t offset;
++      ssize_t written;
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_set_domain(obj, file_priv,
++                                I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
++      if (ret) {
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      offset = args->offset;
++
++      written = vfs_write(obj->filp,
++                          (char __user *)(uintptr_t) args->data_ptr,
++                          args->size, &offset);
++      if (written != args->size) {
++              mutex_unlock(&dev->struct_mutex);
++              if (written < 0)
++                      return written;
++              else
++                      return -EINVAL;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Writes data to the object referenced by handle.
++ *
++ * On error, the contents of the buffer that were to be modified are undefined.
++ */
++int
++i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pwrite *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret = 0;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++      obj_priv = obj->driver_private;
++
++      /* Bounds check destination.
++       *
++       * XXX: This could use review for overflow issues...
++       */
++      if (args->offset > obj->size || args->size > obj->size ||
++          args->offset + args->size > obj->size) {
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++
++      /* We can only do the GTT pwrite on untiled buffers, as otherwise
++       * it would end up going through the fenced access, and we'll get
++       * different detiling behavior between reading and writing.
++       * pread/pwrite currently are reading and writing from the CPU
++       * perspective, requiring manual detiling by the client.
++       */
++      if (obj_priv->tiling_mode == I915_TILING_NONE &&
++          dev->gtt_total != 0)
++              ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
++      else
++              ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
++
++#if WATCH_PWRITE
++      if (ret)
++              DRM_INFO("pwrite failed %d\n", ret);
++#endif
++
++      drm_gem_object_unreference(obj);
++
++      return ret;
++}
++
++/**
++ * Called when user space prepares to use an object
++ */
++int
++i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      struct drm_i915_gem_set_domain *args = data;
++      struct drm_gem_object *obj;
++      int ret;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++
++      mutex_lock(&dev->struct_mutex);
++#if WATCH_BUF
++      DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
++               obj, obj->size, args->read_domains, args->write_domain);
++#endif
++      ret = i915_gem_set_domain(obj, file_priv,
++                                args->read_domains, args->write_domain);
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when user space has done writes to this buffer
++ */
++int
++i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      struct drm_i915_gem_sw_finish *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret = 0;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      mutex_lock(&dev->struct_mutex);
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++
++#if WATCH_BUF
++      DRM_INFO("%s: sw_finish %d (%p %d)\n",
++               __func__, args->handle, obj, obj->size);
++#endif
++      obj_priv = obj->driver_private;
++
++      /* Pinned buffers may be scanout, so flush the cache */
++      if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
++              i915_gem_clflush_object(obj);
++              drm_agp_chipset_flush(dev);
++      }
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Maps the contents of an object, returning the address it is mapped
++ * into.
++ *
++ * While the mapping holds a reference on the contents of the object, it doesn't
++ * imply a ref on the object itself.
++ */
++int
++i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_mmap *args = data;
++      struct drm_gem_object *obj;
++      loff_t offset;
++      unsigned long addr;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++
++      offset = args->offset;
++
++      down_write(&current->mm->mmap_sem);
++      addr = do_mmap(obj->filp, 0, args->size,
++                     PROT_READ | PROT_WRITE, MAP_SHARED,
++                     args->offset);
++      up_write(&current->mm->mmap_sem);
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      if (IS_ERR((void *)addr))
++              return addr;
++
++      args->addr_ptr = (uint64_t) addr;
++
++      return 0;
++}
++
++static void
++i915_gem_object_free_page_list(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page_count = obj->size / PAGE_SIZE;
++      int i;
++
++      if (obj_priv->page_list == NULL)
++              return;
++
++
++      for (i = 0; i < page_count; i++)
++              if (obj_priv->page_list[i] != NULL) {
++                      if (obj_priv->dirty)
++                              set_page_dirty(obj_priv->page_list[i]);
++                      mark_page_accessed(obj_priv->page_list[i]);
++                      page_cache_release(obj_priv->page_list[i]);
++              }
++      obj_priv->dirty = 0;
++
++      drm_free(obj_priv->page_list,
++               page_count * sizeof(struct page *),
++               DRM_MEM_DRIVER);
++      obj_priv->page_list = NULL;
++}
++
++static void
++i915_gem_object_move_to_active(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      /* Add a reference if we're newly entering the active list. */
++      if (!obj_priv->active) {
++              drm_gem_object_reference(obj);
++              obj_priv->active = 1;
++      }
++      /* Move from whatever list we were on to the tail of execution. */
++      list_move_tail(&obj_priv->list,
++                     &dev_priv->mm.active_list);
++}
++
++
++static void
++i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++      if (obj_priv->pin_count != 0)
++              list_del_init(&obj_priv->list);
++      else
++              list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++
++      if (obj_priv->active) {
++              obj_priv->active = 0;
++              drm_gem_object_unreference(obj);
++      }
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++/**
++ * Creates a new sequence number, emitting a write of it to the status page
++ * plus an interrupt, which will trigger i915_user_interrupt_handler.
++ *
++ * Must be called with struct_lock held.
++ *
++ * Returned sequence numbers are nonzero on success.
++ */
++static uint32_t
++i915_add_request(struct drm_device *dev, uint32_t flush_domains)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_request *request;
++      uint32_t seqno;
++      int was_empty;
++      RING_LOCALS;
++
++      request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
++      if (request == NULL)
++              return 0;
++
++      /* Grab the seqno we're going to make this request be, and bump the
++       * next (skipping 0 so it can be the reserved no-seqno value).
++       */
++      seqno = dev_priv->mm.next_gem_seqno;
++      dev_priv->mm.next_gem_seqno++;
++      if (dev_priv->mm.next_gem_seqno == 0)
++              dev_priv->mm.next_gem_seqno++;
++
++      BEGIN_LP_RING(4);
++      OUT_RING(MI_STORE_DWORD_INDEX);
++      OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++      OUT_RING(seqno);
++
++      OUT_RING(MI_USER_INTERRUPT);
++      ADVANCE_LP_RING();
++
++      DRM_DEBUG("%d\n", seqno);
++
++      request->seqno = seqno;
++      request->emitted_jiffies = jiffies;
++      request->flush_domains = flush_domains;
++      was_empty = list_empty(&dev_priv->mm.request_list);
++      list_add_tail(&request->list, &dev_priv->mm.request_list);
++
++      if (was_empty)
++              schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
++      return seqno;
++}
++
++/**
++ * Command execution barrier
++ *
++ * Ensures that all commands in the ring are finished
++ * before signalling the CPU
++ */
++uint32_t
++i915_retire_commands(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++      uint32_t flush_domains = 0;
++      RING_LOCALS;
++
++      /* The sampler always gets flushed on i965 (sigh) */
++      if (IS_I965G(dev))
++              flush_domains |= I915_GEM_DOMAIN_SAMPLER;
++      BEGIN_LP_RING(2);
++      OUT_RING(cmd);
++      OUT_RING(0); /* noop */
++      ADVANCE_LP_RING();
++      return flush_domains;
++}
++
++/**
++ * Moves buffers associated only with the given active seqno from the active
++ * to inactive list, potentially freeing them.
++ */
++static void
++i915_gem_retire_request(struct drm_device *dev,
++                      struct drm_i915_gem_request *request)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      if (request->flush_domains != 0) {
++              struct drm_i915_gem_object *obj_priv, *next;
++
++              /* First clear any buffers that were only waiting for a flush
++               * matching the one just retired.
++               */
++
++              list_for_each_entry_safe(obj_priv, next,
++                                       &dev_priv->mm.flushing_list, list) {
++                      struct drm_gem_object *obj = obj_priv->obj;
++
++                      if (obj->write_domain & request->flush_domains) {
++                              obj->write_domain = 0;
++                              i915_gem_object_move_to_inactive(obj);
++                      }
++              }
++
++      }
++
++      /* Move any buffers on the active list that are no longer referenced
++       * by the ringbuffer to the flushing/inactive lists as appropriate.
++       */
++      while (!list_empty(&dev_priv->mm.active_list)) {
++              struct drm_gem_object *obj;
++              struct drm_i915_gem_object *obj_priv;
++
++              obj_priv = list_first_entry(&dev_priv->mm.active_list,
++                                          struct drm_i915_gem_object,
++                                          list);
++              obj = obj_priv->obj;
++
++              /* If the seqno being retired doesn't match the oldest in the
++               * list, then the oldest in the list must still be newer than
++               * this seqno.
++               */
++              if (obj_priv->last_rendering_seqno != request->seqno)
++                      return;
++#if WATCH_LRU
++              DRM_INFO("%s: retire %d moves to inactive list %p\n",
++                       __func__, request->seqno, obj);
++#endif
++
++              /* If this request flushes the write domain,
++               * clear the write domain from the object now
++               */
++              if (request->flush_domains & obj->write_domain)
++                  obj->write_domain = 0;
++
++              if (obj->write_domain != 0) {
++                      list_move_tail(&obj_priv->list,
++                                     &dev_priv->mm.flushing_list);
++              } else {
++                      i915_gem_object_move_to_inactive(obj);
++              }
++      }
++}
++
++/**
++ * Returns true if seq1 is later than seq2.
++ */
++static int
++i915_seqno_passed(uint32_t seq1, uint32_t seq2)
++{
++      return (int32_t)(seq1 - seq2) >= 0;
++}
++
++uint32_t
++i915_get_gem_seqno(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
++}
++
++/**
++ * This function clears the request list as sequence numbers are passed.
++ */
++void
++i915_gem_retire_requests(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t seqno;
++
++      seqno = i915_get_gem_seqno(dev);
++
++      while (!list_empty(&dev_priv->mm.request_list)) {
++              struct drm_i915_gem_request *request;
++              uint32_t retiring_seqno;
++
++              request = list_first_entry(&dev_priv->mm.request_list,
++                                         struct drm_i915_gem_request,
++                                         list);
++              retiring_seqno = request->seqno;
++
++              if (i915_seqno_passed(seqno, retiring_seqno) ||
++                  dev_priv->mm.wedged) {
++                      i915_gem_retire_request(dev, request);
++
++                      list_del(&request->list);
++                      drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
++              } else
++                      break;
++      }
++}
++
++void
++i915_gem_retire_work_handler(struct work_struct *work)
++{
++      drm_i915_private_t *dev_priv;
++      struct drm_device *dev;
++
++      dev_priv = container_of(work, drm_i915_private_t,
++                              mm.retire_work.work);
++      dev = dev_priv->dev;
++
++      mutex_lock(&dev->struct_mutex);
++      i915_gem_retire_requests(dev);
++      if (!list_empty(&dev_priv->mm.request_list))
++              schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Waits for a sequence number to be signaled, and cleans up the
++ * request and object lists appropriately for that event.
++ */
++int
++i915_wait_request(struct drm_device *dev, uint32_t seqno)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret = 0;
++
++      BUG_ON(seqno == 0);
++
++      if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
++              dev_priv->mm.waiting_gem_seqno = seqno;
++              i915_user_irq_on(dev_priv);
++              ret = wait_event_interruptible(dev_priv->irq_queue,
++                                             i915_seqno_passed(i915_get_gem_seqno(dev),
++                                                               seqno) ||
++                                             dev_priv->mm.wedged);
++              i915_user_irq_off(dev_priv);
++              dev_priv->mm.waiting_gem_seqno = 0;
++      }
++      if (dev_priv->mm.wedged)
++              ret = -EIO;
++
++      if (ret && ret != -ERESTARTSYS)
++              DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
++                        __func__, ret, seqno, i915_get_gem_seqno(dev));
++
++      /* Directly dispatch request retiring.  While we have the work queue
++       * to handle this, the waiter on a request often wants an associated
++       * buffer to have made it to the inactive list, and we would need
++       * a separate wait queue to handle that.
++       */
++      if (ret == 0)
++              i915_gem_retire_requests(dev);
++
++      return ret;
++}
++
++static void
++i915_gem_flush(struct drm_device *dev,
++             uint32_t invalidate_domains,
++             uint32_t flush_domains)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t cmd;
++      RING_LOCALS;
++
++#if WATCH_EXEC
++      DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
++                invalidate_domains, flush_domains);
++#endif
++
++      if (flush_domains & I915_GEM_DOMAIN_CPU)
++              drm_agp_chipset_flush(dev);
++
++      if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
++                                                   I915_GEM_DOMAIN_GTT)) {
++              /*
++               * read/write caches:
++               *
++               * I915_GEM_DOMAIN_RENDER is always invalidated, but is
++               * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
++               * also flushed at 2d versus 3d pipeline switches.
++               *
++               * read-only caches:
++               *
++               * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
++               * MI_READ_FLUSH is set, and is always flushed on 965.
++               *
++               * I915_GEM_DOMAIN_COMMAND may not exist?
++               *
++               * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
++               * invalidated when MI_EXE_FLUSH is set.
++               *
++               * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
++               * invalidated with every MI_FLUSH.
++               *
++               * TLBs:
++               *
++               * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
++               * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
++               * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
++               * are flushed at any MI_FLUSH.
++               */
++
++              cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++              if ((invalidate_domains|flush_domains) &
++                  I915_GEM_DOMAIN_RENDER)
++                      cmd &= ~MI_NO_WRITE_FLUSH;
++              if (!IS_I965G(dev)) {
++                      /*
++                       * On the 965, the sampler cache always gets flushed
++                       * and this bit is reserved.
++                       */
++                      if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
++                              cmd |= MI_READ_FLUSH;
++              }
++              if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
++                      cmd |= MI_EXE_FLUSH;
++
++#if WATCH_EXEC
++              DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
++#endif
++              BEGIN_LP_RING(2);
++              OUT_RING(cmd);
++              OUT_RING(0); /* noop */
++              ADVANCE_LP_RING();
++      }
++}
++
++/**
++ * Ensures that all rendering to the object has completed and the object is
++ * safe to unbind from the GTT or access from the CPU.
++ */
++static int
++i915_gem_object_wait_rendering(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret;
++      uint32_t write_domain;
++
++      /* If there are writes queued to the buffer, flush and
++       * create a new seqno to wait for.
++       */
++      write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
++      if (write_domain) {
++#if WATCH_BUF
++              DRM_INFO("%s: flushing object %p from write domain %08x\n",
++                        __func__, obj, write_domain);
++#endif
++              i915_gem_flush(dev, 0, write_domain);
++
++              i915_gem_object_move_to_active(obj);
++              obj_priv->last_rendering_seqno = i915_add_request(dev,
++                                                                write_domain);
++              BUG_ON(obj_priv->last_rendering_seqno == 0);
++#if WATCH_LRU
++              DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
++#endif
++      }
++
++      /* If there is rendering queued on the buffer being evicted, wait for
++       * it.
++       */
++      if (obj_priv->active) {
++#if WATCH_BUF
++              DRM_INFO("%s: object %p wait for seqno %08x\n",
++                        __func__, obj, obj_priv->last_rendering_seqno);
++#endif
++              ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
++              if (ret != 0)
++                      return ret;
++      }
++
++      return 0;
++}
++
++/**
++ * Unbinds an object from the GTT aperture.
++ */
++static int
++i915_gem_object_unbind(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret = 0;
++
++#if WATCH_BUF
++      DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
++      DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
++#endif
++      if (obj_priv->gtt_space == NULL)
++              return 0;
++
++      if (obj_priv->pin_count != 0) {
++              DRM_ERROR("Attempting to unbind pinned buffer\n");
++              return -EINVAL;
++      }
++
++      /* Wait for any rendering to complete
++       */
++      ret = i915_gem_object_wait_rendering(obj);
++      if (ret) {
++              DRM_ERROR("wait_rendering failed: %d\n", ret);
++              return ret;
++      }
++
++      /* Move the object to the CPU domain to ensure that
++       * any possible CPU writes while it's not in the GTT
++       * are flushed when we go to remap it. This will
++       * also ensure that all pending GPU writes are finished
++       * before we unbind.
++       */
++      ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
++                                       I915_GEM_DOMAIN_CPU);
++      if (ret) {
++              DRM_ERROR("set_domain failed: %d\n", ret);
++              return ret;
++      }
++
++      if (obj_priv->agp_mem != NULL) {
++              drm_unbind_agp(obj_priv->agp_mem);
++              drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
++              obj_priv->agp_mem = NULL;
++      }
++
++      BUG_ON(obj_priv->active);
++
++      i915_gem_object_free_page_list(obj);
++
++      if (obj_priv->gtt_space) {
++              atomic_dec(&dev->gtt_count);
++              atomic_sub(obj->size, &dev->gtt_memory);
++
++              drm_mm_put_block(obj_priv->gtt_space);
++              obj_priv->gtt_space = NULL;
++      }
++
++      /* Remove ourselves from the LRU list if present. */
++      if (!list_empty(&obj_priv->list))
++              list_del_init(&obj_priv->list);
++
++      return 0;
++}
++
++static int
++i915_gem_evict_something(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret = 0;
++
++      for (;;) {
++              /* If there's an inactive buffer available now, grab it
++               * and be done.
++               */
++              if (!list_empty(&dev_priv->mm.inactive_list)) {
++                      obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
++                                                  struct drm_i915_gem_object,
++                                                  list);
++                      obj = obj_priv->obj;
++                      BUG_ON(obj_priv->pin_count != 0);
++#if WATCH_LRU
++                      DRM_INFO("%s: evicting %p\n", __func__, obj);
++#endif
++                      BUG_ON(obj_priv->active);
++
++                      /* Wait on the rendering and unbind the buffer. */
++                      ret = i915_gem_object_unbind(obj);
++                      break;
++              }
++
++              /* If we didn't get anything, but the ring is still processing
++               * things, wait for one of those things to finish and hopefully
++               * leave us a buffer to evict.
++               */
++              if (!list_empty(&dev_priv->mm.request_list)) {
++                      struct drm_i915_gem_request *request;
++
++                      request = list_first_entry(&dev_priv->mm.request_list,
++                                                 struct drm_i915_gem_request,
++                                                 list);
++
++                      ret = i915_wait_request(dev, request->seqno);
++                      if (ret)
++                              break;
++
++                      /* if waiting caused an object to become inactive,
++                       * then loop around and wait for it. Otherwise, we
++                       * assume that waiting freed and unbound something,
++                       * so there should now be some space in the GTT
++                       */
++                      if (!list_empty(&dev_priv->mm.inactive_list))
++                              continue;
++                      break;
++              }
++
++              /* If we didn't have anything on the request list but there
++               * are buffers awaiting a flush, emit one and try again.
++               * When we wait on it, those buffers waiting for that flush
++               * will get moved to inactive.
++               */
++              if (!list_empty(&dev_priv->mm.flushing_list)) {
++                      obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
++                                                  struct drm_i915_gem_object,
++                                                  list);
++                      obj = obj_priv->obj;
++
++                      i915_gem_flush(dev,
++                                     obj->write_domain,
++                                     obj->write_domain);
++                      i915_add_request(dev, obj->write_domain);
++
++                      obj = NULL;
++                      continue;
++              }
++
++              DRM_ERROR("inactive empty %d request empty %d "
++                        "flushing empty %d\n",
++                        list_empty(&dev_priv->mm.inactive_list),
++                        list_empty(&dev_priv->mm.request_list),
++                        list_empty(&dev_priv->mm.flushing_list));
++              /* If we didn't do any of the above, there's nothing to be done
++               * and we just can't fit it in.
++               */
++              return -ENOMEM;
++      }
++      return ret;
++}
++
++static int
++i915_gem_object_get_page_list(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page_count, i;
++      struct address_space *mapping;
++      struct inode *inode;
++      struct page *page;
++      int ret;
++
++      if (obj_priv->page_list)
++              return 0;
++
++      /* Get the list of pages out of our struct file.  They'll be pinned
++       * at this point until we release them.
++       */
++      page_count = obj->size / PAGE_SIZE;
++      BUG_ON(obj_priv->page_list != NULL);
++      obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
++                                       DRM_MEM_DRIVER);
++      if (obj_priv->page_list == NULL) {
++              DRM_ERROR("Faled to allocate page list\n");
++              return -ENOMEM;
++      }
++
++      inode = obj->filp->f_path.dentry->d_inode;
++      mapping = inode->i_mapping;
++      for (i = 0; i < page_count; i++) {
++              page = read_mapping_page(mapping, i, NULL);
++              if (IS_ERR(page)) {
++                      ret = PTR_ERR(page);
++                      DRM_ERROR("read_mapping_page failed: %d\n", ret);
++                      i915_gem_object_free_page_list(obj);
++                      return ret;
++              }
++              obj_priv->page_list[i] = page;
++      }
++      return 0;
++}
++
++/**
++ * Finds free space in the GTT aperture and binds the object there.
++ */
++static int
++i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      struct drm_mm_node *free_space;
++      int page_count, ret;
++
++      if (alignment == 0)
++              alignment = PAGE_SIZE;
++      if (alignment & (PAGE_SIZE - 1)) {
++              DRM_ERROR("Invalid object alignment requested %u\n", alignment);
++              return -EINVAL;
++      }
++
++ search_free:
++      free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
++                                      obj->size, alignment, 0);
++      if (free_space != NULL) {
++              obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
++                                                     alignment);
++              if (obj_priv->gtt_space != NULL) {
++                      obj_priv->gtt_space->private = obj;
++                      obj_priv->gtt_offset = obj_priv->gtt_space->start;
++              }
++      }
++      if (obj_priv->gtt_space == NULL) {
++              /* If the gtt is empty and we're still having trouble
++               * fitting our object in, we're out of memory.
++               */
++#if WATCH_LRU
++              DRM_INFO("%s: GTT full, evicting something\n", __func__);
++#endif
++              if (list_empty(&dev_priv->mm.inactive_list) &&
++                  list_empty(&dev_priv->mm.flushing_list) &&
++                  list_empty(&dev_priv->mm.active_list)) {
++                      DRM_ERROR("GTT full, but LRU list empty\n");
++                      return -ENOMEM;
++              }
++
++              ret = i915_gem_evict_something(dev);
++              if (ret != 0) {
++                      DRM_ERROR("Failed to evict a buffer %d\n", ret);
++                      return ret;
++              }
++              goto search_free;
++      }
++
++#if WATCH_BUF
++      DRM_INFO("Binding object of size %d at 0x%08x\n",
++               obj->size, obj_priv->gtt_offset);
++#endif
++      ret = i915_gem_object_get_page_list(obj);
++      if (ret) {
++              drm_mm_put_block(obj_priv->gtt_space);
++              obj_priv->gtt_space = NULL;
++              return ret;
++      }
++
++      page_count = obj->size / PAGE_SIZE;
++      /* Create an AGP memory structure pointing at our pages, and bind it
++       * into the GTT.
++       */
++      obj_priv->agp_mem = drm_agp_bind_pages(dev,
++                                             obj_priv->page_list,
++                                             page_count,
++                                             obj_priv->gtt_offset);
++      if (obj_priv->agp_mem == NULL) {
++              i915_gem_object_free_page_list(obj);
++              drm_mm_put_block(obj_priv->gtt_space);
++              obj_priv->gtt_space = NULL;
++              return -ENOMEM;
++      }
++      atomic_inc(&dev->gtt_count);
++      atomic_add(obj->size, &dev->gtt_memory);
++
++      /* Assert that the object is not currently in any GPU domain. As it
++       * wasn't in the GTT, there shouldn't be any way it could have been in
++       * a GPU cache
++       */
++      BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++      BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++
++      return 0;
++}
++
++void
++i915_gem_clflush_object(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object      *obj_priv = obj->driver_private;
++
++      /* If we don't have a page list set up, then we're not pinned
++       * to GPU, and we can ignore the cache flush because it'll happen
++       * again at bind time.
++       */
++      if (obj_priv->page_list == NULL)
++              return;
++
++      drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
++}
++
++/*
++ * Set the next domain for the specified object. This
++ * may not actually perform the necessary flushing/invaliding though,
++ * as that may want to be batched with other set_domain operations
++ *
++ * This is (we hope) the only really tricky part of gem. The goal
++ * is fairly simple -- track which caches hold bits of the object
++ * and make sure they remain coherent. A few concrete examples may
++ * help to explain how it works. For shorthand, we use the notation
++ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
++ * a pair of read and write domain masks.
++ *
++ * Case 1: the batch buffer
++ *
++ *    1. Allocated
++ *    2. Written by CPU
++ *    3. Mapped to GTT
++ *    4. Read by GPU
++ *    5. Unmapped from GTT
++ *    6. Freed
++ *
++ *    Let's take these a step at a time
++ *
++ *    1. Allocated
++ *            Pages allocated from the kernel may still have
++ *            cache contents, so we set them to (CPU, CPU) always.
++ *    2. Written by CPU (using pwrite)
++ *            The pwrite function calls set_domain (CPU, CPU) and
++ *            this function does nothing (as nothing changes)
++ *    3. Mapped by GTT
++ *            This function asserts that the object is not
++ *            currently in any GPU-based read or write domains
++ *    4. Read by GPU
++ *            i915_gem_execbuffer calls set_domain (COMMAND, 0).
++ *            As write_domain is zero, this function adds in the
++ *            current read domains (CPU+COMMAND, 0).
++ *            flush_domains is set to CPU.
++ *            invalidate_domains is set to COMMAND
++ *            clflush is run to get data out of the CPU caches
++ *            then i915_dev_set_domain calls i915_gem_flush to
++ *            emit an MI_FLUSH and drm_agp_chipset_flush
++ *    5. Unmapped from GTT
++ *            i915_gem_object_unbind calls set_domain (CPU, CPU)
++ *            flush_domains and invalidate_domains end up both zero
++ *            so no flushing/invalidating happens
++ *    6. Freed
++ *            yay, done
++ *
++ * Case 2: The shared render buffer
++ *
++ *    1. Allocated
++ *    2. Mapped to GTT
++ *    3. Read/written by GPU
++ *    4. set_domain to (CPU,CPU)
++ *    5. Read/written by CPU
++ *    6. Read/written by GPU
++ *
++ *    1. Allocated
++ *            Same as last example, (CPU, CPU)
++ *    2. Mapped to GTT
++ *            Nothing changes (assertions find that it is not in the GPU)
++ *    3. Read/written by GPU
++ *            execbuffer calls set_domain (RENDER, RENDER)
++ *            flush_domains gets CPU
++ *            invalidate_domains gets GPU
++ *            clflush (obj)
++ *            MI_FLUSH and drm_agp_chipset_flush
++ *    4. set_domain (CPU, CPU)
++ *            flush_domains gets GPU
++ *            invalidate_domains gets CPU
++ *            wait_rendering (obj) to make sure all drawing is complete.
++ *            This will include an MI_FLUSH to get the data from GPU
++ *            to memory
++ *            clflush (obj) to invalidate the CPU cache
++ *            Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
++ *    5. Read/written by CPU
++ *            cache lines are loaded and dirtied
++ *    6. Read written by GPU
++ *            Same as last GPU access
++ *
++ * Case 3: The constant buffer
++ *
++ *    1. Allocated
++ *    2. Written by CPU
++ *    3. Read by GPU
++ *    4. Updated (written) by CPU again
++ *    5. Read by GPU
++ *
++ *    1. Allocated
++ *            (CPU, CPU)
++ *    2. Written by CPU
++ *            (CPU, CPU)
++ *    3. Read by GPU
++ *            (CPU+RENDER, 0)
++ *            flush_domains = CPU
++ *            invalidate_domains = RENDER
++ *            clflush (obj)
++ *            MI_FLUSH
++ *            drm_agp_chipset_flush
++ *    4. Updated (written) by CPU again
++ *            (CPU, CPU)
++ *            flush_domains = 0 (no previous write domain)
++ *            invalidate_domains = 0 (no new read domains)
++ *    5. Read by GPU
++ *            (CPU+RENDER, 0)
++ *            flush_domains = CPU
++ *            invalidate_domains = RENDER
++ *            clflush (obj)
++ *            MI_FLUSH
++ *            drm_agp_chipset_flush
++ */
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++                          uint32_t read_domains,
++                          uint32_t write_domain)
++{
++      struct drm_device               *dev = obj->dev;
++      struct drm_i915_gem_object      *obj_priv = obj->driver_private;
++      uint32_t                        invalidate_domains = 0;
++      uint32_t                        flush_domains = 0;
++      int                             ret;
++
++#if WATCH_BUF
++      DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
++               __func__, obj,
++               obj->read_domains, read_domains,
++               obj->write_domain, write_domain);
++#endif
++      /*
++       * If the object isn't moving to a new write domain,
++       * let the object stay in multiple read domains
++       */
++      if (write_domain == 0)
++              read_domains |= obj->read_domains;
++      else
++              obj_priv->dirty = 1;
++
++      /*
++       * Flush the current write domain if
++       * the new read domains don't match. Invalidate
++       * any read domains which differ from the old
++       * write domain
++       */
++      if (obj->write_domain && obj->write_domain != read_domains) {
++              flush_domains |= obj->write_domain;
++              invalidate_domains |= read_domains & ~obj->write_domain;
++      }
++      /*
++       * Invalidate any read caches which may have
++       * stale data. That is, any new read domains.
++       */
++      invalidate_domains |= read_domains & ~obj->read_domains;
++      if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
++#if WATCH_BUF
++              DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
++                       __func__, flush_domains, invalidate_domains);
++#endif
++              /*
++               * If we're invaliding the CPU cache and flushing a GPU cache,
++               * then pause for rendering so that the GPU caches will be
++               * flushed before the cpu cache is invalidated
++               */
++              if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
++                  (flush_domains & ~(I915_GEM_DOMAIN_CPU |
++                                     I915_GEM_DOMAIN_GTT))) {
++                      ret = i915_gem_object_wait_rendering(obj);
++                      if (ret)
++                              return ret;
++              }
++              i915_gem_clflush_object(obj);
++      }
++
++      if ((write_domain | flush_domains) != 0)
++              obj->write_domain = write_domain;
++
++      /* If we're invalidating the CPU domain, clear the per-page CPU
++       * domain list as well.
++       */
++      if (obj_priv->page_cpu_valid != NULL &&
++          (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
++          ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
++              memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
++      }
++      obj->read_domains = read_domains;
++
++      dev->invalidate_domains |= invalidate_domains;
++      dev->flush_domains |= flush_domains;
++#if WATCH_BUF
++      DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
++               __func__,
++               obj->read_domains, obj->write_domain,
++               dev->invalidate_domains, dev->flush_domains);
++#endif
++      return 0;
++}
++
++/**
++ * Set the read/write domain on a range of the object.
++ *
++ * Currently only implemented for CPU reads, otherwise drops to normal
++ * i915_gem_object_set_domain().
++ */
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++                               uint64_t offset,
++                               uint64_t size,
++                               uint32_t read_domains,
++                               uint32_t write_domain)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret, i;
++
++      if (obj->read_domains & I915_GEM_DOMAIN_CPU)
++              return 0;
++
++      if (read_domains != I915_GEM_DOMAIN_CPU ||
++          write_domain != 0)
++              return i915_gem_object_set_domain(obj,
++                                                read_domains, write_domain);
++
++      /* Wait on any GPU rendering to the object to be flushed. */
++      if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
++              ret = i915_gem_object_wait_rendering(obj);
++              if (ret)
++                      return ret;
++      }
++
++      if (obj_priv->page_cpu_valid == NULL) {
++              obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
++                                                    DRM_MEM_DRIVER);
++      }
++
++      /* Flush the cache on any pages that are still invalid from the CPU's
++       * perspective.
++       */
++      for (i = offset / PAGE_SIZE; i < (offset + size - 1) / PAGE_SIZE; i++) {
++              if (obj_priv->page_cpu_valid[i])
++                      continue;
++
++              drm_ttm_cache_flush(obj_priv->page_list + i, 1);
++
++              obj_priv->page_cpu_valid[i] = 1;
++      }
++
++      return 0;
++}
++
++/**
++ * Once all of the objects have been set in the proper domain,
++ * perform the necessary flush and invalidate operations.
++ *
++ * Returns the write domains flushed, for use in flush tracking.
++ */
++static uint32_t
++i915_gem_dev_set_domain(struct drm_device *dev)
++{
++      uint32_t flush_domains = dev->flush_domains;
++
++      /*
++       * Now that all the buffers are synced to the proper domains,
++       * flush and invalidate the collected domains
++       */
++      if (dev->invalidate_domains | dev->flush_domains) {
++#if WATCH_EXEC
++              DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
++                        __func__,
++                       dev->invalidate_domains,
++                       dev->flush_domains);
++#endif
++              i915_gem_flush(dev,
++                             dev->invalidate_domains,
++                             dev->flush_domains);
++              dev->invalidate_domains = 0;
++              dev->flush_domains = 0;
++      }
++
++      return flush_domains;
++}
++
++/**
++ * Pin an object to the GTT and evaluate the relocations landing in it.
++ */
++static int
++i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
++                               struct drm_file *file_priv,
++                               struct drm_i915_gem_exec_object *entry)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_relocation_entry reloc;
++      struct drm_i915_gem_relocation_entry __user *relocs;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int i, ret;
++      uint32_t last_reloc_offset = -1;
++      void *reloc_page = NULL;
++
++      /* Choose the GTT offset for our buffer and put it there. */
++      ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
++      if (ret)
++              return ret;
++
++      entry->offset = obj_priv->gtt_offset;
++
++      relocs = (struct drm_i915_gem_relocation_entry __user *)
++               (uintptr_t) entry->relocs_ptr;
++      /* Apply the relocations, using the GTT aperture to avoid cache
++       * flushing requirements.
++       */
++      for (i = 0; i < entry->relocation_count; i++) {
++              struct drm_gem_object *target_obj;
++              struct drm_i915_gem_object *target_obj_priv;
++              uint32_t reloc_val, reloc_offset, *reloc_entry;
++              int ret;
++
++              ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
++              if (ret != 0) {
++                      i915_gem_object_unpin(obj);
++                      return ret;
++              }
++
++              target_obj = drm_gem_object_lookup(obj->dev, file_priv,
++                                                 reloc.target_handle);
++              if (target_obj == NULL) {
++                      i915_gem_object_unpin(obj);
++                      return -EBADF;
++              }
++              target_obj_priv = target_obj->driver_private;
++
++              /* The target buffer should have appeared before us in the
++               * exec_object list, so it should have a GTT space bound by now.
++               */
++              if (target_obj_priv->gtt_space == NULL) {
++                      DRM_ERROR("No GTT space found for object %d\n",
++                                reloc.target_handle);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++
++              if (reloc.offset > obj->size - 4) {
++                      DRM_ERROR("Relocation beyond object bounds: "
++                                "obj %p target %d offset %d size %d.\n",
++                                obj, reloc.target_handle,
++                                (int) reloc.offset, (int) obj->size);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++              if (reloc.offset & 3) {
++                      DRM_ERROR("Relocation not 4-byte aligned: "
++                                "obj %p target %d offset %d.\n",
++                                obj, reloc.target_handle,
++                                (int) reloc.offset);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++
++              if (reloc.write_domain && target_obj->pending_write_domain &&
++                  reloc.write_domain != target_obj->pending_write_domain) {
++                      DRM_ERROR("Write domain conflict: "
++                                "obj %p target %d offset %d "
++                                "new %08x old %08x\n",
++                                obj, reloc.target_handle,
++                                (int) reloc.offset,
++                                reloc.write_domain,
++                                target_obj->pending_write_domain);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++
++#if WATCH_RELOC
++              DRM_INFO("%s: obj %p offset %08x target %d "
++                       "read %08x write %08x gtt %08x "
++                       "presumed %08x delta %08x\n",
++                       __func__,
++                       obj,
++                       (int) reloc.offset,
++                       (int) reloc.target_handle,
++                       (int) reloc.read_domains,
++                       (int) reloc.write_domain,
++                       (int) target_obj_priv->gtt_offset,
++                       (int) reloc.presumed_offset,
++                       reloc.delta);
++#endif
++
++              target_obj->pending_read_domains |= reloc.read_domains;
++              target_obj->pending_write_domain |= reloc.write_domain;
++
++              /* If the relocation already has the right value in it, no
++               * more work needs to be done.
++               */
++              if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
++                      drm_gem_object_unreference(target_obj);
++                      continue;
++              }
++
++              /* Now that we're going to actually write some data in,
++               * make sure that any rendering using this buffer's contents
++               * is completed.
++               */
++              i915_gem_object_wait_rendering(obj);
++
++              /* As we're writing through the gtt, flush
++               * any CPU writes before we write the relocations
++               */
++              if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
++                      i915_gem_clflush_object(obj);
++                      drm_agp_chipset_flush(dev);
++                      obj->write_domain = 0;
++              }
++
++              /* Map the page containing the relocation we're going to
++               * perform.
++               */
++              reloc_offset = obj_priv->gtt_offset + reloc.offset;
++              if (reloc_page == NULL ||
++                  (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
++                  (reloc_offset & ~(PAGE_SIZE - 1))) {
++                      if (reloc_page != NULL)
++                              iounmap(reloc_page);
++
++                      reloc_page = ioremap(dev->agp->base +
++                                           (reloc_offset & ~(PAGE_SIZE - 1)),
++                                           PAGE_SIZE);
++                      last_reloc_offset = reloc_offset;
++                      if (reloc_page == NULL) {
++                              drm_gem_object_unreference(target_obj);
++                              i915_gem_object_unpin(obj);
++                              return -ENOMEM;
++                      }
++              }
++
++              reloc_entry = (uint32_t *)((char *)reloc_page +
++                                         (reloc_offset & (PAGE_SIZE - 1)));
++              reloc_val = target_obj_priv->gtt_offset + reloc.delta;
++
++#if WATCH_BUF
++              DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
++                        obj, (unsigned int) reloc.offset,
++                        readl(reloc_entry), reloc_val);
++#endif
++              writel(reloc_val, reloc_entry);
++
++              /* Write the updated presumed offset for this entry back out
++               * to the user.
++               */
++              reloc.presumed_offset = target_obj_priv->gtt_offset;
++              ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
++              if (ret != 0) {
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return ret;
++              }
++
++              drm_gem_object_unreference(target_obj);
++      }
++
++      if (reloc_page != NULL)
++              iounmap(reloc_page);
++
++#if WATCH_BUF
++      if (0)
++              i915_gem_dump_object(obj, 128, __func__, ~0);
++#endif
++      return 0;
++}
++
++/** Dispatch a batchbuffer to the ring
++ */
++static int
++i915_dispatch_gem_execbuffer(struct drm_device *dev,
++                            struct drm_i915_gem_execbuffer *exec,
++                            uint64_t exec_offset)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
++                                           (uintptr_t) exec->cliprects_ptr;
++      int nbox = exec->num_cliprects;
++      int i = 0, count;
++      uint32_t        exec_start, exec_len;
++      RING_LOCALS;
++
++      exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
++      exec_len = (uint32_t) exec->batch_len;
++
++      if ((exec_start | exec_len) & 0x7) {
++              DRM_ERROR("alignment\n");
++              return -EINVAL;
++      }
++
++      if (!exec_start)
++              return -EINVAL;
++
++      count = nbox ? nbox : 1;
++
++      for (i = 0; i < count; i++) {
++              if (i < nbox) {
++                      int ret = i915_emit_box(dev, boxes, i,
++                                              exec->DR1, exec->DR4);
++                      if (ret)
++                              return ret;
++              }
++
++              if (IS_I830(dev) || IS_845G(dev)) {
++                      BEGIN_LP_RING(4);
++                      OUT_RING(MI_BATCH_BUFFER);
++                      OUT_RING(exec_start | MI_BATCH_NON_SECURE);
++                      OUT_RING(exec_start + exec_len - 4);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              } else {
++                      BEGIN_LP_RING(2);
++                      if (IS_I965G(dev)) {
++                              OUT_RING(MI_BATCH_BUFFER_START |
++                                       (2 << 6) |
++                                       MI_BATCH_NON_SECURE_I965);
++                              OUT_RING(exec_start);
++                      } else {
++                              OUT_RING(MI_BATCH_BUFFER_START |
++                                       (2 << 6));
++                              OUT_RING(exec_start | MI_BATCH_NON_SECURE);
++                      }
++                      ADVANCE_LP_RING();
++              }
++      }
++
++      /* XXX breadcrumb */
++      return 0;
++}
++
++/* Throttle our rendering by waiting until the ring has completed our requests
++ * emitted over 20 msec ago.
++ *
++ * This should get us reasonable parallelism between CPU and GPU but also
++ * relatively low latency when blocking on a particular request to finish.
++ */
++static int
++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++      int ret = 0;
++      uint32_t seqno;
++
++      mutex_lock(&dev->struct_mutex);
++      seqno = i915_file_priv->mm.last_gem_throttle_seqno;
++      i915_file_priv->mm.last_gem_throttle_seqno =
++              i915_file_priv->mm.last_gem_seqno;
++      if (seqno)
++              ret = i915_wait_request(dev, seqno);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++int
++i915_gem_execbuffer(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++      struct drm_i915_gem_execbuffer *args = data;
++      struct drm_i915_gem_exec_object *exec_list = NULL;
++      struct drm_gem_object **object_list = NULL;
++      struct drm_gem_object *batch_obj;
++      int ret, i, pinned = 0;
++      uint64_t exec_offset;
++      uint32_t seqno, flush_domains;
++
++#if WATCH_EXEC
++      DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
++                (int) args->buffers_ptr, args->buffer_count, args->batch_len);
++#endif
++
++      /* Copy in the exec list from userland */
++      exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
++                             DRM_MEM_DRIVER);
++      object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
++                               DRM_MEM_DRIVER);
++      if (exec_list == NULL || object_list == NULL) {
++              DRM_ERROR("Failed to allocate exec or object list "
++                        "for %d buffers\n",
++                        args->buffer_count);
++              ret = -ENOMEM;
++              goto pre_mutex_err;
++      }
++      ret = copy_from_user(exec_list,
++                           (struct drm_i915_relocation_entry __user *)
++                           (uintptr_t) args->buffers_ptr,
++                           sizeof(*exec_list) * args->buffer_count);
++      if (ret != 0) {
++              DRM_ERROR("copy %d exec entries failed %d\n",
++                        args->buffer_count, ret);
++              goto pre_mutex_err;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      if (dev_priv->mm.wedged) {
++              DRM_ERROR("Execbuf while wedged\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EIO;
++      }
++
++      if (dev_priv->mm.suspended) {
++              DRM_ERROR("Execbuf while VT-switched.\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EBUSY;
++      }
++
++      /* Zero the gloabl flush/invalidate flags. These
++       * will be modified as each object is bound to the
++       * gtt
++       */
++      dev->invalidate_domains = 0;
++      dev->flush_domains = 0;
++
++      /* Look up object handles and perform the relocations */
++      for (i = 0; i < args->buffer_count; i++) {
++              object_list[i] = drm_gem_object_lookup(dev, file_priv,
++                                                     exec_list[i].handle);
++              if (object_list[i] == NULL) {
++                      DRM_ERROR("Invalid object handle %d at index %d\n",
++                                 exec_list[i].handle, i);
++                      ret = -EBADF;
++                      goto err;
++              }
++
++              object_list[i]->pending_read_domains = 0;
++              object_list[i]->pending_write_domain = 0;
++              ret = i915_gem_object_pin_and_relocate(object_list[i],
++                                                     file_priv,
++                                                     &exec_list[i]);
++              if (ret) {
++                      DRM_ERROR("object bind and relocate failed %d\n", ret);
++                      goto err;
++              }
++              pinned = i + 1;
++      }
++
++      /* Set the pending read domains for the batch buffer to COMMAND */
++      batch_obj = object_list[args->buffer_count-1];
++      batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
++      batch_obj->pending_write_domain = 0;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      for (i = 0; i < args->buffer_count; i++) {
++              struct drm_gem_object *obj = object_list[i];
++              struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++              if (obj_priv->gtt_space == NULL) {
++                      /* We evicted the buffer in the process of validating
++                       * our set of buffers in.  We could try to recover by
++                       * kicking them everything out and trying again from
++                       * the start.
++                       */
++                      ret = -ENOMEM;
++                      goto err;
++              }
++
++              /* make sure all previous memory operations have passed */
++              ret = i915_gem_object_set_domain(obj,
++                                               obj->pending_read_domains,
++                                               obj->pending_write_domain);
++              if (ret)
++                      goto err;
++      }
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      /* Flush/invalidate caches and chipset buffer */
++      flush_domains = i915_gem_dev_set_domain(dev);
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++#if WATCH_COHERENCY
++      for (i = 0; i < args->buffer_count; i++) {
++              i915_gem_object_check_coherency(object_list[i],
++                                              exec_list[i].handle);
++      }
++#endif
++
++      exec_offset = exec_list[args->buffer_count - 1].offset;
++
++#if WATCH_EXEC
++      i915_gem_dump_object(object_list[args->buffer_count - 1],
++                            args->batch_len,
++                            __func__,
++                            ~0);
++#endif
++
++      /* Exec the batchbuffer */
++      ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
++      if (ret) {
++              DRM_ERROR("dispatch failed %d\n", ret);
++              goto err;
++      }
++
++      /*
++       * Ensure that the commands in the batch buffer are
++       * finished before the interrupt fires
++       */
++      flush_domains |= i915_retire_commands(dev);
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      /*
++       * Get a seqno representing the execution of the current buffer,
++       * which we can wait on.  We would like to mitigate these interrupts,
++       * likely by only creating seqnos occasionally (so that we have
++       * *some* interrupts representing completion of buffers that we can
++       * wait on when trying to clear up gtt space).
++       */
++      seqno = i915_add_request(dev, flush_domains);
++      BUG_ON(seqno == 0);
++      i915_file_priv->mm.last_gem_seqno = seqno;
++      for (i = 0; i < args->buffer_count; i++) {
++              struct drm_gem_object *obj = object_list[i];
++              struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++              i915_gem_object_move_to_active(obj);
++              obj_priv->last_rendering_seqno = seqno;
++#if WATCH_LRU
++              DRM_INFO("%s: move to exec list %p\n", __func__, obj);
++#endif
++      }
++#if WATCH_LRU
++      i915_dump_lru(dev, __func__);
++#endif
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      /* Copy the new buffer offsets back to the user's exec list. */
++      ret = copy_to_user((struct drm_i915_relocation_entry __user *)
++                         (uintptr_t) args->buffers_ptr,
++                         exec_list,
++                         sizeof(*exec_list) * args->buffer_count);
++      if (ret)
++              DRM_ERROR("failed to copy %d exec entries "
++                        "back to user (%d)\n",
++                         args->buffer_count, ret);
++err:
++      if (object_list != NULL) {
++              for (i = 0; i < pinned; i++)
++                      i915_gem_object_unpin(object_list[i]);
++
++              for (i = 0; i < args->buffer_count; i++)
++                      drm_gem_object_unreference(object_list[i]);
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++pre_mutex_err:
++      drm_free(object_list, sizeof(*object_list) * args->buffer_count,
++               DRM_MEM_DRIVER);
++      drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
++               DRM_MEM_DRIVER);
++
++      return ret;
++}
++
++int
++i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++      if (obj_priv->gtt_space == NULL) {
++              ret = i915_gem_object_bind_to_gtt(obj, alignment);
++              if (ret != 0) {
++                      DRM_ERROR("Failure to bind: %d", ret);
++                      return ret;
++              }
++      }
++      obj_priv->pin_count++;
++
++      /* If the object is not active and not pending a flush,
++       * remove it from the inactive list
++       */
++      if (obj_priv->pin_count == 1) {
++              atomic_inc(&dev->pin_count);
++              atomic_add(obj->size, &dev->pin_memory);
++              if (!obj_priv->active &&
++                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++                                         I915_GEM_DOMAIN_GTT)) == 0 &&
++                  !list_empty(&obj_priv->list))
++                      list_del_init(&obj_priv->list);
++      }
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      return 0;
++}
++
++void
++i915_gem_object_unpin(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++      obj_priv->pin_count--;
++      BUG_ON(obj_priv->pin_count < 0);
++      BUG_ON(obj_priv->gtt_space == NULL);
++
++      /* If the object is no longer pinned, and is
++       * neither active nor being flushed, then stick it on
++       * the inactive list
++       */
++      if (obj_priv->pin_count == 0) {
++              if (!obj_priv->active &&
++                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++                                         I915_GEM_DOMAIN_GTT)) == 0)
++                      list_move_tail(&obj_priv->list,
++                                     &dev_priv->mm.inactive_list);
++              atomic_dec(&dev->pin_count);
++              atomic_sub(obj->size, &dev->pin_memory);
++      }
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++int
++i915_gem_pin_ioctl(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pin *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
++                        args->handle);
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++      obj_priv = obj->driver_private;
++
++      ret = i915_gem_object_pin(obj, args->alignment);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      /* XXX - flush the CPU caches for pinned objects
++       * as the X server doesn't manage domains yet
++       */
++      if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
++              i915_gem_clflush_object(obj);
++              drm_agp_chipset_flush(dev);
++              obj->write_domain = 0;
++      }
++      args->offset = obj_priv->gtt_offset;
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++int
++i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pin *args = data;
++      struct drm_gem_object *obj;
++
++      mutex_lock(&dev->struct_mutex);
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
++                        args->handle);
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++
++      i915_gem_object_unpin(obj);
++
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
++
++int
++i915_gem_busy_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_i915_gem_busy *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      mutex_lock(&dev->struct_mutex);
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
++                        args->handle);
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++
++      obj_priv = obj->driver_private;
++      args->busy = obj_priv->active;
++
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
++
++int
++i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++    return i915_gem_ring_throttle(dev, file_priv);
++}
++
++int i915_gem_init_object(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv;
++
++      obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
++      if (obj_priv == NULL)
++              return -ENOMEM;
++
++      /*
++       * We've just allocated pages from the kernel,
++       * so they've just been written by the CPU with
++       * zeros. They'll need to be clflushed before we
++       * use them with the GPU.
++       */
++      obj->write_domain = I915_GEM_DOMAIN_CPU;
++      obj->read_domains = I915_GEM_DOMAIN_CPU;
++
++      obj->driver_private = obj_priv;
++      obj_priv->obj = obj;
++      INIT_LIST_HEAD(&obj_priv->list);
++      return 0;
++}
++
++void i915_gem_free_object(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      while (obj_priv->pin_count > 0)
++              i915_gem_object_unpin(obj);
++
++      i915_gem_object_unbind(obj);
++
++      drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
++      drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
++}
++
++int
++i915_gem_set_domain(struct drm_gem_object *obj,
++                  struct drm_file *file_priv,
++                  uint32_t read_domains,
++                  uint32_t write_domain)
++{
++      struct drm_device *dev = obj->dev;
++      int ret;
++      uint32_t flush_domains;
++
++      BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++
++      ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
++      if (ret)
++              return ret;
++      flush_domains = i915_gem_dev_set_domain(obj->dev);
++
++      if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
++              (void) i915_add_request(dev, flush_domains);
++
++      return 0;
++}
++
++/** Unbinds all objects that are on the given buffer list. */
++static int
++i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
++{
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      while (!list_empty(head)) {
++              obj_priv = list_first_entry(head,
++                                          struct drm_i915_gem_object,
++                                          list);
++              obj = obj_priv->obj;
++
++              if (obj_priv->pin_count != 0) {
++                      DRM_ERROR("Pinned object in unbind list\n");
++                      mutex_unlock(&dev->struct_mutex);
++                      return -EINVAL;
++              }
++
++              ret = i915_gem_object_unbind(obj);
++              if (ret != 0) {
++                      DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
++                                ret);
++                      mutex_unlock(&dev->struct_mutex);
++                      return ret;
++              }
++      }
++
++
++      return 0;
++}
++
++static int
++i915_gem_idle(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t seqno, cur_seqno, last_seqno;
++      int stuck;
++
++      if (dev_priv->mm.suspended)
++              return 0;
++
++      /* Hack!  Don't let anybody do execbuf while we don't control the chip.
++       * We need to replace this with a semaphore, or something.
++       */
++      dev_priv->mm.suspended = 1;
++
++      i915_kernel_lost_context(dev);
++
++      /* Flush the GPU along with all non-CPU write domains
++       */
++      i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
++                     ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++      seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
++                                      I915_GEM_DOMAIN_GTT));
++
++      if (seqno == 0) {
++              mutex_unlock(&dev->struct_mutex);
++              return -ENOMEM;
++      }
++
++      dev_priv->mm.waiting_gem_seqno = seqno;
++      last_seqno = 0;
++      stuck = 0;
++      for (;;) {
++              cur_seqno = i915_get_gem_seqno(dev);
++              if (i915_seqno_passed(cur_seqno, seqno))
++                      break;
++              if (last_seqno == cur_seqno) {
++                      if (stuck++ > 100) {
++                              DRM_ERROR("hardware wedged\n");
++                              dev_priv->mm.wedged = 1;
++                              DRM_WAKEUP(&dev_priv->irq_queue);
++                              break;
++                      }
++              }
++              msleep(10);
++              last_seqno = cur_seqno;
++      }
++      dev_priv->mm.waiting_gem_seqno = 0;
++
++      i915_gem_retire_requests(dev);
++
++      /* Active and flushing should now be empty as we've
++       * waited for a sequence higher than any pending execbuffer
++       */
++      BUG_ON(!list_empty(&dev_priv->mm.active_list));
++      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++
++      /* Request should now be empty as we've also waited
++       * for the last request in the list
++       */
++      BUG_ON(!list_empty(&dev_priv->mm.request_list));
++
++      /* Move all buffers out of the GTT. */
++      i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
++
++      BUG_ON(!list_empty(&dev_priv->mm.active_list));
++      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++      BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
++      BUG_ON(!list_empty(&dev_priv->mm.request_list));
++      return 0;
++}
++
++static int
++i915_gem_init_hws(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      /* If we need a physical address for the status page, it's already
++       * initialized at driver load time.
++       */
++      if (!I915_NEED_GFX_HWS(dev))
++              return 0;
++
++      obj = drm_gem_object_alloc(dev, 4096);
++      if (obj == NULL) {
++              DRM_ERROR("Failed to allocate status page\n");
++              return -ENOMEM;
++      }
++      obj_priv = obj->driver_private;
++
++      ret = i915_gem_object_pin(obj, 4096);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              return ret;
++      }
++
++      dev_priv->status_gfx_addr = obj_priv->gtt_offset;
++      dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
++      dev_priv->hws_map.size = 4096;
++      dev_priv->hws_map.type = 0;
++      dev_priv->hws_map.flags = 0;
++      dev_priv->hws_map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->hws_map, dev);
++      if (dev_priv->hws_map.handle == NULL) {
++              DRM_ERROR("Failed to map status page.\n");
++              memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++      dev_priv->hws_obj = obj;
++      dev_priv->hw_status_page = dev_priv->hws_map.handle;
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++      DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
++
++      return 0;
++}
++
++static int
++i915_gem_init_ringbuffer(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      ret = i915_gem_init_hws(dev);
++      if (ret != 0)
++              return ret;
++
++      obj = drm_gem_object_alloc(dev, 128 * 1024);
++      if (obj == NULL) {
++              DRM_ERROR("Failed to allocate ringbuffer\n");
++              return -ENOMEM;
++      }
++      obj_priv = obj->driver_private;
++
++      ret = i915_gem_object_pin(obj, 4096);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              return ret;
++      }
++
++      /* Set up the kernel mapping for the ring. */
++      dev_priv->ring.Size = obj->size;
++      dev_priv->ring.tail_mask = obj->size - 1;
++
++      dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
++      dev_priv->ring.map.size = obj->size;
++      dev_priv->ring.map.type = 0;
++      dev_priv->ring.map.flags = 0;
++      dev_priv->ring.map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->ring.map, dev);
++      if (dev_priv->ring.map.handle == NULL) {
++              DRM_ERROR("Failed to map ringbuffer.\n");
++              memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++      dev_priv->ring.ring_obj = obj;
++      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++      /* Stop the ring if it's running. */
++      I915_WRITE(PRB0_CTL, 0);
++      I915_WRITE(PRB0_HEAD, 0);
++      I915_WRITE(PRB0_TAIL, 0);
++      I915_WRITE(PRB0_START, 0);
++
++      /* Initialize the ring. */
++      I915_WRITE(PRB0_START, obj_priv->gtt_offset);
++      I915_WRITE(PRB0_CTL,
++                 ((obj->size - 4096) & RING_NR_PAGES) |
++                 RING_NO_REPORT |
++                 RING_VALID);
++
++      /* Update our cache of the ring state */
++      i915_kernel_lost_context(dev);
++
++      return 0;
++}
++
++static void
++i915_gem_cleanup_ringbuffer(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      if (dev_priv->ring.ring_obj == NULL)
++              return;
++
++      drm_core_ioremapfree(&dev_priv->ring.map, dev);
++
++      i915_gem_object_unpin(dev_priv->ring.ring_obj);
++      drm_gem_object_unreference(dev_priv->ring.ring_obj);
++      dev_priv->ring.ring_obj = NULL;
++      memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
++
++      if (dev_priv->hws_obj != NULL) {
++              i915_gem_object_unpin(dev_priv->hws_obj);
++              drm_gem_object_unreference(dev_priv->hws_obj);
++              dev_priv->hws_obj = NULL;
++              memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
++
++              /* Write high address into HWS_PGA when disabling. */
++              I915_WRITE(HWS_PGA, 0x1ffff000);
++      }
++}
++
++int
++i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret;
++
++      if (dev_priv->mm.wedged) {
++              DRM_ERROR("Reenabling wedged hardware, good luck\n");
++              dev_priv->mm.wedged = 0;
++      }
++
++      ret = i915_gem_init_ringbuffer(dev);
++      if (ret != 0)
++              return ret;
++
++      mutex_lock(&dev->struct_mutex);
++      BUG_ON(!list_empty(&dev_priv->mm.active_list));
++      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++      BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
++      BUG_ON(!list_empty(&dev_priv->mm.request_list));
++      dev_priv->mm.suspended = 0;
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
++
++int
++i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_idle(dev);
++      if (ret == 0)
++              i915_gem_cleanup_ringbuffer(dev);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++void
++i915_gem_lastclose(struct drm_device *dev)
++{
++      int ret;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (dev_priv->ring.ring_obj != NULL) {
++              ret = i915_gem_idle(dev);
++              if (ret)
++                      DRM_ERROR("failed to idle hardware: %d\n", ret);
++
++              i915_gem_cleanup_ringbuffer(dev);
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++}
++
++void i915_gem_load(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      INIT_LIST_HEAD(&dev_priv->mm.active_list);
++      INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
++      INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
++      INIT_LIST_HEAD(&dev_priv->mm.request_list);
++      INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
++                        i915_gem_retire_work_handler);
++      dev_priv->mm.next_gem_seqno = 1;
++
++      i915_gem_detect_bit_6_swizzle(dev);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_debug.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_debug.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_debug.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_debug.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,202 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Packard <keithp@keithp.com>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if WATCH_INACTIVE
++void
++i915_verify_inactive(struct drm_device *dev, char *file, int line)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++              obj = obj_priv->obj;
++              if (obj_priv->pin_count || obj_priv->active ||
++                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++                                         I915_GEM_DOMAIN_GTT)))
++                      DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
++                                obj,
++                                obj_priv->pin_count, obj_priv->active,
++                                obj->write_domain, file, line);
++      }
++}
++#endif /* WATCH_INACTIVE */
++
++
++#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
++static void
++i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
++                 uint32_t bias, uint32_t mark)
++{
++      uint32_t *mem = kmap_atomic(page, KM_USER0);
++      int i;
++      for (i = start; i < end; i += 4)
++              DRM_INFO("%08x: %08x%s\n",
++                        (int) (bias + i), mem[i / 4],
++                        (bias + i == mark) ? " ********" : "");
++      kunmap_atomic(mem, KM_USER0);
++      /* give syslog time to catch up */
++      msleep(1);
++}
++
++void
++i915_gem_dump_object(struct drm_gem_object *obj, int len,
++                   const char *where, uint32_t mark)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page;
++
++      DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
++      for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
++              int page_len, chunk, chunk_len;
++
++              page_len = len - page * PAGE_SIZE;
++              if (page_len > PAGE_SIZE)
++                      page_len = PAGE_SIZE;
++
++              for (chunk = 0; chunk < page_len; chunk += 128) {
++                      chunk_len = page_len - chunk;
++                      if (chunk_len > 128)
++                              chunk_len = 128;
++                      i915_gem_dump_page(obj_priv->page_list[page],
++                                         chunk, chunk + chunk_len,
++                                         obj_priv->gtt_offset +
++                                         page * PAGE_SIZE,
++                                         mark);
++              }
++      }
++}
++#endif
++
++#if WATCH_LRU
++void
++i915_dump_lru(struct drm_device *dev, const char *where)
++{
++      drm_i915_private_t              *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object      *obj_priv;
++
++      DRM_INFO("active list %s {\n", where);
++      list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
++                          list)
++      {
++              DRM_INFO("    %p: %08x\n", obj_priv,
++                       obj_priv->last_rendering_seqno);
++      }
++      DRM_INFO("}\n");
++      DRM_INFO("flushing list %s {\n", where);
++      list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
++                          list)
++      {
++              DRM_INFO("    %p: %08x\n", obj_priv,
++                       obj_priv->last_rendering_seqno);
++      }
++      DRM_INFO("}\n");
++      DRM_INFO("inactive %s {\n", where);
++      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++              DRM_INFO("    %p: %08x\n", obj_priv,
++                       obj_priv->last_rendering_seqno);
++      }
++      DRM_INFO("}\n");
++}
++#endif
++
++
++#if WATCH_COHERENCY
++void
++i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page;
++      uint32_t *gtt_mapping;
++      uint32_t *backing_map = NULL;
++      int bad_count = 0;
++
++      DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
++               __func__, obj, obj_priv->gtt_offset, handle,
++               obj->size / 1024);
++
++      gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
++                            obj->size);
++      if (gtt_mapping == NULL) {
++              DRM_ERROR("failed to map GTT space\n");
++              return;
++      }
++
++      for (page = 0; page < obj->size / PAGE_SIZE; page++) {
++              int i;
++
++              backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
++
++              if (backing_map == NULL) {
++                      DRM_ERROR("failed to map backing page\n");
++                      goto out;
++              }
++
++              for (i = 0; i < PAGE_SIZE / 4; i++) {
++                      uint32_t cpuval = backing_map[i];
++                      uint32_t gttval = readl(gtt_mapping +
++                                              page * 1024 + i);
++
++                      if (cpuval != gttval) {
++                              DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
++                                       "0x%08x vs 0x%08x\n",
++                                       (int)(obj_priv->gtt_offset +
++                                             page * PAGE_SIZE + i * 4),
++                                       cpuval, gttval);
++                              if (bad_count++ >= 8) {
++                                      DRM_INFO("...\n");
++                                      goto out;
++                              }
++                      }
++              }
++              kunmap_atomic(backing_map, KM_USER0);
++              backing_map = NULL;
++      }
++
++ out:
++      if (backing_map != NULL)
++              kunmap_atomic(backing_map, KM_USER0);
++      iounmap(gtt_mapping);
++
++      /* give syslog time to catch up */
++      msleep(1);
++
++      /* Directly flush the object, since we just loaded values with the CPU
++       * from the backing pages and we don't want to disturb the cache
++       * management that we're trying to observe.
++       */
++
++      i915_gem_clflush_object(obj);
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_proc.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_proc.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_proc.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_proc.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,293 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *    Keith Packard <keithp@keithp.com>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static int i915_gem_active_info(char *buf, char **start, off_t offset,
++                              int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Active:\n");
++      list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
++                          list)
++      {
++              struct drm_gem_object *obj = obj_priv->obj;
++              if (obj->name) {
++                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
++                                     obj, obj->name,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              } else {
++                      DRM_PROC_PRINT("       %p: %08x %08x %d\n",
++                                     obj,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              }
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
++                                int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Flushing:\n");
++      list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
++                          list)
++      {
++              struct drm_gem_object *obj = obj_priv->obj;
++              if (obj->name) {
++                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
++                                     obj, obj->name,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              } else {
++                      DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              }
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
++                                int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Inactive:\n");
++      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
++                          list)
++      {
++              struct drm_gem_object *obj = obj_priv->obj;
++              if (obj->name) {
++                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
++                                     obj, obj->name,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              } else {
++                      DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              }
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_request_info(char *buf, char **start, off_t offset,
++                               int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_request *gem_request;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Request:\n");
++      list_for_each_entry(gem_request, &dev_priv->mm.request_list,
++                          list)
++      {
++              DRM_PROC_PRINT("    %d @ %d %08x\n",
++                             gem_request->seqno,
++                             (int) (jiffies - gem_request->emitted_jiffies),
++                             gem_request->flush_domains);
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
++      DRM_PROC_PRINT("Waiter sequence:  %d\n",
++                     dev_priv->mm.waiting_gem_seqno);
++      DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++
++static int i915_interrupt_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Interrupt enable:    %08x\n",
++                     I915_READ(IER));
++      DRM_PROC_PRINT("Interrupt identity:  %08x\n",
++                     I915_READ(IIR));
++      DRM_PROC_PRINT("Interrupt mask:      %08x\n",
++                     I915_READ(IMR));
++      DRM_PROC_PRINT("Pipe A stat:         %08x\n",
++                     I915_READ(PIPEASTAT));
++      DRM_PROC_PRINT("Pipe B stat:         %08x\n",
++                     I915_READ(PIPEBSTAT));
++      DRM_PROC_PRINT("Interrupts received: %d\n",
++                     atomic_read(&dev_priv->irq_received));
++      DRM_PROC_PRINT("Current sequence:    %d\n",
++                     i915_get_gem_seqno(dev));
++      DRM_PROC_PRINT("Waiter sequence:     %d\n",
++                     dev_priv->mm.waiting_gem_seqno);
++      DRM_PROC_PRINT("IRQ sequence:        %d\n",
++                     dev_priv->mm.irq_gem_seqno);
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static struct drm_proc_list {
++      /** file name */
++      const char *name;
++      /** proc callback*/
++      int (*f) (char *, char **, off_t, int, int *, void *);
++} i915_gem_proc_list[] = {
++      {"i915_gem_active", i915_gem_active_info},
++      {"i915_gem_flushing", i915_gem_flushing_info},
++      {"i915_gem_inactive", i915_gem_inactive_info},
++      {"i915_gem_request", i915_gem_request_info},
++      {"i915_gem_seqno", i915_gem_seqno_info},
++      {"i915_gem_interrupt", i915_interrupt_info},
++};
++
++#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
++
++int i915_gem_proc_init(struct drm_minor *minor)
++{
++      struct proc_dir_entry *ent;
++      int i, j;
++
++      for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
++              ent = create_proc_entry(i915_gem_proc_list[i].name,
++                                      S_IFREG | S_IRUGO, minor->dev_root);
++              if (!ent) {
++                      DRM_ERROR("Cannot create /proc/dri/.../%s\n",
++                                i915_gem_proc_list[i].name);
++                      for (j = 0; j < i; j++)
++                              remove_proc_entry(i915_gem_proc_list[i].name,
++                                                minor->dev_root);
++                      return -1;
++              }
++              ent->read_proc = i915_gem_proc_list[i].f;
++              ent->data = minor;
++      }
++      return 0;
++}
++
++void i915_gem_proc_cleanup(struct drm_minor *minor)
++{
++      int i;
++
++      if (!minor->dev_root)
++              return;
++
++      for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
++              remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_tiling.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_tiling.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_tiling.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_tiling.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,309 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/** @file i915_gem_tiling.c
++ *
++ * Support for managing tiling state of buffer objects.
++ *
++ * The idea behind tiling is to increase cache hit rates by rearranging
++ * pixel data so that a group of pixel accesses are in the same cacheline.
++ * Performance improvement from doing this on the back/depth buffer are on
++ * the order of 30%.
++ *
++ * Intel architectures make this somewhat more complicated, though, by
++ * adjustments made to addressing of data when the memory is in interleaved
++ * mode (matched pairs of DIMMS) to improve memory bandwidth.
++ * For interleaved memory, the CPU sends every sequential 64 bytes
++ * to an alternate memory channel so it can get the bandwidth from both.
++ *
++ * The GPU also rearranges its accesses for increased bandwidth to interleaved
++ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
++ * it does it a little differently, since one walks addresses not just in the
++ * X direction but also Y.  So, along with alternating channels when bit
++ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
++ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
++ * are common to both the 915 and 965-class hardware.
++ *
++ * The CPU also sometimes XORs in higher bits as well, to improve
++ * bandwidth doing strided access like we do so frequently in graphics.  This
++ * is called "Channel XOR Randomization" in the MCH documentation.  The result
++ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
++ * decode.
++ *
++ * All of this bit 6 XORing has an effect on our memory management,
++ * as we need to make sure that the 3d driver can correctly address object
++ * contents.
++ *
++ * If we don't have interleaved memory, all tiling is safe and no swizzling is
++ * required.
++ *
++ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
++ * 17 is not just a page offset, so as we page an objet out and back in,
++ * individual pages in it will have different bit 17 addresses, resulting in
++ * each 64 bytes being swapped with its neighbor!
++ *
++ * Otherwise, if interleaved, we have to tell the 3d driver what the address
++ * swizzling it needs to do is, since it's writing with the CPU to the pages
++ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
++ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
++ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
++ * to match what the GPU expects.
++ */
++
++/**
++ * Detects bit 6 swizzling of address lookup between IGD access and CPU
++ * access through main memory.
++ */
++void
++i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct pci_dev *bridge;
++      uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++      uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++      int mchbar_offset;
++      char __iomem *mchbar;
++      int ret;
++
++      bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
++      if (bridge == NULL) {
++              DRM_ERROR("Couldn't get bridge device\n");
++              return;
++      }
++
++      ret = pci_enable_device(bridge);
++      if (ret != 0) {
++              DRM_ERROR("pci_enable_device failed: %d\n", ret);
++              return;
++      }
++
++      if (IS_I965G(dev))
++              mchbar_offset = 0x48;
++      else
++              mchbar_offset = 0x44;
++
++      /* Use resource 2 for our BAR that's stashed in a nonstandard location,
++       * since the bridge would only ever use standard BARs 0-1 (though it
++       * doesn't anyway)
++       */
++      ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
++      if (ret != 0) {
++              DRM_ERROR("pci_read_base failed: %d\n", ret);
++              return;
++      }
++
++      mchbar = ioremap(pci_resource_start(bridge, 2),
++                       pci_resource_len(bridge, 2));
++      if (mchbar == NULL) {
++              DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
++              return;
++      }
++
++      if (IS_I965G(dev) && !IS_I965GM(dev)) {
++              uint32_t chdecmisc;
++
++              /* On the 965, channel interleave appears to be determined by
++               * the flex bit.  If flex is set, then the ranks (sides of a
++               * DIMM) of memory will be "stacked" (physical addresses walk
++               * through one rank then move on to the next, flipping channels
++               * or not depending on rank configuration).  The GPU in this
++               * case does exactly the same addressing as the CPU.
++               *
++               * Unlike the 945, channel randomization based does not
++               * appear to be available.
++               *
++               * XXX: While the G965 doesn't appear to do any interleaving
++               * when the DIMMs are not exactly matched, the G4x chipsets
++               * might be for "L-shaped" configurations, and will need to be
++               * detected.
++               *
++               * L-shaped configuration:
++               *
++               * +-----+
++               * |     |
++               * |DIMM2|         <-- non-interleaved
++               * +-----+
++               * +-----+ +-----+
++               * |     | |     |
++               * |DIMM0| |DIMM1| <-- interleaved area
++               * +-----+ +-----+
++               */
++              chdecmisc = readb(mchbar + CHDECMISC);
++
++              if (chdecmisc == 0xff) {
++                      DRM_ERROR("Couldn't read from MCHBAR.  "
++                                "Disabling tiling.\n");
++              } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
++                      swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++                      swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++              } else {
++                      swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++                      swizzle_y = I915_BIT_6_SWIZZLE_9;
++              }
++      } else if (IS_I9XX(dev)) {
++              uint32_t dcc;
++
++              /* On 915-945 and GM965, channel interleave by the CPU is
++               * determined by DCC.  The CPU will alternate based on bit 6
++               * in interleaved mode, and the GPU will then also alternate
++               * on bit 6, 9, and 10 for X, but the CPU may also optionally
++               * alternate based on bit 17 (XOR not disabled and XOR
++               * bit == 17).
++               */
++              dcc = readl(mchbar + DCC);
++              switch (dcc & DCC_ADDRESSING_MODE_MASK) {
++              case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
++              case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
++                      swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++                      swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++                      break;
++              case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
++                      if (IS_I915G(dev) || IS_I915GM(dev) ||
++                          dcc & DCC_CHANNEL_XOR_DISABLE) {
++                              swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++                              swizzle_y = I915_BIT_6_SWIZZLE_9;
++                      } else if (IS_I965GM(dev)) {
++                              /* GM965 only does bit 11-based channel
++                               * randomization
++                               */
++                              swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
++                              swizzle_y = I915_BIT_6_SWIZZLE_9_11;
++                      } else {
++                              /* Bit 17 or perhaps other swizzling */
++                              swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++                              swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++                      }
++                      break;
++              }
++              if (dcc == 0xffffffff) {
++                      DRM_ERROR("Couldn't read from MCHBAR.  "
++                                "Disabling tiling.\n");
++                      swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++                      swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++              }
++      } else {
++              /* As far as we know, the 865 doesn't have these bit 6
++               * swizzling issues.
++               */
++              swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++              swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++      }
++
++      iounmap(mchbar);
++
++      dev_priv->mm.bit_6_swizzle_x = swizzle_x;
++      dev_priv->mm.bit_6_swizzle_y = swizzle_y;
++}
++
++/**
++ * Sets the tiling mode of an object, returning the required swizzling of
++ * bit 6 of addresses in the object.
++ */
++int
++i915_gem_set_tiling(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_set_tiling *args = data;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EINVAL;
++      obj_priv = obj->driver_private;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (args->tiling_mode == I915_TILING_NONE) {
++              obj_priv->tiling_mode = I915_TILING_NONE;
++              args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++      } else {
++              if (args->tiling_mode == I915_TILING_X)
++                      args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++              else
++                      args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++              /* If we can't handle the swizzling, make it untiled. */
++              if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
++                      args->tiling_mode = I915_TILING_NONE;
++                      args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++              }
++      }
++      obj_priv->tiling_mode = args->tiling_mode;
++
++      mutex_unlock(&dev->struct_mutex);
++
++      drm_gem_object_unreference(obj);
++
++      return 0;
++}
++
++/**
++ * Returns the current tiling mode and required bit 6 swizzling for the object.
++ */
++int
++i915_gem_get_tiling(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_get_tiling *args = data;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EINVAL;
++      obj_priv = obj->driver_private;
++
++      mutex_lock(&dev->struct_mutex);
++
++      args->tiling_mode = obj_priv->tiling_mode;
++      switch (obj_priv->tiling_mode) {
++      case I915_TILING_X:
++              args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++              break;
++      case I915_TILING_Y:
++              args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++              break;
++      case I915_TILING_NONE:
++              args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++              break;
++      default:
++              DRM_ERROR("unknown tiling mode\n");
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++
++      drm_gem_object_unreference(obj);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_ioc32.c git-nokia/drivers/gpu/drm-tungsten/i915_ioc32.c
+--- git/drivers/gpu/drm-tungsten/i915_ioc32.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_ioc32.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,284 @@
++/**
++ * \file i915_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the i915 DRM.
++ *
++ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Alan Hourihane 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++typedef struct _drm_i915_batchbuffer32 {
++      int start;              /* agp offset */
++      int used;               /* nr bytes in use */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* mulitpass with multiple cliprects? */
++      u32 cliprects;  /* pointer to userspace cliprects */
++} drm_i915_batchbuffer32_t;
++
++static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
++                                 unsigned long arg)
++{
++      drm_i915_batchbuffer32_t batchbuffer32;
++      drm_i915_batchbuffer_t __user *batchbuffer;
++
++      if (copy_from_user
++          (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
++              return -EFAULT;
++
++      batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
++      if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
++          || __put_user(batchbuffer32.start, &batchbuffer->start)
++          || __put_user(batchbuffer32.used, &batchbuffer->used)
++          || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
++          || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
++          || __put_user(batchbuffer32.num_cliprects,
++                        &batchbuffer->num_cliprects)
++          || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
++                        &batchbuffer->cliprects))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_BATCHBUFFER,
++                       (unsigned long) batchbuffer);
++}
++
++typedef struct _drm_i915_cmdbuffer32 {
++      u32 buf;        /* pointer to userspace command buffer */
++      int sz;                 /* nr bytes in buf */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* multipass with multiple cliprects? */
++      u32 cliprects;  /* pointer to userspace cliprects */
++} drm_i915_cmdbuffer32_t;
++
++static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_i915_cmdbuffer32_t cmdbuffer32;
++      drm_i915_cmdbuffer_t __user *cmdbuffer;
++
++      if (copy_from_user
++          (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
++              return -EFAULT;
++
++      cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
++      if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
++          || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
++                        &cmdbuffer->buf)
++          || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
++          || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
++          || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
++          || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
++          || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
++                        &cmdbuffer->cliprects))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
++}
++
++typedef struct drm_i915_irq_emit32 {
++      u32 irq_seq;
++} drm_i915_irq_emit32_t;
++
++static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_i915_irq_emit32_t req32;
++      drm_i915_irq_emit_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user((int __user *)(unsigned long)req32.irq_seq,
++                        &request->irq_seq))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request);
++}
++typedef struct drm_i915_getparam32 {
++      int param;
++      u32 value;
++} drm_i915_getparam32_t;
++
++static int compat_i915_getparam(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_i915_getparam32_t req32;
++      drm_i915_getparam_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.param, &request->param)
++          || __put_user((void __user *)(unsigned long)req32.value,
++                        &request->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_GETPARAM, (unsigned long) request);
++}
++
++typedef struct drm_i915_mem_alloc32 {
++      int region;
++      int alignment;
++      int size;
++      u32 region_offset;      /* offset from start of fb or agp */
++} drm_i915_mem_alloc32_t;
++
++static int compat_i915_alloc(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_i915_mem_alloc32_t req32;
++      drm_i915_mem_alloc_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.region, &request->region)
++          || __put_user(req32.alignment, &request->alignment)
++          || __put_user(req32.size, &request->size)
++          || __put_user((void __user *)(unsigned long)req32.region_offset,
++                        &request->region_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_ALLOC, (unsigned long) request);
++}
++
++typedef struct drm_i915_execbuffer32 {
++      uint64_t ops_list;
++      uint32_t num_buffers;
++      struct _drm_i915_batchbuffer32 batch;
++      drm_context_t context; 
++      struct drm_fence_arg fence_arg;
++} drm_i915_execbuffer32_t;
++
++static int compat_i915_execbuffer(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_i915_execbuffer32_t req32;
++      struct drm_i915_execbuffer __user *request;
++      int err;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++       || __put_user(req32.ops_list, &request->ops_list)
++       || __put_user(req32.num_buffers, &request->num_buffers)
++       || __put_user(req32.context, &request->context)
++       || __copy_to_user(&request->fence_arg, &req32.fence_arg, 
++                         sizeof(req32.fence_arg))
++       || __put_user(req32.batch.start, &request->batch.start)
++       || __put_user(req32.batch.used, &request->batch.used)
++       || __put_user(req32.batch.DR1, &request->batch.DR1)
++       || __put_user(req32.batch.DR4, &request->batch.DR4)
++       || __put_user(req32.batch.num_cliprects,
++                     &request->batch.num_cliprects)
++       || __put_user((int __user *)(unsigned long)req32.batch.cliprects,
++                     &request->batch.cliprects))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
++
++      if (err)
++              return err;
++
++      if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
++          || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
++          || __get_user(req32.fence_arg.type, &request->fence_arg.type)
++          || __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
++          || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
++          || __get_user(req32.fence_arg.error, &request->fence_arg.error)
++          || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
++              return -EFAULT;
++
++      if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++
++drm_ioctl_compat_t *i915_compat_ioctls[] = {
++      [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
++      [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
++      [DRM_I915_GETPARAM] = compat_i915_getparam,
++      [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
++      [DRM_I915_ALLOC] = compat_i915_alloc,
++#ifdef I915_HAVE_BUFFER
++      [DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
++#endif
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
++              fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_irq.c git-nokia/drivers/gpu/drm-tungsten/i915_irq.c
+--- git/drivers/gpu/drm-tungsten/i915_irq.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_irq.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1005 @@
++/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
++ */
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#define MAX_NOPID ((u32)~0)
++
++/*
++ * These are the interrupts used by the driver
++ */
++#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
++                                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
++                                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
++
++static inline void
++i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
++{
++      if ((dev_priv->irq_mask_reg & mask) != 0) {
++              dev_priv->irq_mask_reg &= ~mask;
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++              (void) I915_READ(IMR);
++      }
++}
++
++static inline void
++i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
++{
++      if ((dev_priv->irq_mask_reg & mask) != mask) {
++              dev_priv->irq_mask_reg |= mask;
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++              (void) I915_READ(IMR);
++      }
++}
++
++/**
++ * i915_get_pipe - return the pipe associated with a given plane
++ * @dev: DRM device
++ * @plane: plane to look for
++ *
++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
++ * rather than a pipe number, since they may not always be equal.  This routine
++ * maps the given @plane back to a pipe number.
++ */
++static int
++i915_get_pipe(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      u32 dspcntr;
++
++      dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
++
++      return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
++}
++
++/**
++ * i915_get_plane - return the plane associated with a given pipe
++ * @dev: DRM device
++ * @pipe: pipe to look for
++ *
++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
++ * rather than a pipe number, since they may not always be equal.  This routine
++ * maps the given @pipe back to a plane number.
++ */
++static int
++i915_get_plane(struct drm_device *dev, int pipe)
++{
++      if (i915_get_pipe(dev, 0) == pipe)
++              return 0;
++      return 1;
++}
++
++/**
++ * i915_pipe_enabled - check if a pipe is enabled
++ * @dev: DRM device
++ * @pipe: pipe to check
++ *
++ * Reading certain registers when the pipe is disabled can hang the chip.
++ * Use this routine to make sure the PLL is running and the pipe is active
++ * before reading such registers if unsure.
++ */
++static int
++i915_pipe_enabled(struct drm_device *dev, int pipe)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
++
++      if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
++              return 1;
++
++      return 0;
++}
++
++/**
++ * Emit a synchronous flip.
++ *
++ * This function must be called with the drawable spinlock held.
++ */
++static void
++i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
++                       int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u16 x1, y1, x2, y2;
++      int pf_planes = 1 << plane;
++
++      DRM_SPINLOCK_ASSERT(&dev->drw_lock);
++
++      /* If the window is visible on the other plane, we have to flip on that
++       * plane as well.
++       */
++      if (plane == 1) {
++              x1 = sarea_priv->planeA_x;
++              y1 = sarea_priv->planeA_y;
++              x2 = x1 + sarea_priv->planeA_w;
++              y2 = y1 + sarea_priv->planeA_h;
++      } else {
++              x1 = sarea_priv->planeB_x;
++              y1 = sarea_priv->planeB_y;
++              x2 = x1 + sarea_priv->planeB_w;
++              y2 = y1 + sarea_priv->planeB_h;
++      }
++
++      if (x2 > 0 && y2 > 0) {
++              int i, num_rects = drw->num_rects;
++              struct drm_clip_rect *rect = drw->rects;
++
++              for (i = 0; i < num_rects; i++)
++                      if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
++                            rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
++                              pf_planes = 0x3;
++
++                              break;
++                      }
++      }
++
++      i915_dispatch_flip(dev, pf_planes, 1);
++}
++
++/**
++ * Emit blits for scheduled buffer swaps.
++ *
++ * This function will be called with the HW lock held.
++ */
++static void i915_vblank_tasklet(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      struct list_head *list, *tmp, hits, *hit;
++      int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
++      unsigned counter[2];
++      struct drm_drawable_info *drw;
++      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 cpp = dev_priv->cpp,  offsets[3];
++      u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
++                              XY_SRC_COPY_BLT_WRITE_ALPHA |
++                              XY_SRC_COPY_BLT_WRITE_RGB)
++                           : XY_SRC_COPY_BLT_CMD;
++      u32 src_pitch = sarea_priv->pitch * cpp;
++      u32 dst_pitch = sarea_priv->pitch * cpp;
++      /* COPY rop (0xcc), map cpp to magic color depth constants */
++      u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
++      RING_LOCALS;
++      
++      if (IS_I965G(dev) && sarea_priv->front_tiled) {
++              cmd |= XY_SRC_COPY_BLT_DST_TILED;
++              dst_pitch >>= 2;
++      }
++      if (IS_I965G(dev) && sarea_priv->back_tiled) {
++              cmd |= XY_SRC_COPY_BLT_SRC_TILED;
++              src_pitch >>= 2;
++      }
++      
++      counter[0] = drm_vblank_count(dev, 0);
++      counter[1] = drm_vblank_count(dev, 1);
++
++      DRM_DEBUG("\n");
++
++      INIT_LIST_HEAD(&hits);
++
++      nhits = nrects = 0;
++
++      /* No irqsave/restore necessary.  This tasklet may be run in an
++       * interrupt context or normal context, but we don't have to worry
++       * about getting interrupted by something acquiring the lock, because
++       * we are the interrupt context thing that acquires the lock.
++       */
++      DRM_SPINLOCK(&dev_priv->swaps_lock);
++
++      /* Find buffer swaps scheduled for this vertical blank */
++      list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
++              drm_i915_vbl_swap_t *vbl_swap =
++                      list_entry(list, drm_i915_vbl_swap_t, head);
++              int pipe = i915_get_pipe(dev, vbl_swap->plane);
++
++              if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
++                      continue;
++
++              list_del(list);
++              dev_priv->swaps_pending--;
++              drm_vblank_put(dev, pipe);
++
++              DRM_SPINUNLOCK(&dev_priv->swaps_lock);
++              DRM_SPINLOCK(&dev->drw_lock);
++
++              drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
++
++              if (!drw) {
++                      DRM_SPINUNLOCK(&dev->drw_lock);
++                      drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
++                      DRM_SPINLOCK(&dev_priv->swaps_lock);
++                      continue;
++              }
++
++              list_for_each(hit, &hits) {
++                      drm_i915_vbl_swap_t *swap_cmp =
++                              list_entry(hit, drm_i915_vbl_swap_t, head);
++                      struct drm_drawable_info *drw_cmp =
++                              drm_get_drawable_info(dev, swap_cmp->drw_id);
++
++                      if (drw_cmp &&
++                          drw_cmp->rects[0].y1 > drw->rects[0].y1) {
++                              list_add_tail(list, hit);
++                              break;
++                      }
++              }
++
++              DRM_SPINUNLOCK(&dev->drw_lock);
++
++              /* List of hits was empty, or we reached the end of it */
++              if (hit == &hits)
++                      list_add_tail(list, hits.prev);
++
++              nhits++;
++
++              DRM_SPINLOCK(&dev_priv->swaps_lock);
++      }
++
++      DRM_SPINUNLOCK(&dev_priv->swaps_lock);
++
++      if (nhits == 0) {
++              return;
++      }
++
++      i915_kernel_lost_context(dev);
++
++      upper[0] = upper[1] = 0;
++      slice[0] = max(sarea_priv->planeA_h / nhits, 1);
++      slice[1] = max(sarea_priv->planeB_h / nhits, 1);
++      lower[0] = sarea_priv->planeA_y + slice[0];
++      lower[1] = sarea_priv->planeB_y + slice[0];
++
++      offsets[0] = sarea_priv->front_offset;
++      offsets[1] = sarea_priv->back_offset;
++      offsets[2] = sarea_priv->third_offset;
++      num_pages = sarea_priv->third_handle ? 3 : 2;
++
++      DRM_SPINLOCK(&dev->drw_lock);
++
++      /* Emit blits for buffer swaps, partitioning both outputs into as many
++       * slices as there are buffer swaps scheduled in order to avoid tearing
++       * (based on the assumption that a single buffer swap would always
++       * complete before scanout starts).
++       */
++      for (i = 0; i++ < nhits;
++           upper[0] = lower[0], lower[0] += slice[0],
++           upper[1] = lower[1], lower[1] += slice[1]) {
++              int init_drawrect = 1;
++
++              if (i == nhits)
++                      lower[0] = lower[1] = sarea_priv->height;
++
++              list_for_each(hit, &hits) {
++                      drm_i915_vbl_swap_t *swap_hit =
++                              list_entry(hit, drm_i915_vbl_swap_t, head);
++                      struct drm_clip_rect *rect;
++                      int num_rects, plane, front, back;
++                      unsigned short top, bottom;
++
++                      drw = drm_get_drawable_info(dev, swap_hit->drw_id);
++
++                      if (!drw)
++                              continue;
++
++                      plane = swap_hit->plane;
++
++                      if (swap_hit->flip) {
++                              i915_dispatch_vsync_flip(dev, drw, plane);
++                              continue;
++                      }
++
++                      if (init_drawrect) {
++                              int width  = sarea_priv->width;
++                              int height = sarea_priv->height;
++                              if (IS_I965G(dev)) {
++                                      BEGIN_LP_RING(4);
++
++                                      OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
++                                      OUT_RING(0);
++                                      OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
++                                      OUT_RING(0);
++                                      
++                                      ADVANCE_LP_RING();
++                              } else {
++                                      BEGIN_LP_RING(6);
++      
++                                      OUT_RING(GFX_OP_DRAWRECT_INFO);
++                                      OUT_RING(0);
++                                      OUT_RING(0);
++                                      OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
++                                      OUT_RING(0);
++                                      OUT_RING(0);
++                                      
++                                      ADVANCE_LP_RING();
++                              }
++
++                              sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
++
++                              init_drawrect = 0;
++                      }
++
++                      rect = drw->rects;
++                      top = upper[plane];
++                      bottom = lower[plane];
++
++                      front = (dev_priv->sarea_priv->pf_current_page >>
++                               (2 * plane)) & 0x3;
++                      back = (front + 1) % num_pages;
++
++                      for (num_rects = drw->num_rects; num_rects--; rect++) {
++                              int y1 = max(rect->y1, top);
++                              int y2 = min(rect->y2, bottom);
++
++                              if (y1 >= y2)
++                                      continue;
++
++                              BEGIN_LP_RING(8);
++
++                              OUT_RING(cmd);
++                              OUT_RING(ropcpp | dst_pitch);
++                              OUT_RING((y1 << 16) | rect->x1);
++                              OUT_RING((y2 << 16) | rect->x2);
++                              OUT_RING(offsets[front]);
++                              OUT_RING((y1 << 16) | rect->x1);
++                              OUT_RING(src_pitch);
++                              OUT_RING(offsets[back]);
++
++                              ADVANCE_LP_RING();
++                      }
++              }
++      }
++
++      DRM_SPINUNLOCK(&dev->drw_lock);
++
++      list_for_each_safe(hit, tmp, &hits) {
++              drm_i915_vbl_swap_t *swap_hit =
++                      list_entry(hit, drm_i915_vbl_swap_t, head);
++
++              list_del(hit);
++
++              drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
++      }
++}
++
++u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      unsigned long high_frame;
++      unsigned long low_frame;
++      u32 high1, high2, low, count;
++      int pipe;
++
++      pipe = i915_get_pipe(dev, plane);
++      high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
++      low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++
++      if (!i915_pipe_enabled(dev, pipe)) {
++          DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++          return 0;
++      }
++
++      /*
++       * High & low register fields aren't synchronized, so make sure
++       * we get a low value that's stable across two reads of the high
++       * register.
++       */
++      do {
++              high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++                       PIPE_FRAME_HIGH_SHIFT);
++              low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++                      PIPE_FRAME_LOW_SHIFT);
++              high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++                       PIPE_FRAME_HIGH_SHIFT);
++      } while (high1 != high2);
++
++      count = (high1 << 8) | low;
++
++      return count;
++}
++
++irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      u32 iir;
++      u32 pipea_stats = 0, pipeb_stats = 0;
++      int vblank = 0;
++#ifdef __linux__
++      if (dev->pdev->msi_enabled)
++              I915_WRITE(IMR, ~0);
++#endif
++      iir = I915_READ(IIR);
++#if 0
++      DRM_DEBUG("flag=%08x\n", iir);
++#endif
++      atomic_inc(&dev_priv->irq_received);
++      if (iir == 0) {
++#ifdef __linux__
++              if (dev->pdev->msi_enabled) {
++                      I915_WRITE(IMR, dev_priv->irq_mask_reg);
++                      (void) I915_READ(IMR);
++              }
++#endif
++              return IRQ_NONE;
++      }
++
++      /*
++       * Clear the PIPE(A|B)STAT regs before the IIR otherwise
++       * we may get extra interrupts.
++       */
++      if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
++              pipea_stats = I915_READ(PIPEASTAT);
++              if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++                                 PIPE_VBLANK_INTERRUPT_STATUS))
++              {
++                      vblank++;
++                      drm_handle_vblank(dev, i915_get_plane(dev, 0));
++              }
++              I915_WRITE(PIPEASTAT, pipea_stats);
++      }
++      if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
++              pipeb_stats = I915_READ(PIPEBSTAT);
++              /* Ack the event */
++              I915_WRITE(PIPEBSTAT, pipeb_stats);
++
++              /* The vblank interrupt gets enabled even if we didn't ask for
++                 it, so make sure it's shut down again */
++              if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
++                      pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
++
++              if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++                                 PIPE_VBLANK_INTERRUPT_STATUS))
++              {
++                      vblank++;
++                      drm_handle_vblank(dev, i915_get_plane(dev, 1));
++              }
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++              if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE)
++                      opregion_asle_intr(dev);
++#endif
++#endif
++              I915_WRITE(PIPEBSTAT, pipeb_stats);
++      }
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      if (iir & I915_ASLE_INTERRUPT)
++              opregion_asle_intr(dev);
++#endif
++#endif
++
++      if (dev_priv->sarea_priv)
++          dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++
++      I915_WRITE(IIR, iir);
++#ifdef __linux__
++      if (dev->pdev->msi_enabled)
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++#endif
++      (void) I915_READ(IIR); /* Flush posted writes */
++
++      if (iir & I915_USER_INTERRUPT) {
++#ifdef I915_HAVE_GEM
++              dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
++#endif
++              DRM_WAKEUP(&dev_priv->irq_queue);
++#ifdef I915_HAVE_FENCE
++              i915_fence_handler(dev);
++#endif
++      }
++
++      if (vblank) {
++              if (dev_priv->swaps_pending > 0)
++                      drm_locked_tasklet(dev, i915_vblank_tasklet);
++      }
++
++      return IRQ_HANDLED;
++}
++
++int i915_emit_irq(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      i915_kernel_lost_context(dev);
++
++      DRM_DEBUG("\n");
++
++      i915_emit_breadcrumb(dev);
++
++      BEGIN_LP_RING(2);
++      OUT_RING(0);
++      OUT_RING(MI_USER_INTERRUPT);
++      ADVANCE_LP_RING();
++
++      return dev_priv->counter;
++}
++
++void i915_user_irq_on(drm_i915_private_t *dev_priv)
++{
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++      if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
++              i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++}
++
++void i915_user_irq_off(drm_i915_private_t *dev_priv)
++{
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++#ifdef __linux__
++      BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
++#endif
++      if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
++              i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++}
++
++
++int i915_wait_irq(struct drm_device * dev, int irq_nr)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int ret = 0;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
++                READ_BREADCRUMB(dev_priv));
++
++      if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
++              if (dev_priv->sarea_priv)
++                      dev_priv->sarea_priv->last_dispatch =
++                              READ_BREADCRUMB(dev_priv);
++              return 0;
++      }
++
++      i915_user_irq_on(dev_priv);
++      DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
++                  READ_BREADCRUMB(dev_priv) >= irq_nr);
++      i915_user_irq_off(dev_priv);
++
++      if (ret == -EBUSY) {
++              DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
++                        READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
++      }
++
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv->last_dispatch =
++                      READ_BREADCRUMB(dev_priv);
++      return ret;
++}
++
++/* Needs the lock as it touches the ring.
++ */
++int i915_irq_emit(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_irq_emit_t *emit = data;
++      int result;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      result = i915_emit_irq(dev);
++
++      if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++/* Doesn't need the hardware lock.
++ */
++int i915_irq_wait(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_irq_wait_t *irqwait = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      return i915_wait_irq(dev, irqwait->irq_seq);
++}
++
++int i915_enable_vblank(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int pipe = i915_get_pipe(dev, plane);
++      u32     pipestat_reg = 0;
++      u32     mask_reg = 0;
++      u32     pipestat;
++
++      switch (pipe) {
++      case 0:
++              pipestat_reg = PIPEASTAT;
++              mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
++              break;
++      case 1:
++              pipestat_reg = PIPEBSTAT;
++              mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              break;
++      default:
++              DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
++                        pipe);
++              break;
++      }
++
++      if (pipestat_reg)
++      {
++              pipestat = I915_READ (pipestat_reg);
++              /*
++               * Older chips didn't have the start vblank interrupt,
++               * but 
++               */
++              if (IS_I965G (dev))
++                      pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
++              else
++                      pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
++              /*
++               * Clear any pending status
++               */
++              pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
++                           PIPE_VBLANK_INTERRUPT_STATUS);
++              I915_WRITE(pipestat_reg, pipestat);
++      }
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++      i915_enable_irq(dev_priv, mask_reg);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++
++      return 0;
++}
++
++void i915_disable_vblank(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int pipe = i915_get_pipe(dev, plane);
++      u32     pipestat_reg = 0;
++      u32     mask_reg = 0;
++      u32     pipestat;
++
++      switch (pipe) {
++      case 0:
++              pipestat_reg = PIPEASTAT;
++              mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
++              break;
++      case 1:
++              pipestat_reg = PIPEBSTAT;
++              mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              break;
++      default:
++              DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
++                        pipe);
++              break;
++      }
++
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++      i915_disable_irq(dev_priv, mask_reg);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++
++      if (pipestat_reg)
++      {
++              pipestat = I915_READ (pipestat_reg);
++              pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
++                            PIPE_VBLANK_INTERRUPT_ENABLE);
++              /*
++               * Clear any pending status
++               */
++              pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
++                           PIPE_VBLANK_INTERRUPT_STATUS);
++              I915_WRITE(pipestat_reg, pipestat);
++              (void) I915_READ(pipestat_reg);
++      }
++}
++
++static void i915_enable_interrupt (struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++      dev_priv->irq_mask_reg = ~0;
++      I915_WRITE(IMR, dev_priv->irq_mask_reg);
++      I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
++      (void) I915_READ (IER);
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      opregion_enable_asle(dev);
++#endif
++#endif
++
++      dev_priv->irq_enabled = 1;
++}
++
++/* Set the vblank monitor pipe
++ */
++int i915_vblank_pipe_set(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++int i915_vblank_pipe_get(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_vblank_pipe_t *pipe = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
++
++      return 0;
++}
++
++/**
++ * Schedule buffer swap at given vertical blank.
++ */
++int i915_vblank_swap(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_vblank_swap_t *swap = data;
++      drm_i915_vbl_swap_t *vbl_swap;
++      unsigned int pipe, seqtype, curseq, plane;
++      unsigned long irqflags;
++      struct list_head *list;
++      int ret;
++
++      if (!dev_priv) {
++              DRM_ERROR("%s called with no initialization\n", __func__);
++              return -EINVAL;
++      }
++
++      if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) {
++              DRM_DEBUG("Rotation not supported\n");
++              return -EINVAL;
++      }
++
++      if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
++                           _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
++                           _DRM_VBLANK_FLIP)) {
++              DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
++              return -EINVAL;
++      }
++
++      plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
++      pipe = i915_get_pipe(dev, plane);
++
++      seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
++
++      if (!(dev_priv->vblank_pipe & (1 << pipe))) {
++              DRM_ERROR("Invalid pipe %d\n", pipe);
++              return -EINVAL;
++      }
++
++      DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
++
++      /* It makes no sense to schedule a swap for a drawable that doesn't have
++       * valid information at this point. E.g. this could mean that the X
++       * server is too old to push drawable information to the DRM, in which
++       * case all such swaps would become ineffective.
++       */
++      if (!drm_get_drawable_info(dev, swap->drawable)) {
++              DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
++              DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
++              return -EINVAL;
++      }
++
++      DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
++
++      /*
++       * We take the ref here and put it when the swap actually completes
++       * in the tasklet.
++       */
++      ret = drm_vblank_get(dev, pipe);
++      if (ret)
++              return ret;
++      curseq = drm_vblank_count(dev, pipe);
++
++      if (seqtype == _DRM_VBLANK_RELATIVE)
++              swap->sequence += curseq;
++
++      if ((curseq - swap->sequence) <= (1<<23)) {
++              if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
++                      swap->sequence = curseq + 1;
++              } else {
++                      DRM_DEBUG("Missed target sequence\n");
++                      drm_vblank_put(dev, pipe);
++                      return -EINVAL;
++              }
++      }
++
++      if (swap->seqtype & _DRM_VBLANK_FLIP) {
++              swap->sequence--;
++
++              if ((curseq - swap->sequence) <= (1<<23)) {
++                      struct drm_drawable_info *drw;
++
++                      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++                      DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
++
++                      drw = drm_get_drawable_info(dev, swap->drawable);
++
++                      if (!drw) {
++                              DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
++                                  irqflags);
++                              DRM_DEBUG("Invalid drawable ID %d\n",
++                                        swap->drawable);
++                              drm_vblank_put(dev, pipe);
++                              return -EINVAL;
++                      }
++
++                      i915_dispatch_vsync_flip(dev, drw, plane);
++
++                      DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
++
++                      drm_vblank_put(dev, pipe);
++                      return 0;
++              }
++      }
++
++      DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
++
++      list_for_each(list, &dev_priv->vbl_swaps.head) {
++              vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
++
++              if (vbl_swap->drw_id == swap->drawable &&
++                  vbl_swap->plane == plane &&
++                  vbl_swap->sequence == swap->sequence) {
++                      vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
++                      DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
++                      DRM_DEBUG("Already scheduled\n");
++                      return 0;
++              }
++      }
++
++      DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
++
++      if (dev_priv->swaps_pending >= 100) {
++              DRM_DEBUG("Too many swaps queued\n");
++              drm_vblank_put(dev, pipe);
++              return -EBUSY;
++      }
++
++      vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
++
++      if (!vbl_swap) {
++              DRM_ERROR("Failed to allocate memory to queue swap\n");
++              drm_vblank_put(dev, pipe);
++              return -ENOMEM;
++      }
++
++      DRM_DEBUG("\n");
++
++      vbl_swap->drw_id = swap->drawable;
++      vbl_swap->plane = plane;
++      vbl_swap->sequence = swap->sequence;
++      vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
++
++      if (vbl_swap->flip)
++              swap->sequence++;
++
++      DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
++
++      list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
++      dev_priv->swaps_pending++;
++
++      DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
++
++      return 0;
++}
++
++/* drm_dma.h hooks
++*/
++void i915_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++      I915_WRITE16(HWSTAM, 0xeffe);
++      I915_WRITE16(IMR, 0x0);
++      I915_WRITE16(IER, 0x0);
++}
++
++int i915_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int ret, num_pipes = 2;
++
++      INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
++      dev_priv->swaps_pending = 0;
++
++      dev_priv->user_irq_refcount = 0;
++      dev_priv->irq_mask_reg = ~0;
++
++      ret = drm_vblank_init(dev, num_pipes);
++      if (ret)
++              return ret;
++
++      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
++      dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++
++      i915_enable_interrupt(dev);
++      DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
++
++      /*
++       * Initialize the hardware status page IRQ location.
++       */
++
++      I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
++      return 0;
++}
++
++void i915_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      u32 temp;
++
++      if (!dev_priv)
++              return;
++
++      dev_priv->vblank_pipe = 0;
++
++      dev_priv->irq_enabled = 0;
++      I915_WRITE(HWSTAM, 0xffffffff);
++      I915_WRITE(IMR, 0xffffffff);
++      I915_WRITE(IER, 0x0);
++
++      temp = I915_READ(PIPEASTAT);
++      I915_WRITE(PIPEASTAT, temp);
++      temp = I915_READ(PIPEBSTAT);
++      I915_WRITE(PIPEBSTAT, temp);
++      temp = I915_READ(IIR);
++      I915_WRITE(IIR, temp);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_mem.c git-nokia/drivers/gpu/drm-tungsten/i915_mem.c
+--- git/drivers/gpu/drm-tungsten/i915_mem.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_mem.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,386 @@
++/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
++ */
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/* This memory manager is integrated into the global/local lru
++ * mechanisms used by the clients.  Specifically, it operates by
++ * setting the 'in_use' fields of the global LRU to indicate whether
++ * this region is privately allocated to a client.
++ *
++ * This does require the client to actually respect that field.
++ *
++ * Currently no effort is made to allocate 'private' memory in any
++ * clever way - the LRU information isn't used to determine which
++ * block to allocate, and the ring is drained prior to allocations --
++ * in other words allocation is expensive.
++ */
++static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      struct drm_tex_region *list;
++      unsigned shift, nr;
++      unsigned start;
++      unsigned end;
++      unsigned i;
++      int age;
++
++      shift = dev_priv->tex_lru_log_granularity;
++      nr = I915_NR_TEX_REGIONS;
++
++      start = p->start >> shift;
++      end = (p->start + p->size - 1) >> shift;
++
++      age = ++sarea_priv->texAge;
++      list = sarea_priv->texList;
++
++      /* Mark the regions with the new flag and update their age.  Move
++       * them to head of list to preserve LRU semantics.
++       */
++      for (i = start; i <= end; i++) {
++              list[i].in_use = in_use;
++              list[i].age = age;
++
++              /* remove_from_list(i)
++               */
++              list[(unsigned)list[i].next].prev = list[i].prev;
++              list[(unsigned)list[i].prev].next = list[i].next;
++
++              /* insert_at_head(list, i)
++               */
++              list[i].prev = nr;
++              list[i].next = list[nr].next;
++              list[(unsigned)list[nr].next].prev = i;
++              list[nr].next = i;
++      }
++}
++
++/* Very simple allocator for agp memory, working on a static range
++ * already mapped into each client's address space.
++ */
++
++static struct mem_block *split_block(struct mem_block *p, int start, int size,
++                                   struct drm_file *file_priv)
++{
++      /* Maybe cut off the start of an existing block */
++      if (start > p->start) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start;
++              newblock->size = p->size - (start - p->start);
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size -= newblock->size;
++              p = newblock;
++      }
++
++      /* Maybe cut off the end of an existing block */
++      if (size < p->size) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start + size;
++              newblock->size = p->size - size;
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size = size;
++      }
++
++      out:
++      /* Our block is in the middle */
++      p->file_priv = file_priv;
++      return p;
++}
++
++static struct mem_block *alloc_block(struct mem_block *heap, int size,
++                                   int align2, struct drm_file *file_priv)
++{
++      struct mem_block *p;
++      int mask = (1 << align2) - 1;
++
++      for (p = heap->next; p != heap; p = p->next) {
++              int start = (p->start + mask) & ~mask;
++              if (p->file_priv == NULL && start + size <= p->start + p->size)
++                      return split_block(p, start, size, file_priv);
++      }
++
++      return NULL;
++}
++
++static struct mem_block *find_block(struct mem_block *heap, int start)
++{
++      struct mem_block *p;
++
++      for (p = heap->next; p != heap; p = p->next)
++              if (p->start == start)
++                      return p;
++
++      return NULL;
++}
++
++static void free_block(struct mem_block *p)
++{
++      p->file_priv = NULL;
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      if (p->next->file_priv == NULL) {
++              struct mem_block *q = p->next;
++              p->size += q->size;
++              p->next = q->next;
++              p->next->prev = p;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
++      }
++
++      if (p->prev->file_priv == NULL) {
++              struct mem_block *q = p->prev;
++              q->size += p->size;
++              q->next = p->next;
++              q->next->prev = q;
++              drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
++      }
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++static int init_heap(struct mem_block **heap, int start, int size)
++{
++      struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
++
++      if (!blocks)
++              return -ENOMEM;
++
++      *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
++      if (!*heap) {
++              drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
++              return -ENOMEM;
++      }
++
++      blocks->start = start;
++      blocks->size = size;
++      blocks->file_priv = NULL;
++      blocks->next = blocks->prev = *heap;
++
++      memset(*heap, 0, sizeof(**heap));
++      (*heap)->file_priv = (struct drm_file *) - 1;
++      (*heap)->next = (*heap)->prev = blocks;
++      return 0;
++}
++
++/* Free all blocks associated with the releasing file.
++ */
++void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
++                    struct mem_block *heap)
++{
++      struct mem_block *p;
++
++      if (!heap || !heap->next)
++              return;
++
++      for (p = heap->next; p != heap; p = p->next) {
++              if (p->file_priv == file_priv) {
++                      p->file_priv = NULL;
++                      mark_block(dev, p, 0);
++              }
++      }
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      for (p = heap->next; p != heap; p = p->next) {
++              while (p->file_priv == NULL && p->next->file_priv == NULL) {
++                      struct mem_block *q = p->next;
++                      p->size += q->size;
++                      p->next = q->next;
++                      p->next->prev = p;
++                      drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
++              }
++      }
++}
++
++/* Shutdown.
++ */
++void i915_mem_takedown(struct mem_block **heap)
++{
++      struct mem_block *p;
++
++      if (!*heap)
++              return;
++
++      for (p = (*heap)->next; p != *heap;) {
++              struct mem_block *q = p;
++              p = p->next;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
++      }
++
++      drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
++      *heap = NULL;
++}
++
++static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
++{
++      switch (region) {
++      case I915_MEM_REGION_AGP:
++              return &dev_priv->agp_heap;
++      default:
++              return NULL;
++      }
++}
++
++/* IOCTL HANDLERS */
++
++int i915_mem_alloc(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_alloc_t *alloc = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, alloc->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      /* Make things easier on ourselves: all allocations at least
++       * 4k aligned.
++       */
++      if (alloc->alignment < 12)
++              alloc->alignment = 12;
++
++      block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
++
++      if (!block)
++              return -ENOMEM;
++
++      mark_block(dev, block, 1);
++
++      if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
++                           sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++int i915_mem_free(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_free_t *memfree = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, memfree->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      block = find_block(*heap, memfree->region_offset);
++      if (!block)
++              return -EFAULT;
++
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      mark_block(dev, block, 0);
++      free_block(block);
++      return 0;
++}
++
++int i915_mem_init_heap(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_init_heap_t *initheap = data;
++      struct mem_block **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, initheap->region);
++      if (!heap)
++              return -EFAULT;
++
++      if (*heap) {
++              DRM_ERROR("heap already initialized?");
++              return -EFAULT;
++      }
++
++      return init_heap(heap, initheap->start, initheap->size);
++}
++
++int i915_mem_destroy_heap( struct drm_device *dev, void *data,
++                         struct drm_file *file_priv )
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_destroy_heap_t *destroyheap = data;
++      struct mem_block **heap;
++
++      if ( !dev_priv ) {
++              DRM_ERROR( "called with no initialization\n" );
++              return -EINVAL;
++      }
++
++      heap = get_heap( dev_priv, destroyheap->region );
++      if (!heap) {
++              DRM_ERROR("get_heap failed");
++              return -EFAULT;
++      }
++
++      if (!*heap) {
++              DRM_ERROR("heap not initialized?");
++              return -EFAULT;
++      }
++
++      i915_mem_takedown( heap );
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_opregion.c git-nokia/drivers/gpu/drm-tungsten/i915_opregion.c
+--- git/drivers/gpu/drm-tungsten/i915_opregion.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_opregion.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,389 @@
++/*
++ *
++ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
++ * Copyright 2008 Red Hat <mjg@redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/acpi.h>
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++#define PCI_ASLE 0xe4
++#define PCI_ASLS 0xfc
++
++#define OPREGION_SZ            (8*1024)
++#define OPREGION_HEADER_OFFSET 0
++#define OPREGION_ACPI_OFFSET   0x100
++#define OPREGION_SWSCI_OFFSET  0x200
++#define OPREGION_ASLE_OFFSET   0x300
++#define OPREGION_VBT_OFFSET    0x1000
++
++#define OPREGION_SIGNATURE "IntelGraphicsMem"
++#define MBOX_ACPI      (1<<0)
++#define MBOX_SWSCI     (1<<1)
++#define MBOX_ASLE      (1<<2)
++
++/* _DOD id definitions */
++#define OUTPUT_CONNECTOR_MSK   0xf000
++#define OUTPUT_CONNECTOR_OFFSET        12
++
++#define OUTPUT_PORT_MSK                0x00f0
++#define OUTPUT_PORT_OFFSET     4
++  #define OUTPUT_PORT_ANALOG   0
++  #define OUTPUT_PORT_LVDS     1
++  #define OUTPUT_PORT_SDVOB    2
++  #define OUTPUT_PORT_SDVOC    3
++  #define OUTPUT_PORT_TV       4
++
++#define OUTPUT_DISPLAY_MSK     0x0f00
++#define OUTPUT_DISPLAY_OFFSET  8
++  #define OUTPUT_DISPLAY_OTHER         0
++  #define OUTPUT_DISPLAY_VGA           1
++  #define OUTPUT_DISPLAY_TV            2
++  #define OUTPUT_DISPLAY_DIGI          3
++  #define OUTPUT_DISPLAY_FLAT_PANEL    4
++
++/* predefined id for integrated LVDS and VGA connector */
++#define OUTPUT_INT_LVDS        0x00000110
++#define OUTPUT_INT_VGA 0x80000100
++
++struct opregion_header {
++       u8 signature[16];
++       u32 size;
++       u32 opregion_ver;
++       u8 bios_ver[32];
++       u8 vbios_ver[16];
++       u8 driver_ver[16];
++       u32 mboxes;
++       u8 reserved[164];
++} __attribute__((packed));
++
++/* OpRegion mailbox #1: public ACPI methods */
++struct opregion_acpi {
++       u32 drdy;       /* driver readiness */
++       u32 csts;       /* notification status */
++       u32 cevt;       /* current event */
++       u8 rsvd1[20];
++       u32 didl[8];    /* supported display devices ID list */
++       u32 cpdl[8];    /* currently presented display list */
++       u32 cadl[8];    /* currently active display list */
++       u32 nadl[8];    /* next active devices list */
++       u32 aslp;       /* ASL sleep time-out */
++       u32 tidx;       /* toggle table index */
++       u32 chpd;       /* current hotplug enable indicator */
++       u32 clid;       /* current lid state*/
++       u32 cdck;       /* current docking state */
++       u32 sxsw;       /* Sx state resume */
++       u32 evts;       /* ASL supported events */
++       u32 cnot;       /* current OS notification */
++       u32 nrdy;       /* driver status */
++       u8 rsvd2[60];
++} __attribute__((packed));
++
++/* OpRegion mailbox #2: SWSCI */
++struct opregion_swsci {
++       u32 scic;       /* SWSCI command|status|data */
++       u32 parm;       /* command parameters */
++       u32 dslp;       /* driver sleep time-out */
++       u8 rsvd[244];
++} __attribute__((packed));
++
++/* OpRegion mailbox #3: ASLE */
++struct opregion_asle {
++       u32 ardy;       /* driver readiness */
++       u32 aslc;       /* ASLE interrupt command */
++       u32 tche;       /* technology enabled indicator */
++       u32 alsi;       /* current ALS illuminance reading */
++       u32 bclp;       /* backlight brightness to set */
++       u32 pfit;       /* panel fitting state */
++       u32 cblv;       /* current brightness level */
++       u16 bclm[20];   /* backlight level duty cycle mapping table */
++       u32 cpfm;       /* current panel fitting mode */
++       u32 epfm;       /* enabled panel fitting modes */
++       u8 plut[74];    /* panel LUT and identifier */
++       u32 pfmb;       /* PWM freq and min brightness */
++       u8 rsvd[102];
++} __attribute__((packed));
++
++/* ASLE irq request bits */
++#define ASLE_SET_ALS_ILLUM     (1 << 0)
++#define ASLE_SET_BACKLIGHT     (1 << 1)
++#define ASLE_SET_PFIT          (1 << 2)
++#define ASLE_SET_PWM_FREQ      (1 << 3)
++#define ASLE_REQ_MSK           0xf
++
++/* response bits of ASLE irq request */
++#define ASLE_ALS_ILLUM_FAIL    (2<<10)
++#define ASLE_BACKLIGHT_FAIL    (2<<12)
++#define ASLE_PFIT_FAIL         (2<<14)
++#define ASLE_PWM_FREQ_FAIL     (2<<16)
++
++/* ASLE backlight brightness to set */
++#define ASLE_BCLP_VALID                (1<<31)
++#define ASLE_BCLP_MSK          (~(1<<31))
++
++/* ASLE panel fitting request */
++#define ASLE_PFIT_VALID         (1<<31)
++#define ASLE_PFIT_CENTER (1<<0)
++#define ASLE_PFIT_STRETCH_TEXT (1<<1)
++#define ASLE_PFIT_STRETCH_GFX (1<<2)
++
++/* PWM frequency and minimum brightness */
++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
++#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
++#define ASLE_PFMB_PWM_VALID (1<<31)
++
++#define ASLE_CBLV_VALID         (1<<31)
++
++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct opregion_asle *asle = dev_priv->opregion.asle;
++      u32 blc_pwm_ctl;
++      
++      if (!(bclp & ASLE_BCLP_VALID))
++              return ASLE_BACKLIGHT_FAIL;
++      
++      bclp &= ASLE_BCLP_MSK;
++      if (bclp < 0 || bclp > 255)
++              return ASLE_BACKLIGHT_FAIL;
++      
++      blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++      blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
++      I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1));
++      asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++      
++      return 0;
++}
++
++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
++{
++      return 0;
++}
++
++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      if (pfmb & ASLE_PFMB_PWM_VALID) {
++              u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++              u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
++              blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
++              pwm = pwm >> 9;
++              // FIXME - what do we do with the PWM?
++      }
++      return 0;
++}
++
++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
++{
++      if (!(pfit & ASLE_PFIT_VALID))
++              return ASLE_PFIT_FAIL;
++      return 0;
++}
++
++void opregion_asle_intr(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct opregion_asle *asle = dev_priv->opregion.asle;
++      u32 asle_stat = 0;
++      u32 asle_req;
++
++      if (!asle)
++              return;
++
++      asle_req = asle->aslc & ASLE_REQ_MSK;
++      
++      if (!asle_req) {
++              DRM_DEBUG("non asle set request??\n");
++              return;
++      }
++
++      if (asle_req & ASLE_SET_ALS_ILLUM)
++              asle_stat |= asle_set_als_illum(dev, asle->alsi);
++      
++      if (asle_req & ASLE_SET_BACKLIGHT)
++              asle_stat |= asle_set_backlight(dev, asle->bclp);
++      
++      if (asle_req & ASLE_SET_PFIT)
++              asle_stat |= asle_set_pfit(dev, asle->pfit);
++      
++      if (asle_req & ASLE_SET_PWM_FREQ)
++              asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
++      
++      asle->aslc = asle_stat;
++}
++
++#define ASLE_ALS_EN    (1<<0)
++#define ASLE_BLC_EN    (1<<1)
++#define ASLE_PFIT_EN   (1<<2)
++#define ASLE_PFMB_EN   (1<<3)
++
++void opregion_enable_asle(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct opregion_asle *asle = dev_priv->opregion.asle;
++
++      if (asle) {
++              if (IS_MOBILE(dev)) {
++                      u32 pipeb_stats = I915_READ(PIPEBSTAT);
++                      /* Some hardware uses the legacy backlight controller
++                         to signal interrupts, so we need to set up pipe B
++                         to generate an IRQ on writes */
++                      pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
++                      I915_WRITE(PIPEBSTAT, pipeb_stats);
++
++                      dev_priv->irq_mask_reg &=
++                              ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              }
++
++              dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
++
++              asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 
++                      ASLE_PFMB_EN;
++              asle->ardy = 1;
++      }
++}
++
++#define ACPI_EV_DISPLAY_SWITCH (1<<0)
++#define ACPI_EV_LID            (1<<1)
++#define ACPI_EV_DOCK           (1<<2)
++
++static struct intel_opregion *system_opregion;
++
++int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
++                               void *data)
++{
++      /* The only video events relevant to opregion are 0x80. These indicate
++         either a docking event, lid switch or display switch request. In
++         Linux, these are handled by the dock, button and video drivers.
++         We might want to fix the video driver to be opregion-aware in
++         future, but right now we just indicate to the firmware that the
++         request has been handled */
++      
++      struct opregion_acpi *acpi;
++
++      if (!system_opregion)
++              return NOTIFY_DONE;
++      
++      acpi = system_opregion->acpi;
++      acpi->csts = 0;
++
++      return NOTIFY_OK;
++}
++
++static struct notifier_block intel_opregion_notifier = {
++      .notifier_call = intel_opregion_video_event,
++};
++
++int intel_opregion_init(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct intel_opregion *opregion = &dev_priv->opregion;
++      void *base;
++      u32 asls, mboxes;
++      int err = 0;
++      
++      pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
++      DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
++      if (asls == 0) {
++              DRM_DEBUG("ACPI OpRegion not supported!\n");
++              return -ENOTSUPP;
++      }
++      
++      base = ioremap(asls, OPREGION_SZ);
++      if (!base)
++              return -ENOMEM;
++      
++      opregion->header = base;
++      if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
++              DRM_DEBUG("opregion signature mismatch\n");
++              err = -EINVAL;
++              goto err_out;
++      }
++      
++      mboxes = opregion->header->mboxes;
++      if (mboxes & MBOX_ACPI) {
++              DRM_DEBUG("Public ACPI methods supported\n");
++              opregion->acpi = base + OPREGION_ACPI_OFFSET;
++      } else {
++              DRM_DEBUG("Public ACPI methods not supported\n");
++              err = -ENOTSUPP;
++              goto err_out;
++      }
++      opregion->enabled = 1;
++      
++      if (mboxes & MBOX_SWSCI) {
++              DRM_DEBUG("SWSCI supported\n");
++              opregion->swsci = base + OPREGION_SWSCI_OFFSET;
++      }
++      if (mboxes & MBOX_ASLE) {
++              DRM_DEBUG("ASLE supported\n");
++              opregion->asle = base + OPREGION_ASLE_OFFSET;
++      }
++      
++      /* Notify BIOS we are ready to handle ACPI video ext notifs.
++       * Right now, all the events are handled by the ACPI video module.
++       * We don't actually need to do anything with them. */
++      opregion->acpi->csts = 0;
++      opregion->acpi->drdy = 1;
++
++      system_opregion = opregion;
++      register_acpi_notifier(&intel_opregion_notifier);
++      
++      return 0;
++      
++err_out:
++      iounmap(opregion->header);
++      opregion->header = NULL;
++      return err;
++}
++
++void intel_opregion_free(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct intel_opregion *opregion = &dev_priv->opregion;
++      
++      if (!opregion->enabled)
++              return;
++      
++      opregion->acpi->drdy = 0;
++      
++      system_opregion = NULL;
++      unregister_acpi_notifier(&intel_opregion_notifier);
++      
++      /* just clear all opregion memory pointers now */
++      iounmap(opregion->header);
++      opregion->header = NULL;
++      opregion->acpi = NULL;
++      opregion->swsci = NULL;
++      opregion->asle = NULL;
++      
++      opregion->enabled = 0;
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_suspend.c git-nokia/drivers/gpu/drm-tungsten/i915_suspend.c
+--- git/drivers/gpu/drm-tungsten/i915_suspend.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_suspend.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,520 @@
++/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
++ */
++/*
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      if (pipe == PIPE_A)
++              return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
++      else
++              return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
++}
++
++static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
++      u32 *array;
++      int i;
++
++      if (!i915_pipe_enabled(dev, pipe))
++              return;
++
++      if (pipe == PIPE_A)
++              array = dev_priv->save_palette_a;
++      else
++              array = dev_priv->save_palette_b;
++
++      for(i = 0; i < 256; i++)
++              array[i] = I915_READ(reg + (i << 2));
++}
++
++static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
++      u32 *array;
++      int i;
++
++      if (!i915_pipe_enabled(dev, pipe))
++              return;
++
++      if (pipe == PIPE_A)
++              array = dev_priv->save_palette_a;
++      else
++              array = dev_priv->save_palette_b;
++
++      for(i = 0; i < 256; i++)
++              I915_WRITE(reg + (i << 2), array[i]);
++}
++
++static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_WRITE8(index_port, reg);
++      return I915_READ8(data_port);
++}
++
++static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_READ8(st01);
++      I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
++      return I915_READ8(VGA_AR_DATA_READ);
++}
++
++static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_READ8(st01);
++      I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
++      I915_WRITE8(VGA_AR_DATA_WRITE, val);
++}
++
++static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_WRITE8(index_port, reg);
++      I915_WRITE8(data_port, val);
++}
++
++static void i915_save_vga(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++      u16 cr_index, cr_data, st01;
++
++      /* VGA color palette registers */
++      dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
++      /* DACCRX automatically increments during read */
++      I915_WRITE8(VGA_DACRX, 0);
++      /* Read 3 bytes of color data from each index */
++      for (i = 0; i < 256 * 3; i++)
++              dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
++
++      /* MSR bits */
++      dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
++      if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
++              cr_index = VGA_CR_INDEX_CGA;
++              cr_data = VGA_CR_DATA_CGA;
++              st01 = VGA_ST01_CGA;
++      } else {
++              cr_index = VGA_CR_INDEX_MDA;
++              cr_data = VGA_CR_DATA_MDA;
++              st01 = VGA_ST01_MDA;
++      }
++
++      /* CRT controller regs */
++      i915_write_indexed(dev, cr_index, cr_data, 0x11,
++                         i915_read_indexed(dev, cr_index, cr_data, 0x11) &
++                         (~0x80));
++      for (i = 0; i <= 0x24; i++)
++              dev_priv->saveCR[i] =
++                      i915_read_indexed(dev, cr_index, cr_data, i);
++      /* Make sure we don't turn off CR group 0 writes */
++      dev_priv->saveCR[0x11] &= ~0x80;
++
++      /* Attribute controller registers */
++      I915_READ8(st01);
++      dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
++      for (i = 0; i <= 0x14; i++)
++              dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
++      I915_READ8(st01);
++      I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
++      I915_READ8(st01);
++
++      /* Graphics controller registers */
++      for (i = 0; i < 9; i++)
++              dev_priv->saveGR[i] =
++                      i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
++
++      dev_priv->saveGR[0x10] =
++              i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
++      dev_priv->saveGR[0x11] =
++              i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
++      dev_priv->saveGR[0x18] =
++              i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
++
++      /* Sequencer registers */
++      for (i = 0; i < 8; i++)
++              dev_priv->saveSR[i] =
++                      i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
++}
++
++static void i915_restore_vga(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++      u16 cr_index, cr_data, st01;
++
++      /* MSR bits */
++      I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
++      if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
++              cr_index = VGA_CR_INDEX_CGA;
++              cr_data = VGA_CR_DATA_CGA;
++              st01 = VGA_ST01_CGA;
++      } else {
++              cr_index = VGA_CR_INDEX_MDA;
++              cr_data = VGA_CR_DATA_MDA;
++              st01 = VGA_ST01_MDA;
++      }
++
++      /* Sequencer registers, don't write SR07 */
++      for (i = 0; i < 7; i++)
++              i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
++                                 dev_priv->saveSR[i]);
++
++      /* CRT controller regs */
++      /* Enable CR group 0 writes */
++      i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
++      for (i = 0; i <= 0x24; i++)
++              i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
++
++      /* Graphics controller regs */
++      for (i = 0; i < 9; i++)
++              i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
++                                 dev_priv->saveGR[i]);
++
++      i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
++                         dev_priv->saveGR[0x10]);
++      i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
++                         dev_priv->saveGR[0x11]);
++      i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
++                         dev_priv->saveGR[0x18]);
++
++      /* Attribute controller registers */
++      I915_READ8(st01); /* switch back to index mode */
++      for (i = 0; i <= 0x14; i++)
++              i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
++      I915_READ8(st01); /* switch back to index mode */
++      I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
++      I915_READ8(st01);
++
++      /* VGA color palette registers */
++      I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
++      /* DACCRX automatically increments during read */
++      I915_WRITE8(VGA_DACWX, 0);
++      /* Read 3 bytes of color data from each index */
++      for (i = 0; i < 256 * 3; i++)
++              I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
++
++}
++
++int i915_save_state(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++
++#if defined(__FreeBSD__)
++      dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1);
++#else
++      pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
++#endif
++
++      /* Display arbitration control */
++      dev_priv->saveDSPARB = I915_READ(DSPARB);
++
++      /* Pipe & plane A info */
++      dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
++      dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
++      dev_priv->saveFPA0 = I915_READ(FPA0);
++      dev_priv->saveFPA1 = I915_READ(FPA1);
++      dev_priv->saveDPLL_A = I915_READ(DPLL_A);
++      if (IS_I965G(dev))
++              dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
++      dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
++      dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
++      dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
++      dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
++      dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
++      dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
++      dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
++
++      dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
++      dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
++      dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
++      dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
++      dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
++      if (IS_I965G(dev)) {
++              dev_priv->saveDSPASURF = I915_READ(DSPASURF);
++              dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
++      }
++      i915_save_palette(dev, PIPE_A);
++      dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
++
++      /* Pipe & plane B info */
++      dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
++      dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
++      dev_priv->saveFPB0 = I915_READ(FPB0);
++      dev_priv->saveFPB1 = I915_READ(FPB1);
++      dev_priv->saveDPLL_B = I915_READ(DPLL_B);
++      if (IS_I965G(dev))
++              dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
++      dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
++      dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
++      dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
++      dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
++      dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
++      dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
++      dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
++
++      dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
++      dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
++      dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
++      dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
++      dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
++      if (IS_I965GM(dev) || IS_GM45(dev)) {
++              dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
++              dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
++      }
++      i915_save_palette(dev, PIPE_B);
++      dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
++
++      /* CRT state */
++      dev_priv->saveADPA = I915_READ(ADPA);
++
++      /* LVDS state */
++      dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
++      dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
++      dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
++      if (IS_I965G(dev))
++              dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
++      if (IS_MOBILE(dev) && !IS_I830(dev))
++              dev_priv->saveLVDS = I915_READ(LVDS);
++      if (!IS_I830(dev) && !IS_845G(dev))
++              dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
++      dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
++      dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
++      dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
++
++      /* FIXME: save TV & SDVO state */
++
++      /* FBC state */
++      dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
++      dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
++      dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
++      dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
++
++      /* Interrupt state */
++      dev_priv->saveIIR = I915_READ(IIR);
++      dev_priv->saveIER = I915_READ(IER);
++      dev_priv->saveIMR = I915_READ(IMR);
++
++      /* VGA state */
++      dev_priv->saveVGA0 = I915_READ(VGA0);
++      dev_priv->saveVGA1 = I915_READ(VGA1);
++      dev_priv->saveVGA_PD = I915_READ(VGA_PD);
++      dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
++
++      /* Clock gating state */
++      dev_priv->saveD_STATE = I915_READ(D_STATE);
++      dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
++
++      /* Cache mode state */
++      dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
++
++      /* Memory Arbitration state */
++      dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
++
++      /* Scratch space */
++      for (i = 0; i < 16; i++) {
++              dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
++              dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
++      }
++      for (i = 0; i < 3; i++)
++              dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
++
++      i915_save_vga(dev);
++
++      return 0;
++}
++
++int i915_restore_state(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++
++#if defined(__FreeBSD__)
++      pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
++#else
++      pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
++#endif
++
++      I915_WRITE(DSPARB, dev_priv->saveDSPARB);
++
++      /* Pipe & plane A info */
++      /* Prime the clock */
++      if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
++              I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
++                         ~DPLL_VCO_ENABLE);
++              DRM_UDELAY(150);
++      }
++      I915_WRITE(FPA0, dev_priv->saveFPA0);
++      I915_WRITE(FPA1, dev_priv->saveFPA1);
++      /* Actually enable it */
++      I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
++      DRM_UDELAY(150);
++      if (IS_I965G(dev))
++              I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
++      DRM_UDELAY(150);
++
++      /* Restore mode */
++      I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
++      I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
++      I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
++      I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
++      I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
++      I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
++      I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
++
++      /* Restore plane info */
++      I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
++      I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
++      I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
++      I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
++      I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
++      if (IS_I965G(dev)) {
++              I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
++              I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
++      }
++
++      I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
++
++      i915_restore_palette(dev, PIPE_A);
++      /* Enable the plane */
++      I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
++      I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
++
++      /* Pipe & plane B info */
++      if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
++              I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
++                         ~DPLL_VCO_ENABLE);
++              DRM_UDELAY(150);
++      }
++      I915_WRITE(FPB0, dev_priv->saveFPB0);
++      I915_WRITE(FPB1, dev_priv->saveFPB1);
++      /* Actually enable it */
++      I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
++      DRM_UDELAY(150);
++      if (IS_I965G(dev))
++              I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
++      DRM_UDELAY(150);
++
++      /* Restore mode */
++      I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
++      I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
++      I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
++      I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
++      I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
++      I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
++      I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
++
++      /* Restore plane info */
++      I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
++      I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
++      I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
++      I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
++      I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
++      if (IS_I965G(dev)) {
++              I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
++              I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
++      }
++
++      I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
++
++      i915_restore_palette(dev, PIPE_B);
++      /* Enable the plane */
++      I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
++      I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
++
++      /* CRT state */
++      I915_WRITE(ADPA, dev_priv->saveADPA);
++
++      /* LVDS state */
++      if (IS_I965G(dev))
++              I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
++      if (IS_MOBILE(dev) && !IS_I830(dev))
++              I915_WRITE(LVDS, dev_priv->saveLVDS);
++      if (!IS_I830(dev) && !IS_845G(dev))
++              I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
++
++      I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
++      I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++      I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
++      I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
++      I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
++      I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++
++      /* FIXME: restore TV & SDVO state */
++
++      /* FBC info */
++      I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
++      I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
++      I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
++      I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
++
++      /* VGA state */
++      I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
++      I915_WRITE(VGA0, dev_priv->saveVGA0);
++      I915_WRITE(VGA1, dev_priv->saveVGA1);
++      I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
++      DRM_UDELAY(150);
++
++      /* Clock gating state */
++      I915_WRITE (D_STATE, dev_priv->saveD_STATE);
++      I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
++
++      /* Cache mode state */
++      I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
++
++      /* Memory arbitration state */
++      I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
++
++      for (i = 0; i < 16; i++) {
++              I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
++              I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
++      }
++      for (i = 0; i < 3; i++)
++              I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
++
++      i915_restore_vga(dev);
++
++      return 0;
++}
++
+diff -Nurd git/drivers/gpu/drm-tungsten/imagine_drv.c git-nokia/drivers/gpu/drm-tungsten/imagine_drv.c
+--- git/drivers/gpu/drm-tungsten/imagine_drv.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/imagine_drv.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2005 Adam Jackson.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* derived from tdfx_drv.c */
++
++#include "drmP.h"
++#include "imagine_drv.h"
++
++#include "drm_pciids.h"
++
++static struct drm_driver driver;
++
++static struct pci_device_id pciidlist[] = {
++    imagine_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++    return drm_get_dev(pdev, ent, &driver);
++}
++
++static struct drm_driver driver = {
++    .driver_features = DRIVER_USE_MTRR,
++    .reclaim_buffers = drm_core_reclaim_buffers,
++    .get_map_ofs = drm_core_get_map_ofs,
++    .get_reg_ofs = drm_core_get_reg_ofs,
++    .fops = {
++        .owner = THIS_MODULE,
++        .open = drm_open,
++        .release = drm_release,
++        .ioctl = drm_ioctl,
++        .mmap = drm_mmap,
++        .poll = drm_poll,
++        .fasync = drm_fasync,
++    },
++    .pci_driver = {
++        .name = DRIVER_NAME,
++        .id_table = pciidlist,
++        .probe = probe,
++        .remove = __devexit_p(drm_cleanup_pci),
++    },
++
++    .name = DRIVER_NAME,
++    .desc = DRIVER_DESC,
++    .date = DRIVER_DATE,
++    .major = DRIVER_MAJOR,
++    .minor = DRIVER_MINOR,
++    .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int __init imagine_init(void)
++{
++    return drm_init(&driver, pciidlist);
++}
++
++static void __exit imagine_exit(void)
++{
++    drm_exit(&driver);
++}
++
++module_init(imagine_init);
++module_exit(imagine_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/Kconfig git-nokia/drivers/gpu/drm-tungsten/Kconfig
+--- git/drivers/gpu/drm-tungsten/Kconfig       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/Kconfig 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++#
++# DRM device configuration from Tungsten Graphics
++#
++# This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++# The driver is the Tungsten alternative of the original DRM driver.
++#
++
++menuconfig DRM_TUNGSTEN
++      tristate "Direct Rendering Manager (Tungsten - XFree86 4.1.0 and higher DRI support)"
++      help
++        Kernel-level support for the Direct Rendering Infrastructure (DRI)
++        introduced in XFree86 4.0. If you say Y here, you need to select
++        the module that's right for your graphics card from the list below.
++        These modules provide support for synchronization, security, and
++        DMA transfers. Please see <http://dri.sourceforge.net/> for more
++        details.  You should also select and configure AGP
++        (/dev/agpgart) support.
++
++config DRM_TUNGSTEN_PVR2D
++      tristate "PVR2D kernel helper"
++      depends on DRM_TUNGSTEN && PVR
++      help
++        Choose this option if you want to give DRI access to your card
++        handled by the Imagination PowerVR framework. If M is selected,
++        the module will be called pvr2d.
++
++if DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
++
++config DRM_TUNGSTEN_TDFX
++      tristate "3dfx Banshee/Voodoo3+"
++      help
++        Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
++        graphics card.  If M is selected, the module will be called tdfx.
++
++config DRM_TUNGSTEN_R128
++      tristate "ATI Rage 128"
++      help
++        Choose this option if you have an ATI Rage 128 graphics card.  If M
++        is selected, the module will be called r128.  AGP support for
++        this card is strongly suggested (unless you have a PCI version).
++
++config DRM_TUNGSTEN_RADEON
++      tristate "ATI Radeon"
++      help
++        Choose this option if you have an ATI Radeon graphics card.  There
++        are both PCI and AGP versions.  You don't need to choose this to
++        run the Radeon in plain VGA mode.
++
++        If M is selected, the module will be called radeon.
++
++config DRM_TUNGSTEN_I810
++      tristate "Intel I810"
++      depends on AGP && AGP_INTEL
++      help
++        Choose this option if you have an Intel I810 graphics card.  If M is
++        selected, the module will be called i810.  AGP support is required
++        for this driver to work.
++
++config DRM_TUNGSTEN_I915
++      tristate "i915 driver"
++      depends on AGP && AGP_INTEL
++      help
++        Choose this option if you have a system that has Intel 830M, 845G,
++        852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
++        module will be called i915.  AGP support is required for this driver
++        to work. This driver is used by the Intel driver in X.org 6.8 and
++        XFree86 4.4 and above. If unsure, build this and i830 as modules and
++        the X server will load the correct one.
++
++config DRM_TUNGSTEN_MGA
++      tristate "Matrox g200/g400"
++      help
++        Choose this option if you have a Matrox G200, G400 or G450 graphics
++        card.  If M is selected, the module will be called mga.  AGP
++        support is required for this driver to work.
++
++config DRM_TUNGSTEN_SIS
++      tristate "SiS video cards"
++      depends on AGP
++      help
++        Choose this option if you have a SiS 630 or compatible video
++          chipset. If M is selected the module will be called sis. AGP
++          support is required for this driver to work.
++
++config DRM_TUNGSTEN_VIA
++      tristate "Via unichrome video cards"
++      help
++        Choose this option if you have a Via unichrome or compatible video
++        chipset. If M is selected the module will be called via.
++
++config DRM_TUNGSTEN_SAVAGE
++      tristate "Savage video cards"
++      help
++        Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
++        chipset. If M is selected the module will be called savage.
++
++config DRM_TUNGSTEN_FFB
++      tristate "Creator/Creator3D direct rendering"
++      help
++        Choose this option to include the Creator/Creator3D direct rendering
++        driver. If M is selected the module will be called ffb.
++
++config DRM_TUNGSTEN_MACH64
++      tristate "MACH64 Rage Pro video card"
++      help
++        Choose this option if you have a Mach64 Rage Pro chipset.
++        If M is selected the module will be called mach64.
++
++config DRM_TUNGSTEN_NV
++      tristate "Nvidia video card (NV driver)"
++      help
++        Choose this option if you have a Nvidia chipset and want to use the
++        original nv driver. If M is selected the module will be called nv.
++
++config DRM_TUNGSTEN_NOUVEAU
++      tristate "Nvidia video card (Nouveau driver)"
++      help
++        Choose this option if you have a Nvidia chipset and want to use the
++        nouveau driver. If M is selected the module will be called nouveau.
++
++config DRM_TUNGSTEN_XGI
++      tristate "XGI video card"
++      help
++        Choose this option if you have a XGI chipset. If M is selected the
++        module will be called xgi.
++
++endif # DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
++
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_dma.c git-nokia/drivers/gpu/drm-tungsten/mach64_dma.c
+--- git/drivers/gpu/drm-tungsten/mach64_dma.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_dma.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1778 @@
++/* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
++/**
++ * \file mach64_dma.c
++ * DMA support for mach64 (Rage Pro) driver
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ * \author Frank C. Earl <fearl@airmail.net>
++ * \author Leif Delgass <ldelgass@retinalburn.net>
++ * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
++ */
++
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002 Frank C. Earl
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++/*******************************************************************/
++/** \name Engine, FIFO control */
++/*@{*/
++
++/**
++ * Waits for free entries in the FIFO.
++ *
++ * \note Most writes to Mach64 registers are automatically routed through
++ * command FIFO which is 16 entry deep. Prior to writing to any draw engine
++ * register one has to ensure that enough FIFO entries are available by calling
++ * this function.  Failure to do so may cause the engine to lock.
++ *
++ * \param dev_priv pointer to device private data structure.
++ * \param entries number of free entries in the FIFO to wait for.
++ *
++ * \returns zero on success, or -EBUSY if the timeout (specificed by
++ * drm_mach64_private::usec_timeout) occurs.
++ */
++int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)
++{
++      int slots = 0, i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
++              if (slots <= (0x8000 >> entries))
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++      DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);
++      return -EBUSY;
++}
++
++/**
++ * Wait for the draw engine to be idle.
++ */
++int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)
++{
++      int i, ret;
++
++      ret = mach64_do_wait_for_fifo(dev_priv, 16);
++      if (ret < 0)
++              return ret;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++      DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
++      mach64_dump_ring_info(dev_priv);
++      return -EBUSY;
++}
++
++/**
++ * Wait for free entries in the ring buffer.
++ *
++ * The Mach64 bus master can be configured to act as a virtual FIFO, using a
++ * circular buffer (commonly referred as "ring buffer" in other drivers) with
++ * pointers to engine commands. This allows the CPU to do other things while
++ * the graphics engine is busy, i.e., DMA mode.
++ *
++ * This function should be called before writing new entries to the ring
++ * buffer.
++ *
++ * \param dev_priv pointer to device private data structure.
++ * \param n number of free entries in the ring buffer to wait for.
++ *
++ * \returns zero on success, or -EBUSY if the timeout (specificed by
++ * drm_mach64_private_t::usec_timeout) occurs.
++ *
++ * \sa mach64_dump_ring_info()
++ */
++int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              mach64_update_ring_snapshot(dev_priv);
++              if (ring->space >= n) {
++                      if (i > 0)
++                              DRM_DEBUG("%d usecs\n", i);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++      /* FIXME: This is being ignored... */
++      DRM_ERROR("failed!\n");
++      mach64_dump_ring_info(dev_priv);
++      return -EBUSY;
++}
++
++/**
++ * Wait until all DMA requests have been processed...
++ *
++ * \sa mach64_wait_ring()
++ */
++static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      u32 head;
++      int i;
++
++      head = ring->head;
++      i = 0;
++      while (i < dev_priv->usec_timeout) {
++              mach64_update_ring_snapshot(dev_priv);
++              if (ring->head == ring->tail &&
++                  !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
++                      if (i > 0)
++                              DRM_DEBUG("%d usecs\n", i);
++                      return 0;
++              }
++              if (ring->head == head) {
++                      ++i;
++              } else {
++                      head = ring->head;
++                      i = 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++      DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
++      mach64_dump_ring_info(dev_priv);
++      return -EBUSY;
++}
++
++/**
++ * Reset the the ring buffer descriptors.
++ *
++ * \sa mach64_do_engine_reset()
++ */
++static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++
++      mach64_do_release_used_buffers(dev_priv);
++      ring->head_addr = ring->start_addr;
++      ring->head = ring->tail = 0;
++      ring->space = ring->size;
++
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      dev_priv->ring_running = 0;
++}
++
++/**
++ * Ensure the all the queued commands will be processed.
++ */
++int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
++{
++      /* FIXME: It's not necessary to wait for idle when flushing
++       * we just need to ensure the ring will be completely processed
++       * in finite time without another ioctl
++       */
++      return mach64_ring_idle(dev_priv);
++}
++
++/**
++ * Stop all DMA activity.
++ */
++int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
++{
++      int ret;
++
++      /* wait for completion */
++      if ((ret = mach64_ring_idle(dev_priv)) < 0) {
++              DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
++                        MACH64_READ(MACH64_BM_GUI_TABLE),
++                        dev_priv->ring.tail);
++              return ret;
++      }
++
++      mach64_ring_stop(dev_priv);
++
++      /* clean up after pass */
++      mach64_do_release_used_buffers(dev_priv);
++      return 0;
++}
++
++/**
++ * Reset the engine.  This will stop the DMA if it is running.
++ */
++int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
++{
++      u32 tmp;
++
++      DRM_DEBUG("\n");
++
++      /* Kill off any outstanding DMA transfers.
++       */
++      tmp = MACH64_READ(MACH64_BUS_CNTL);
++      MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
++
++      /* Reset the GUI engine (high to low transition).
++       */
++      tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
++      MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
++      /* Enable the GUI engine
++       */
++      tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
++      MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
++
++      /* ensure engine is not locked up by clearing any FIFO or HOST errors
++       */
++      tmp = MACH64_READ(MACH64_BUS_CNTL);
++      MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
++
++      /* Once GUI engine is restored, disable bus mastering */
++      MACH64_WRITE(MACH64_SRC_CNTL, 0);
++
++      /* Reset descriptor ring */
++      mach64_ring_reset(dev_priv);
++
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name Debugging output */
++/*@{*/
++
++/**
++ * Dump engine registers values.
++ */
++void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
++{
++      DRM_INFO("\n");
++      if (!dev_priv->is_pci) {
++              DRM_INFO("           AGP_BASE = 0x%08x\n",
++                       MACH64_READ(MACH64_AGP_BASE));
++              DRM_INFO("           AGP_CNTL = 0x%08x\n",
++                       MACH64_READ(MACH64_AGP_CNTL));
++      }
++      DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_ALPHA_TST_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("         BM_COMMAND = 0x%08x\n",
++               MACH64_READ(MACH64_BM_COMMAND));
++      DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
++               MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
++      DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
++               MACH64_READ(MACH64_BM_GUI_TABLE));
++      DRM_INFO("          BM_STATUS = 0x%08x\n",
++               MACH64_READ(MACH64_BM_STATUS));
++      DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
++               MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
++      DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
++               MACH64_READ(MACH64_BM_SYSTEM_TABLE));
++      DRM_INFO("           BUS_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_BUS_CNTL));
++      DRM_INFO("\n");
++      /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
++      DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
++               MACH64_READ(MACH64_CLR_CMP_CLR));
++      DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_CLR_CMP_CNTL));
++      /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
++      DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_CHIP_ID));
++      DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_CNTL));
++      DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_STAT0));
++      DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_STAT1));
++      DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_STAT2));
++      DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
++      DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
++      DRM_INFO("\n");
++      /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
++      /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
++      DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
++               MACH64_READ(MACH64_DP_BKGD_CLR));
++      DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
++               MACH64_READ(MACH64_DP_FRGD_CLR));
++      DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
++      DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
++               MACH64_READ(MACH64_DP_PIX_WIDTH));
++      DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
++      DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
++               MACH64_READ(MACH64_DP_WRITE_MASK));
++      DRM_INFO("         DSP_CONFIG = 0x%08x\n",
++               MACH64_READ(MACH64_DSP_CONFIG));
++      DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
++               MACH64_READ(MACH64_DSP_ON_OFF));
++      DRM_INFO("           DST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_DST_CNTL));
++      DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
++               MACH64_READ(MACH64_DST_OFF_PITCH));
++      DRM_INFO("\n");
++      /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
++      DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_EXT_MEM_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("          FIFO_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_FIFO_STAT));
++      DRM_INFO("\n");
++      DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_GEN_TEST_CNTL));
++      /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
++      DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
++      DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
++      DRM_INFO("           GUI_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_CNTL));
++      DRM_INFO("           GUI_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_STAT));
++      DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_TRAJ_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("          HOST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_HOST_CNTL));
++      DRM_INFO("           HW_DEBUG = 0x%08x\n",
++               MACH64_READ(MACH64_HW_DEBUG));
++      DRM_INFO("\n");
++      DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
++               MACH64_READ(MACH64_MEM_ADDR_CONFIG));
++      DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_MEM_BUF_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("           PAT_REG0 = 0x%08x\n",
++               MACH64_READ(MACH64_PAT_REG0));
++      DRM_INFO("           PAT_REG1 = 0x%08x\n",
++               MACH64_READ(MACH64_PAT_REG1));
++      DRM_INFO("\n");
++      DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
++      DRM_INFO("           SC_RIGHT = 0x%08x\n",
++               MACH64_READ(MACH64_SC_RIGHT));
++      DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
++      DRM_INFO("          SC_BOTTOM = 0x%08x\n",
++               MACH64_READ(MACH64_SC_BOTTOM));
++      DRM_INFO("\n");
++      DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SCALE_3D_CNTL));
++      DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
++               MACH64_READ(MACH64_SCRATCH_REG0));
++      DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
++               MACH64_READ(MACH64_SCRATCH_REG1));
++      DRM_INFO("         SETUP_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SETUP_CNTL));
++      DRM_INFO("           SRC_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SRC_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("           TEX_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_TEX_CNTL));
++      DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
++               MACH64_READ(MACH64_TEX_SIZE_PITCH));
++      DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
++               MACH64_READ(MACH64_TIMER_CONFIG));
++      DRM_INFO("\n");
++      DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
++      DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
++               MACH64_READ(MACH64_Z_OFF_PITCH));
++      DRM_INFO("\n");
++}
++
++#define MACH64_DUMP_CONTEXT   3
++
++/**
++ * Used by mach64_dump_ring_info() to dump the contents of the current buffer
++ * pointed by the ring head.
++ */
++static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
++                               struct drm_buf *buf)
++{
++      u32 addr = GETBUFADDR(buf);
++      u32 used = buf->used >> 2;
++      u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
++      u32 *p = GETBUFPTR(buf);
++      int skipped = 0;
++
++      DRM_INFO("buffer contents:\n");
++
++      while (used) {
++              u32 reg, count;
++
++              reg = le32_to_cpu(*p++);
++              if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
++                  (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
++                   addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
++                  addr >=
++                  GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
++                      DRM_INFO("%08x:  0x%08x\n", addr, reg);
++              }
++              addr += 4;
++              used--;
++
++              count = (reg >> 16) + 1;
++              reg = reg & 0xffff;
++              reg = MMSELECT(reg);
++              while (count && used) {
++                      if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
++                          (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
++                           addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
++                          addr >=
++                          GETBUFADDR(buf) + buf->used -
++                          MACH64_DUMP_CONTEXT * 4) {
++                              DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
++                                       reg, le32_to_cpu(*p));
++                              skipped = 0;
++                      } else {
++                              if (!skipped) {
++                                      DRM_INFO("  ...\n");
++                                      skipped = 1;
++                              }
++                      }
++                      p++;
++                      addr += 4;
++                      used--;
++
++                      reg += 4;
++                      count--;
++              }
++      }
++
++      DRM_INFO("\n");
++}
++
++/**
++ * Dump the ring state and contents, including the contents of the buffer being
++ * processed by the graphics engine.
++ */
++void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      int i, skipped;
++
++      DRM_INFO("\n");
++
++      DRM_INFO("ring contents:\n");
++      DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
++               ring->head_addr, ring->head, ring->tail);
++
++      skipped = 0;
++      for (i = 0; i < ring->size / sizeof(u32); i += 4) {
++              if (i <= MACH64_DUMP_CONTEXT * 4 ||
++                  i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
++                  (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
++                   i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
++                  (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
++                   i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
++                      DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
++                               (u32)(ring->start_addr + i * sizeof(u32)),
++                               le32_to_cpu(((u32 *) ring->start)[i + 0]),
++                               le32_to_cpu(((u32 *) ring->start)[i + 1]),
++                               le32_to_cpu(((u32 *) ring->start)[i + 2]),
++                               le32_to_cpu(((u32 *) ring->start)[i + 3]),
++                               i == ring->head ? " (head)" : "",
++                               i == ring->tail ? " (tail)" : "");
++                      skipped = 0;
++              } else {
++                      if (!skipped) {
++                              DRM_INFO("  ...\n");
++                              skipped = 1;
++                      }
++              }
++      }
++
++      DRM_INFO("\n");
++
++      if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) {
++              struct list_head *ptr;
++              u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
++
++              list_for_each(ptr, &dev_priv->pending) {
++                      drm_mach64_freelist_t *entry =
++                          list_entry(ptr, drm_mach64_freelist_t, list);
++                      struct drm_buf *buf = entry->buf;
++
++                      u32 buf_addr = GETBUFADDR(buf);
++
++                      if (buf_addr <= addr && addr < buf_addr + buf->used)
++                              mach64_dump_buf_info(dev_priv, buf);
++              }
++      }
++
++      DRM_INFO("\n");
++      DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
++               MACH64_READ(MACH64_BM_GUI_TABLE));
++      DRM_INFO("\n");
++      DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
++               MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
++      DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
++               MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
++      DRM_INFO("         BM_COMMAND = 0x%08x\n",
++               MACH64_READ(MACH64_BM_COMMAND));
++      DRM_INFO("\n");
++      DRM_INFO("          BM_STATUS = 0x%08x\n",
++               MACH64_READ(MACH64_BM_STATUS));
++      DRM_INFO("           BUS_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_BUS_CNTL));
++      DRM_INFO("          FIFO_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_FIFO_STAT));
++      DRM_INFO("           GUI_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_STAT));
++      DRM_INFO("           SRC_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SRC_CNTL));
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA descriptor ring macros */
++/*@{*/
++
++/**
++ * Add the end mark to the ring's new tail position.
++ *
++ * The bus master engine will keep processing the DMA buffers listed in the ring
++ * until it finds this mark, making it stop.
++ *
++ * \sa mach64_clear_dma_eol
++ */ 
++static __inline__ void mach64_set_dma_eol(volatile u32 *addr)
++{
++#if defined(__i386__)
++      int nr = 31;
++
++      /* Taken from include/asm-i386/bitops.h linux header */
++      __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
++                           :"Ir"(nr));
++#elif defined(__powerpc__)
++      u32 old;
++      u32 mask = cpu_to_le32(MACH64_DMA_EOL);
++
++      /* Taken from the include/asm-ppc/bitops.h linux header */
++      __asm__ __volatile__("\n\
++1:    lwarx   %0,0,%3 \n\
++      or      %0,%0,%2 \n\
++      stwcx.  %0,0,%3 \n\
++      bne-    1b":"=&r"(old), "=m"(*addr)
++                           :"r"(mask), "r"(addr), "m"(*addr)
++                           :"cc");
++#elif defined(__alpha__)
++      u32 temp;
++      u32 mask = MACH64_DMA_EOL;
++
++      /* Taken from the include/asm-alpha/bitops.h linux header */
++      __asm__ __volatile__("1:        ldl_l %0,%3\n"
++                           "  bis %0,%2,%0\n"
++                           "  stl_c %0,%1\n"
++                           "  beq %0,2f\n"
++                           ".subsection 2\n"
++                           "2:        br 1b\n"
++                           ".previous":"=&r"(temp), "=m"(*addr)
++                           :"Ir"(mask), "m"(*addr));
++#else
++      u32 mask = cpu_to_le32(MACH64_DMA_EOL);
++
++      *addr |= mask;
++#endif
++}
++
++/**
++ * Remove the end mark from the ring's old tail position.
++ *
++ * It should be called after calling mach64_set_dma_eol to mark the ring's new
++ * tail position.
++ *
++ * We update the end marks while the bus master engine is in operation. Since
++ * the bus master engine may potentially be reading from the same position
++ * that we write, we must change atomically to avoid having intermediary bad
++ * data.
++ */
++static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
++{
++#if defined(__i386__)
++      int nr = 31;
++
++      /* Taken from include/asm-i386/bitops.h linux header */
++      __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
++                           :"Ir"(nr));
++#elif defined(__powerpc__)
++      u32 old;
++      u32 mask = cpu_to_le32(MACH64_DMA_EOL);
++
++      /* Taken from the include/asm-ppc/bitops.h linux header */
++      __asm__ __volatile__("\n\
++1:    lwarx   %0,0,%3 \n\
++      andc    %0,%0,%2 \n\
++      stwcx.  %0,0,%3 \n\
++      bne-    1b":"=&r"(old), "=m"(*addr)
++                           :"r"(mask), "r"(addr), "m"(*addr)
++                           :"cc");
++#elif defined(__alpha__)
++      u32 temp;
++      u32 mask = ~MACH64_DMA_EOL;
++
++      /* Taken from the include/asm-alpha/bitops.h linux header */
++      __asm__ __volatile__("1:        ldl_l %0,%3\n"
++                           "  and %0,%2,%0\n"
++                           "  stl_c %0,%1\n"
++                           "  beq %0,2f\n"
++                           ".subsection 2\n"
++                           "2:        br 1b\n"
++                           ".previous":"=&r"(temp), "=m"(*addr)
++                           :"Ir"(mask), "m"(*addr));
++#else
++      u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
++
++      *addr &= mask;
++#endif
++}
++
++#define RING_LOCALS                                                   \
++      int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
++
++#define RING_WRITE_OFS  _ring_write
++
++#define BEGIN_RING(n)                                                 \
++      do {                                                            \
++              if (MACH64_VERBOSE) {                                   \
++                      DRM_INFO( "BEGIN_RING( %d ) \n",                \
++                                (n) );                                \
++              }                                                       \
++              if (dev_priv->ring.space <= (n) * sizeof(u32)) {        \
++                      int ret;                                        \
++                      if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
++                              DRM_ERROR( "wait_ring failed, resetting engine\n"); \
++                              mach64_dump_engine_info( dev_priv );    \
++                              mach64_do_engine_reset( dev_priv );     \
++                              return ret;                             \
++                      }                                               \
++              }                                                       \
++              dev_priv->ring.space -= (n) * sizeof(u32);              \
++              _ring = (u32 *) dev_priv->ring.start;                   \
++              _ring_tail = _ring_write = dev_priv->ring.tail;         \
++              _ring_mask = dev_priv->ring.tail_mask;                  \
++      } while (0)
++
++#define OUT_RING( x )                                         \
++do {                                                          \
++      if (MACH64_VERBOSE) {                                   \
++              DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",    \
++                         (unsigned int)(x), _ring_write );    \
++      }                                                       \
++      _ring[_ring_write++] = cpu_to_le32( x );                \
++      _ring_write &= _ring_mask;                              \
++} while (0)
++
++#define ADVANCE_RING()                                                        \
++do {                                                                  \
++      if (MACH64_VERBOSE) {                                           \
++              DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
++                        _ring_write, _ring_tail );                    \
++      }                                                               \
++      DRM_MEMORYBARRIER();                                            \
++      mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );  \
++      DRM_MEMORYBARRIER();                                            \
++      dev_priv->ring.tail = _ring_write;                              \
++      mach64_ring_tick( dev_priv, &(dev_priv)->ring );                \
++} while (0)
++
++/**
++ * Queue a DMA buffer of registers writes into the ring buffer.
++ */ 
++int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
++                           drm_mach64_freelist_t *entry)
++{
++      int bytes, pages, remainder;
++      u32 address, page;
++      int i;
++      struct drm_buf *buf = entry->buf;
++      RING_LOCALS;
++
++      bytes = buf->used;
++      address = GETBUFADDR( buf );
++      pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
++
++      BEGIN_RING( pages * 4 );
++
++      for ( i = 0 ; i < pages-1 ; i++ ) {
++              page = address + i * MACH64_DMA_CHUNKSIZE;
++              OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
++              OUT_RING( page );
++              OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
++              OUT_RING( 0 );
++      }
++
++      /* generate the final descriptor for any remaining commands in this buffer */
++      page = address + i * MACH64_DMA_CHUNKSIZE;
++      remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
++
++      /* Save dword offset of last descriptor for this buffer.
++       * This is needed to check for completion of the buffer in freelist_get
++       */
++      entry->ring_ofs = RING_WRITE_OFS;
++
++      OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
++      OUT_RING( page );
++      OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
++      OUT_RING( 0 );
++
++      ADVANCE_RING();
++      
++      return 0;
++}
++
++/**
++ * Queue DMA buffer controlling host data tranfers (e.g., blit).
++ * 
++ * Almost identical to mach64_add_buf_to_ring.
++ */
++int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
++                                    drm_mach64_freelist_t *entry)
++{
++      int bytes, pages, remainder;
++      u32 address, page;
++      int i;
++      struct drm_buf *buf = entry->buf;
++      RING_LOCALS;
++      
++      bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
++      pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
++      address = GETBUFADDR( buf );
++      
++      BEGIN_RING( 4 + pages * 4 );
++      
++      OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
++      OUT_RING( address );
++      OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
++      OUT_RING( 0 );
++      address += MACH64_HOSTDATA_BLIT_OFFSET;
++      
++      for ( i = 0 ; i < pages-1 ; i++ ) {
++              page = address + i * MACH64_DMA_CHUNKSIZE;
++              OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
++              OUT_RING( page );
++              OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
++              OUT_RING( 0 );
++      }
++      
++      /* generate the final descriptor for any remaining commands in this buffer */
++      page = address + i * MACH64_DMA_CHUNKSIZE;
++      remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
++      
++      /* Save dword offset of last descriptor for this buffer.
++       * This is needed to check for completion of the buffer in freelist_get
++       */
++      entry->ring_ofs = RING_WRITE_OFS;
++      
++      OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
++      OUT_RING( page );
++      OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
++      OUT_RING( 0 );
++      
++      ADVANCE_RING();
++      
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA test and initialization */
++/*@{*/
++
++/**
++ * Perform a simple DMA operation using the pattern registers to test whether
++ * DMA works.
++ *
++ * \return zero if successful.
++ *
++ * \note This function was the testbed for many experiences regarding Mach64
++ * DMA operation. It is left here since it so tricky to get DMA operating
++ * properly in some architectures and hardware.
++ */
++static int mach64_bm_dma_test(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_dma_handle_t *cpu_addr_dmah;
++      u32 data_addr;
++      u32 *table, *data;
++      u32 expected[2];
++      u32 src_cntl, pat_reg0, pat_reg1;
++      int i, count, failed;
++
++      DRM_DEBUG("\n");
++
++      table = (u32 *) dev_priv->ring.start;
++
++      /* FIXME: get a dma buffer from the freelist here */
++      DRM_DEBUG("Allocating data memory ...\n");
++#ifdef __FreeBSD__
++      DRM_UNLOCK();
++#endif
++      cpu_addr_dmah =
++          drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
++#ifdef __FreeBSD__
++      DRM_LOCK();
++#endif
++      if (!cpu_addr_dmah) {
++              DRM_INFO("data-memory allocation failed!\n");
++              return -ENOMEM;
++      } else {
++              data = (u32 *) cpu_addr_dmah->vaddr;
++              data_addr = (u32) cpu_addr_dmah->busaddr;
++      }
++
++      /* Save the X server's value for SRC_CNTL and restore it
++       * in case our test fails.  This prevents the X server
++       * from disabling it's cache for this register
++       */
++      src_cntl = MACH64_READ(MACH64_SRC_CNTL);
++      pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
++      pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
++
++      mach64_do_wait_for_fifo(dev_priv, 3);
++
++      MACH64_WRITE(MACH64_SRC_CNTL, 0);
++      MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
++      MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
++
++      mach64_do_wait_for_idle(dev_priv);
++
++      for (i = 0; i < 2; i++) {
++              u32 reg;
++              reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
++              DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
++              if (reg != 0x11111111) {
++                      DRM_INFO("Error initializing test registers\n");
++                      DRM_INFO("resetting engine ...\n");
++                      mach64_do_engine_reset(dev_priv);
++                      DRM_INFO("freeing data buffer memory.\n");
++                      drm_pci_free(dev, cpu_addr_dmah);
++                      return -EIO;
++              }
++      }
++
++      /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
++      count = 0;
++
++      data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
++      data[count++] = expected[0] = 0x22222222;
++      data[count++] = expected[1] = 0xaaaaaaaa;
++
++      while (count < 1020) {
++              data[count++] =
++                  cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
++              data[count++] = 0x22222222;
++              data[count++] = 0xaaaaaaaa;
++      }
++      data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
++      data[count++] = 0;
++
++      DRM_DEBUG("Preparing table ...\n");
++      table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
++                                                       MACH64_APERTURE_OFFSET);
++      table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
++      table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
++                                              | MACH64_DMA_HOLD_OFFSET
++                                              | MACH64_DMA_EOL);
++      table[MACH64_DMA_RESERVED] = 0;
++
++      DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
++      DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
++      DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
++      DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
++
++      for (i = 0; i < 6; i++) {
++              DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
++      }
++      DRM_DEBUG(" ...\n");
++      for (i = count - 5; i < count; i++) {
++              DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
++      }
++
++      DRM_MEMORYBARRIER();
++
++      DRM_DEBUG("waiting for idle...\n");
++      if ((i = mach64_do_wait_for_idle(dev_priv))) {
++              DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
++              DRM_INFO("resetting engine ...\n");
++              mach64_do_engine_reset(dev_priv);
++              mach64_do_wait_for_fifo(dev_priv, 3);
++              MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
++              MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
++              MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
++              DRM_INFO("freeing data buffer memory.\n");
++              drm_pci_free(dev, cpu_addr_dmah);
++              return i;
++      }
++      DRM_DEBUG("waiting for idle...done\n");
++
++      DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
++      DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
++      DRM_DEBUG("\n");
++      DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
++      DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
++
++      DRM_DEBUG("starting DMA transfer...\n");
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      MACH64_WRITE(MACH64_SRC_CNTL,
++                   MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
++                   MACH64_SRC_BM_OP_SYSTEM_TO_REG);
++
++      /* Kick off the transfer */
++      DRM_DEBUG("starting DMA transfer... done.\n");
++      MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
++
++      DRM_DEBUG("waiting for idle...\n");
++
++      if ((i = mach64_do_wait_for_idle(dev_priv))) {
++              /* engine locked up, dump register state and reset */
++              DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
++              mach64_dump_engine_info(dev_priv);
++              DRM_INFO("resetting engine ...\n");
++              mach64_do_engine_reset(dev_priv);
++              mach64_do_wait_for_fifo(dev_priv, 3);
++              MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
++              MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
++              MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
++              DRM_INFO("freeing data buffer memory.\n");
++              drm_pci_free(dev, cpu_addr_dmah);
++              return i;
++      }
++
++      DRM_DEBUG("waiting for idle...done\n");
++
++      /* restore SRC_CNTL */
++      mach64_do_wait_for_fifo(dev_priv, 1);
++      MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
++
++      failed = 0;
++
++      /* Check register values to see if the GUI master operation succeeded */
++      for (i = 0; i < 2; i++) {
++              u32 reg;
++              reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
++              DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
++              if (reg != expected[i]) {
++                      failed = -1;
++              }
++      }
++
++      /* restore pattern registers */
++      mach64_do_wait_for_fifo(dev_priv, 2);
++      MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
++      MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
++
++      DRM_DEBUG("freeing data buffer memory.\n");
++      drm_pci_free(dev, cpu_addr_dmah);
++      DRM_DEBUG("returning ...\n");
++
++      return failed;
++}
++
++/**
++ * Called during the DMA initialization ioctl to initialize all the necessary
++ * software and hardware state for DMA operation.
++ */
++static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
++{
++      drm_mach64_private_t *dev_priv;
++      u32 tmp;
++      int i, ret;
++
++      DRM_DEBUG("\n");
++
++      dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_mach64_private_t));
++
++      dev_priv->is_pci = init->is_pci;
++
++      dev_priv->fb_bpp = init->fb_bpp;
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      dev_priv->depth_bpp = init->depth_bpp;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
++                                      (dev_priv->front_offset >> 3));
++      dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
++                                     (dev_priv->back_offset >> 3));
++      dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
++                                      (dev_priv->depth_offset >> 3));
++
++      dev_priv->usec_timeout = 1000000;
++
++      /* Set up the freelist, placeholder list and pending list */
++      INIT_LIST_HEAD(&dev_priv->free_list);
++      INIT_LIST_HEAD(&dev_priv->placeholders);
++      INIT_LIST_HEAD(&dev_priv->pending);
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("can not find sarea!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++      dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
++      if (!dev_priv->fb) {
++              DRM_ERROR("can not find frame buffer map!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++      dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio) {
++              DRM_ERROR("can not find mmio map!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
++      if (!dev_priv->ring_map) {
++              DRM_ERROR("can not find ring map!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->sarea_priv = (drm_mach64_sarea_t *)
++          ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
++
++      if (!dev_priv->is_pci) {
++              drm_core_ioremap(dev_priv->ring_map, dev);
++              if (!dev_priv->ring_map->handle) {
++                      DRM_ERROR("can not ioremap virtual address for"
++                                " descriptor ring\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -ENOMEM;
++              }
++              dev->agp_buffer_token = init->buffers_offset;
++              dev->agp_buffer_map =
++                  drm_core_findmap(dev, init->buffers_offset);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("can not find dma buffer map!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -EINVAL;
++              }
++              /* there might be a nicer way to do this -
++                 dev isn't passed all the way though the mach64 - DA */
++              dev_priv->dev_buffers = dev->agp_buffer_map;
++
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev->agp_buffer_map->handle) {
++                      DRM_ERROR("can not ioremap virtual address for"
++                                " dma buffer\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -ENOMEM;
++              }
++              dev_priv->agp_textures =
++                  drm_core_findmap(dev, init->agp_textures_offset);
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("can not find agp texture region!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -EINVAL;
++              }
++      }
++
++      dev->dev_private = (void *)dev_priv;
++
++      dev_priv->driver_mode = init->dma_mode;
++
++      /* changing the FIFO size from the default causes problems with DMA */
++      tmp = MACH64_READ(MACH64_GUI_CNTL);
++      if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
++              DRM_INFO("Setting FIFO size to 128 entries\n");
++              /* FIFO must be empty to change the FIFO depth */
++              if ((ret = mach64_do_wait_for_idle(dev_priv))) {
++                      DRM_ERROR
++                          ("wait for idle failed before changing FIFO depth!\n");
++                      mach64_do_cleanup_dma(dev);
++                      return ret;
++              }
++              MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
++                                             | MACH64_CMDFIFO_SIZE_128));
++              /* need to read GUI_STAT for proper sync according to docs */
++              if ((ret = mach64_do_wait_for_idle(dev_priv))) {
++                      DRM_ERROR
++                          ("wait for idle failed when changing FIFO depth!\n");
++                      mach64_do_cleanup_dma(dev);
++                      return ret;
++              }
++      }
++
++      dev_priv->ring.size = 0x4000;   /* 16KB */
++      dev_priv->ring.start = dev_priv->ring_map->handle;
++      dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
++
++      memset(dev_priv->ring.start, 0, dev_priv->ring.size);
++      DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
++               dev_priv->ring.start, dev_priv->ring.start_addr);
++
++      ret = 0;
++      if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
++
++              /* enable block 1 registers and bus mastering */
++              MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
++                                              | MACH64_BUS_EXT_REG_EN)
++                                             & ~MACH64_BUS_MASTER_DIS));
++
++              /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
++              DRM_DEBUG("Starting DMA test...\n");
++              if ((ret = mach64_bm_dma_test(dev))) {
++                      dev_priv->driver_mode = MACH64_MODE_MMIO;
++              }
++      }
++
++      switch (dev_priv->driver_mode) {
++      case MACH64_MODE_MMIO:
++              MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
++                                             | MACH64_BUS_EXT_REG_EN
++                                             | MACH64_BUS_MASTER_DIS));
++              if (init->dma_mode == MACH64_MODE_MMIO)
++                      DRM_INFO("Forcing pseudo-DMA mode\n");
++              else
++                      DRM_INFO
++                          ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
++                           ret);
++              break;
++      case MACH64_MODE_DMA_SYNC:
++              DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
++              break;
++      case MACH64_MODE_DMA_ASYNC:
++      default:
++              DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
++      }
++
++      dev_priv->ring_running = 0;
++
++      /* setup offsets for physical address of table start and end */
++      dev_priv->ring.head_addr = dev_priv->ring.start_addr;
++      dev_priv->ring.head = dev_priv->ring.tail = 0;
++      dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
++      dev_priv->ring.space = dev_priv->ring.size;
++
++      /* setup physical address and size of descriptor table */
++      mach64_do_wait_for_fifo(dev_priv, 1);
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   (dev_priv->ring.
++                    head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
++
++      /* init frame counter */
++      dev_priv->sarea_priv->frames_queued = 0;
++      for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++              dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
++      }
++
++      /* Allocate the DMA buffer freelist */
++      if ((ret = mach64_init_freelist(dev))) {
++              DRM_ERROR("Freelist allocation failed\n");
++              mach64_do_cleanup_dma(dev);
++              return ret;
++      }
++
++      return 0;
++}
++
++/*******************************************************************/
++/** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
++ */
++
++int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      volatile u32 *ring_read;
++      struct list_head *ptr;
++      drm_mach64_freelist_t *entry;
++      struct drm_buf *buf = NULL;
++      u32 *buf_ptr;
++      u32 used, reg, target;
++      int fifo, count, found, ret, no_idle_wait;
++
++      fifo = count = reg = no_idle_wait = 0;
++      target = MACH64_BM_ADDR;
++
++      if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
++              DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
++              mach64_dump_engine_info(dev_priv);
++              mach64_do_engine_reset(dev_priv);
++              return ret;
++      }
++
++      ring_read = (u32 *) ring->start;
++
++      while (ring->tail != ring->head) {
++              u32 buf_addr, new_target, offset;
++              u32 bytes, remaining, head, eol;
++
++              head = ring->head;
++
++              new_target =
++                  le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
++              buf_addr = le32_to_cpu(ring_read[head++]);
++              eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
++              bytes = le32_to_cpu(ring_read[head++])
++                  & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
++              head++;
++              head &= ring->tail_mask;
++
++              /* can't wait for idle between a blit setup descriptor
++               * and a HOSTDATA descriptor or the engine will lock
++               */
++              if (new_target == MACH64_BM_HOSTDATA
++                  && target == MACH64_BM_ADDR)
++                      no_idle_wait = 1;
++
++              target = new_target;
++
++              found = 0;
++              offset = 0;
++              list_for_each(ptr, &dev_priv->pending) {
++                      entry = list_entry(ptr, drm_mach64_freelist_t, list);
++                      buf = entry->buf;
++                      offset = buf_addr - GETBUFADDR(buf);
++                      if (offset >= 0 && offset < MACH64_BUFFER_SIZE) {
++                              found = 1;
++                              break;
++                      }
++              }
++
++              if (!found || buf == NULL) {
++                      DRM_ERROR
++                          ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
++                           head, ring->tail, buf_addr, (eol ? "eol" : ""));
++                      mach64_dump_ring_info(dev_priv);
++                      mach64_do_engine_reset(dev_priv);
++                      return -EINVAL;
++              }
++
++              /* Hand feed the buffer to the card via MMIO, waiting for the fifo
++               * every 16 writes
++               */
++              DRM_DEBUG("target: (0x%08x) %s\n", target,
++                        (target ==
++                         MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
++              DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
++                        buf->used);
++
++              remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
++              used = bytes >> 2;      /* dwords in buffer for this descriptor */
++              buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
++
++              while (used) {
++
++                      if (count == 0) {
++                              if (target == MACH64_BM_HOSTDATA) {
++                                      reg = DMAREG(MACH64_HOST_DATA0);
++                                      count =
++                                          (remaining > 16) ? 16 : remaining;
++                                      fifo = 0;
++                              } else {
++                                      reg = le32_to_cpu(*buf_ptr++);
++                                      used--;
++                                      count = (reg >> 16) + 1;
++                              }
++
++                              reg = reg & 0xffff;
++                              reg = MMSELECT(reg);
++                      }
++                      while (count && used) {
++                              if (!fifo) {
++                                      if (no_idle_wait) {
++                                              if ((ret =
++                                                   mach64_do_wait_for_fifo
++                                                   (dev_priv, 16)) < 0) {
++                                                      no_idle_wait = 0;
++                                                      return ret;
++                                              }
++                                      } else {
++                                              if ((ret =
++                                                   mach64_do_wait_for_idle
++                                                   (dev_priv)) < 0) {
++                                                      return ret;
++                                              }
++                                      }
++                                      fifo = 16;
++                              }
++                              --fifo;
++                              MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
++                              used--;
++                              remaining--;
++
++                              reg += 4;
++                              count--;
++                      }
++              }
++              ring->head = head;
++              ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
++              ring->space += (4 * sizeof(u32));
++      }
++
++      if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
++              return ret;
++      }
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      DRM_DEBUG("completed\n");
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA cleanup */
++/*@{*/
++
++int mach64_do_cleanup_dma(struct drm_device * dev)
++{
++      DRM_DEBUG("\n");
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              drm_mach64_private_t *dev_priv = dev->dev_private;
++
++              if (!dev_priv->is_pci) {
++                      if (dev_priv->ring_map)
++                              drm_core_ioremapfree(dev_priv->ring_map, dev);
++
++                      if (dev->agp_buffer_map) {
++                              drm_core_ioremapfree(dev->agp_buffer_map, dev);
++                              dev->agp_buffer_map = NULL;
++                      }
++              }
++
++              mach64_destroy_freelist(dev);
++
++              drm_free(dev_priv, sizeof(drm_mach64_private_t),
++                       DRM_MEM_DRIVER);
++              dev->dev_private = NULL;
++      }
++
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name IOCTL handlers */
++/*@{*/
++
++int mach64_dma_init(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_init_t *init = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case DRM_MACH64_INIT_DMA:
++              return mach64_do_dma_init(dev, init);
++      case DRM_MACH64_CLEANUP_DMA:
++              return mach64_do_cleanup_dma(dev);
++      }
++
++      return -EINVAL;
++}
++
++int mach64_dma_idle(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mach64_do_dma_idle(dev_priv);
++}
++
++int mach64_dma_flush(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mach64_do_dma_flush(dev_priv);
++}
++
++int mach64_engine_reset(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mach64_do_engine_reset(dev_priv);
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name Freelist management */
++/*@{*/
++
++int mach64_init_freelist(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_freelist_t *entry;
++      struct list_head *ptr;
++      int i;
++
++      DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              if ((entry =
++                   (drm_mach64_freelist_t *)
++                   drm_alloc(sizeof(drm_mach64_freelist_t),
++                             DRM_MEM_BUFLISTS)) == NULL)
++                      return -ENOMEM;
++              memset(entry, 0, sizeof(drm_mach64_freelist_t));
++              entry->buf = dma->buflist[i];
++              ptr = &entry->list;
++              list_add_tail(ptr, &dev_priv->free_list);
++      }
++
++      return 0;
++}
++
++void mach64_destroy_freelist(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_freelist_t *entry;
++      struct list_head *ptr;
++      struct list_head *tmp;
++
++      DRM_DEBUG("\n");
++
++      list_for_each_safe(ptr, tmp, &dev_priv->pending) {
++              list_del(ptr);
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
++      }
++      list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
++              list_del(ptr);
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
++      }
++
++      list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
++              list_del(ptr);
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
++      }
++}
++
++/* IMPORTANT: This function should only be called when the engine is idle or locked up,
++ * as it assumes all buffers in the pending list have been completed by the hardware.
++ */
++int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
++{
++      struct list_head *ptr;
++      struct list_head *tmp;
++      drm_mach64_freelist_t *entry;
++      int i;
++
++      if (list_empty(&dev_priv->pending))
++              return 0;
++
++      /* Iterate the pending list and move all buffers into the freelist... */
++      i = 0;
++      list_for_each_safe(ptr, tmp, &dev_priv->pending) {
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              if (entry->discard) {
++                      entry->buf->pending = 0;
++                      list_del(ptr);
++                      list_add_tail(ptr, &dev_priv->free_list);
++                      i++;
++              }
++      }
++
++      DRM_DEBUG("released %d buffers from pending list\n", i);
++
++      return 0;
++}
++
++static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      struct list_head *ptr;
++      struct list_head *tmp;
++      drm_mach64_freelist_t *entry;
++      u32 head, tail, ofs;
++
++      mach64_ring_tick(dev_priv, ring);
++      head = ring->head;
++      tail = ring->tail;
++
++      if (head == tail) {
++#if MACH64_EXTRA_CHECKING
++              if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
++                      DRM_ERROR("Empty ring with non-idle engine!\n");
++                      mach64_dump_ring_info(dev_priv);
++                      return -1;
++              }
++#endif
++              /* last pass is complete, so release everything */
++              mach64_do_release_used_buffers(dev_priv);
++              DRM_DEBUG("idle engine, freed all buffers.\n");
++              if (list_empty(&dev_priv->free_list)) {
++                      DRM_ERROR("Freelist empty with idle engine\n");
++                      return -1;
++              }
++              return 0;
++      }
++      /* Look for a completed buffer and bail out of the loop
++       * as soon as we find one -- don't waste time trying
++       * to free extra bufs here, leave that to do_release_used_buffers
++       */
++      list_for_each_safe(ptr, tmp, &dev_priv->pending) {
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              ofs = entry->ring_ofs;
++              if (entry->discard &&
++                  ((head < tail && (ofs < head || ofs >= tail)) ||
++                   (head > tail && (ofs < head && ofs >= tail)))) {
++#if MACH64_EXTRA_CHECKING
++                      int i;
++
++                      for (i = head; i != tail; i = (i + 4) & ring->tail_mask)
++                      {
++                              u32 o1 = le32_to_cpu(((u32 *) ring->
++                                               start)[i + 1]);
++                              u32 o2 = GETBUFADDR(entry->buf);
++
++                              if (o1 == o2) {
++                                      DRM_ERROR
++                                          ("Attempting to free used buffer: "
++                                           "i=%d  buf=0x%08x\n",
++                                           i, o1);
++                                      mach64_dump_ring_info(dev_priv);
++                                      return -1;
++                              }
++                      }
++#endif
++                      /* found a processed buffer */
++                      entry->buf->pending = 0;
++                      list_del(ptr);
++                      list_add_tail(ptr, &dev_priv->free_list);
++                      DRM_DEBUG
++                          ("freed processed buffer (head=%d tail=%d "
++                           "buf ring ofs=%d).\n",
++                           head, tail, ofs);
++                      return 0;
++              }
++      }
++
++      return 1;
++}
++
++struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      drm_mach64_freelist_t *entry;
++      struct list_head *ptr;
++      int t;
++
++      if (list_empty(&dev_priv->free_list)) {
++              if (list_empty(&dev_priv->pending)) {
++                      DRM_ERROR
++                          ("Couldn't get buffer - pending and free lists empty\n");
++                      t = 0;
++                      list_for_each(ptr, &dev_priv->placeholders) {
++                              t++;
++                      }
++                      DRM_INFO("Placeholders: %d\n", t);
++                      return NULL;
++              }
++
++              for (t = 0; t < dev_priv->usec_timeout; t++) {
++                      int ret;
++
++                      ret = mach64_do_reclaim_completed(dev_priv);
++                      if (ret == 0)
++                              goto _freelist_entry_found;
++                      if (ret < 0)
++                              return NULL;
++
++                      DRM_UDELAY(1);
++              }
++              mach64_dump_ring_info(dev_priv);
++              DRM_ERROR
++                  ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
++                   ring->head_addr, ring->head, ring->tail);
++              return NULL;
++      }
++
++      _freelist_entry_found:
++      ptr = dev_priv->free_list.next;
++      list_del(ptr);
++      entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      entry->buf->used = 0;
++      list_add_tail(ptr, &dev_priv->placeholders);
++      return entry->buf;
++}
++
++int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
++{
++      struct list_head *ptr;
++      drm_mach64_freelist_t *entry;
++
++#if MACH64_EXTRA_CHECKING
++      list_for_each(ptr, &dev_priv->pending) {
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              if (copy_buf == entry->buf) {
++                      DRM_ERROR("Trying to release a pending buf\n");
++                      return -EFAULT;
++              }
++      }
++#endif
++      ptr = dev_priv->placeholders.next;
++      entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      copy_buf->pending = 0;
++      copy_buf->used = 0;
++      entry->buf = copy_buf;
++      entry->discard = 1;
++      list_del(ptr);
++      list_add_tail(ptr, &dev_priv->free_list);
++
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA buffer request and submission IOCTL handler */
++/*@{*/
++
++static int mach64_dma_get_buffers(struct drm_device *dev,
++                                struct drm_file *file_priv,
++                                struct drm_dma * d)
++{
++      int i;
++      struct drm_buf *buf;
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = mach64_freelist_get(dev_priv);
++#if MACH64_EXTRA_CHECKING
++              if (!buf)
++                      return -EFAULT;
++#else
++              if (!buf)
++                      return -EAGAIN;
++#endif
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
++                                   sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
++                                   sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int mach64_dma_buffers(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_dma *d = data;
++      int ret = 0;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              ret = -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = mach64_dma_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++void mach64_driver_lastclose(struct drm_device * dev)
++{
++      mach64_do_cleanup_dma(dev);
++}
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drm.h git-nokia/drivers/gpu/drm-tungsten/mach64_drm.h
+--- git/drivers/gpu/drm-tungsten/mach64_drm.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drm.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,256 @@
++/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*-
++ * Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002 Frank C. Earl
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Frank C. Earl <fearl@airmail.net>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++#ifndef __MACH64_DRM_H__
++#define __MACH64_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mach64_sarea.h)
++ */
++#ifndef __MACH64_SAREA_DEFINES__
++#define __MACH64_SAREA_DEFINES__
++
++/* What needs to be changed for the current vertex buffer?
++ * GH: We're going to be pedantic about this.  We want the card to do as
++ * little as possible, so let's avoid having it fetch a whole bunch of
++ * register values that don't change all that often, if at all.
++ */
++#define MACH64_UPLOAD_DST_OFF_PITCH   0x0001
++#define MACH64_UPLOAD_Z_OFF_PITCH     0x0002
++#define MACH64_UPLOAD_Z_ALPHA_CNTL    0x0004
++#define MACH64_UPLOAD_SCALE_3D_CNTL   0x0008
++#define MACH64_UPLOAD_DP_FOG_CLR      0x0010
++#define MACH64_UPLOAD_DP_WRITE_MASK   0x0020
++#define MACH64_UPLOAD_DP_PIX_WIDTH    0x0040
++#define MACH64_UPLOAD_SETUP_CNTL      0x0080
++#define MACH64_UPLOAD_MISC            0x0100
++#define MACH64_UPLOAD_TEXTURE         0x0200
++#define MACH64_UPLOAD_TEX0IMAGE               0x0400
++#define MACH64_UPLOAD_TEX1IMAGE               0x0800
++#define MACH64_UPLOAD_CLIPRECTS               0x1000  /* handled client-side */
++#define MACH64_UPLOAD_CONTEXT         0x00ff
++#define MACH64_UPLOAD_ALL             0x1fff
++
++/* DMA buffer size
++ */
++#define MACH64_BUFFER_SIZE            16384
++
++/* Max number of swaps allowed on the ring
++ * before the client must wait
++ */
++#define MACH64_MAX_QUEUED_FRAMES        3U
++
++/* Byte offsets for host blit buffer data
++ */
++#define MACH64_HOSTDATA_BLIT_OFFSET   104
++
++/* Keep these small for testing.
++ */
++#define MACH64_NR_SAREA_CLIPRECTS     8
++
++#define MACH64_CARD_HEAP              0
++#define MACH64_AGP_HEAP                       1
++#define MACH64_NR_TEX_HEAPS           2
++#define MACH64_NR_TEX_REGIONS         64
++#define MACH64_LOG_TEX_GRANULARITY    16
++
++#define MACH64_TEX_MAXLEVELS          1
++
++#define MACH64_NR_CONTEXT_REGS                15
++#define MACH64_NR_TEXTURE_REGS                4
++
++#endif                                /* __MACH64_SAREA_DEFINES__ */
++
++typedef struct {
++      unsigned int dst_off_pitch;
++
++      unsigned int z_off_pitch;
++      unsigned int z_cntl;
++      unsigned int alpha_tst_cntl;
++
++      unsigned int scale_3d_cntl;
++
++      unsigned int sc_left_right;
++      unsigned int sc_top_bottom;
++
++      unsigned int dp_fog_clr;
++      unsigned int dp_write_mask;
++      unsigned int dp_pix_width;
++      unsigned int dp_mix;
++      unsigned int dp_src;
++
++      unsigned int clr_cmp_cntl;
++      unsigned int gui_traj_cntl;
++
++      unsigned int setup_cntl;
++
++      unsigned int tex_size_pitch;
++      unsigned int tex_cntl;
++      unsigned int secondary_tex_off;
++      unsigned int tex_offset;
++} drm_mach64_context_regs_t;
++
++typedef struct drm_mach64_sarea {
++      /* The channel for communication of state information to the kernel
++       * on firing a vertex dma buffer.
++       */
++      drm_mach64_context_regs_t context_state;
++      unsigned int dirty;
++      unsigned int vertsize;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Counters for client-side throttling of rendering clients.
++       */
++      unsigned int frames_queued;
++
++      /* Texture memory LRU.
++       */
++      struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
++                                                     1];
++      unsigned int tex_age[MACH64_NR_TEX_HEAPS];
++      int ctx_owner;
++} drm_mach64_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mach64_common.h)
++ */
++
++/* Mach64 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++
++#define DRM_MACH64_INIT           0x00
++#define DRM_MACH64_IDLE           0x01
++#define DRM_MACH64_RESET          0x02
++#define DRM_MACH64_SWAP           0x03
++#define DRM_MACH64_CLEAR          0x04
++#define DRM_MACH64_VERTEX         0x05
++#define DRM_MACH64_BLIT           0x06
++#define DRM_MACH64_FLUSH          0x07
++#define DRM_MACH64_GETPARAM       0x08
++
++#define DRM_IOCTL_MACH64_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
++#define DRM_IOCTL_MACH64_IDLE           DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_IDLE )
++#define DRM_IOCTL_MACH64_RESET          DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_RESET )
++#define DRM_IOCTL_MACH64_SWAP           DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_SWAP )
++#define DRM_IOCTL_MACH64_CLEAR          DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t)
++#define DRM_IOCTL_MACH64_VERTEX         DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
++#define DRM_IOCTL_MACH64_BLIT           DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
++#define DRM_IOCTL_MACH64_FLUSH          DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
++#define DRM_IOCTL_MACH64_GETPARAM       DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
++
++/* Buffer flags for clears
++ */
++#define MACH64_FRONT                  0x1
++#define MACH64_BACK                   0x2
++#define MACH64_DEPTH                  0x4
++
++/* Primitive types for vertex buffers
++ */
++#define MACH64_PRIM_POINTS            0x00000000
++#define MACH64_PRIM_LINES             0x00000001
++#define MACH64_PRIM_LINE_LOOP         0x00000002
++#define MACH64_PRIM_LINE_STRIP                0x00000003
++#define MACH64_PRIM_TRIANGLES         0x00000004
++#define MACH64_PRIM_TRIANGLE_STRIP    0x00000005
++#define MACH64_PRIM_TRIANGLE_FAN      0x00000006
++#define MACH64_PRIM_QUADS             0x00000007
++#define MACH64_PRIM_QUAD_STRIP                0x00000008
++#define MACH64_PRIM_POLYGON           0x00000009
++
++typedef enum _drm_mach64_dma_mode_t {
++      MACH64_MODE_DMA_ASYNC,
++      MACH64_MODE_DMA_SYNC,
++      MACH64_MODE_MMIO
++} drm_mach64_dma_mode_t;
++
++typedef struct drm_mach64_init {
++      enum {
++              DRM_MACH64_INIT_DMA = 0x01,
++              DRM_MACH64_CLEANUP_DMA = 0x02
++      } func;
++
++      unsigned long sarea_priv_offset;
++      int is_pci;
++      drm_mach64_dma_mode_t dma_mode;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long ring_offset;
++      unsigned long buffers_offset;
++      unsigned long agp_textures_offset;
++} drm_mach64_init_t;
++
++typedef struct drm_mach64_clear {
++      unsigned int flags;
++      int x, y, w, h;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++} drm_mach64_clear_t;
++
++typedef struct drm_mach64_vertex {
++      int prim;
++      void *buf;              /* Address of vertex buffer */
++      unsigned long used;     /* Number of bytes in buffer */
++      int discard;            /* Client finished with buffer? */
++} drm_mach64_vertex_t;
++
++typedef struct drm_mach64_blit {
++      void *buf;
++      int pitch;
++      int offset;
++      int format;
++      unsigned short x, y;
++      unsigned short width, height;
++} drm_mach64_blit_t;
++
++typedef struct drm_mach64_getparam {
++      enum {
++              MACH64_PARAM_FRAMES_QUEUED = 0x01,
++              MACH64_PARAM_IRQ_NR = 0x02
++      } param;
++      void *value;
++} drm_mach64_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drv.c git-nokia/drivers/gpu/drm-tungsten/mach64_drv.c
+--- git/drivers/gpu/drm-tungsten/mach64_drv.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drv.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,105 @@
++/* mach64_drv.c -- mach64 (Rage Pro) driver -*- linux-c -*-
++ * Created: Fri Nov 24 18:34:32 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 Gareth Hughes
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * GARETH HUGHES BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      mach64_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
++          | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .lastclose = mach64_driver_lastclose,
++      .get_vblank_counter = mach64_get_vblank_counter,
++      .enable_vblank = mach64_enable_vblank,
++      .disable_vblank = mach64_disable_vblank,
++      .irq_preinstall = mach64_driver_irq_preinstall,
++      .irq_postinstall = mach64_driver_irq_postinstall,
++      .irq_uninstall = mach64_driver_irq_uninstall,
++      .irq_handler = mach64_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = mach64_ioctls,
++      .dma_ioctl = mach64_dma_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init mach64_init(void)
++{
++      driver.num_ioctls = mach64_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit mach64_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(mach64_init);
++module_exit(mach64_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drv.h git-nokia/drivers/gpu/drm-tungsten/mach64_drv.h
+--- git/drivers/gpu/drm-tungsten/mach64_drv.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drv.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,859 @@
++/* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*-
++ * Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002 Frank C. Earl
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Frank C. Earl <fearl@airmail.net>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ *    José Fonseca <j_r_fonseca@yahoo.co.uk>
++ */
++
++#ifndef __MACH64_DRV_H__
++#define __MACH64_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Gareth Hughes, Leif Delgass, José Fonseca"
++
++#define DRIVER_NAME           "mach64"
++#define DRIVER_DESC           "DRM module for the ATI Rage Pro"
++#define DRIVER_DATE           "20060718"
++
++#define DRIVER_MAJOR          2
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     0
++
++/* FIXME: remove these when not needed */
++/* Development driver options */
++#define MACH64_EXTRA_CHECKING     0   /* Extra sanity checks for DMA/freelist management */
++#define MACH64_VERBOSE                  0     /* Verbose debugging output */
++
++typedef struct drm_mach64_freelist {
++      struct list_head list;  /* List pointers for free_list, placeholders, or pending list */
++      struct drm_buf *buf;            /* Pointer to the buffer */
++      int discard;            /* This flag is set when we're done (re)using a buffer */
++      u32 ring_ofs;           /* dword offset in ring of last descriptor for this buffer */
++} drm_mach64_freelist_t;
++
++typedef struct drm_mach64_descriptor_ring {
++      void *start;            /* write pointer (cpu address) to start of descriptor ring */
++      u32 start_addr;         /* bus address of beginning of descriptor ring */
++      int size;               /* size of ring in bytes */
++
++      u32 head_addr;          /* bus address of descriptor ring head */
++      u32 head;               /* dword offset of descriptor ring head */
++      u32 tail;               /* dword offset of descriptor ring tail */
++      u32 tail_mask;          /* mask used to wrap ring */
++      int space;              /* number of free bytes in ring */
++} drm_mach64_descriptor_ring_t;
++
++typedef struct drm_mach64_private {
++      drm_mach64_sarea_t *sarea_priv;
++
++      int is_pci;
++      drm_mach64_dma_mode_t driver_mode;      /* Async DMA, sync DMA, or MMIO */
++
++      int usec_timeout;       /* Timeout for the wait functions */
++
++      drm_mach64_descriptor_ring_t ring;      /* DMA descriptor table (ring buffer) */
++      int ring_running;       /* Is bus mastering is enabled */
++
++      struct list_head free_list;     /* Free-list head */
++      struct list_head placeholders;  /* Placeholder list for buffers held by clients */
++      struct list_head pending;       /* Buffers pending completion */
++
++      u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES];        /* dword ring offsets of most recent frame swaps */
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      atomic_t vbl_received;          /**< Number of vblanks received. */
++
++      u32 front_offset_pitch;
++      u32 back_offset_pitch;
++      u32 depth_offset_pitch;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *fb;
++      drm_local_map_t *mmio;
++      drm_local_map_t *ring_map;
++      drm_local_map_t *dev_buffers;   /* this is a pointer to a structure in dev */
++      drm_local_map_t *agp_textures;
++} drm_mach64_private_t;
++
++extern struct drm_ioctl_desc mach64_ioctls[];
++extern int mach64_max_ioctl;
++
++                              /* mach64_dma.c */
++extern int mach64_dma_init(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_dma_idle(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_dma_flush(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int mach64_engine_reset(struct drm_device *dev, void *data,
++                             struct drm_file *file_priv);
++extern int mach64_dma_buffers(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv);
++extern void mach64_driver_lastclose(struct drm_device * dev);
++
++extern int mach64_init_freelist(struct drm_device * dev);
++extern void mach64_destroy_freelist(struct drm_device * dev);
++extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv);
++extern int mach64_freelist_put(drm_mach64_private_t * dev_priv,
++                             struct drm_buf * copy_buf);
++
++extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
++                                 int entries);
++extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv);
++extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n);
++extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv);
++extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv);
++extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);
++extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);
++extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
++
++extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
++                                  drm_mach64_freelist_t *_entry);
++extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
++                                           drm_mach64_freelist_t *_entry);
++
++extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
++extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
++extern int mach64_do_cleanup_dma(struct drm_device * dev);
++
++                              /* mach64_state.c */
++extern int mach64_dma_clear(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int mach64_dma_swap(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_dma_vertex(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv);
++extern int mach64_dma_blit(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_get_param(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++
++extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int mach64_enable_vblank(struct drm_device *dev, int crtc);
++extern void mach64_disable_vblank(struct drm_device *dev, int crtc);
++extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
++extern void mach64_driver_irq_preinstall(struct drm_device *dev);
++extern int mach64_driver_irq_postinstall(struct drm_device *dev);
++extern void mach64_driver_irq_uninstall(struct drm_device *dev);
++
++/* ================================================================
++ * Registers
++ */
++
++#define MACH64_AGP_BASE                               0x0148
++#define MACH64_AGP_CNTL                               0x014c
++#define MACH64_ALPHA_TST_CNTL                 0x0550
++
++#define MACH64_DSP_CONFIG                     0x0420
++#define MACH64_DSP_ON_OFF                     0x0424
++#define MACH64_EXT_MEM_CNTL                   0x04ac
++#define MACH64_GEN_TEST_CNTL                  0x04d0
++#define MACH64_HW_DEBUG                               0x047c
++#define MACH64_MEM_ADDR_CONFIG                        0x0434
++#define MACH64_MEM_BUF_CNTL                   0x042c
++#define MACH64_MEM_CNTL                               0x04b0
++
++#define MACH64_BM_ADDR                                0x0648
++#define MACH64_BM_COMMAND                     0x0188
++#define MACH64_BM_DATA                                0x0648
++#define MACH64_BM_FRAME_BUF_OFFSET            0x0180
++#define MACH64_BM_GUI_TABLE                   0x01b8
++#define MACH64_BM_GUI_TABLE_CMD                       0x064c
++#     define MACH64_CIRCULAR_BUF_SIZE_16KB            (0 << 0)
++#     define MACH64_CIRCULAR_BUF_SIZE_32KB            (1 << 0)
++#     define MACH64_CIRCULAR_BUF_SIZE_64KB            (2 << 0)
++#     define MACH64_CIRCULAR_BUF_SIZE_128KB           (3 << 0)
++#     define MACH64_LAST_DESCRIPTOR                   (1 << 31)
++#define MACH64_BM_HOSTDATA                    0x0644
++#define MACH64_BM_STATUS                      0x018c
++#define MACH64_BM_SYSTEM_MEM_ADDR             0x0184
++#define MACH64_BM_SYSTEM_TABLE                        0x01bc
++#define MACH64_BUS_CNTL                               0x04a0
++#     define MACH64_BUS_MSTR_RESET                    (1 << 1)
++#     define MACH64_BUS_APER_REG_DIS                  (1 << 4)
++#     define MACH64_BUS_FLUSH_BUF                     (1 << 2)
++#     define MACH64_BUS_MASTER_DIS                    (1 << 6)
++#     define MACH64_BUS_EXT_REG_EN                    (1 << 27)
++
++#define MACH64_CLR_CMP_CLR                    0x0700
++#define MACH64_CLR_CMP_CNTL                   0x0708
++#define MACH64_CLR_CMP_MASK                   0x0704
++#define MACH64_CONFIG_CHIP_ID                 0x04e0
++#define MACH64_CONFIG_CNTL                    0x04dc
++#define MACH64_CONFIG_STAT0                   0x04e4
++#define MACH64_CONFIG_STAT1                   0x0494
++#define MACH64_CONFIG_STAT2                   0x0498
++#define MACH64_CONTEXT_LOAD_CNTL              0x072c
++#define MACH64_CONTEXT_MASK                   0x0720
++#define MACH64_COMPOSITE_SHADOW_ID            0x0798
++#define MACH64_CRC_SIG                                0x04e8
++#define MACH64_CUSTOM_MACRO_CNTL              0x04d4
++
++#define MACH64_DP_BKGD_CLR                    0x06c0
++#define MACH64_DP_FOG_CLR                     0x06c4
++#define MACH64_DP_FGRD_BKGD_CLR                       0x06e0
++#define MACH64_DP_FRGD_CLR                    0x06c4
++#define MACH64_DP_FGRD_CLR_MIX                        0x06dc
++
++#define MACH64_DP_MIX                         0x06d4
++#     define BKGD_MIX_NOT_D                           (0 << 0)
++#     define BKGD_MIX_ZERO                            (1 << 0)
++#     define BKGD_MIX_ONE                             (2 << 0)
++#     define MACH64_BKGD_MIX_D                        (3 << 0)
++#     define BKGD_MIX_NOT_S                           (4 << 0)
++#     define BKGD_MIX_D_XOR_S                         (5 << 0)
++#     define BKGD_MIX_NOT_D_XOR_S                     (6 << 0)
++#     define MACH64_BKGD_MIX_S                        (7 << 0)
++#     define BKGD_MIX_NOT_D_OR_NOT_S                  (8 << 0)
++#     define BKGD_MIX_D_OR_NOT_S                      (9 << 0)
++#     define BKGD_MIX_NOT_D_OR_S                      (10 << 0)
++#     define BKGD_MIX_D_OR_S                          (11 << 0)
++#     define BKGD_MIX_D_AND_S                         (12 << 0)
++#     define BKGD_MIX_NOT_D_AND_S                     (13 << 0)
++#     define BKGD_MIX_D_AND_NOT_S                     (14 << 0)
++#     define BKGD_MIX_NOT_D_AND_NOT_S                 (15 << 0)
++#     define BKGD_MIX_D_PLUS_S_DIV2                   (23 << 0)
++#     define FRGD_MIX_NOT_D                           (0 << 16)
++#     define FRGD_MIX_ZERO                            (1 << 16)
++#     define FRGD_MIX_ONE                             (2 << 16)
++#     define FRGD_MIX_D                               (3 << 16)
++#     define FRGD_MIX_NOT_S                           (4 << 16)
++#     define FRGD_MIX_D_XOR_S                         (5 << 16)
++#     define FRGD_MIX_NOT_D_XOR_S                     (6 << 16)
++#     define MACH64_FRGD_MIX_S                        (7 << 16)
++#     define FRGD_MIX_NOT_D_OR_NOT_S                  (8 << 16)
++#     define FRGD_MIX_D_OR_NOT_S                      (9 << 16)
++#     define FRGD_MIX_NOT_D_OR_S                      (10 << 16)
++#     define FRGD_MIX_D_OR_S                          (11 << 16)
++#     define FRGD_MIX_D_AND_S                         (12 << 16)
++#     define FRGD_MIX_NOT_D_AND_S                     (13 << 16)
++#     define FRGD_MIX_D_AND_NOT_S                     (14 << 16)
++#     define FRGD_MIX_NOT_D_AND_NOT_S                 (15 << 16)
++#     define FRGD_MIX_D_PLUS_S_DIV2                   (23 << 16)
++
++#define MACH64_DP_PIX_WIDTH                   0x06d0
++#     define MACH64_HOST_TRIPLE_ENABLE                (1 << 13)
++#     define MACH64_BYTE_ORDER_MSB_TO_LSB             (0 << 24)
++#     define MACH64_BYTE_ORDER_LSB_TO_MSB             (1 << 24)
++
++#define MACH64_DP_SRC                         0x06d8
++#     define MACH64_BKGD_SRC_BKGD_CLR                 (0 << 0)
++#     define MACH64_BKGD_SRC_FRGD_CLR                 (1 << 0)
++#     define MACH64_BKGD_SRC_HOST                     (2 << 0)
++#     define MACH64_BKGD_SRC_BLIT                     (3 << 0)
++#     define MACH64_BKGD_SRC_PATTERN                  (4 << 0)
++#     define MACH64_BKGD_SRC_3D                       (5 << 0)
++#     define MACH64_FRGD_SRC_BKGD_CLR                 (0 << 8)
++#     define MACH64_FRGD_SRC_FRGD_CLR                 (1 << 8)
++#     define MACH64_FRGD_SRC_HOST                     (2 << 8)
++#     define MACH64_FRGD_SRC_BLIT                     (3 << 8)
++#     define MACH64_FRGD_SRC_PATTERN                  (4 << 8)
++#     define MACH64_FRGD_SRC_3D                       (5 << 8)
++#     define MACH64_MONO_SRC_ONE                      (0 << 16)
++#     define MACH64_MONO_SRC_PATTERN                  (1 << 16)
++#     define MACH64_MONO_SRC_HOST                     (2 << 16)
++#     define MACH64_MONO_SRC_BLIT                     (3 << 16)
++
++#define MACH64_DP_WRITE_MASK                  0x06c8
++
++#define MACH64_DST_CNTL                               0x0530
++#     define MACH64_DST_X_RIGHT_TO_LEFT               (0 << 0)
++#     define MACH64_DST_X_LEFT_TO_RIGHT               (1 << 0)
++#     define MACH64_DST_Y_BOTTOM_TO_TOP               (0 << 1)
++#     define MACH64_DST_Y_TOP_TO_BOTTOM               (1 << 1)
++#     define MACH64_DST_X_MAJOR                       (0 << 2)
++#     define MACH64_DST_Y_MAJOR                       (1 << 2)
++#     define MACH64_DST_X_TILE                        (1 << 3)
++#     define MACH64_DST_Y_TILE                        (1 << 4)
++#     define MACH64_DST_LAST_PEL                      (1 << 5)
++#     define MACH64_DST_POLYGON_ENABLE                (1 << 6)
++#     define MACH64_DST_24_ROTATION_ENABLE            (1 << 7)
++
++#define MACH64_DST_HEIGHT_WIDTH                       0x0518
++#define MACH64_DST_OFF_PITCH                  0x0500
++#define MACH64_DST_WIDTH_HEIGHT                       0x06ec
++#define MACH64_DST_X_Y                                0x06e8
++#define MACH64_DST_Y_X                                0x050c
++
++#define MACH64_FIFO_STAT                      0x0710
++#     define MACH64_FIFO_SLOT_MASK                    0x0000ffff
++#     define MACH64_FIFO_ERR                          (1 << 31)
++
++#define MACH64_GEN_TEST_CNTL                  0x04d0
++#     define MACH64_GUI_ENGINE_ENABLE                 (1 << 8)
++#define MACH64_GUI_CMDFIFO_DEBUG              0x0170
++#define MACH64_GUI_CMDFIFO_DATA                       0x0174
++#define MACH64_GUI_CNTL                               0x0178
++#       define MACH64_CMDFIFO_SIZE_MASK                 0x00000003ul
++#       define MACH64_CMDFIFO_SIZE_192                  0x00000000ul
++#       define MACH64_CMDFIFO_SIZE_128                  0x00000001ul
++#       define MACH64_CMDFIFO_SIZE_64                   0x00000002ul
++#define MACH64_GUI_STAT                               0x0738
++#     define MACH64_GUI_ACTIVE                        (1 << 0)
++#define MACH64_GUI_TRAJ_CNTL                  0x0730
++
++#define MACH64_HOST_CNTL                      0x0640
++#define MACH64_HOST_DATA0                     0x0600
++
++#define MACH64_ONE_OVER_AREA                  0x029c
++#define MACH64_ONE_OVER_AREA_UC                       0x0300
++
++#define MACH64_PAT_REG0                               0x0680
++#define MACH64_PAT_REG1                               0x0684
++
++#define MACH64_SC_LEFT                          0x06a0
++#define MACH64_SC_RIGHT                         0x06a4
++#define MACH64_SC_LEFT_RIGHT                    0x06a8
++#define MACH64_SC_TOP                           0x06ac
++#define MACH64_SC_BOTTOM                        0x06b0
++#define MACH64_SC_TOP_BOTTOM                    0x06b4
++
++#define MACH64_SCALE_3D_CNTL                  0x05fc
++#define MACH64_SCRATCH_REG0                   0x0480
++#define MACH64_SCRATCH_REG1                   0x0484
++#define MACH64_SECONDARY_TEX_OFF              0x0778
++#define MACH64_SETUP_CNTL                     0x0304
++#define MACH64_SRC_CNTL                               0x05b4
++#     define MACH64_SRC_BM_ENABLE                     (1 << 8)
++#     define MACH64_SRC_BM_SYNC                       (1 << 9)
++#     define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM         (0 << 10)
++#     define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME         (1 << 10)
++#     define MACH64_SRC_BM_OP_REG_TO_SYSTEM           (2 << 10)
++#     define MACH64_SRC_BM_OP_SYSTEM_TO_REG           (3 << 10)
++#define MACH64_SRC_HEIGHT1                    0x0594
++#define MACH64_SRC_HEIGHT2                    0x05ac
++#define MACH64_SRC_HEIGHT1_WIDTH1             0x0598
++#define MACH64_SRC_HEIGHT2_WIDTH2             0x05b0
++#define MACH64_SRC_OFF_PITCH                  0x0580
++#define MACH64_SRC_WIDTH1                     0x0590
++#define MACH64_SRC_Y_X                                0x058c
++
++#define MACH64_TEX_0_OFF                      0x05c0
++#define MACH64_TEX_CNTL                               0x0774
++#define MACH64_TEX_SIZE_PITCH                 0x0770
++#define MACH64_TIMER_CONFIG                   0x0428
++
++#define MACH64_VERTEX_1_ARGB                  0x0254
++#define MACH64_VERTEX_1_S                     0x0240
++#define MACH64_VERTEX_1_SECONDARY_S           0x0328
++#define MACH64_VERTEX_1_SECONDARY_T           0x032c
++#define MACH64_VERTEX_1_SECONDARY_W           0x0330
++#define MACH64_VERTEX_1_SPEC_ARGB             0x024c
++#define MACH64_VERTEX_1_T                     0x0244
++#define MACH64_VERTEX_1_W                     0x0248
++#define MACH64_VERTEX_1_X_Y                   0x0258
++#define MACH64_VERTEX_1_Z                     0x0250
++#define MACH64_VERTEX_2_ARGB                  0x0274
++#define MACH64_VERTEX_2_S                     0x0260
++#define MACH64_VERTEX_2_SECONDARY_S           0x0334
++#define MACH64_VERTEX_2_SECONDARY_T           0x0338
++#define MACH64_VERTEX_2_SECONDARY_W           0x033c
++#define MACH64_VERTEX_2_SPEC_ARGB             0x026c
++#define MACH64_VERTEX_2_T                     0x0264
++#define MACH64_VERTEX_2_W                     0x0268
++#define MACH64_VERTEX_2_X_Y                   0x0278
++#define MACH64_VERTEX_2_Z                     0x0270
++#define MACH64_VERTEX_3_ARGB                  0x0294
++#define MACH64_VERTEX_3_S                     0x0280
++#define MACH64_VERTEX_3_SECONDARY_S           0x02a0
++#define MACH64_VERTEX_3_SECONDARY_T           0x02a4
++#define MACH64_VERTEX_3_SECONDARY_W           0x02a8
++#define MACH64_VERTEX_3_SPEC_ARGB             0x028c
++#define MACH64_VERTEX_3_T                     0x0284
++#define MACH64_VERTEX_3_W                     0x0288
++#define MACH64_VERTEX_3_X_Y                   0x0298
++#define MACH64_VERTEX_3_Z                     0x0290
++
++#define MACH64_Z_CNTL                         0x054c
++#define MACH64_Z_OFF_PITCH                    0x0548
++
++#define MACH64_CRTC_VLINE_CRNT_VLINE          0x0410
++#     define MACH64_CRTC_VLINE_MASK                   0x000007ff
++#     define MACH64_CRTC_CRNT_VLINE_MASK              0x07ff0000
++#define MACH64_CRTC_OFF_PITCH                 0x0414
++#define MACH64_CRTC_INT_CNTL                  0x0418
++#     define MACH64_CRTC_VBLANK                       (1 << 0)
++#     define MACH64_CRTC_VBLANK_INT_EN                (1 << 1)
++#     define MACH64_CRTC_VBLANK_INT                   (1 << 2)
++#     define MACH64_CRTC_VLINE_INT_EN                 (1 << 3)
++#     define MACH64_CRTC_VLINE_INT                    (1 << 4)
++#     define MACH64_CRTC_VLINE_SYNC                   (1 << 5)        /* 0=even, 1=odd */
++#     define MACH64_CRTC_FRAME                        (1 << 6)        /* 0=even, 1=odd */
++#     define MACH64_CRTC_SNAPSHOT_INT_EN              (1 << 7)
++#     define MACH64_CRTC_SNAPSHOT_INT                 (1 << 8)
++#     define MACH64_CRTC_I2C_INT_EN                   (1 << 9)
++#     define MACH64_CRTC_I2C_INT                      (1 << 10)
++#     define MACH64_CRTC2_VBLANK                      (1 << 11)       /* LT Pro */
++#     define MACH64_CRTC2_VBLANK_INT_EN               (1 << 12)       /* LT Pro */
++#     define MACH64_CRTC2_VBLANK_INT                  (1 << 13)       /* LT Pro */
++#     define MACH64_CRTC2_VLINE_INT_EN                (1 << 14)       /* LT Pro */
++#     define MACH64_CRTC2_VLINE_INT                   (1 << 15)       /* LT Pro */
++#     define MACH64_CRTC_CAPBUF0_INT_EN               (1 << 16)
++#     define MACH64_CRTC_CAPBUF0_INT                  (1 << 17)
++#     define MACH64_CRTC_CAPBUF1_INT_EN               (1 << 18)
++#     define MACH64_CRTC_CAPBUF1_INT                  (1 << 19)
++#     define MACH64_CRTC_OVERLAY_EOF_INT_EN           (1 << 20)
++#     define MACH64_CRTC_OVERLAY_EOF_INT              (1 << 21)
++#     define MACH64_CRTC_ONESHOT_CAP_INT_EN           (1 << 22)
++#     define MACH64_CRTC_ONESHOT_CAP_INT              (1 << 23)
++#     define MACH64_CRTC_BUSMASTER_EOL_INT_EN         (1 << 24)
++#     define MACH64_CRTC_BUSMASTER_EOL_INT            (1 << 25)
++#     define MACH64_CRTC_GP_INT_EN                    (1 << 26)
++#     define MACH64_CRTC_GP_INT                       (1 << 27)
++#     define MACH64_CRTC2_VLINE_SYNC                  (1 << 28) /* LT Pro */  /* 0=even, 1=odd */
++#     define MACH64_CRTC_SNAPSHOT2_INT_EN             (1 << 29)       /* LT Pro */
++#     define MACH64_CRTC_SNAPSHOT2_INT                (1 << 30)       /* LT Pro */
++#     define MACH64_CRTC_VBLANK2_INT                  (1 << 31)
++#     define MACH64_CRTC_INT_ENS                              \
++              (                                               \
++                      MACH64_CRTC_VBLANK_INT_EN |             \
++                      MACH64_CRTC_VLINE_INT_EN |              \
++                      MACH64_CRTC_SNAPSHOT_INT_EN |           \
++                      MACH64_CRTC_I2C_INT_EN |                \
++                      MACH64_CRTC2_VBLANK_INT_EN |            \
++                      MACH64_CRTC2_VLINE_INT_EN |             \
++                      MACH64_CRTC_CAPBUF0_INT_EN |            \
++                      MACH64_CRTC_CAPBUF1_INT_EN |            \
++                      MACH64_CRTC_OVERLAY_EOF_INT_EN |        \
++                      MACH64_CRTC_ONESHOT_CAP_INT_EN |        \
++                      MACH64_CRTC_BUSMASTER_EOL_INT_EN |      \
++                      MACH64_CRTC_GP_INT_EN |                 \
++                      MACH64_CRTC_SNAPSHOT2_INT_EN |          \
++                      0                                       \
++              )
++#     define MACH64_CRTC_INT_ACKS                     \
++              (                                       \
++                      MACH64_CRTC_VBLANK_INT |        \
++                      MACH64_CRTC_VLINE_INT |         \
++                      MACH64_CRTC_SNAPSHOT_INT |      \
++                      MACH64_CRTC_I2C_INT |           \
++                      MACH64_CRTC2_VBLANK_INT |       \
++                      MACH64_CRTC2_VLINE_INT |        \
++                      MACH64_CRTC_CAPBUF0_INT |       \
++                      MACH64_CRTC_CAPBUF1_INT |       \
++                      MACH64_CRTC_OVERLAY_EOF_INT |   \
++                      MACH64_CRTC_ONESHOT_CAP_INT |   \
++                      MACH64_CRTC_BUSMASTER_EOL_INT | \
++                      MACH64_CRTC_GP_INT |            \
++                      MACH64_CRTC_SNAPSHOT2_INT |     \
++                      MACH64_CRTC_VBLANK2_INT |       \
++                      0                               \
++              )
++
++#define MACH64_DATATYPE_CI8                           2
++#define MACH64_DATATYPE_ARGB1555                      3
++#define MACH64_DATATYPE_RGB565                                4
++#define MACH64_DATATYPE_ARGB8888                      6
++#define MACH64_DATATYPE_RGB332                                7
++#define MACH64_DATATYPE_Y8                            8
++#define MACH64_DATATYPE_RGB8                          9
++#define MACH64_DATATYPE_VYUY422                               11
++#define MACH64_DATATYPE_YVYU422                               12
++#define MACH64_DATATYPE_AYUV444                               14
++#define MACH64_DATATYPE_ARGB4444                      15
++
++#define MACH64_READ(reg)      DRM_READ32(dev_priv->mmio, (reg) )
++#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) )
++
++#define DWMREG0               0x0400
++#define DWMREG0_END   0x07ff
++#define DWMREG1               0x0000
++#define DWMREG1_END   0x03ff
++
++#define ISREG0(r)     (((r) >= DWMREG0) && ((r) <= DWMREG0_END))
++#define DMAREG0(r)    (((r) - DWMREG0) >> 2)
++#define DMAREG1(r)    ((((r) - DWMREG1) >> 2 ) | 0x0100)
++#define DMAREG(r)     (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
++
++#define MMREG0                0x0000
++#define MMREG0_END    0x00ff
++
++#define ISMMREG0(r)   (((r) >= MMREG0) && ((r) <= MMREG0_END))
++#define MMSELECT0(r)  (((r) << 2) + DWMREG0)
++#define MMSELECT1(r)  (((((r) & 0xff) << 2) + DWMREG1))
++#define MMSELECT(r)   (ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r))
++
++/* ================================================================
++ * DMA constants
++ */
++
++/* DMA descriptor field indices:
++ * The descriptor fields are loaded into the read-only
++ * BM_* system bus master registers during a bus-master operation
++ */
++#define MACH64_DMA_FRAME_BUF_OFFSET   0       /* BM_FRAME_BUF_OFFSET */
++#define MACH64_DMA_SYS_MEM_ADDR               1       /* BM_SYSTEM_MEM_ADDR */
++#define MACH64_DMA_COMMAND            2       /* BM_COMMAND */
++#define MACH64_DMA_RESERVED           3       /* BM_STATUS */
++
++/* BM_COMMAND descriptor field flags */
++#define MACH64_DMA_HOLD_OFFSET                (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */
++#define MACH64_DMA_EOL                        (1<<31) /* End of descriptor list flag */
++
++#define MACH64_DMA_CHUNKSIZE          0x1000  /* 4kB per DMA descriptor */
++#define MACH64_APERTURE_OFFSET                0x7ff800        /* frame-buffer offset for gui-masters */
++
++/* ================================================================
++ * Ring operations
++ *
++ * Since the Mach64 bus master engine requires polling, these functions end
++ * up being called frequently, hence being inline.
++ */
++
++static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                ring->head_addr, ring->head, ring->tail, ring->space);
++
++      if (mach64_do_wait_for_idle(dev_priv) < 0) {
++              mach64_do_engine_reset(dev_priv);
++      }
++
++      if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
++              /* enable bus mastering and block 1 registers */
++              MACH64_WRITE(MACH64_BUS_CNTL,
++                           (MACH64_READ(MACH64_BUS_CNTL) &
++                            ~MACH64_BUS_MASTER_DIS)
++                           | MACH64_BUS_EXT_REG_EN);
++              mach64_do_wait_for_idle(dev_priv);
++      }
++
++      /* reset descriptor table ring head */
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      dev_priv->ring_running = 1;
++}
++
++static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
++                                        drm_mach64_descriptor_ring_t * ring)
++{
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                ring->head_addr, ring->head, ring->tail, ring->space);
++
++      /* reset descriptor table ring head */
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      if (dev_priv->driver_mode == MACH64_MODE_MMIO) {
++              mach64_do_dispatch_pseudo_dma(dev_priv);
++      } else {
++              /* enable GUI bus mastering, and sync the bus master to the GUI */
++              MACH64_WRITE(MACH64_SRC_CNTL,
++                           MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
++                           MACH64_SRC_BM_OP_SYSTEM_TO_REG);
++
++              /* kick off the transfer */
++              MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
++              if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) {
++                      if ((mach64_do_wait_for_idle(dev_priv)) < 0) {
++                              DRM_ERROR("idle failed, resetting engine\n");
++                              mach64_dump_engine_info(dev_priv);
++                              mach64_do_engine_reset(dev_priv);
++                              return;
++                      }
++                      mach64_do_release_used_buffers(dev_priv);
++              }
++      }
++}
++
++/**
++ * Poll the ring head and make sure the bus master is alive.
++ * 
++ * Mach64's bus master engine will stop if there are no more entries to process.
++ * This function polls the engine for the last processed entry and calls 
++ * mach64_ring_resume if there is an unprocessed entry.
++ * 
++ * Note also that, since we update the ring tail while the bus master engine is 
++ * in operation, it is possible that the last tail update was too late to be 
++ * processed, and the bus master engine stops at the previous tail position. 
++ * Therefore it is important to call this function frequently. 
++ */
++static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
++                                      drm_mach64_descriptor_ring_t * ring)
++{
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                ring->head_addr, ring->head, ring->tail, ring->space);
++
++      if (!dev_priv->ring_running) {
++              mach64_ring_start(dev_priv);
++
++              if (ring->head != ring->tail) {
++                      mach64_ring_resume(dev_priv, ring);
++              }
++      } else {
++              /* GUI_ACTIVE must be read before BM_GUI_TABLE to
++               * correctly determine the ring head
++               */
++              int gui_active =
++                  MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
++
++              ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0;
++
++              if (gui_active) {
++                      /* If not idle, BM_GUI_TABLE points one descriptor
++                       * past the current head
++                       */
++                      if (ring->head_addr == ring->start_addr) {
++                              ring->head_addr += ring->size;
++                      }
++                      ring->head_addr -= 4 * sizeof(u32);
++              }
++
++              if (ring->head_addr < ring->start_addr ||
++                  ring->head_addr >= ring->start_addr + ring->size) {
++                      DRM_ERROR("bad ring head address: 0x%08x\n",
++                                ring->head_addr);
++                      mach64_dump_ring_info(dev_priv);
++                      mach64_do_engine_reset(dev_priv);
++                      return;
++              }
++
++              ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32);
++
++              if (!gui_active && ring->head != ring->tail) {
++                      mach64_ring_resume(dev_priv, ring);
++              }
++      }
++}
++
++static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv)
++{
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                dev_priv->ring.head_addr, dev_priv->ring.head,
++                dev_priv->ring.tail, dev_priv->ring.space);
++
++      /* restore previous SRC_CNTL to disable busmastering */
++      mach64_do_wait_for_fifo(dev_priv, 1);
++      MACH64_WRITE(MACH64_SRC_CNTL, 0);
++
++      /* disable busmastering but keep the block 1 registers enabled */
++      mach64_do_wait_for_idle(dev_priv);
++      MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL)
++                   | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN);
++
++      dev_priv->ring_running = 0;
++}
++
++static __inline__ void
++mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++
++      DRM_DEBUG("\n");
++
++      mach64_ring_tick(dev_priv, ring);
++
++      ring->space = (ring->head - ring->tail) * sizeof(u32);
++      if (ring->space <= 0) {
++              ring->space += ring->size;
++      }
++}
++
++/* ================================================================
++ * DMA macros
++ * 
++ * Mach64's ring buffer doesn't take register writes directly. These 
++ * have to be written indirectly in DMA buffers. These macros simplify 
++ * the task of setting up a buffer, writing commands to it, and 
++ * queuing the buffer in the ring. 
++ */
++
++#define DMALOCALS                             \
++      drm_mach64_freelist_t *_entry = NULL;   \
++      struct drm_buf *_buf = NULL;            \
++      u32 *_buf_wptr; int _outcount
++
++#define GETBUFPTR( __buf )                                            \
++((dev_priv->is_pci) ?                                                 \
++      ((u32 *)(__buf)->address) :                                     \
++      ((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))
++
++#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)
++
++#define GETRINGOFFSET() (_entry->ring_ofs)
++
++static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
++                                                  dev_priv,
++                                                  drm_mach64_freelist_t **
++                                                  entry, struct drm_buf * buf)
++{
++      struct list_head *ptr;
++#if MACH64_EXTRA_CHECKING
++      if (list_empty(&dev_priv->pending)) {
++              DRM_ERROR("Empty pending list in \n");
++              return -EINVAL;
++      }
++#endif
++      ptr = dev_priv->pending.prev;
++      *entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      while ((*entry)->buf != buf) {
++              if (ptr == &dev_priv->pending) {
++                      return -EFAULT;
++              }
++              ptr = ptr->prev;
++              *entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      }
++      return 0;
++}
++
++#define DMASETPTR( _p )                               \
++do {                                          \
++      _buf = (_p);                            \
++      _outcount = 0;                          \
++      _buf_wptr = GETBUFPTR( _buf );          \
++} while(0)
++
++/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
++#define DMAGETPTR( file_priv, dev_priv, n )                           \
++do {                                                                  \
++      if ( MACH64_VERBOSE ) {                                         \
++              DRM_INFO( "DMAGETPTR( %d )\n", (n) );                   \
++      }                                                               \
++      _buf = mach64_freelist_get( dev_priv );                         \
++      if (_buf == NULL) {                                             \
++              DRM_ERROR("couldn't get buffer in DMAGETPTR\n");        \
++              return -EAGAIN;                                 \
++      }                                                               \
++      if (_buf->pending) {                                            \
++              DRM_ERROR("pending buf in DMAGETPTR\n");                \
++              return -EFAULT;                                 \
++      }                                                               \
++      _buf->file_priv = file_priv;                                    \
++      _outcount = 0;                                                  \
++                                                                      \
++        _buf_wptr = GETBUFPTR( _buf );                                        \
++} while (0)
++
++#define DMAOUTREG( reg, val )                                 \
++do {                                                          \
++      if ( MACH64_VERBOSE ) {                                 \
++              DRM_INFO( "   DMAOUTREG( 0x%x = 0x%08x )\n",    \
++                        reg, val );                           \
++      }                                                       \
++      _buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg));      \
++      _buf_wptr[_outcount++] = cpu_to_le32((val));            \
++      _buf->used += 8;                                        \
++} while (0)
++
++#define DMAADVANCE( dev_priv, _discard )                              \
++      do {                                                            \
++              struct list_head *ptr;                                  \
++              int ret;                                                \
++                                                                      \
++              if ( MACH64_VERBOSE ) {                                 \
++                      DRM_INFO( "DMAADVANCE() in \n" );               \
++              }                                                       \
++                                                                      \
++              if (_buf->used <= 0) {                                  \
++                      DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \
++                                 _buf->idx );                         \
++                      return -EFAULT;                                 \
++              }                                                       \
++              if (_buf->pending) {                                    \
		if (_buf->pending) {                                    \
			/* This is a reused buffer, so we need to find it in the pending list */ \
++                      if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
++                              DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \
++                              return ret;                             \
++                      }                                               \
++                      if (_entry->discard) {                          \
++                              DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \
++                              return -EFAULT;                         \
++                      }                                               \
++              } else {                                                \
++                      if (list_empty(&dev_priv->placeholders)) {      \
++                              DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \
++                              return -EFAULT;                         \
++                      }                                               \
++                      ptr = dev_priv->placeholders.next;              \
++                      list_del(ptr);                                  \
++                      _entry = list_entry(ptr, drm_mach64_freelist_t, list); \
++                      _buf->pending = 1;                              \
++                      _entry->buf = _buf;                             \
++                      list_add_tail(ptr, &dev_priv->pending);         \
++              }                                                       \
++              _entry->discard = (_discard);                           \
++              if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \
++                      return ret;                                     \
++      } while (0)
++
++#define DMADISCARDBUF()                                                       \
++      do {                                                            \
++              if (_entry == NULL) {                                   \
++                      int ret;                                        \
++                      if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
++                              DRM_ERROR( "couldn't find pending buf %d\n", \
++                                         _buf->idx );                 \
++                              return ret;                             \
++                      }                                               \
++              }                                                       \
++              _entry->discard = 1;                                    \
++      } while(0)
++
++#define DMAADVANCEHOSTDATA( dev_priv )                                        \
++      do {                                                            \
++              struct list_head *ptr;                                  \
++              int ret;                                                \
++                                                                      \
++              if ( MACH64_VERBOSE ) {                                 \
++                      DRM_INFO( "DMAADVANCEHOSTDATA() in \n" );       \
++              }                                                       \
++                                                                      \
++              if (_buf->used <= 0) {                                  \
++                      DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \
++                      return -EFAULT;                                 \
++              }                                                       \
++              if (list_empty(&dev_priv->placeholders)) {              \
++                      DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \
++                      return -EFAULT;                                 \
++              }                                                       \
++                                                                      \
++              ptr = dev_priv->placeholders.next;                      \
++              list_del(ptr);                                          \
++              _entry = list_entry(ptr, drm_mach64_freelist_t, list);  \
++              _entry->buf = _buf;                                     \
++              _entry->buf->pending = 1;                               \
++              list_add_tail(ptr, &dev_priv->pending);                 \
++              _entry->discard = 1;                                    \
++              if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \
++                      return ret;                                     \
++      } while (0)
++
++#endif                                /* __MACH64_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_irq.c git-nokia/drivers/gpu/drm-tungsten/mach64_irq.c
+--- git/drivers/gpu/drm-tungsten/mach64_irq.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_irq.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,159 @@
++/* mach64_irq.c -- IRQ handling for ATI Mach64 -*- linux-c -*-
++ * Created: Tue Feb 25, 2003 by Leif Delgass, based on radeon_irq.c/r128_irq.c
++ */
++/*-
++ * Copyright (C) The Weather Channel, Inc.  2002.
++ * Copyright 2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Eric Anholt <anholt@FreeBSD.org>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = arg;
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      int status;
++
++      status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      /* VBLANK interrupt */
++      if (status & MACH64_CRTC_VBLANK_INT) {
++              /* Mask off all interrupt ack bits before setting the ack bit, since
++               * there may be other handlers outside the DRM.
++               *
++               * NOTE: On mach64, you need to keep the enable bits set when doing
++               * the ack, despite what the docs say about not acking and enabling
++               * in a single write.
++               */
++              MACH64_WRITE(MACH64_CRTC_INT_CNTL,
++                           (status & ~MACH64_CRTC_INT_ACKS)
++                           | MACH64_CRTC_VBLANK_INT);
++
++              atomic_inc(&dev_priv->vbl_received);
++              drm_handle_vblank(dev, 0);
++              return IRQ_HANDLED;
++      }
++      return IRQ_NONE;
++}
++
++u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)
++{
++      const drm_mach64_private_t *const dev_priv = dev->dev_private;
++
++      if (crtc != 0)
++              return 0;
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++int mach64_enable_vblank(struct drm_device * dev, int crtc)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      if (crtc != 0) {
++              DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                        crtc);
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("before enable vblank CRTC_INT_CTNL: 0x%08x\n", status);
++
++      /* Turn on VBLANK interrupt */
++      MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
++                   | MACH64_CRTC_VBLANK_INT_EN);
++
++      return 0;
++}
++
++void mach64_disable_vblank(struct drm_device * dev, int crtc)
++{
++      if (crtc != 0) {
++              DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                        crtc);
++              return;
++      }
++
++      /*
++       * FIXME: implement proper interrupt disable by using the vblank
++       * counter register (if available).
++       */
++}
++
++static void mach64_disable_vblank_local(struct drm_device * dev, int crtc)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      if (crtc != 0) {
++              DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                        crtc);
++              return;
++      }
++
++      DRM_DEBUG("before disable vblank CRTC_INT_CTNL: 0x%08x\n", status);
++
++      /* Disable and clear VBLANK interrupt */
++      MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
++                   | MACH64_CRTC_VBLANK_INT);
++}
++
++void mach64_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status);
++
++      mach64_disable_vblank_local(dev, 0);
++}
++
++int mach64_driver_irq_postinstall(struct drm_device * dev)
++{
++      return drm_vblank_init(dev, 1);
++}
++
++void mach64_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      mach64_disable_vblank_local(dev, 0);
++
++      DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
++                MACH64_READ(MACH64_CRTC_INT_CNTL));
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_state.c git-nokia/drivers/gpu/drm-tungsten/mach64_state.c
+--- git/drivers/gpu/drm-tungsten/mach64_state.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_state.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,910 @@
++/* mach64_state.c -- State support for mach64 (Rage Pro) driver -*- linux-c -*-
++ * Created: Sun Dec 03 19:20:26 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ *    José Fonseca <j_r_fonseca@yahoo.co.uk>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++/* Interface history:
++ *
++ * 1.0 - Initial mach64 DRM
++ *
++ */
++struct drm_ioctl_desc mach64_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
++};
++
++int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
++
++/* ================================================================
++ * DMA hardware state programming functions
++ */
++
++static void mach64_print_dirty(const char *msg, unsigned int flags)
++{
++      DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
++                msg,
++                flags,
++                (flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " :
++                "",
++                (flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "",
++                (flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " :
++                "", (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
++                (flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " :
++                "",
++                (flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "",
++                (flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "",
++                (flags & MACH64_UPLOAD_MISC) ? "misc, " : "",
++                (flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "",
++                (flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "",
++                (flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "",
++                (flags & MACH64_UPLOAD_CLIPRECTS) ? "cliprects, " : "");
++}
++
++/* Mach64 doesn't have hardware cliprects, just one hardware scissor,
++ * so the GL scissor is intersected with each cliprect here
++ */
++/* This function returns 0 on success, 1 for no intersection, and
++ * negative for an error
++ */
++static int mach64_emit_cliprect(struct drm_file *file_priv,
++                              drm_mach64_private_t * dev_priv,
++                              struct drm_clip_rect * box)
++{
++      u32 sc_left_right, sc_top_bottom;
++      struct drm_clip_rect scissor;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
++      DMALOCALS;
++
++      DRM_DEBUG("box=%p\n", box);
++
++      /* Get GL scissor */
++      /* FIXME: store scissor in SAREA as a cliprect instead of in
++       * hardware format, or do intersection client-side
++       */
++      scissor.x1 = regs->sc_left_right & 0xffff;
++      scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16;
++      scissor.y1 = regs->sc_top_bottom & 0xffff;
++      scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16;
++
++      /* Intersect GL scissor with cliprect */
++      if (box->x1 > scissor.x1)
++              scissor.x1 = box->x1;
++      if (box->y1 > scissor.y1)
++              scissor.y1 = box->y1;
++      if (box->x2 < scissor.x2)
++              scissor.x2 = box->x2;
++      if (box->y2 < scissor.y2)
++              scissor.y2 = box->y2;
++      /* positive return means skip */
++      if (scissor.x1 >= scissor.x2)
++              return 1;
++      if (scissor.y1 >= scissor.y2)
++              return 1;
++
++      DMAGETPTR(file_priv, dev_priv, 2);      /* returns on failure to get buffer */
++
++      sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
++      sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));
++
++      DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right);
++      DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom);
++
++      DMAADVANCE(dev_priv, 1);
++
++      return 0;
++}
++
++static __inline__ int mach64_emit_state(struct drm_file *file_priv,
++                                      drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
++      unsigned int dirty = sarea_priv->dirty;
++      u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2);
++      DMALOCALS;
++
++      if (MACH64_VERBOSE) {
++              mach64_print_dirty(__FUNCTION__, dirty);
++      } else {
++              DRM_DEBUG("dirty=0x%08x\n", dirty);
++      }
++
++      DMAGETPTR(file_priv, dev_priv, 17);     /* returns on failure to get buffer */
++
++      if (dirty & MACH64_UPLOAD_MISC) {
++              DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
++              DMAOUTREG(MACH64_DP_SRC, regs->dp_src);
++              DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl);
++              DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_MISC;
++      }
++
++      if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) {
++              DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH;
++      }
++      if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) {
++              DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH;
++      }
++      if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) {
++              DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl);
++              DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL;
++      }
++      if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) {
++              DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL;
++      }
++      if (dirty & MACH64_UPLOAD_DP_FOG_CLR) {
++              DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR;
++      }
++      if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) {
++              DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK;
++      }
++      if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) {
++              DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH;
++      }
++      if (dirty & MACH64_UPLOAD_SETUP_CNTL) {
++              DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL;
++      }
++
++      if (dirty & MACH64_UPLOAD_TEXTURE) {
++              DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch);
++              DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl);
++              DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off);
++              DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE;
++      }
++
++      DMAADVANCE(dev_priv, 1);
++
++      sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS;
++
++      return 0;
++
++}
++
++/* ================================================================
++ * DMA command dispatch functions
++ */
++
++static int mach64_dma_dispatch_clear(struct drm_device * dev,
++                                   struct drm_file *file_priv,
++                                   unsigned int flags,
++                                   int cx, int cy, int cw, int ch,
++                                   unsigned int clear_color,
++                                   unsigned int clear_depth)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      u32 fb_bpp, depth_bpp;
++      int i;
++      DMALOCALS;
++
++      DRM_DEBUG("\n");
++
++      switch (dev_priv->fb_bpp) {
++      case 16:
++              fb_bpp = MACH64_DATATYPE_RGB565;
++              break;
++      case 32:
++              fb_bpp = MACH64_DATATYPE_ARGB8888;
++              break;
++      default:
++              return -EINVAL;
++      }
++      switch (dev_priv->depth_bpp) {
++      case 16:
++              depth_bpp = MACH64_DATATYPE_RGB565;
++              break;
++      case 24:
++      case 32:
++              depth_bpp = MACH64_DATATYPE_ARGB8888;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (!nbox)
++              return 0;
++
++      DMAGETPTR(file_priv, dev_priv, nbox * 31);      /* returns on failure to get buffer */
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
++                        pbox[i].x1, pbox[i].y1,
++                        pbox[i].x2, pbox[i].y2, flags);
++
++              if (flags & (MACH64_FRONT | MACH64_BACK)) {
++                      /* Setup for color buffer clears
++                       */
++
++                      DMAOUTREG(MACH64_Z_CNTL, 0);
++                      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++                      DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
++                      DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
++
++                      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++                      DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++                                (MACH64_DST_X_LEFT_TO_RIGHT |
++                                 MACH64_DST_Y_TOP_TO_BOTTOM));
++
++                      DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
++                                                      (fb_bpp << 4) |
++                                                      (fb_bpp << 8) |
++                                                      (fb_bpp << 16) |
++                                                      (fb_bpp << 28)));
++
++                      DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color);
++                      DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask);
++                      DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
++                                                MACH64_FRGD_MIX_S));
++                      DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
++                                                MACH64_FRGD_SRC_FRGD_CLR |
++                                                MACH64_MONO_SRC_ONE));
++
++              }
++
++              if (flags & MACH64_FRONT) {
++
++                      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                                dev_priv->front_offset_pitch);
++                      DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++                      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++              }
++
++              if (flags & MACH64_BACK) {
++
++                      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                                dev_priv->back_offset_pitch);
++                      DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++                      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++              }
++
++              if (flags & MACH64_DEPTH) {
++                      /* Setup for depth buffer clear
++                       */
++                      DMAOUTREG(MACH64_Z_CNTL, 0);
++                      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++                      DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
++                      DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
++
++                      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++                      DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++                                (MACH64_DST_X_LEFT_TO_RIGHT |
++                                 MACH64_DST_Y_TOP_TO_BOTTOM));
++
++                      DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) |
++                                                      (depth_bpp << 4) |
++                                                      (depth_bpp << 8) |
++                                                      (depth_bpp << 16) |
++                                                      (depth_bpp << 28)));
++
++                      DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth);
++                      DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
++                      DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
++                                                MACH64_FRGD_MIX_S));
++                      DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
++                                                MACH64_FRGD_SRC_FRGD_CLR |
++                                                MACH64_MONO_SRC_ONE));
++
++                      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                                dev_priv->depth_offset_pitch);
++                      DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++                      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++              }
++      }
++
++      DMAADVANCE(dev_priv, 1);
++
++      return 0;
++}
++
++static int mach64_dma_dispatch_swap(struct drm_device * dev,
++                                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      u32 fb_bpp;
++      int i;
++      DMALOCALS;
++
++      DRM_DEBUG("\n");
++
++      switch (dev_priv->fb_bpp) {
++      case 16:
++              fb_bpp = MACH64_DATATYPE_RGB565;
++              break;
++      case 32:
++      default:
++              fb_bpp = MACH64_DATATYPE_ARGB8888;
++              break;
++      }
++
++      if (!nbox)
++              return 0;
++
++      DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4);  /* returns on failure to get buffer */
++
++      DMAOUTREG(MACH64_Z_CNTL, 0);
++      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++      DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));      /* no scissor */
++      DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
++
++      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++      DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT |
++                                       MACH64_DST_Y_TOP_TO_BOTTOM));
++
++      DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
++                                      (fb_bpp << 4) |
++                                      (fb_bpp << 8) |
++                                      (fb_bpp << 16) | (fb_bpp << 28)));
++
++      DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
++      DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S));
++      DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR |
++                                MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE));
++
++      DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch);
++      DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("dispatch swap %d,%d-%d,%d\n",
++                        pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);
++
++              DMAOUTREG(MACH64_SRC_WIDTH1, w);
++              DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y);
++              DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y);
++              DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++      }
++
++      DMAADVANCE(dev_priv, 1);
++
++      if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) {
++              for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) {
++                      dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1];
++              }
++              dev_priv->frame_ofs[i] = GETRINGOFFSET();
++
++              dev_priv->sarea_priv->frames_queued++;
++      }
++
++      return 0;
++}
++
++static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int i, start;
++      u32 head, tail, ofs;
++
++      DRM_DEBUG("\n");
++
++      if (sarea_priv->frames_queued == 0)
++              return 0;
++
++      tail = ring->tail;
++      mach64_ring_tick(dev_priv, ring);
++      head = ring->head;
++
++      start = (MACH64_MAX_QUEUED_FRAMES -
++               DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued));
++
++      if (head == tail) {
++              sarea_priv->frames_queued = 0;
++              for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++                      dev_priv->frame_ofs[i] = ~0;
++              }
++              return 0;
++      }
++
++      for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++              ofs = dev_priv->frame_ofs[i];
++              DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs);
++              if (ofs == ~0 ||
++                  (head < tail && (ofs < head || ofs >= tail)) ||
++                  (head > tail && (ofs < head && ofs >= tail))) {
++                      sarea_priv->frames_queued =
++                          (MACH64_MAX_QUEUED_FRAMES - 1) - i;
++                      dev_priv->frame_ofs[i] = ~0;
++              }
++      }
++
++      return sarea_priv->frames_queued;
++}
++
++/* Copy and verify a client submited buffer.
++ * FIXME: Make an assembly optimized version
++ */
++static __inline__ int copy_from_user_vertex(u32 *to,
++                                          const u32 __user *ufrom,
++                                          unsigned long bytes)
++{
++      unsigned long n = bytes;        /* dwords remaining in buffer */
++      u32 *from, *orig_from;
++
++      from = drm_alloc(bytes, DRM_MEM_DRIVER);
++      if (from == NULL)
++              return -ENOMEM;
++
++      if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
++              drm_free(from, bytes, DRM_MEM_DRIVER);
++              return -EFAULT;
++      }
++      orig_from = from; /* we'll be modifying the "from" ptr, so save it */
++
++      n >>= 2;
++
++      while (n > 1) {
++              u32 data, reg, count;
++
++              data = *from++;
++
++              n--;
++
++              reg = le32_to_cpu(data);
++              count = (reg >> 16) + 1;
++              if (count <= n) {
++                      n -= count;
++                      reg &= 0xffff;
++
++                      /* This is an exact match of Mach64's Setup Engine registers,
++                       * excluding SETUP_CNTL (1_C1).
++                       */
++                      if ((reg >= 0x0190 && reg < 0x01c1) ||
++                          (reg >= 0x01ca && reg <= 0x01cf)) {
++                              *to++ = data;
++                              memcpy(to, from, count << 2);
++                              from += count;
++                              to += count;
++                      } else {
++                              DRM_ERROR("Got bad command: 0x%04x\n", reg);
++                              drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++                              return -EACCES;
++                      }
++              } else {
++                      DRM_ERROR
++                          ("Got bad command count(=%u) dwords remaining=%lu\n",
++                           count, n);
++                      drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++                      return -EINVAL;
++              }
++      }
++
++      drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++      if (n == 0)
++              return 0;
++      else {
++              DRM_ERROR("Bad buf->used(=%lu)\n", bytes);
++              return -EINVAL;
++      }
++}
++
++static int mach64_dma_dispatch_vertex(struct drm_device * dev,
++                                    struct drm_file *file_priv,
++                                    drm_mach64_vertex_t * vertex)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      struct drm_buf *copy_buf;
++      void *buf = vertex->buf;
++      unsigned long used = vertex->used;
++      int ret = 0;
++      int i = 0;
++      int done = 0;
++      int verify_ret = 0;
++      DMALOCALS;
++
++      DRM_DEBUG("buf=%p used=%lu nbox=%d\n",
++                buf, used, sarea_priv->nbox);
++
++      if (!used)
++              goto _vertex_done;
++
++      copy_buf = mach64_freelist_get(dev_priv);
++      if (copy_buf == NULL) {
++              DRM_ERROR("couldn't get buffer\n");
++              return -EAGAIN;
++      }
++
++      /* Mach64's vertex data is actually register writes. To avoid security
++       * compromises these register writes have to be verified and copied from
++       * user space into a private DMA buffer.
++       */
++      verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
++
++      if (verify_ret != 0) {
++              mach64_freelist_put(dev_priv, copy_buf);
++              goto _vertex_done;
++      }
++
++      copy_buf->used = used;
++
++      DMASETPTR(copy_buf);
++
++      if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
++              ret = mach64_emit_state(file_priv, dev_priv);
++              if (ret < 0)
++                      return ret;
++      }
++
++      do {
++              /* Emit the next cliprect */
++              if (i < sarea_priv->nbox) {
++                      ret = mach64_emit_cliprect(file_priv, dev_priv,
++                                                 &sarea_priv->boxes[i]);
++                      if (ret < 0) {
++                              /* failed to get buffer */
++                              return ret;
++                      } else if (ret != 0) {
++                              /* null intersection with scissor */
++                              continue;
++                      }
++              }
++              if ((i >= sarea_priv->nbox - 1))
++                      done = 1;
++
++              /* Add the buffer to the DMA queue */
++              DMAADVANCE(dev_priv, done);
++
++      } while (++i < sarea_priv->nbox);
++
++      if (!done) {
++              if (copy_buf->pending) {
++                      DMADISCARDBUF();
++              } else {
++                      /* This buffer wasn't used (no cliprects), so place it
++                       * back on the free list
++                       */
++                      mach64_freelist_put(dev_priv, copy_buf);
++              }
++      }
++
++_vertex_done:
++      sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
++      sarea_priv->nbox = 0;
++
++      return verify_ret;
++}
++
++static __inline__ int copy_from_user_blit(u32 *to,
++                                        const u32 __user *ufrom,
++                                        unsigned long bytes)
++{
++      to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
++
++      if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int mach64_dma_dispatch_blit(struct drm_device * dev,
++                                  struct drm_file *file_priv,
++                                  drm_mach64_blit_t * blit)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      int dword_shift, dwords;
++      unsigned long used;
++      struct drm_buf *copy_buf;
++      int verify_ret = 0;
++      DMALOCALS;
++
++      /* The compiler won't optimize away a division by a variable,
++       * even if the only legal values are powers of two.  Thus, we'll
++       * use a shift instead.
++       */
++      switch (blit->format) {
++      case MACH64_DATATYPE_ARGB8888:
++              dword_shift = 0;
++              break;
++      case MACH64_DATATYPE_ARGB1555:
++      case MACH64_DATATYPE_RGB565:
++      case MACH64_DATATYPE_VYUY422:
++      case MACH64_DATATYPE_YVYU422:
++      case MACH64_DATATYPE_ARGB4444:
++              dword_shift = 1;
++              break;
++      case MACH64_DATATYPE_CI8:
++      case MACH64_DATATYPE_RGB8:
++              dword_shift = 2;
++              break;
++      default:
++              DRM_ERROR("invalid blit format %d\n", blit->format);
++              return -EINVAL;
++      }
++
++      /* Set buf->used to the bytes of blit data based on the blit dimensions
++       * and verify the size.  When the setup is emitted to the buffer with
++       * the DMA* macros below, buf->used is incremented to include the bytes
++       * used for setup as well as the blit data.
++       */
++      dwords = (blit->width * blit->height) >> dword_shift;
++      used = dwords << 2;
++      if (used <= 0 ||
++          used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
++              DRM_ERROR("Invalid blit size: %lu bytes\n", used);
++              return -EINVAL;
++      }
++
++      copy_buf = mach64_freelist_get(dev_priv);
++      if (copy_buf == NULL) {
++              DRM_ERROR("couldn't get buffer\n");
++              return -EAGAIN;
++      }
++
++      /* Copy the blit data from userspace.
++       * 
++       * XXX: This is overkill. The most efficient solution would be having 
++       * two sets of buffers (one set private for vertex data, the other set 
++       * client-writable for blits). However that would bring more complexity 
++       * and would break backward compatibility. The solution currently 
++       * implemented is keeping all buffers private, allowing to secure the
++       * driver, without increasing complexity at the expense of some speed 
++       * transferring data.
++       */
++      verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
++
++      if (verify_ret != 0) {
++              mach64_freelist_put(dev_priv, copy_buf);
++              goto _blit_done;
++      }
++
++      copy_buf->used = used;
++
++      /* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
++       * continuation buffers?
++       */
++
++      /* Blit via BM_HOSTDATA (gui-master) - like HOST_DATA[0-15], but doesn't require
++       * a register command every 16 dwords.  State setup is added at the start of the
++       * buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
++       */
++      DMASETPTR(copy_buf);
++
++      DMAOUTREG(MACH64_Z_CNTL, 0);
++      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++      DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));      /* no scissor */
++      DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
++
++      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);      /* disable */
++      DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++                MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM);
++
++      DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0)      /* dst pix width */
++                |(blit->format << 4)  /* composite pix width */
++                |(blit->format << 8)  /* src pix width */
++                |(blit->format << 16) /* host data pix width */
++                |(blit->format << 28) /* scaler/3D pix width */
++          );
++
++      DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);    /* enable all planes */
++      DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S);
++      DMAOUTREG(MACH64_DP_SRC,
++                MACH64_BKGD_SRC_BKGD_CLR
++                | MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE);
++
++      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                (blit->pitch << 22) | (blit->offset >> 3));
++      DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
++      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
++
++      DRM_DEBUG("%lu bytes\n", used);
++
++      /* Add the buffer to the queue */
++      DMAADVANCEHOSTDATA(dev_priv);
++
++_blit_done:
++      return verify_ret;
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++
++int mach64_dma_clear(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_clear_t *clear = data;
++      int ret;
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++      ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
++                                      clear->x, clear->y, clear->w, clear->h,
++                                      clear->clear_color,
++                                      clear->clear_depth);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
++      return ret;
++}
++
++int mach64_dma_swap(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int ret;
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++      ret = mach64_dma_dispatch_swap(dev, file_priv);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
++      return ret;
++}
++
++int mach64_dma_vertex(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n",
++                DRM_CURRENTPID,
++                vertex->buf, vertex->used, vertex->discard);
++
++      if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
++              DRM_ERROR("buffer prim %d\n", vertex->prim);
++              return -EINVAL;
++      }
++
++      if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
++              DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
++                        vertex->used);
++              return -EINVAL;
++      }
++
++      if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++      return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
++}
++
++int mach64_dma_blit(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_blit_t *blit = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      ret = mach64_dma_dispatch_blit(dev, file_priv, blit);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
++                            MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS);
++
++      return ret;
++}
++
++int mach64_get_param(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_getparam_t *param = data;
++      int value;
++
++      DRM_DEBUG("\n");
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (param->param) {
++      case MACH64_PARAM_FRAMES_QUEUED:
++              /* Needs lock since it calls mach64_ring_tick() */
++              LOCK_TEST_WITH_RETURN(dev, file_priv);
++              value = mach64_do_get_frames_queued(dev_priv);
++              break;
++      case MACH64_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/Makefile git-nokia/drivers/gpu/drm-tungsten/Makefile
+--- git/drivers/gpu/drm-tungsten/Makefile      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/Makefile        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,74 @@
++#
++# Makefile for the drm device driver.  This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++# Based on David Woodhouse's mtd build.
++#
++# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $
++#
++
++drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
++              drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
++              drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
++              drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
++              drm_memory_debug.o ati_pcigart.o drm_sman.o \
++              drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
++              drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
++              drm_regman.o drm_vm_nopage_compat.o drm_gem.o
++pvr2d-objs  := pvr2d_drv.o
++tdfx-objs   := tdfx_drv.o
++r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
++mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
++i810-objs   := i810_drv.o i810_dma.o
++i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
++              i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
++              i915_opregion.o \
++              i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
++nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
++              nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
++              nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
++              nv04_timer.o \
++              nv04_mc.o nv40_mc.o nv50_mc.o \
++              nv04_fb.o nv10_fb.o nv40_fb.o \
++              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
++              nv04_graph.o nv10_graph.o nv20_graph.o \
++              nv40_graph.o nv50_graph.o \
++              nv04_instmem.o nv50_instmem.o
++radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
++sis-objs    := sis_drv.o sis_mm.o
++ffb-objs    := ffb_drv.o ffb_context.o
++savage-objs := savage_drv.o savage_bci.o savage_state.o
++via-objs    := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
++              via_video.o via_dmablit.o via_fence.o via_buffer.o
++mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
++nv-objs := nv_drv.o
++xgi-objs    := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
++              xgi_fence.o
++
++ifeq ($(CONFIG_COMPAT),y)
++drm-objs    += drm_ioc32.o
++radeon-objs += radeon_ioc32.o
++mga-objs    += mga_ioc32.o
++r128-objs   += r128_ioc32.o
++i915-objs   += i915_ioc32.o
++nouveau-objs += nouveau_ioc32.o
++xgi-objs    += xgi_ioc32.o
++endif
++
++obj-m                                 += drm.o
++obj-$(CONFIG_DRM_TUNGSTEN_PVR2D)      += pvr2d.o
++obj-$(CONFIG_DRM_TUNGSTEN_TDFX)               += tdfx.o
++obj-$(CONFIG_DRM_TUNGSTEN_R128)               += r128.o
++obj-$(CONFIG_DRM_TUNGSTEN_RADEON)     += radeon.o
++obj-$(CONFIG_DRM_TUNGSTEN_MGA)                += mga.o
++obj-$(CONFIG_DRM_TUNGSTEN_I810)               += i810.o
++obj-$(CONFIG_DRM_TUNGSTEN_I915)               += i915.o
++obj-$(CONFIG_DRM_TUNGSTEN_SIS)                += sis.o
++obj-$(CONFIG_DRM_TUNGSTEN_FFB)                += ffb.o
++obj-$(CONFIG_DRM_TUNGSTEN_SAVAGE)     += savage.o
++obj-$(CONFIG_DRM_TUNGSTEN_VIA)                += via.o
++obj-$(CONFIG_DRM_TUNGSTEN_MACH64)     += mach64.o
++obj-$(CONFIG_DRM_TUNGSTEN_NV)         += nv.o
++obj-$(CONFIG_DRM_TUNGSTEN_NOUVEAU)    += nouveau.o
++obj-$(CONFIG_DRM_TUNGSTEN_XGI)                += xgi.o
++
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_dma.c git-nokia/drivers/gpu/drm-tungsten/mga_dma.c
+--- git/drivers/gpu/drm-tungsten/mga_dma.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_dma.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1161 @@
++/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ */
++/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++/**
++ * \file mga_dma.c
++ * DMA support for MGA G200 / G400.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Jeff Hartmann <jhartmann@valinux.com>
++ * \author Keith Whitwell <keith@tungstengraphics.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++#define MGA_DEFAULT_USEC_TIMEOUT      10000
++#define MGA_FREELIST_DEBUG            0
++
++#define MINIMAL_CLEANUP    0
++#define FULL_CLEANUP       1
++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
++
++/* ================================================================
++ * Engine control
++ */
++
++int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
++{
++      u32 status = 0;
++      int i;
++      DRM_DEBUG("\n");
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++              if (status == MGA_ENDPRDMASTS) {
++                      MGA_WRITE8(MGA_CRTC_INDEX, 0);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++#if MGA_DMA_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x\n", status);
++#endif
++      return -EBUSY;
++}
++
++static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++
++      DRM_DEBUG("\n");
++
++      /* The primary DMA stream should look like new right about now.
++       */
++      primary->tail = 0;
++      primary->space = primary->size;
++      primary->last_flush = 0;
++
++      sarea_priv->last_wrap = 0;
++
++      /* FIXME: Reset counters, buffer ages etc...
++       */
++
++      /* FIXME: What else do we need to reinitialize?  WARP stuff?
++       */
++
++      return 0;
++}
++
++/* ================================================================
++ * Primary DMA stream
++ */
++
++void mga_do_dma_flush(drm_mga_private_t * dev_priv)
++{
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++      u32 head, tail;
++      u32 status = 0;
++      int i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* We need to wait so that we can do a safe flush */
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++              if (status == MGA_ENDPRDMASTS)
++                      break;
++              DRM_UDELAY(1);
++      }
++
++      if (primary->tail == primary->last_flush) {
++              DRM_DEBUG("   bailing out...\n");
++              return;
++      }
++
++      tail = primary->tail + dev_priv->primary->offset;
++
++      /* We need to pad the stream between flushes, as the card
++       * actually (partially?) reads the first of these commands.
++       * See page 4-16 in the G400 manual, middle of the page or so.
++       */
++      BEGIN_DMA(1);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++
++      primary->last_flush = primary->tail;
++
++      head = MGA_READ(MGA_PRIMADDRESS);
++
++      if (head <= tail) {
++              primary->space = primary->size - primary->tail;
++      } else {
++              primary->space = head - tail;
++      }
++
++      DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
++      DRM_DEBUG("   tail = 0x%06lx\n", tail - dev_priv->primary->offset);
++      DRM_DEBUG("  space = 0x%06x\n", primary->space);
++
++      mga_flush_write_combine();
++      MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
++
++      DRM_DEBUG("done.\n");
++}
++
++void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
++{
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++      u32 head, tail;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_DMA_WRAP();
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++
++      tail = primary->tail + dev_priv->primary->offset;
++
++      primary->tail = 0;
++      primary->last_flush = 0;
++      primary->last_wrap++;
++
++      head = MGA_READ(MGA_PRIMADDRESS);
++
++      if (head == dev_priv->primary->offset) {
++              primary->space = primary->size;
++      } else {
++              primary->space = head - dev_priv->primary->offset;
++      }
++
++      DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
++      DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
++      DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
++      DRM_DEBUG("  space = 0x%06x\n", primary->space);
++
++      mga_flush_write_combine();
++      MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
++
++      set_bit(0, &primary->wrapped);
++      DRM_DEBUG("done.\n");
++}
++
++void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
++{
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 head = dev_priv->primary->offset;
++      DRM_DEBUG("\n");
++
++      sarea_priv->last_wrap++;
++      DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);
++
++      mga_flush_write_combine();
++      MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
++
++      clear_bit(0, &primary->wrapped);
++      DRM_DEBUG("done.\n");
++}
++
++/* ================================================================
++ * Freelist management
++ */
++
++#define MGA_BUFFER_USED               ~0
++#define MGA_BUFFER_FREE               0
++
++#if MGA_FREELIST_DEBUG
++static void mga_freelist_print(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_freelist_t *entry;
++
++      DRM_INFO("\n");
++      DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
++               dev_priv->sarea_priv->last_dispatch,
++               (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
++                              dev_priv->primary->offset));
++      DRM_INFO("current freelist:\n");
++
++      for (entry = dev_priv->head->next; entry; entry = entry->next) {
++              DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
++                       entry, entry->buf->idx, entry->age.head,
++                       entry->age.head - dev_priv->primary->offset);
++      }
++      DRM_INFO("\n");
++}
++#endif
++
++static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_freelist_t *entry;
++      int i;
++      DRM_DEBUG("count=%d\n", dma->buf_count);
++
++      dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++      if (dev_priv->head == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
++      SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++
++              entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++              if (entry == NULL)
++                      return -ENOMEM;
++
++              memset(entry, 0, sizeof(drm_mga_freelist_t));
++
++              entry->next = dev_priv->head->next;
++              entry->prev = dev_priv->head;
++              SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
++              entry->buf = buf;
++
++              if (dev_priv->head->next != NULL)
++                      dev_priv->head->next->prev = entry;
++              if (entry->next == NULL)
++                      dev_priv->tail = entry;
++
++              buf_priv->list_entry = entry;
++              buf_priv->discard = 0;
++              buf_priv->dispatched = 0;
++
++              dev_priv->head->next = entry;
++      }
++
++      return 0;
++}
++
++static void mga_freelist_cleanup(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_freelist_t *entry;
++      drm_mga_freelist_t *next;
++      DRM_DEBUG("\n");
++
++      entry = dev_priv->head;
++      while (entry) {
++              next = entry->next;
++              drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++              entry = next;
++      }
++
++      dev_priv->head = dev_priv->tail = NULL;
++}
++
++#if 0
++/* FIXME: Still needed?
++ */
++static void mga_freelist_reset(struct drm_device * dev)
++{
++      drm_device_dma_t *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      int i;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++              SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
++      }
++}
++#endif
++
++static struct drm_buf *mga_freelist_get(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_freelist_t *next;
++      drm_mga_freelist_t *prev;
++      drm_mga_freelist_t *tail = dev_priv->tail;
++      u32 head, wrap;
++      DRM_DEBUG("\n");
++
++      head = MGA_READ(MGA_PRIMADDRESS);
++      wrap = dev_priv->sarea_priv->last_wrap;
++
++      DRM_DEBUG("   tail=0x%06lx %d\n",
++                tail->age.head ?
++                tail->age.head - dev_priv->primary->offset : 0,
++                tail->age.wrap);
++      DRM_DEBUG("   head=0x%06lx %d\n",
++                head - dev_priv->primary->offset, wrap);
++
++      if (TEST_AGE(&tail->age, head, wrap)) {
++              prev = dev_priv->tail->prev;
++              next = dev_priv->tail;
++              prev->next = NULL;
++              next->prev = next->next = NULL;
++              dev_priv->tail = prev;
++              SET_AGE(&next->age, MGA_BUFFER_USED, 0);
++              return next->buf;
++      }
++
++      DRM_DEBUG("returning NULL!\n");
++      return NULL;
++}
++
++int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_freelist_t *head, *entry, *prev;
++
++      DRM_DEBUG("age=0x%06lx wrap=%d\n",
++                buf_priv->list_entry->age.head -
++                dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
++
++      entry = buf_priv->list_entry;
++      head = dev_priv->head;
++
++      if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
++              SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
++              prev = dev_priv->tail;
++              prev->next = entry;
++              entry->prev = prev;
++              entry->next = NULL;
++      } else {
++              prev = head->next;
++              head->next = entry;
++              prev->prev = entry;
++              entry->prev = head;
++              entry->next = prev;
++      }
++
++      return 0;
++}
++
++/* ================================================================
++ * DMA initialization, cleanup
++ */
++
++int mga_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      drm_mga_private_t *dev_priv;
++
++      dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
++      if (!dev_priv)
++              return -ENOMEM;
++
++      dev->dev_private = (void *)dev_priv;
++      memset(dev_priv, 0, sizeof(drm_mga_private_t));
++
++      dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
++      dev_priv->chipset = flags;
++
++      dev_priv->mmio_base = drm_get_resource_start(dev, 1);
++      dev_priv->mmio_size = drm_get_resource_len(dev, 1);
++
++      dev->counters += 3;
++      dev->types[6] = _DRM_STAT_IRQ;
++      dev->types[7] = _DRM_STAT_PRIMARY;
++      dev->types[8] = _DRM_STAT_SECONDARY;
++
++      return 0;
++}
++
++/**
++ * Bootstrap the driver for AGP DMA.
++ *
++ * \todo
++ * Investigate whether there is any benefit to storing the WARP microcode in
++ * AGP memory.  If not, the microcode may as well always be put in PCI
++ * memory.
++ *
++ * \todo
++ * This routine needs to set dma_bs->agp_mode to the mode actually configured
++ * in the hardware.  Looking just at the Linux AGP driver code, I don't see
++ * an easy way to determine this.
++ *
++ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
++ */
++static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
++                                  drm_mga_dma_bootstrap_t * dma_bs)
++{
++      drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *)dev->dev_private;
++      unsigned int warp_size = mga_warp_microcode_size(dev_priv);
++      int err;
++      unsigned offset;
++      const unsigned secondary_size = dma_bs->secondary_bin_count
++              * dma_bs->secondary_bin_size;
++      const unsigned agp_size = (dma_bs->agp_size << 20);
++      struct drm_buf_desc req;
++      struct drm_agp_mode mode;
++      struct drm_agp_info info;
++      struct drm_agp_buffer agp_req;
++      struct drm_agp_binding bind_req;
++
++      /* Acquire AGP. */
++      err = drm_agp_acquire(dev);
++      if (err) {
++              DRM_ERROR("Unable to acquire AGP: %d\n", err);
++              return err;
++      }
++
++      err = drm_agp_info(dev, &info);
++      if (err) {
++              DRM_ERROR("Unable to get AGP info: %d\n", err);
++              return err;
++      }
++
++      mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
++      err = drm_agp_enable(dev, mode);
++      if (err) {
++              DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
++              return err;
++      }
++
++      /* In addition to the usual AGP mode configuration, the G200 AGP cards
++       * need to have the AGP mode "manually" set.
++       */
++
++      if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
++              if (mode.mode & 0x02) {
++                      MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
++              } else {
++                      MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
++              }
++      }
++
++      /* Allocate and bind AGP memory. */
++      agp_req.size = agp_size;
++      agp_req.type = 0;
++      err = drm_agp_alloc(dev, &agp_req);
++      if (err) {
++              dev_priv->agp_size = 0;
++              DRM_ERROR("Unable to allocate %uMB AGP memory\n",
++                        dma_bs->agp_size);
++              return err;
++      }
++
++      dev_priv->agp_size = agp_size;
++      dev_priv->agp_handle = agp_req.handle;
++
++      bind_req.handle = agp_req.handle;
++      bind_req.offset = 0;
++      err = drm_agp_bind( dev, &bind_req );
++      if (err) {
++              DRM_ERROR("Unable to bind AGP memory: %d\n", err);
++              return err;
++      }
++
++      /* Make drm_addbufs happy by not trying to create a mapping for less
++       * than a page.
++       */
++      if (warp_size < PAGE_SIZE)
++              warp_size = PAGE_SIZE;
++
++      offset = 0;
++      err = drm_addmap(dev, offset, warp_size,
++                       _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
++      if (err) {
++              DRM_ERROR("Unable to map WARP microcode: %d\n", err);
++              return err;
++      }
++
++      offset += warp_size;
++      err = drm_addmap(dev, offset, dma_bs->primary_size,
++                       _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary);
++      if (err) {
++              DRM_ERROR("Unable to map primary DMA region: %d\n", err);
++              return err;
++      }
++
++      offset += dma_bs->primary_size;
++      err = drm_addmap(dev, offset, secondary_size,
++                       _DRM_AGP, 0, & dev->agp_buffer_map);
++      if (err) {
++              DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
++              return err;
++      }
++
++      (void)memset( &req, 0, sizeof(req) );
++      req.count = dma_bs->secondary_bin_count;
++      req.size = dma_bs->secondary_bin_size;
++      req.flags = _DRM_AGP_BUFFER;
++      req.agp_start = offset;
++
++      err = drm_addbufs_agp(dev, &req);
++      if (err) {
++              DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
++              return err;
++      }
++
++#ifdef __linux__
++      {
++              struct drm_map_list *_entry;
++              unsigned long agp_token = 0;
++
++              list_for_each_entry(_entry, &dev->maplist, head) {
++                      if (_entry->map == dev->agp_buffer_map)
++                              agp_token = _entry->user_token;
++              }
++              if (!agp_token)
++                      return -EFAULT;
++
++              dev->agp_buffer_token = agp_token;
++      }
++#endif
++
++      offset += secondary_size;
++      err = drm_addmap(dev, offset, agp_size - offset,
++                       _DRM_AGP, 0, & dev_priv->agp_textures);
++      if (err) {
++              DRM_ERROR("Unable to map AGP texture region: %d\n", err);
++              return err;
++      }
++
++      drm_core_ioremap(dev_priv->warp, dev);
++      drm_core_ioremap(dev_priv->primary, dev);
++      drm_core_ioremap(dev->agp_buffer_map, dev);
++
++      if (!dev_priv->warp->handle ||
++          !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
++              DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
++                        dev_priv->warp->handle, dev_priv->primary->handle,
++                        dev->agp_buffer_map->handle);
++              return -ENOMEM;
++      }
++
++      dev_priv->dma_access = MGA_PAGPXFER;
++      dev_priv->wagp_enable = MGA_WAGP_ENABLE;
++
++      DRM_INFO("Initialized card for AGP DMA.\n");
++      return 0;
++}
++
++/**
++ * Bootstrap the driver for PCI DMA.
++ *
++ * \todo
++ * The algorithm for decreasing the size of the primary DMA buffer could be
++ * better.  The size should be rounded up to the nearest page size, then
++ * decrease the request size by a single page each pass through the loop.
++ *
++ * \todo
++ * Determine whether the maximum address passed to drm_pci_alloc is correct.
++ * The same goes for drm_addbufs_pci.
++ *
++ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
++ */
++static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
++                                  drm_mga_dma_bootstrap_t * dma_bs)
++{
++      drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *) dev->dev_private;
++      unsigned int warp_size = mga_warp_microcode_size(dev_priv);
++      unsigned int primary_size;
++      unsigned int bin_count;
++      int err;
++      struct drm_buf_desc req;
++
++
++      if (dev->dma == NULL) {
++              DRM_ERROR("dev->dma is NULL\n");
++              return -EFAULT;
++      }
++
++      /* Make drm_addbufs happy by not trying to create a mapping for less
++       * than a page.
++       */
++      if (warp_size < PAGE_SIZE)
++              warp_size = PAGE_SIZE;
++
++      /* The proper alignment is 0x100 for this mapping */
++      err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
++                       _DRM_READ_ONLY, &dev_priv->warp);
++      if (err != 0) {
++              DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
++                        err);
++              return err;
++      }
++
++      /* Other than the bottom two bits being used to encode other
++       * information, there don't appear to be any restrictions on the
++       * alignment of the primary or secondary DMA buffers.
++       */
++
++      for (primary_size = dma_bs->primary_size; primary_size != 0;
++           primary_size >>= 1 ) {
++              /* The proper alignment for this mapping is 0x04 */
++              err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
++                               _DRM_READ_ONLY, &dev_priv->primary);
++              if (!err)
++                      break;
++      }
++
++      if (err != 0) {
++              DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
++              return -ENOMEM;
++      }
++
++      if (dev_priv->primary->size != dma_bs->primary_size) {
++              DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
++                       dma_bs->primary_size,
++                       (unsigned)dev_priv->primary->size);
++              dma_bs->primary_size = dev_priv->primary->size;
++      }
++
++      for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
++           bin_count-- ) {
++              (void)memset(&req, 0, sizeof(req));
++              req.count = bin_count;
++              req.size = dma_bs->secondary_bin_size;
++
++              err = drm_addbufs_pci(dev, &req);
++              if (!err) {
++                      break;
++              }
++      }
++
++      if (bin_count == 0) {
++              DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
++              return err;
++      }
++
++      if (bin_count != dma_bs->secondary_bin_count) {
++              DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
++                       "to %u.\n", dma_bs->secondary_bin_count, bin_count);
++
++              dma_bs->secondary_bin_count = bin_count;
++      }
++
++      dev_priv->dma_access = 0;
++      dev_priv->wagp_enable = 0;
++
++      dma_bs->agp_mode = 0;
++
++      DRM_INFO("Initialized card for PCI DMA.\n");
++      return 0;
++}
++
++
++static int mga_do_dma_bootstrap(struct drm_device *dev,
++                              drm_mga_dma_bootstrap_t *dma_bs)
++{
++      const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
++      int err;
++      drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *) dev->dev_private;
++
++
++      dev_priv->used_new_dma_init = 1;
++
++      /* The first steps are the same for both PCI and AGP based DMA.  Map
++       * the cards MMIO registers and map a status page.
++       */
++      err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
++                       _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio);
++      if (err) {
++              DRM_ERROR("Unable to map MMIO region: %d\n", err);
++              return err;
++      }
++
++
++      err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
++                       _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
++                       & dev_priv->status);
++      if (err) {
++              DRM_ERROR("Unable to map status region: %d\n", err);
++              return err;
++      }
++
++
++      /* The DMA initialization procedure is slightly different for PCI and
++       * AGP cards.  AGP cards just allocate a large block of AGP memory and
++       * carve off portions of it for internal uses.  The remaining memory
++       * is returned to user-mode to be used for AGP textures.
++       */
++
++      if (is_agp) {
++              err = mga_do_agp_dma_bootstrap(dev, dma_bs);
++      }
++
++      /* If we attempted to initialize the card for AGP DMA but failed,
++       * clean-up any mess that may have been created.
++       */
++
++      if (err) {
++              mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
++      }
++
++
++      /* Not only do we want to try and initialized PCI cards for PCI DMA,
++       * but we also try to initialized AGP cards that could not be
++       * initialized for AGP DMA.  This covers the case where we have an AGP
++       * card in a system with an unsupported AGP chipset.  In that case the
++       * card will be detected as AGP, but we won't be able to allocate any
++       * AGP memory, etc.
++       */
++
++      if (!is_agp || err) {
++              err = mga_do_pci_dma_bootstrap(dev, dma_bs);
++      }
++
++
++      return err;
++}
++
++int mga_dma_bootstrap(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      drm_mga_dma_bootstrap_t *bootstrap = data;
++      int err;
++      static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
++      const drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *) dev->dev_private;
++
++
++      err = mga_do_dma_bootstrap(dev, bootstrap);
++      if (err) {
++              mga_do_cleanup_dma(dev, FULL_CLEANUP);
++              return err;
++      }
++
++      if (dev_priv->agp_textures != NULL) {
++              bootstrap->texture_handle = dev_priv->agp_textures->offset;
++              bootstrap->texture_size = dev_priv->agp_textures->size;
++      } else {
++              bootstrap->texture_handle = 0;
++              bootstrap->texture_size = 0;
++      }
++
++      bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
++
++      return 0;
++}
++
++
++static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
++{
++      drm_mga_private_t *dev_priv;
++      int ret;
++      DRM_DEBUG("\n");
++
++
++      dev_priv = dev->dev_private;
++
++      if (init->sgram) {
++              dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
++      } else {
++              dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
++      }
++      dev_priv->maccess = init->maccess;
++
++      dev_priv->fb_cpp = init->fb_cpp;
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      dev_priv->depth_cpp = init->depth_cpp;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      /* FIXME: Need to support AGP textures...
++       */
++      dev_priv->texture_offset = init->texture_offset[0];
++      dev_priv->texture_size = init->texture_size[0];
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("failed to find sarea!\n");
++              return -EINVAL;
++      }
++
++      if (!dev_priv->used_new_dma_init) {
++
++              dev_priv->dma_access = MGA_PAGPXFER;
++              dev_priv->wagp_enable = MGA_WAGP_ENABLE;
++
++              dev_priv->status = drm_core_findmap(dev, init->status_offset);
++              if (!dev_priv->status) {
++                      DRM_ERROR("failed to find status page!\n");
++                      return -EINVAL;
++              }
++              dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++              if (!dev_priv->mmio) {
++                      DRM_ERROR("failed to find mmio region!\n");
++                      return -EINVAL;
++              }
++              dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
++              if (!dev_priv->warp) {
++                      DRM_ERROR("failed to find warp microcode region!\n");
++                      return -EINVAL;
++              }
++              dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
++              if (!dev_priv->primary) {
++                      DRM_ERROR("failed to find primary dma region!\n");
++                      return -EINVAL;
++              }
++              dev->agp_buffer_token = init->buffers_offset;
++              dev->agp_buffer_map =
++                      drm_core_findmap(dev, init->buffers_offset);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("failed to find dma buffer region!\n");
++                      return -EINVAL;
++              }
++
++              drm_core_ioremap(dev_priv->warp, dev);
++              drm_core_ioremap(dev_priv->primary, dev);
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++      }
++
++      dev_priv->sarea_priv =
++          (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                               init->sarea_priv_offset);
++
++      if (!dev_priv->warp->handle ||
++          !dev_priv->primary->handle ||
++          ((dev_priv->dma_access != 0) &&
++           ((dev->agp_buffer_map == NULL) ||
++            (dev->agp_buffer_map->handle == NULL)))) {
++              DRM_ERROR("failed to ioremap agp regions!\n");
++              return -ENOMEM;
++      }
++
++      ret = mga_warp_install_microcode(dev_priv);
++      if (ret != 0) {
++              DRM_ERROR("failed to install WARP ucode: %d!\n", ret);
++              return ret;
++      }
++
++      ret = mga_warp_init(dev_priv);
++      if (ret != 0) {
++              DRM_ERROR("failed to init WARP engine: %d!\n", ret);
++              return ret;
++      }
++
++      dev_priv->prim.status = (u32 *) dev_priv->status->handle;
++
++      mga_do_wait_for_idle(dev_priv);
++
++      /* Init the primary DMA registers.
++       */
++      MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
++
++      dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
++      dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
++                            + dev_priv->primary->size);
++      dev_priv->prim.size = dev_priv->primary->size;
++
++      dev_priv->prim.tail = 0;
++      dev_priv->prim.space = dev_priv->prim.size;
++      dev_priv->prim.wrapped = 0;
++
++      dev_priv->prim.last_flush = 0;
++      dev_priv->prim.last_wrap = 0;
++
++      dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
++
++      dev_priv->prim.status[0] = dev_priv->primary->offset;
++      dev_priv->prim.status[1] = 0;
++
++      dev_priv->sarea_priv->last_wrap = 0;
++      dev_priv->sarea_priv->last_frame.head = 0;
++      dev_priv->sarea_priv->last_frame.wrap = 0;
++
++      if (mga_freelist_init(dev, dev_priv) < 0) {
++              DRM_ERROR("could not initialize freelist\n");
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
++{
++      int err = 0;
++      DRM_DEBUG("\n");
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              drm_mga_private_t *dev_priv = dev->dev_private;
++
++              if ((dev_priv->warp != NULL)
++                  && (dev_priv->warp->type != _DRM_CONSISTENT))
++                      drm_core_ioremapfree(dev_priv->warp, dev);
++
++              if ((dev_priv->primary != NULL)
++                  && (dev_priv->primary->type != _DRM_CONSISTENT))
++                      drm_core_ioremapfree(dev_priv->primary, dev);
++
++              if (dev->agp_buffer_map != NULL)
++                      drm_core_ioremapfree(dev->agp_buffer_map, dev);
++
++              if (dev_priv->used_new_dma_init) {
++                      if (dev_priv->agp_handle != 0) {
++                              struct drm_agp_binding unbind_req;
++                              struct drm_agp_buffer free_req;
++
++                              unbind_req.handle = dev_priv->agp_handle;
++                              drm_agp_unbind(dev, &unbind_req);
++
++                              free_req.handle = dev_priv->agp_handle;
++                              drm_agp_free(dev, &free_req);
++
++                              dev_priv->agp_textures = NULL;
++                              dev_priv->agp_size = 0;
++                              dev_priv->agp_handle = 0;
++                      }
++
++                      if ((dev->agp != NULL) && dev->agp->acquired) {
++                              err = drm_agp_release(dev);
++                      }
++              }
++
++              dev_priv->warp = NULL;
++              dev_priv->primary = NULL;
++              dev_priv->sarea = NULL;
++              dev_priv->sarea_priv = NULL;
++              dev->agp_buffer_map = NULL;
++
++              if (full_cleanup) {
++                      dev_priv->mmio = NULL;
++                      dev_priv->status = NULL;
++                      dev_priv->used_new_dma_init = 0;
++              }
++
++              memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
++              dev_priv->warp_pipe = 0;
++              memset(dev_priv->warp_pipe_phys, 0,
++                     sizeof(dev_priv->warp_pipe_phys));
++
++              if (dev_priv->head != NULL) {
++                      mga_freelist_cleanup(dev);
++              }
++      }
++
++      return err;
++}
++
++int mga_dma_init(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      drm_mga_init_t *init = data;
++      int err;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case MGA_INIT_DMA:
++              err = mga_do_init_dma(dev, init);
++              if (err) {
++                      (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
++              }
++              return err;
++      case MGA_CLEANUP_DMA:
++              return mga_do_cleanup_dma(dev, FULL_CLEANUP);
++      }
++
++      return -EINVAL;
++}
++
++/* ================================================================
++ * Primary DMA stream management
++ */
++
++int mga_dma_flush(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      struct drm_lock *lock = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("%s%s%s\n",
++                (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
++                (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
++                (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
++
++      WRAP_WAIT_WITH_RETURN(dev_priv);
++
++      if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
++              mga_do_dma_flush(dev_priv);
++      }
++
++      if (lock->flags & _DRM_LOCK_QUIESCENT) {
++#if MGA_DMA_DEBUG
++              int ret = mga_do_wait_for_idle(dev_priv);
++              if (ret < 0)
++                      DRM_INFO("-EBUSY\n");
++              return ret;
++#else
++              return mga_do_wait_for_idle(dev_priv);
++#endif
++      } else {
++              return 0;
++      }
++}
++
++int mga_dma_reset(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mga_do_dma_reset(dev_priv);
++}
++
++/* ================================================================
++ * DMA buffer management
++ */
++
++static int mga_dma_get_buffers(struct drm_device * dev,
++                             struct drm_file *file_priv, struct drm_dma * d)
++{
++      struct drm_buf *buf;
++      int i;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = mga_freelist_get(dev);
++              if (!buf)
++                      return -EAGAIN;
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i],
++                                   &buf->idx, sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i],
++                                   &buf->total, sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int mga_dma_buffers(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      struct drm_dma *d = data;
++      int ret = 0;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = mga_dma_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++/**
++ * Called just before the module is unloaded.
++ */
++int mga_driver_unload(struct drm_device * dev)
++{
++      drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++
++      return 0;
++}
++
++/**
++ * Called when the last opener of the device is closed.
++ */
++void mga_driver_lastclose(struct drm_device * dev)
++{
++      mga_do_cleanup_dma(dev, FULL_CLEANUP);
++}
++
++int mga_driver_dma_quiescent(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      return mga_do_wait_for_idle(dev_priv);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drm.h git-nokia/drivers/gpu/drm-tungsten/mga_drm.h
+--- git/drivers/gpu/drm-tungsten/mga_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,425 @@
++/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
++ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Jeff Hartmann <jhartmann@valinux.com>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *
++ * Rewritten by:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __MGA_DRM_H__
++#define __MGA_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mga_sarea.h)
++ */
++
++#ifndef __MGA_SAREA_DEFINES__
++#define __MGA_SAREA_DEFINES__
++
++/* WARP pipe flags
++ */
++#define MGA_F                 0x1     /* fog */
++#define MGA_A                 0x2     /* alpha */
++#define MGA_S                 0x4     /* specular */
++#define MGA_T2                        0x8     /* multitexture */
++
++#define MGA_WARP_TGZ          0
++#define MGA_WARP_TGZF         (MGA_F)
++#define MGA_WARP_TGZA         (MGA_A)
++#define MGA_WARP_TGZAF                (MGA_F|MGA_A)
++#define MGA_WARP_TGZS         (MGA_S)
++#define MGA_WARP_TGZSF                (MGA_S|MGA_F)
++#define MGA_WARP_TGZSA                (MGA_S|MGA_A)
++#define MGA_WARP_TGZSAF               (MGA_S|MGA_F|MGA_A)
++#define MGA_WARP_T2GZ         (MGA_T2)
++#define MGA_WARP_T2GZF                (MGA_T2|MGA_F)
++#define MGA_WARP_T2GZA                (MGA_T2|MGA_A)
++#define MGA_WARP_T2GZAF               (MGA_T2|MGA_A|MGA_F)
++#define MGA_WARP_T2GZS                (MGA_T2|MGA_S)
++#define MGA_WARP_T2GZSF               (MGA_T2|MGA_S|MGA_F)
++#define MGA_WARP_T2GZSA               (MGA_T2|MGA_S|MGA_A)
++#define MGA_WARP_T2GZSAF      (MGA_T2|MGA_S|MGA_F|MGA_A)
++
++#define MGA_MAX_G200_PIPES    8       /* no multitex */
++#define MGA_MAX_G400_PIPES    16
++#define MGA_MAX_WARP_PIPES    MGA_MAX_G400_PIPES
++#define MGA_WARP_UCODE_SIZE   32768   /* in bytes */
++
++#define MGA_CARD_TYPE_G200    1
++#define MGA_CARD_TYPE_G400    2
++#define MGA_CARD_TYPE_G450    3       /* not currently used */
++#define MGA_CARD_TYPE_G550    4
++
++#define MGA_FRONT             0x1
++#define MGA_BACK              0x2
++#define MGA_DEPTH             0x4
++
++/* What needs to be changed for the current vertex dma buffer?
++ */
++#define MGA_UPLOAD_CONTEXT    0x1
++#define MGA_UPLOAD_TEX0               0x2
++#define MGA_UPLOAD_TEX1               0x4
++#define MGA_UPLOAD_PIPE               0x8
++#define MGA_UPLOAD_TEX0IMAGE  0x10    /* handled client-side */
++#define MGA_UPLOAD_TEX1IMAGE  0x20    /* handled client-side */
++#define MGA_UPLOAD_2D         0x40
++#define MGA_WAIT_AGE          0x80    /* handled client-side */
++#define MGA_UPLOAD_CLIPRECTS  0x100   /* handled client-side */
++#if 0
++#define MGA_DMA_FLUSH         0x200   /* set when someone gets the lock
++                                         quiescent */
++#endif
++
++/* 32 buffers of 64k each, total 2 meg.
++ */
++#define MGA_BUFFER_SIZE               (1 << 16)
++#define MGA_NUM_BUFFERS               128
++
++/* Keep these small for testing.
++ */
++#define MGA_NR_SAREA_CLIPRECTS        8
++
++/* 2 heaps (1 for card, 1 for agp), each divided into upto 128
++ * regions, subject to a minimum region size of (1<<16) == 64k.
++ *
++ * Clients may subdivide regions internally, but when sharing between
++ * clients, the region size is the minimum granularity.
++ */
++
++#define MGA_CARD_HEAP                 0
++#define MGA_AGP_HEAP                  1
++#define MGA_NR_TEX_HEAPS              2
++#define MGA_NR_TEX_REGIONS            16
++#define MGA_LOG_MIN_TEX_REGION_SIZE   16
++
++#define  DRM_MGA_IDLE_RETRY          2048
++
++#endif                                /* __MGA_SAREA_DEFINES__ */
++
++/* Setup registers for 3D context
++ */
++typedef struct {
++      unsigned int dstorg;
++      unsigned int maccess;
++      unsigned int plnwt;
++      unsigned int dwgctl;
++      unsigned int alphactrl;
++      unsigned int fogcolor;
++      unsigned int wflag;
++      unsigned int tdualstage0;
++      unsigned int tdualstage1;
++      unsigned int fcol;
++      unsigned int stencil;
++      unsigned int stencilctl;
++} drm_mga_context_regs_t;
++
++/* Setup registers for 2D, X server
++ */
++typedef struct {
++      unsigned int pitch;
++} drm_mga_server_regs_t;
++
++/* Setup registers for each texture unit
++ */
++typedef struct {
++      unsigned int texctl;
++      unsigned int texctl2;
++      unsigned int texfilter;
++      unsigned int texbordercol;
++      unsigned int texorg;
++      unsigned int texwidth;
++      unsigned int texheight;
++      unsigned int texorg1;
++      unsigned int texorg2;
++      unsigned int texorg3;
++      unsigned int texorg4;
++} drm_mga_texture_regs_t;
++
++/* General aging mechanism
++ */
++typedef struct {
++      unsigned int head;      /* Position of head pointer          */
++      unsigned int wrap;      /* Primary DMA wrap count            */
++} drm_mga_age_t;
++
++typedef struct _drm_mga_sarea {
++      /* The channel for communication of state information to the kernel
++       * on firing a vertex dma buffer.
++       */
++      drm_mga_context_regs_t context_state;
++      drm_mga_server_regs_t server_state;
++      drm_mga_texture_regs_t tex_state[2];
++      unsigned int warp_pipe;
++      unsigned int dirty;
++      unsigned int vertsize;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Information about the most recently used 3d drawable.  The
++       * client fills in the req_* fields, the server fills in the
++       * exported_ fields and puts the cliprects into boxes, above.
++       *
++       * The client clears the exported_drawable field before
++       * clobbering the boxes data.
++       */
++      unsigned int req_drawable;      /* the X drawable id */
++      unsigned int req_draw_buffer;   /* MGA_FRONT or MGA_BACK */
++
++      unsigned int exported_drawable;
++      unsigned int exported_index;
++      unsigned int exported_stamp;
++      unsigned int exported_buffers;
++      unsigned int exported_nfront;
++      unsigned int exported_nback;
++      int exported_back_x, exported_front_x, exported_w;
++      int exported_back_y, exported_front_y, exported_h;
++      struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
++
++      /* Counters for aging textures and for client-side throttling.
++       */
++      unsigned int status[4];
++      unsigned int last_wrap;
++
++      drm_mga_age_t last_frame;
++      unsigned int last_enqueue;      /* last time a buffer was enqueued */
++      unsigned int last_dispatch;     /* age of the most recently dispatched buffer */
++      unsigned int last_quiescent;    /*  */
++
++      /* LRU lists for texture memory in agp space and on the card.
++       */
++      struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
++      unsigned int texAge[MGA_NR_TEX_HEAPS];
++
++      /* Mechanism to validate card state.
++       */
++      int ctxOwner;
++} drm_mga_sarea_t;
++
++
++/* MGA specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_MGA_INIT     0x00
++#define DRM_MGA_FLUSH    0x01
++#define DRM_MGA_RESET    0x02
++#define DRM_MGA_SWAP     0x03
++#define DRM_MGA_CLEAR    0x04
++#define DRM_MGA_VERTEX   0x05
++#define DRM_MGA_INDICES  0x06
++#define DRM_MGA_ILOAD    0x07
++#define DRM_MGA_BLIT     0x08
++#define DRM_MGA_GETPARAM 0x09
++
++/* 3.2:
++ * ioctls for operating on fences.
++ */
++#define DRM_MGA_SET_FENCE      0x0a
++#define DRM_MGA_WAIT_FENCE     0x0b
++#define DRM_MGA_DMA_BOOTSTRAP  0x0c
++
++
++#define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
++#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
++#define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
++#define DRM_IOCTL_MGA_SWAP     DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
++#define DRM_IOCTL_MGA_CLEAR    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
++#define DRM_IOCTL_MGA_VERTEX   DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
++#define DRM_IOCTL_MGA_INDICES  DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
++#define DRM_IOCTL_MGA_ILOAD    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
++#define DRM_IOCTL_MGA_BLIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
++#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
++#define DRM_IOCTL_MGA_SET_FENCE     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
++#define DRM_IOCTL_MGA_WAIT_FENCE    DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
++#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
++
++typedef struct _drm_mga_warp_index {
++      int installed;
++      unsigned long phys_addr;
++      int size;
++} drm_mga_warp_index_t;
++
++typedef struct drm_mga_init {
++      enum {
++              MGA_INIT_DMA = 0x01,
++              MGA_CLEANUP_DMA = 0x02
++      } func;
++
++      unsigned long sarea_priv_offset;
++
++      int chipset;
++      int sgram;
++
++      unsigned int maccess;
++
++      unsigned int fb_cpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++
++      unsigned int depth_cpp;
++      unsigned int depth_offset, depth_pitch;
++
++      unsigned int texture_offset[MGA_NR_TEX_HEAPS];
++      unsigned int texture_size[MGA_NR_TEX_HEAPS];
++
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long status_offset;
++      unsigned long warp_offset;
++      unsigned long primary_offset;
++      unsigned long buffers_offset;
++} drm_mga_init_t;
++
++
++typedef struct drm_mga_dma_bootstrap {
++      /**
++       * \name AGP texture region
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
++       * be filled in with the actual AGP texture settings.
++       *
++       * \warning
++       * If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode
++       * is zero, it means that PCI memory (most likely through the use of
++       * an IOMMU) is being used for "AGP" textures.
++       */
++      /*@{*/
++      unsigned long texture_handle;  /**< Handle used to map AGP textures. */
++      uint32_t     texture_size;    /**< Size of the AGP texture region. */
++      /*@}*/
++
++
++      /**
++       * Requested size of the primary DMA region.
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++       * filled in with the actual size of the primary DMA region allocated.
++       */
++      uint32_t primary_size;
++
++
++      /**
++       * Requested number of secondary DMA buffers.
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++       * filled in with the actual number of secondary DMA buffers
++       * allocated.  Particularly when PCI DMA is used, this may be
++       * (substantially) less than the number requested.
++       */
++      uint32_t secondary_bin_count;
++
++
++      /**
++       * Requested size of each secondary DMA buffer.
++       *
++       * While the kernel \b is free to reduce
++       * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
++       * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
++       */
++      uint32_t secondary_bin_size;
++
++
++      /**
++       * Bit-wise mask of AGPSTAT2_* values.  Currently only \c AGPSTAT2_1X,
++       * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported.  If this value is
++       * zero, it means that PCI DMA should be used, even if AGP is
++       * possible.
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++       * filled in with the actual AGP mode.  If AGP was not available
++       * (i.e., PCI DMA was used), this value will be zero.
++       */
++      uint32_t agp_mode;
++
++
++      /**
++       * Desired AGP GART size, measured in megabytes.
++       */
++      uint8_t agp_size;
++} drm_mga_dma_bootstrap_t;
++
++typedef struct drm_mga_clear {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;
++} drm_mga_clear_t;
++
++typedef struct drm_mga_vertex {
++      int idx;                /* buffer to queue */
++      int used;               /* bytes in use */
++      int discard;            /* client finished with buffer?  */
++} drm_mga_vertex_t;
++
++typedef struct drm_mga_indices {
++      int idx;                /* buffer to queue */
++      unsigned int start;
++      unsigned int end;
++      int discard;            /* client finished with buffer?  */
++} drm_mga_indices_t;
++
++typedef struct drm_mga_iload {
++      int idx;
++      unsigned int dstorg;
++      unsigned int length;
++} drm_mga_iload_t;
++
++typedef struct _drm_mga_blit {
++      unsigned int planemask;
++      unsigned int srcorg;
++      unsigned int dstorg;
++      int src_pitch, dst_pitch;
++      int delta_sx, delta_sy;
++      int delta_dx, delta_dy;
++      int height, ydir;       /* flip image vertically */
++      int source_pitch, dest_pitch;
++} drm_mga_blit_t;
++
++/* 3.1: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define MGA_PARAM_IRQ_NR            1
++
++/* 3.2: Query the actual card type.  The DDX only distinguishes between
++ * G200 chips and non-G200 chips, which it calls G400.  It turns out that
++ * there are some very subtle differences between the G4x0 chips and the G550
++ * chips.  Using this parameter query, a client-side driver can detect the
++ * difference between a G4x0 and a G550.
++ */
++#define MGA_PARAM_CARD_TYPE         2
++
++typedef struct drm_mga_getparam {
++      int param;
++      void __user *value;
++} drm_mga_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drv.c git-nokia/drivers/gpu/drm-tungsten/mga_drv.c
+--- git/drivers/gpu/drm-tungsten/mga_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++#include "drm_pciids.h"
++
++static int mga_driver_device_is_agp(struct drm_device * dev);
++
++static struct pci_device_id pciidlist[] = {
++      mga_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
++          DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .dev_priv_size = sizeof (drm_mga_buf_priv_t),
++      .load = mga_driver_load,
++      .unload = mga_driver_unload,
++      .lastclose = mga_driver_lastclose,
++      .dma_quiescent = mga_driver_dma_quiescent,
++      .device_is_agp = mga_driver_device_is_agp,
++      .get_vblank_counter = mga_get_vblank_counter,
++      .enable_vblank = mga_enable_vblank,
++      .disable_vblank = mga_disable_vblank,
++      .irq_preinstall = mga_driver_irq_preinstall,
++      .irq_postinstall = mga_driver_irq_postinstall,
++      .irq_uninstall = mga_driver_irq_uninstall,
++      .irq_handler = mga_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = mga_ioctls,
++      .dma_ioctl = mga_dma_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = mga_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init mga_init(void)
++{
++      driver.num_ioctls = mga_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit mga_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(mga_init);
++module_exit(mga_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * In addition to the usual tests performed by \c drm_device_is_agp, this
++ * function detects PCI G450 cards that appear to the system exactly like
++ * AGP G450 cards.
++ *
++ * \param dev   The device to be tested.
++ *
++ * \returns
++ * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
++ */
++static int mga_driver_device_is_agp(struct drm_device * dev)
++{
++      const struct pci_dev * const pdev = dev->pdev;
++
++
++      /* There are PCI versions of the G450.  These cards have the
++       * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
++       * bridge chip.  We detect these cards, which are not currently
++       * supported by this driver, by looking at the device ID of the
++       * bus the "card" is on.  If vendor is 0x3388 (Hint Corp) and the
++       * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
++       * device.
++       */
++
++      if ((pdev->device == 0x0525) && pdev->bus->self
++          && (pdev->bus->self->vendor == 0x3388)
++          && (pdev->bus->self->device == 0x0021)) {
++              return 0;
++      }
++
++      return 2;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drv.h git-nokia/drivers/gpu/drm-tungsten/mga_drv.h
+--- git/drivers/gpu/drm-tungsten/mga_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,691 @@
++/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __MGA_DRV_H__
++#define __MGA_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Gareth Hughes, VA Linux Systems Inc."
++
++#define DRIVER_NAME           "mga"
++#define DRIVER_DESC           "Matrox G200/G400"
++#define DRIVER_DATE           "20060319"
++
++#define DRIVER_MAJOR          3
++#define DRIVER_MINOR          2
++#define DRIVER_PATCHLEVEL     2
++
++typedef struct drm_mga_primary_buffer {
++      u8 *start;
++      u8 *end;
++      int size;
++
++      u32 tail;
++      int space;
++      volatile long wrapped;
++
++      volatile u32 *status;
++
++      u32 last_flush;
++      u32 last_wrap;
++
++      u32 high_mark;
++} drm_mga_primary_buffer_t;
++
++typedef struct drm_mga_freelist {
++      struct drm_mga_freelist *next;
++      struct drm_mga_freelist *prev;
++      drm_mga_age_t age;
++      struct drm_buf *buf;
++} drm_mga_freelist_t;
++
++typedef struct {
++      drm_mga_freelist_t *list_entry;
++      int discard;
++      int dispatched;
++} drm_mga_buf_priv_t;
++
++typedef struct drm_mga_private {
++      drm_mga_primary_buffer_t prim;
++      drm_mga_sarea_t *sarea_priv;
++
++      drm_mga_freelist_t *head;
++      drm_mga_freelist_t *tail;
++
++      unsigned int warp_pipe;
++      unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
++
++      int chipset;
++      int usec_timeout;
++
++      /**
++       * If set, the new DMA initialization sequence was used.  This is
++       * primarily used to select how the driver should uninitialize its
++       * internal DMA structures.
++       */
++      int used_new_dma_init;
++
++      /**
++       * If AGP memory is used for DMA buffers, this will be the value
++       * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
++       */
++      u32 dma_access;
++
++      /**
++       * If AGP memory is used for DMA buffers, this will be the value
++       * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
++       * transfer).
++       */
++      u32 wagp_enable;
++
++      /**
++       * \name MMIO region parameters.
++       *
++       * \sa drm_mga_private_t::mmio
++       */
++      /*@{*/
++      u32 mmio_base;                  /**< Bus address of base of MMIO. */
++      u32 mmio_size;                  /**< Size of the MMIO region. */
++      /*@}*/
++
++      u32 clear_cmd;
++      u32 maccess;
++
++      atomic_t vbl_received;          /**< Number of vblanks received. */
++      wait_queue_head_t fence_queue;
++      atomic_t last_fence_retired;
++      u32 next_fence_to_post;
++
++      unsigned int fb_cpp;
++      unsigned int front_offset;
++      unsigned int front_pitch;
++      unsigned int back_offset;
++      unsigned int back_pitch;
++
++      unsigned int depth_cpp;
++      unsigned int depth_offset;
++      unsigned int depth_pitch;
++
++      unsigned int texture_offset;
++      unsigned int texture_size;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *status;
++      drm_local_map_t *warp;
++      drm_local_map_t *primary;
++      drm_local_map_t *agp_textures;
++
++      unsigned long agp_handle;
++      unsigned int agp_size;
++} drm_mga_private_t;
++
++extern struct drm_ioctl_desc mga_ioctls[];
++extern int mga_max_ioctl;
++
++                              /* mga_dma.c */
++extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv);
++extern int mga_dma_init(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int mga_dma_flush(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int mga_dma_reset(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int mga_dma_buffers(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
++extern int mga_driver_unload(struct drm_device * dev);
++extern void mga_driver_lastclose(struct drm_device * dev);
++extern int mga_driver_dma_quiescent(struct drm_device * dev);
++
++extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
++
++extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
++extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
++extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
++
++extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
++
++                              /* mga_warp.c */
++extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
++extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
++extern int mga_warp_init(drm_mga_private_t * dev_priv);
++
++                              /* mga_irq.c */
++extern int mga_enable_vblank(struct drm_device *dev, int crtc);
++extern void mga_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
++extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
++extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
++extern void mga_driver_irq_preinstall(struct drm_device * dev);
++extern int mga_driver_irq_postinstall(struct drm_device * dev);
++extern void mga_driver_irq_uninstall(struct drm_device * dev);
++extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
++                           unsigned long arg);
++
++#define mga_flush_write_combine()     DRM_WRITEMEMORYBARRIER()
++
++#if defined(__linux__) && defined(__alpha__)
++#define MGA_BASE( reg )               ((unsigned long)(dev_priv->mmio->handle))
++#define MGA_ADDR( reg )               (MGA_BASE(reg) + reg)
++
++#define MGA_DEREF( reg )      *(volatile u32 *)MGA_ADDR( reg )
++#define MGA_DEREF8( reg )     *(volatile u8 *)MGA_ADDR( reg )
++
++#define MGA_READ( reg )               (_MGA_READ((u32 *)MGA_ADDR(reg)))
++#define MGA_READ8( reg )      (_MGA_READ((u8 *)MGA_ADDR(reg)))
++#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
++#define MGA_WRITE8( reg, val )  do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
++
++static inline u32 _MGA_READ(u32 * addr)
++{
++      DRM_MEMORYBARRIER();
++      return *(volatile u32 *)addr;
++}
++#else
++#define MGA_READ8( reg )      DRM_READ8(dev_priv->mmio, (reg))
++#define MGA_READ( reg )               DRM_READ32(dev_priv->mmio, (reg))
++#define MGA_WRITE8( reg, val )  DRM_WRITE8(dev_priv->mmio, (reg), (val))
++#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
++#endif
++
++#define DWGREG0               0x1c00
++#define DWGREG0_END   0x1dff
++#define DWGREG1               0x2c00
++#define DWGREG1_END   0x2dff
++
++#define ISREG0(r)     (r >= DWGREG0 && r <= DWGREG0_END)
++#define DMAREG0(r)    (u8)((r - DWGREG0) >> 2)
++#define DMAREG1(r)    (u8)(((r - DWGREG1) >> 2) | 0x80)
++#define DMAREG(r)     (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
++
++/* ================================================================
++ * Helper macros...
++ */
++
++#define MGA_EMIT_STATE( dev_priv, dirty )                             \
++do {                                                                  \
++      if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) {                        \
++              if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) {        \
++                      mga_g400_emit_state( dev_priv );                \
++              } else {                                                \
++                      mga_g200_emit_state( dev_priv );                \
++              }                                                       \
++      }                                                               \
++} while (0)
++
++#define WRAP_TEST_WITH_RETURN( dev_priv )                             \
++do {                                                                  \
++      if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {                 \
++              if ( mga_is_idle( dev_priv ) ) {                        \
++                      mga_do_dma_wrap_end( dev_priv );                \
++              } else if ( dev_priv->prim.space <                      \
++                          dev_priv->prim.high_mark ) {                \
++                      if ( MGA_DMA_DEBUG )                            \
++                              DRM_INFO( "wrap...\n");         \
++                      return -EBUSY;                  \
++              }                                                       \
++      }                                                               \
++} while (0)
++
++#define WRAP_WAIT_WITH_RETURN( dev_priv )                             \
++do {                                                                  \
++      if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {                 \
++              if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {           \
++                      if ( MGA_DMA_DEBUG )                            \
++                              DRM_INFO( "wrap...\n");         \
++                      return -EBUSY;                  \
++              }                                                       \
++              mga_do_dma_wrap_end( dev_priv );                        \
++      }                                                               \
++} while (0)
++
++/* ================================================================
++ * Primary DMA command stream
++ */
++
++#define MGA_VERBOSE   0
++
++#define DMA_LOCALS    unsigned int write; volatile u8 *prim;
++
++#define DMA_BLOCK_SIZE        (5 * sizeof(u32))
++
++#define BEGIN_DMA( n )                                                        \
++do {                                                                  \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "BEGIN_DMA( %d )\n", (n) );           \
++              DRM_INFO( "   space=0x%x req=0x%Zx\n",                  \
++                        dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
++      }                                                               \
++      prim = dev_priv->prim.start;                                    \
++      write = dev_priv->prim.tail;                                    \
++} while (0)
++
++#define BEGIN_DMA_WRAP()                                              \
++do {                                                                  \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "BEGIN_DMA()\n" );                            \
++              DRM_INFO( "   space=0x%x\n", dev_priv->prim.space );    \
++      }                                                               \
++      prim = dev_priv->prim.start;                                    \
++      write = dev_priv->prim.tail;                                    \
++} while (0)
++
++#define ADVANCE_DMA()                                                 \
++do {                                                                  \
++      dev_priv->prim.tail = write;                                    \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n",        \
++                        write, dev_priv->prim.space );                \
++      }                                                               \
++} while (0)
++
++#define FLUSH_DMA()                                                   \
++do {                                                                  \
++      if ( 0 ) {                                                      \
++              DRM_INFO( "\n" );                                       \
++              DRM_INFO( "   tail=0x%06x head=0x%06lx\n",              \
++                        dev_priv->prim.tail,                          \
++                        MGA_READ( MGA_PRIMADDRESS ) -                 \
++                        dev_priv->primary->offset );                  \
++      }                                                               \
++      if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) {                \
++              if ( dev_priv->prim.space <                             \
++                   dev_priv->prim.high_mark ) {                       \
++                      mga_do_dma_wrap_start( dev_priv );              \
++              } else {                                                \
++                      mga_do_dma_flush( dev_priv );                   \
++              }                                                       \
++      }                                                               \
++} while (0)
++
++/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
++ */
++#define DMA_WRITE( offset, val )                                      \
++do {                                                                  \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04Zx\n",        \
++                        (u32)(val), write + (offset) * sizeof(u32) ); \
++      }                                                               \
++      *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
++} while (0)
++
++#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 )   \
++do {                                                                  \
++      DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) |                          \
++                     (DMAREG( reg1 ) << 8) |                          \
++                     (DMAREG( reg2 ) << 16) |                         \
++                     (DMAREG( reg3 ) << 24)) );                       \
++      DMA_WRITE( 1, val0 );                                           \
++      DMA_WRITE( 2, val1 );                                           \
++      DMA_WRITE( 3, val2 );                                           \
++      DMA_WRITE( 4, val3 );                                           \
++      write += DMA_BLOCK_SIZE;                                        \
++} while (0)
++
++/* Buffer aging via primary DMA stream head pointer.
++ */
++
++#define SET_AGE( age, h, w )                                          \
++do {                                                                  \
++      (age)->head = h;                                                \
++      (age)->wrap = w;                                                \
++} while (0)
++
++#define TEST_AGE( age, h, w )         ( (age)->wrap < w ||            \
++                                        ( (age)->wrap == w &&         \
++                                          (age)->head < h ) )
++
++#define AGE_BUFFER( buf_priv )                                                \
++do {                                                                  \
++      drm_mga_freelist_t *entry = (buf_priv)->list_entry;             \
++      if ( (buf_priv)->dispatched ) {                                 \
++              entry->age.head = (dev_priv->prim.tail +                \
++                                 dev_priv->primary->offset);          \
++              entry->age.wrap = dev_priv->sarea_priv->last_wrap;      \
++      } else {                                                        \
++              entry->age.head = 0;                                    \
++              entry->age.wrap = 0;                                    \
++      }                                                               \
++} while (0)
++
++#define MGA_ENGINE_IDLE_MASK          (MGA_SOFTRAPEN |                \
++                                       MGA_DWGENGSTS |                \
++                                       MGA_ENDPRDMASTS)
++#define MGA_DMA_IDLE_MASK             (MGA_SOFTRAPEN |                \
++                                       MGA_ENDPRDMASTS)
++
++#define MGA_DMA_DEBUG                 0
++
++/* A reduced set of the mga registers.
++ */
++#define MGA_CRTC_INDEX                        0x1fd4
++#define MGA_CRTC_DATA                 0x1fd5
++
++/* CRTC11 */
++#define MGA_VINTCLR                   (1 << 4)
++#define MGA_VINTEN                    (1 << 5)
++
++#define MGA_ALPHACTRL                 0x2c7c
++#define MGA_AR0                               0x1c60
++#define MGA_AR1                               0x1c64
++#define MGA_AR2                               0x1c68
++#define MGA_AR3                               0x1c6c
++#define MGA_AR4                               0x1c70
++#define MGA_AR5                               0x1c74
++#define MGA_AR6                               0x1c78
++
++#define MGA_CXBNDRY                   0x1c80
++#define MGA_CXLEFT                    0x1ca0
++#define MGA_CXRIGHT                   0x1ca4
++
++#define MGA_DMAPAD                    0x1c54
++#define MGA_DSTORG                    0x2cb8
++#define MGA_DWGCTL                    0x1c00
++#     define MGA_OPCOD_MASK                   (15 << 0)
++#     define MGA_OPCOD_TRAP                   (4 << 0)
++#     define MGA_OPCOD_TEXTURE_TRAP           (6 << 0)
++#     define MGA_OPCOD_BITBLT                 (8 << 0)
++#     define MGA_OPCOD_ILOAD                  (9 << 0)
++#     define MGA_ATYPE_MASK                   (7 << 4)
++#     define MGA_ATYPE_RPL                    (0 << 4)
++#     define MGA_ATYPE_RSTR                   (1 << 4)
++#     define MGA_ATYPE_ZI                     (3 << 4)
++#     define MGA_ATYPE_BLK                    (4 << 4)
++#     define MGA_ATYPE_I                      (7 << 4)
++#     define MGA_LINEAR                       (1 << 7)
++#     define MGA_ZMODE_MASK                   (7 << 8)
++#     define MGA_ZMODE_NOZCMP                 (0 << 8)
++#     define MGA_ZMODE_ZE                     (2 << 8)
++#     define MGA_ZMODE_ZNE                    (3 << 8)
++#     define MGA_ZMODE_ZLT                    (4 << 8)
++#     define MGA_ZMODE_ZLTE                   (5 << 8)
++#     define MGA_ZMODE_ZGT                    (6 << 8)
++#     define MGA_ZMODE_ZGTE                   (7 << 8)
++#     define MGA_SOLID                        (1 << 11)
++#     define MGA_ARZERO                       (1 << 12)
++#     define MGA_SGNZERO                      (1 << 13)
++#     define MGA_SHIFTZERO                    (1 << 14)
++#     define MGA_BOP_MASK                     (15 << 16)
++#     define MGA_BOP_ZERO                     (0 << 16)
++#     define MGA_BOP_DST                      (10 << 16)
++#     define MGA_BOP_SRC                      (12 << 16)
++#     define MGA_BOP_ONE                      (15 << 16)
++#     define MGA_TRANS_SHIFT                  20
++#     define MGA_TRANS_MASK                   (15 << 20)
++#     define MGA_BLTMOD_MASK                  (15 << 25)
++#     define MGA_BLTMOD_BMONOLEF              (0 << 25)
++#     define MGA_BLTMOD_BMONOWF               (4 << 25)
++#     define MGA_BLTMOD_PLAN                  (1 << 25)
++#     define MGA_BLTMOD_BFCOL                 (2 << 25)
++#     define MGA_BLTMOD_BU32BGR               (3 << 25)
++#     define MGA_BLTMOD_BU32RGB               (7 << 25)
++#     define MGA_BLTMOD_BU24BGR               (11 << 25)
++#     define MGA_BLTMOD_BU24RGB               (15 << 25)
++#     define MGA_PATTERN                      (1 << 29)
++#     define MGA_TRANSC                       (1 << 30)
++#     define MGA_CLIPDIS                      (1 << 31)
++#define MGA_DWGSYNC                   0x2c4c
++
++#define MGA_FCOL                      0x1c24
++#define MGA_FIFOSTATUS                        0x1e10
++#define MGA_FOGCOL                    0x1cf4
++#define MGA_FXBNDRY                   0x1c84
++#define MGA_FXLEFT                    0x1ca8
++#define MGA_FXRIGHT                   0x1cac
++
++#define MGA_ICLEAR                    0x1e18
++#     define MGA_SOFTRAPICLR                  (1 << 0)
++#     define MGA_VLINEICLR                    (1 << 5)
++#define MGA_IEN                               0x1e1c
++#     define MGA_SOFTRAPIEN                   (1 << 0)
++#     define MGA_VLINEIEN                     (1 << 5)
++
++#define MGA_LEN                               0x1c5c
++
++#define MGA_MACCESS                   0x1c04
++
++#define MGA_PITCH                     0x1c8c
++#define MGA_PLNWT                     0x1c1c
++#define MGA_PRIMADDRESS                       0x1e58
++#     define MGA_DMA_GENERAL                  (0 << 0)
++#     define MGA_DMA_BLIT                     (1 << 0)
++#     define MGA_DMA_VECTOR                   (2 << 0)
++#     define MGA_DMA_VERTEX                   (3 << 0)
++#define MGA_PRIMEND                   0x1e5c
++#     define MGA_PRIMNOSTART                  (1 << 0)
++#     define MGA_PAGPXFER                     (1 << 1)
++#define MGA_PRIMPTR                   0x1e50
++#     define MGA_PRIMPTREN0                   (1 << 0)
++#     define MGA_PRIMPTREN1                   (1 << 1)
++
++#define MGA_RST                               0x1e40
++#     define MGA_SOFTRESET                    (1 << 0)
++#     define MGA_SOFTEXTRST                   (1 << 1)
++
++#define MGA_SECADDRESS                        0x2c40
++#define MGA_SECEND                    0x2c44
++#define MGA_SETUPADDRESS              0x2cd0
++#define MGA_SETUPEND                  0x2cd4
++#define MGA_SGN                               0x1c58
++#define MGA_SOFTRAP                   0x2c48
++#define MGA_SRCORG                    0x2cb4
++#     define MGA_SRMMAP_MASK                  (1 << 0)
++#     define MGA_SRCMAP_FB                    (0 << 0)
++#     define MGA_SRCMAP_SYSMEM                (1 << 0)
++#     define MGA_SRCACC_MASK                  (1 << 1)
++#     define MGA_SRCACC_PCI                   (0 << 1)
++#     define MGA_SRCACC_AGP                   (1 << 1)
++#define MGA_STATUS                    0x1e14
++#     define MGA_SOFTRAPEN                    (1 << 0)
++#     define MGA_VSYNCPEN                     (1 << 4)
++#     define MGA_VLINEPEN                     (1 << 5)
++#     define MGA_DWGENGSTS                    (1 << 16)
++#     define MGA_ENDPRDMASTS                  (1 << 17)
++#define MGA_STENCIL                   0x2cc8
++#define MGA_STENCILCTL                        0x2ccc
++
++#define MGA_TDUALSTAGE0                       0x2cf8
++#define MGA_TDUALSTAGE1                       0x2cfc
++#define MGA_TEXBORDERCOL              0x2c5c
++#define MGA_TEXCTL                    0x2c30
++#define MGA_TEXCTL2                   0x2c3c
++#     define MGA_DUALTEX                      (1 << 7)
++#     define MGA_G400_TC2_MAGIC               (1 << 15)
++#     define MGA_MAP1_ENABLE                  (1 << 31)
++#define MGA_TEXFILTER                 0x2c58
++#define MGA_TEXHEIGHT                 0x2c2c
++#define MGA_TEXORG                    0x2c24
++#     define MGA_TEXORGMAP_MASK               (1 << 0)
++#     define MGA_TEXORGMAP_FB                 (0 << 0)
++#     define MGA_TEXORGMAP_SYSMEM             (1 << 0)
++#     define MGA_TEXORGACC_MASK               (1 << 1)
++#     define MGA_TEXORGACC_PCI                (0 << 1)
++#     define MGA_TEXORGACC_AGP                (1 << 1)
++#define MGA_TEXORG1                   0x2ca4
++#define MGA_TEXORG2                   0x2ca8
++#define MGA_TEXORG3                   0x2cac
++#define MGA_TEXORG4                   0x2cb0
++#define MGA_TEXTRANS                  0x2c34
++#define MGA_TEXTRANSHIGH              0x2c38
++#define MGA_TEXWIDTH                  0x2c28
++
++#define MGA_WACCEPTSEQ                        0x1dd4
++#define MGA_WCODEADDR                 0x1e6c
++#define MGA_WFLAG                     0x1dc4
++#define MGA_WFLAG1                    0x1de0
++#define MGA_WFLAGNB                   0x1e64
++#define MGA_WFLAGNB1                  0x1e08
++#define MGA_WGETMSB                   0x1dc8
++#define MGA_WIADDR                    0x1dc0
++#define MGA_WIADDR2                   0x1dd8
++#     define MGA_WMODE_SUSPEND                (0 << 0)
++#     define MGA_WMODE_RESUME                 (1 << 0)
++#     define MGA_WMODE_JUMP                   (2 << 0)
++#     define MGA_WMODE_START                  (3 << 0)
++#     define MGA_WAGP_ENABLE                  (1 << 2)
++#define MGA_WMISC                     0x1e70
++#     define MGA_WUCODECACHE_ENABLE           (1 << 0)
++#     define MGA_WMASTER_ENABLE               (1 << 1)
++#     define MGA_WCACHEFLUSH_ENABLE           (1 << 3)
++#define MGA_WVRTXSZ                   0x1dcc
++
++#define MGA_YBOT                      0x1c9c
++#define MGA_YDST                      0x1c90
++#define MGA_YDSTLEN                   0x1c88
++#define MGA_YDSTORG                   0x1c94
++#define MGA_YTOP                      0x1c98
++
++#define MGA_ZORG                      0x1c0c
++
++/* This finishes the current batch of commands
++ */
++#define MGA_EXEC                      0x0100
++
++/* AGP PLL encoding (for G200 only).
++ */
++#define MGA_AGP_PLL                   0x1e4c
++#     define MGA_AGP2XPLL_DISABLE             (0 << 0)
++#     define MGA_AGP2XPLL_ENABLE              (1 << 0)
++
++/* Warp registers
++ */
++#define MGA_WR0                               0x2d00
++#define MGA_WR1                               0x2d04
++#define MGA_WR2                               0x2d08
++#define MGA_WR3                               0x2d0c
++#define MGA_WR4                               0x2d10
++#define MGA_WR5                               0x2d14
++#define MGA_WR6                               0x2d18
++#define MGA_WR7                               0x2d1c
++#define MGA_WR8                               0x2d20
++#define MGA_WR9                               0x2d24
++#define MGA_WR10                      0x2d28
++#define MGA_WR11                      0x2d2c
++#define MGA_WR12                      0x2d30
++#define MGA_WR13                      0x2d34
++#define MGA_WR14                      0x2d38
++#define MGA_WR15                      0x2d3c
++#define MGA_WR16                      0x2d40
++#define MGA_WR17                      0x2d44
++#define MGA_WR18                      0x2d48
++#define MGA_WR19                      0x2d4c
++#define MGA_WR20                      0x2d50
++#define MGA_WR21                      0x2d54
++#define MGA_WR22                      0x2d58
++#define MGA_WR23                      0x2d5c
++#define MGA_WR24                      0x2d60
++#define MGA_WR25                      0x2d64
++#define MGA_WR26                      0x2d68
++#define MGA_WR27                      0x2d6c
++#define MGA_WR28                      0x2d70
++#define MGA_WR29                      0x2d74
++#define MGA_WR30                      0x2d78
++#define MGA_WR31                      0x2d7c
++#define MGA_WR32                      0x2d80
++#define MGA_WR33                      0x2d84
++#define MGA_WR34                      0x2d88
++#define MGA_WR35                      0x2d8c
++#define MGA_WR36                      0x2d90
++#define MGA_WR37                      0x2d94
++#define MGA_WR38                      0x2d98
++#define MGA_WR39                      0x2d9c
++#define MGA_WR40                      0x2da0
++#define MGA_WR41                      0x2da4
++#define MGA_WR42                      0x2da8
++#define MGA_WR43                      0x2dac
++#define MGA_WR44                      0x2db0
++#define MGA_WR45                      0x2db4
++#define MGA_WR46                      0x2db8
++#define MGA_WR47                      0x2dbc
++#define MGA_WR48                      0x2dc0
++#define MGA_WR49                      0x2dc4
++#define MGA_WR50                      0x2dc8
++#define MGA_WR51                      0x2dcc
++#define MGA_WR52                      0x2dd0
++#define MGA_WR53                      0x2dd4
++#define MGA_WR54                      0x2dd8
++#define MGA_WR55                      0x2ddc
++#define MGA_WR56                      0x2de0
++#define MGA_WR57                      0x2de4
++#define MGA_WR58                      0x2de8
++#define MGA_WR59                      0x2dec
++#define MGA_WR60                      0x2df0
++#define MGA_WR61                      0x2df4
++#define MGA_WR62                      0x2df8
++#define MGA_WR63                      0x2dfc
++#     define MGA_G400_WR_MAGIC                (1 << 6)
++#     define MGA_G400_WR56_MAGIC              0x46480000      /* 12800.0f */
++
++#define MGA_ILOAD_ALIGN               64
++#define MGA_ILOAD_MASK                (MGA_ILOAD_ALIGN - 1)
++
++#define MGA_DWGCTL_FLUSH      (MGA_OPCOD_TEXTURE_TRAP |               \
++                               MGA_ATYPE_I |                          \
++                               MGA_ZMODE_NOZCMP |                     \
++                               MGA_ARZERO |                           \
++                               MGA_SGNZERO |                          \
++                               MGA_BOP_SRC |                          \
++                               (15 << MGA_TRANS_SHIFT))
++
++#define MGA_DWGCTL_CLEAR      (MGA_OPCOD_TRAP |                       \
++                               MGA_ZMODE_NOZCMP |                     \
++                               MGA_SOLID |                            \
++                               MGA_ARZERO |                           \
++                               MGA_SGNZERO |                          \
++                               MGA_SHIFTZERO |                        \
++                               MGA_BOP_SRC |                          \
++                               (0 << MGA_TRANS_SHIFT) |               \
++                               MGA_BLTMOD_BMONOLEF |                  \
++                               MGA_TRANSC |                           \
++                               MGA_CLIPDIS)
++
++#define MGA_DWGCTL_COPY               (MGA_OPCOD_BITBLT |                     \
++                               MGA_ATYPE_RPL |                        \
++                               MGA_SGNZERO |                          \
++                               MGA_SHIFTZERO |                        \
++                               MGA_BOP_SRC |                          \
++                               (0 << MGA_TRANS_SHIFT) |               \
++                               MGA_BLTMOD_BFCOL |                     \
++                               MGA_CLIPDIS)
++
++/* Simple idle test.
++ */
++static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
++{
++      u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++      return (status == MGA_ENDPRDMASTS);
++}
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_ioc32.c git-nokia/drivers/gpu/drm-tungsten/mga_ioc32.c
+--- git/drivers/gpu/drm-tungsten/mga_ioc32.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_ioc32.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,234 @@
++
++/**
++ * \file mga_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the MGA DRM.
++ *
++ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++
++typedef struct drm32_mga_init {
++      int func;
++      u32 sarea_priv_offset;
++      int chipset;
++      int sgram;
++      unsigned int maccess;
++      unsigned int fb_cpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_cpp;
++      unsigned int depth_offset, depth_pitch;
++      unsigned int texture_offset[MGA_NR_TEX_HEAPS];
++      unsigned int texture_size[MGA_NR_TEX_HEAPS];
++      u32 fb_offset;
++      u32 mmio_offset;
++      u32 status_offset;
++      u32 warp_offset;
++      u32 primary_offset;
++      u32 buffers_offset;
++} drm_mga_init32_t;
++
++static int compat_mga_init(struct file *file, unsigned int cmd,
++                         unsigned long arg)
++{
++      drm_mga_init32_t init32;
++      drm_mga_init_t __user *init;
++      int err = 0, i;
++
++      if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++              return -EFAULT;
++
++      init = compat_alloc_user_space(sizeof(*init));
++      if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++          || __put_user(init32.func, &init->func)
++          || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++          || __put_user(init32.chipset, &init->chipset)
++          || __put_user(init32.sgram, &init->sgram)
++          || __put_user(init32.maccess, &init->maccess)
++          || __put_user(init32.fb_cpp, &init->fb_cpp)
++          || __put_user(init32.front_offset, &init->front_offset)
++          || __put_user(init32.front_pitch, &init->front_pitch)
++          || __put_user(init32.back_offset, &init->back_offset)
++          || __put_user(init32.back_pitch, &init->back_pitch)
++          || __put_user(init32.depth_cpp, &init->depth_cpp)
++          || __put_user(init32.depth_offset, &init->depth_offset)
++          || __put_user(init32.depth_pitch, &init->depth_pitch)
++          || __put_user(init32.fb_offset, &init->fb_offset)
++          || __put_user(init32.mmio_offset, &init->mmio_offset)
++          || __put_user(init32.status_offset, &init->status_offset)
++          || __put_user(init32.warp_offset, &init->warp_offset)
++          || __put_user(init32.primary_offset, &init->primary_offset)
++          || __put_user(init32.buffers_offset, &init->buffers_offset))
++              return -EFAULT;
++
++      for (i=0; i<MGA_NR_TEX_HEAPS; i++)
++      {
++              err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]);
++              err |= __put_user(init32.texture_size[i], &init->texture_size[i]);
++      }
++      if (err)
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_MGA_INIT, (unsigned long) init);
++}
++
++
++typedef struct drm_mga_getparam32 {
++      int param;
++      u32 value;
++} drm_mga_getparam32_t;
++
++
++static int compat_mga_getparam(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_mga_getparam32_t getparam32;
++      drm_mga_getparam_t __user *getparam;
++
++      if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
++              return -EFAULT;
++
++      getparam = compat_alloc_user_space(sizeof(*getparam));
++      if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
++          || __put_user(getparam32.param, &getparam->param)
++          || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
++}
++
++typedef struct drm_mga_drm_bootstrap32 {
++      u32 texture_handle;
++      u32 texture_size;
++      u32 primary_size;
++      u32 secondary_bin_count;
++      u32 secondary_bin_size;
++      u32 agp_mode;
++      u8 agp_size;
++} drm_mga_dma_bootstrap32_t;
++
++static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_mga_dma_bootstrap32_t dma_bootstrap32;
++      drm_mga_dma_bootstrap_t __user *dma_bootstrap;
++      int err;
++
++      if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
++                         sizeof(dma_bootstrap32)))
++              return -EFAULT;
++
++      dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
++      if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
++          || __put_user(dma_bootstrap32.texture_handle,
++                        &dma_bootstrap->texture_handle)
++          || __put_user(dma_bootstrap32.texture_size,
++                        &dma_bootstrap->texture_size)
++          || __put_user(dma_bootstrap32.primary_size,
++                        &dma_bootstrap->primary_size)
++          || __put_user(dma_bootstrap32.secondary_bin_count,
++                        &dma_bootstrap->secondary_bin_count)
++          || __put_user(dma_bootstrap32.secondary_bin_size,
++                        &dma_bootstrap->secondary_bin_size)
++          || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
++          || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_MGA_DMA_BOOTSTRAP,
++                      (unsigned long)dma_bootstrap);
++      if (err)
++              return err;
++
++      if (__get_user(dma_bootstrap32.texture_handle,
++                     &dma_bootstrap->texture_handle)
++          || __get_user(dma_bootstrap32.texture_size,
++                        &dma_bootstrap->texture_size)
++          || __get_user(dma_bootstrap32.primary_size,
++                        &dma_bootstrap->primary_size)
++          || __get_user(dma_bootstrap32.secondary_bin_count,
++                        &dma_bootstrap->secondary_bin_count)
++          || __get_user(dma_bootstrap32.secondary_bin_size,
++                        &dma_bootstrap->secondary_bin_size)
++          || __get_user(dma_bootstrap32.agp_mode,
++                        &dma_bootstrap->agp_mode)
++          || __get_user(dma_bootstrap32.agp_size,
++                        &dma_bootstrap->agp_size))
++              return -EFAULT;
++
++      if (copy_to_user((void __user *)arg, &dma_bootstrap32,
++                       sizeof(dma_bootstrap32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++drm_ioctl_compat_t *mga_compat_ioctls[] = {
++      [DRM_MGA_INIT] = compat_mga_init,
++      [DRM_MGA_GETPARAM] = compat_mga_getparam,
++      [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long mga_compat_ioctl(struct file *filp, unsigned int cmd,
++                       unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
++              fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_irq.c git-nokia/drivers/gpu/drm-tungsten/mga_irq.c
+--- git/drivers/gpu/drm-tungsten/mga_irq.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_irq.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,182 @@
++/* mga_irq.c -- IRQ handling for radeon -*- linux-c -*-
++ */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Eric Anholt <anholt@FreeBSD.org>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      const drm_mga_private_t *const dev_priv = 
++              (drm_mga_private_t *) dev->dev_private;
++
++      if (crtc != 0) {
++              return 0;
++      }
++
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++
++irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      int status;
++      int handled = 0;
++
++      status = MGA_READ(MGA_STATUS);
++
++      /* VBLANK interrupt */
++      if (status & MGA_VLINEPEN) {
++              MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
++              atomic_inc(&dev_priv->vbl_received);
++              drm_handle_vblank(dev, 0);
++              handled = 1;
++      }
++
++      /* SOFTRAP interrupt */
++      if (status & MGA_SOFTRAPEN) {
++              const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
++              const u32 prim_end = MGA_READ(MGA_PRIMEND);
++
++
++              MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
++
++              /* In addition to clearing the interrupt-pending bit, we
++               * have to write to MGA_PRIMEND to re-start the DMA operation.
++               */
++              if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
++                      MGA_WRITE(MGA_PRIMEND, prim_end);
++              }
++
++              atomic_inc(&dev_priv->last_fence_retired);
++              DRM_WAKEUP(&dev_priv->fence_queue);
++              handled = 1;
++      }
++
++      if (handled)
++              return IRQ_HANDLED;
++      return IRQ_NONE;
++}
++
++int mga_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++      if (crtc != 0) {
++              DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                        crtc);
++              return 0;
++      }
++
++      MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
++      return 0;
++}
++
++
++void mga_disable_vblank(struct drm_device *dev, int crtc)
++{
++      if (crtc != 0) {
++              DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                        crtc);
++      }
++
++      /* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
++       * a nice hardware counter that tracks the number of refreshes when
++       * the interrupt is disabled, and the kernel doesn't know the refresh
++       * rate to calculate an estimate.
++       */
++      /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
++}
++
++int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      unsigned int cur_fence;
++      int ret = 0;
++
++      /* Assume that the user has missed the current sequence number
++       * by about a day rather than she wants to wait for years
++       * using fences.
++       */
++      DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
++                  (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++                    - *sequence) <= (1 << 23)));
++
++      *sequence = cur_fence;
++
++      return ret;
++}
++
++void mga_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++      /* Disable *all* interrupts */
++      MGA_WRITE(MGA_IEN, 0);
++      /* Clear bits if they're already high */
++      MGA_WRITE(MGA_ICLEAR, ~0);
++}
++
++int mga_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      int ret;
++
++      ret = drm_vblank_init(dev, 1);
++      if (ret)
++              return ret;
++
++      DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
++
++      /* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
++       * in mga_enable_vblank.
++       */
++      MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
++      return 0;
++}
++
++void mga_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      /* Disable *all* interrupts */
++      MGA_WRITE(MGA_IEN, 0);
++
++      dev->irq_enabled = 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_state.c git-nokia/drivers/gpu/drm-tungsten/mga_state.c
+--- git/drivers/gpu/drm-tungsten/mga_state.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_state.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1139 @@
++/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
++ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
++ */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Jeff Hartmann <jhartmann@valinux.com>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *
++ * Rewritten by:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++/* ================================================================
++ * DMA hardware state programming functions
++ */
++
++static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
++                             struct drm_clip_rect * box)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      unsigned int pitch = dev_priv->front_pitch;
++      DMA_LOCALS;
++
++      BEGIN_DMA(2);
++
++      /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
++       */
++      if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
++              DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
++                        MGA_LEN + MGA_EXEC, 0x80000000,
++                        MGA_DWGCTL, ctx->dwgctl,
++                        MGA_LEN + MGA_EXEC, 0x80000000);
++      }
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
++                MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      DMA_LOCALS;
++
++      BEGIN_DMA(3);
++
++      DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
++                MGA_MACCESS, ctx->maccess,
++                MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
++
++      DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
++                MGA_FOGCOL, ctx->fogcolor,
++                MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
++
++      DMA_BLOCK(MGA_FCOL, ctx->fcol,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      DMA_LOCALS;
++
++      BEGIN_DMA(4);
++
++      DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
++                MGA_MACCESS, ctx->maccess,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
++                MGA_FOGCOL, ctx->fogcolor,
++                MGA_WFLAG, ctx->wflag,
++                MGA_ZORG, dev_priv->depth_offset);
++
++      DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
++                MGA_TDUALSTAGE0, ctx->tdualstage0,
++                MGA_TDUALSTAGE1, ctx->tdualstage1,
++                MGA_FCOL, ctx->fcol);
++
++      DMA_BLOCK(MGA_STENCIL, ctx->stencil,
++                MGA_STENCILCTL, ctx->stencilctl,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
++      DMA_LOCALS;
++
++      BEGIN_DMA(4);
++
++      DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
++                MGA_TEXCTL, tex->texctl,
++                MGA_TEXFILTER, tex->texfilter,
++                MGA_TEXBORDERCOL, tex->texbordercol);
++
++      DMA_BLOCK(MGA_TEXORG, tex->texorg,
++                MGA_TEXORG1, tex->texorg1,
++                MGA_TEXORG2, tex->texorg2,
++                MGA_TEXORG3, tex->texorg3);
++
++      DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++                MGA_TEXWIDTH, tex->texwidth,
++                MGA_TEXHEIGHT, tex->texheight,
++                MGA_WR24, tex->texwidth);
++
++      DMA_BLOCK(MGA_WR34, tex->texheight,
++                MGA_TEXTRANS, 0x0000ffff,
++                MGA_TEXTRANSHIGH, 0x0000ffff,
++                MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
++      DMA_LOCALS;
++
++/*    printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
++/*           tex->texctl, tex->texctl2); */
++
++      BEGIN_DMA(6);
++
++      DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
++                MGA_TEXCTL, tex->texctl,
++                MGA_TEXFILTER, tex->texfilter,
++                MGA_TEXBORDERCOL, tex->texbordercol);
++
++      DMA_BLOCK(MGA_TEXORG, tex->texorg,
++                MGA_TEXORG1, tex->texorg1,
++                MGA_TEXORG2, tex->texorg2,
++                MGA_TEXORG3, tex->texorg3);
++
++      DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++                MGA_TEXWIDTH, tex->texwidth,
++                MGA_TEXHEIGHT, tex->texheight,
++                MGA_WR49, 0x00000000);
++
++      DMA_BLOCK(MGA_WR57, 0x00000000,
++                MGA_WR53, 0x00000000,
++                MGA_WR61, 0x00000000,
++                MGA_WR52, MGA_G400_WR_MAGIC);
++
++      DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
++                MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
++                MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
++                MGA_DMAPAD, 0x00000000);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_TEXTRANS, 0x0000ffff,
++                MGA_TEXTRANSHIGH, 0x0000ffff);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
++      DMA_LOCALS;
++
++/*    printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg,  */
++/*           tex->texctl, tex->texctl2); */
++
++      BEGIN_DMA(5);
++
++      DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
++                              MGA_MAP1_ENABLE |
++                              MGA_G400_TC2_MAGIC),
++                MGA_TEXCTL, tex->texctl,
++                MGA_TEXFILTER, tex->texfilter,
++                MGA_TEXBORDERCOL, tex->texbordercol);
++
++      DMA_BLOCK(MGA_TEXORG, tex->texorg,
++                MGA_TEXORG1, tex->texorg1,
++                MGA_TEXORG2, tex->texorg2,
++                MGA_TEXORG3, tex->texorg3);
++
++      DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++                MGA_TEXWIDTH, tex->texwidth,
++                MGA_TEXHEIGHT, tex->texheight,
++                MGA_WR49, 0x00000000);
++
++      DMA_BLOCK(MGA_WR57, 0x00000000,
++                MGA_WR53, 0x00000000,
++                MGA_WR61, 0x00000000,
++                MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);
++
++      DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
++                MGA_TEXTRANS, 0x0000ffff,
++                MGA_TEXTRANSHIGH, 0x0000ffff,
++                MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int pipe = sarea_priv->warp_pipe;
++      DMA_LOCALS;
++
++      BEGIN_DMA(3);
++
++      DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
++                MGA_WVRTXSZ, 0x00000007,
++                MGA_WFLAG, 0x00000000,
++                MGA_WR24, 0x00000000);
++
++      DMA_BLOCK(MGA_WR25, 0x00000100,
++                MGA_WR34, 0x00000000,
++                MGA_WR42, 0x0000ffff,
++                MGA_WR60, 0x0000ffff);
++
++      /* Padding required to to hardware bug.
++       */
++      DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
++                             MGA_WMODE_START | dev_priv->wagp_enable));
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int pipe = sarea_priv->warp_pipe;
++      DMA_LOCALS;
++
++/*    printk("mga_g400_emit_pipe %x\n", pipe); */
++
++      BEGIN_DMA(10);
++
++      DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000);
++
++      if (pipe & MGA_T2) {
++              DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000);
++
++              DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x1e000000);
++      } else {
++              if (dev_priv->warp_pipe & MGA_T2) {
++                      /* Flush the WARP pipe */
++                      DMA_BLOCK(MGA_YDST, 0x00000000,
++                                MGA_FXLEFT, 0x00000000,
++                                MGA_FXRIGHT, 0x00000001,
++                                MGA_DWGCTL, MGA_DWGCTL_FLUSH);
++
++                      DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
++                                MGA_DWGSYNC, 0x00007000,
++                                MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
++                                MGA_LEN + MGA_EXEC, 0x00000000);
++
++                      DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
++                                              MGA_G400_TC2_MAGIC),
++                                MGA_LEN + MGA_EXEC, 0x00000000,
++                                MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
++                                MGA_DMAPAD, 0x00000000);
++              }
++
++              DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000);
++
++              DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x18000000);
++      }
++
++      DMA_BLOCK(MGA_WFLAG, 0x00000000,
++                MGA_WFLAG1, 0x00000000,
++                MGA_WR56, MGA_G400_WR56_MAGIC,
++                MGA_DMAPAD, 0x00000000);
++
++      DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0              */
++                MGA_WR57, 0x00000000, /* tex0              */
++                MGA_WR53, 0x00000000, /* tex1              */
++                MGA_WR61, 0x00000000);        /* tex1              */
++
++      DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,  /* tex0 width        */
++                MGA_WR62, MGA_G400_WR_MAGIC,  /* tex0 height       */
++                MGA_WR52, MGA_G400_WR_MAGIC,  /* tex1 width        */
++                MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height       */
++
++      /* Padding required to to hardware bug */
++      DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
++                              MGA_WMODE_START | dev_priv->wagp_enable));
++
++      ADVANCE_DMA();
++}
++
++static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++
++      if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
++              mga_g200_emit_pipe(dev_priv);
++              dev_priv->warp_pipe = sarea_priv->warp_pipe;
++      }
++
++      if (dirty & MGA_UPLOAD_CONTEXT) {
++              mga_g200_emit_context(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
++      }
++
++      if (dirty & MGA_UPLOAD_TEX0) {
++              mga_g200_emit_tex0(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
++      }
++}
++
++static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++      int multitex = sarea_priv->warp_pipe & MGA_T2;
++
++      if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
++              mga_g400_emit_pipe(dev_priv);
++              dev_priv->warp_pipe = sarea_priv->warp_pipe;
++      }
++
++      if (dirty & MGA_UPLOAD_CONTEXT) {
++              mga_g400_emit_context(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
++      }
++
++      if (dirty & MGA_UPLOAD_TEX0) {
++              mga_g400_emit_tex0(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
++      }
++
++      if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
++              mga_g400_emit_tex1(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
++      }
++}
++
++/* ================================================================
++ * SAREA state verification
++ */
++
++/* Disallow all write destinations except the front and backbuffer.
++ */
++static int mga_verify_context(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++
++      if (ctx->dstorg != dev_priv->front_offset &&
++          ctx->dstorg != dev_priv->back_offset) {
++              DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
++                        ctx->dstorg, dev_priv->front_offset,
++                        dev_priv->back_offset);
++              ctx->dstorg = 0;
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* Disallow texture reads from PCI space.
++ */
++static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
++      unsigned int org;
++
++      org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
++
++      if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
++              DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
++              tex->texorg = 0;
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int mga_verify_state(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++      int ret = 0;
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      if (dirty & MGA_UPLOAD_CONTEXT)
++              ret |= mga_verify_context(dev_priv);
++
++      if (dirty & MGA_UPLOAD_TEX0)
++              ret |= mga_verify_tex(dev_priv, 0);
++
++      if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
++              if (dirty & MGA_UPLOAD_TEX1)
++                      ret |= mga_verify_tex(dev_priv, 1);
++
++              if (dirty & MGA_UPLOAD_PIPE)
++                      ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES);
++      } else {
++              if (dirty & MGA_UPLOAD_PIPE)
++                      ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES);
++      }
++
++      return (ret == 0);
++}
++
++static int mga_verify_iload(drm_mga_private_t * dev_priv,
++                          unsigned int dstorg, unsigned int length)
++{
++      if (dstorg < dev_priv->texture_offset ||
++          dstorg + length > (dev_priv->texture_offset +
++                             dev_priv->texture_size)) {
++              DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
++              return -EINVAL;
++      }
++
++      if (length & MGA_ILOAD_MASK) {
++              DRM_ERROR("*** bad iload length: 0x%x\n",
++                        length & MGA_ILOAD_MASK);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int mga_verify_blit(drm_mga_private_t * dev_priv,
++                         unsigned int srcorg, unsigned int dstorg)
++{
++      if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
++          (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
++              DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++/* ================================================================
++ *
++ */
++
++static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      int i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_DMA(1);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      ADVANCE_DMA();
++
++      for (i = 0; i < nbox; i++) {
++              struct drm_clip_rect *box = &pbox[i];
++              u32 height = box->y2 - box->y1;
++
++              DRM_DEBUG("   from=%d,%d to=%d,%d\n",
++                        box->x1, box->y1, box->x2, box->y2);
++
++              if (clear->flags & MGA_FRONT) {
++                      BEGIN_DMA(2);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_PLNWT, clear->color_mask,
++                                MGA_YDSTLEN, (box->y1 << 16) | height,
++                                MGA_FXBNDRY, (box->x2 << 16) | box->x1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_FCOL, clear->clear_color,
++                                MGA_DSTORG, dev_priv->front_offset,
++                                MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
++
++                      ADVANCE_DMA();
++              }
++
++              if (clear->flags & MGA_BACK) {
++                      BEGIN_DMA(2);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_PLNWT, clear->color_mask,
++                                MGA_YDSTLEN, (box->y1 << 16) | height,
++                                MGA_FXBNDRY, (box->x2 << 16) | box->x1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_FCOL, clear->clear_color,
++                                MGA_DSTORG, dev_priv->back_offset,
++                                MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
++
++                      ADVANCE_DMA();
++              }
++
++              if (clear->flags & MGA_DEPTH) {
++                      BEGIN_DMA(2);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_PLNWT, clear->depth_mask,
++                                MGA_YDSTLEN, (box->y1 << 16) | height,
++                                MGA_FXBNDRY, (box->x2 << 16) | box->x1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_FCOL, clear->clear_depth,
++                                MGA_DSTORG, dev_priv->depth_offset,
++                                MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
++
++                      ADVANCE_DMA();
++              }
++
++      }
++
++      BEGIN_DMA(1);
++
++      /* Force reset of DWGCTL */
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      ADVANCE_DMA();
++
++      FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_swap(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      int i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      sarea_priv->last_frame.head = dev_priv->prim.tail;
++      sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;
++
++      BEGIN_DMA(4 + nbox);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
++                MGA_MACCESS, dev_priv->maccess,
++                MGA_SRCORG, dev_priv->back_offset,
++                MGA_AR5, dev_priv->front_pitch);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, 0xffffffff,
++                MGA_DWGCTL, MGA_DWGCTL_COPY);
++
++      for (i = 0; i < nbox; i++) {
++              struct drm_clip_rect *box = &pbox[i];
++              u32 height = box->y2 - box->y1;
++              u32 start = box->y1 * dev_priv->front_pitch;
++
++              DRM_DEBUG("   from=%d,%d to=%d,%d\n",
++                        box->x1, box->y1, box->x2, box->y2);
++
++              DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
++                        MGA_AR3, start + box->x1,
++                        MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
++                        MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
++      }
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_SRCORG, dev_priv->front_offset,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      ADVANCE_DMA();
++
++      FLUSH_DMA();
++
++      DRM_DEBUG("... done.\n");
++}
++
++static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 address = (u32) buf->bus_address;
++      u32 length = (u32) buf->used;
++      int i = 0;
++      DMA_LOCALS;
++      DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
++
++      if (buf->used) {
++              buf_priv->dispatched = 1;
++
++              MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
++
++              do {
++                      if (i < sarea_priv->nbox) {
++                              mga_emit_clip_rect(dev_priv,
++                                                 &sarea_priv->boxes[i]);
++                      }
++
++                      BEGIN_DMA(1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_DMAPAD, 0x00000000,
++                                MGA_SECADDRESS, (address |
++                                                 MGA_DMA_VERTEX),
++                                MGA_SECEND, ((address + length) |
++                                             dev_priv->dma_access));
++
++                      ADVANCE_DMA();
++              } while (++i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              AGE_BUFFER(buf_priv);
++              buf->pending = 0;
++              buf->used = 0;
++              buf_priv->dispatched = 0;
++
++              mga_freelist_put(dev, buf);
++      }
++
++      FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
++                                   unsigned int start, unsigned int end)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 address = (u32) buf->bus_address;
++      int i = 0;
++      DMA_LOCALS;
++      DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
++
++      if (start != end) {
++              buf_priv->dispatched = 1;
++
++              MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
++
++              do {
++                      if (i < sarea_priv->nbox) {
++                              mga_emit_clip_rect(dev_priv,
++                                                 &sarea_priv->boxes[i]);
++                      }
++
++                      BEGIN_DMA(1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_DMAPAD, 0x00000000,
++                                MGA_SETUPADDRESS, address + start,
++                                MGA_SETUPEND, ((address + end) |
++                                               dev_priv->dma_access));
++
++                      ADVANCE_DMA();
++              } while (++i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              AGE_BUFFER(buf_priv);
++              buf->pending = 0;
++              buf->used = 0;
++              buf_priv->dispatched = 0;
++
++              mga_freelist_put(dev, buf);
++      }
++
++      FLUSH_DMA();
++}
++
++/* This copies a 64 byte aligned agp region to the frambuffer with a
++ * standard blit, the ioctl needs to do checking.
++ */
++static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
++                                 unsigned int dstorg, unsigned int length)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
++      u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
++      u32 y2;
++      DMA_LOCALS;
++      DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
++
++      y2 = length / 64;
++
++      BEGIN_DMA(5);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      DMA_BLOCK(MGA_DSTORG, dstorg,
++                MGA_MACCESS, 0x00000000,
++                MGA_SRCORG, srcorg,
++                MGA_AR5, 64);
++
++      DMA_BLOCK(MGA_PITCH, 64,
++                MGA_PLNWT, 0xffffffff,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGCTL, MGA_DWGCTL_COPY);
++
++      DMA_BLOCK(MGA_AR0, 63,
++                MGA_AR3, 0,
++                MGA_FXBNDRY, (63 << 16) | 0,
++                MGA_YDSTLEN + MGA_EXEC, y2);
++
++      DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
++                MGA_SRCORG, dev_priv->front_offset,
++                MGA_PITCH, dev_priv->front_pitch,
++                MGA_DWGSYNC, 0x00007000);
++
++      ADVANCE_DMA();
++
++      AGE_BUFFER(buf_priv);
++
++      buf->pending = 0;
++      buf->used = 0;
++      buf_priv->dispatched = 0;
++
++      mga_freelist_put(dev, buf);
++
++      FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      u32 scandir = 0, i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_DMA(4 + nbox);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
++                MGA_PLNWT, blit->planemask,
++                MGA_SRCORG, blit->srcorg,
++                MGA_DSTORG, blit->dstorg);
++
++      DMA_BLOCK(MGA_SGN, scandir,
++                MGA_MACCESS, dev_priv->maccess,
++                MGA_AR5, blit->ydir * blit->src_pitch,
++                MGA_PITCH, blit->dst_pitch);
++
++      for (i = 0; i < nbox; i++) {
++              int srcx = pbox[i].x1 + blit->delta_sx;
++              int srcy = pbox[i].y1 + blit->delta_sy;
++              int dstx = pbox[i].x1 + blit->delta_dx;
++              int dsty = pbox[i].y1 + blit->delta_dy;
++              int h = pbox[i].y2 - pbox[i].y1;
++              int w = pbox[i].x2 - pbox[i].x1 - 1;
++              int start;
++
++              if (blit->ydir == -1) {
++                      srcy = blit->height - srcy - 1;
++              }
++
++              start = srcy * blit->src_pitch + srcx;
++
++              DMA_BLOCK(MGA_AR0, start + w,
++                        MGA_AR3, start,
++                        MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
++                        MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
++      }
++
++      /* Do something to flush AGP?
++       */
++
++      /* Force reset of DWGCTL */
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_PITCH, dev_priv->front_pitch,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      ADVANCE_DMA();
++}
++
++/* ================================================================
++ *
++ */
++
++static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_clear_t *clear = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_clear(dev, clear);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_swap(dev);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (vertex->idx < 0 || vertex->idx > dma->buf_count)
++              return -EINVAL;
++      buf = dma->buflist[vertex->idx];
++      buf_priv = buf->dev_private;
++
++      buf->used = vertex->used;
++      buf_priv->discard = vertex->discard;
++
++      if (!mga_verify_state(dev_priv)) {
++              if (vertex->discard) {
++                      if (buf_priv->dispatched == 1)
++                              AGE_BUFFER(buf_priv);
++                      buf_priv->dispatched = 0;
++                      mga_freelist_put(dev, buf);
++              }
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_vertex(dev, buf);
++
++      return 0;
++}
++
++static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_indices_t *indices = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (indices->idx < 0 || indices->idx > dma->buf_count)
++              return -EINVAL;
++
++      buf = dma->buflist[indices->idx];
++      buf_priv = buf->dev_private;
++
++      buf_priv->discard = indices->discard;
++
++      if (!mga_verify_state(dev_priv)) {
++              if (indices->discard) {
++                      if (buf_priv->dispatched == 1)
++                              AGE_BUFFER(buf_priv);
++                      buf_priv->dispatched = 0;
++                      mga_freelist_put(dev, buf);
++              }
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
++
++      return 0;
++}
++
++static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_iload_t *iload = data;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++#if 0
++      if (mga_do_wait_for_idle(dev_priv) < 0) {
++              if (MGA_DMA_DEBUG)
++                      DRM_INFO("-EBUSY\n");
++              return -EBUSY;
++      }
++#endif
++      if (iload->idx < 0 || iload->idx > dma->buf_count)
++              return -EINVAL;
++
++      buf = dma->buflist[iload->idx];
++      buf_priv = buf->dev_private;
++
++      if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
++              mga_freelist_put(dev, buf);
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_blit_t *blit = data;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
++              return -EINVAL;
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_blit(dev, blit);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      switch (param->param) {
++      case MGA_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      case MGA_PARAM_CARD_TYPE:
++              value = dev_priv->chipset;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      u32 *fence = data;
++      DMA_LOCALS;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      /* I would normal do this assignment in the declaration of fence,
++       * but dev_priv may be NULL.
++       */
++
++      *fence = dev_priv->next_fence_to_post;
++      dev_priv->next_fence_to_post++;
++
++      BEGIN_DMA(1);
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_SOFTRAP, 0x00000000);
++      ADVANCE_DMA();
++
++      return 0;
++}
++
++static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      u32 *fence = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      mga_driver_fence_wait(dev, fence);
++
++      return 0;
++}
++
++struct drm_ioctl_desc mga_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++};
++
++int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_ucode.h git-nokia/drivers/gpu/drm-tungsten/mga_ucode.h
+--- git/drivers/gpu/drm-tungsten/mga_ucode.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_ucode.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,11645 @@
++/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*-
++ * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com
++ *
++ * Copyright 1999 Matrox Graphics Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
++ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Kernel-based WARP engine management:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * WARP pipes are named according to the functions they perform, where:
++ *
++ *   - T stands for computation of texture stage 0
++ *   - T2 stands for computation of both texture stage 0 and texture stage 1
++ *   - G stands for computation of triangle intensity (Gouraud interpolation)
++ *   - Z stands for computation of Z buffer interpolation
++ *   - S stands for computation of specular highlight
++ *   - A stands for computation of the alpha channel
++ *   - F stands for computation of vertex fog interpolation
++ */
++
++static unsigned char warp_g200_tgz[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x72, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x60, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x03, 0x80, 0x0A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x57, 0x39, 0x20, 0xE9,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x2B, 0x32, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x16, 0x28, 0x20, 0xE9,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x2B, 0x20, 0xE9,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x85, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x84, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x82, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x7F, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgza[] = {
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x7D, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x6B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x44, 0x4C, 0xB6,
++      0x25, 0x44, 0x54, 0xB6,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x07, 0xC0, 0x44, 0xC6,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1F, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x3F, 0x3D, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x07, 0x20,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0x26, 0x1F, 0xDF,
++      0x9D, 0x1F, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x9E, 0x3F, 0x4F, 0xE9,
++
++      0x07, 0x07, 0x1F, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x9C, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x7A, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x79, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x77, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x74, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzaf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x83, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x6F, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x2D, 0x44, 0x4C, 0xB6,
++      0x25, 0x44, 0x54, 0xB6,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x07, 0xC0, 0x44, 0xC6,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x2D, 0x20,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x1F, 0x62, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x07, 0x20,
++
++      0x3F, 0x3D, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x35, 0x17, 0x4F, 0xE9,
++
++      0x1F, 0x26, 0x1F, 0xDF,
++      0x9D, 0x1F, 0x4F, 0xE9,
++
++      0x9E, 0x3F, 0x4F, 0xE9,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x07, 0x07, 0x1F, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x9C, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x74, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x73, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x71, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6E, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x7F, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x6B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0x35, 0x17, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x78, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x77, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x75, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x72, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzs[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x8B, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x77, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x8F, 0x20,
++
++      0xA5, 0x37, 0x4F, 0xE9,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0xA3, 0x80, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0xA1, 0x1F, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x6C, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6B, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x69, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzsa[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x8F, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x7B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x0D, 0x44, 0x4C, 0xB6,
++      0x05, 0x44, 0x54, 0xB6,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x0F, 0x20,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0xA5, 0x37, 0x4F, 0xE9,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2F, 0xC0, 0x44, 0xC6,
++      0xA3, 0x80, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0x9D, 0x17, 0x4F, 0xE9,
++
++      0xA1, 0x1F, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x9E, 0x37, 0x4F, 0xE9,
++      0x2F, 0x17, 0x2F, 0xAF,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x9C, 0x80, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x68, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x67, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x65, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x62, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzsaf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x94, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x80, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0x2D, 0x44, 0x4C, 0xB6,
++      0x25, 0x44, 0x54, 0xB6,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x0F, 0x20,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x07, 0xC0, 0x44, 0xC6,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0xA5, 0x37, 0x4F, 0xE9,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x3E, 0x3D, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x07, 0x20,
++
++      0x2F, 0x20,
++      0x00, 0xE0,
++      0xA3, 0x0F, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0xA1, 0x1F, 0x4F, 0xE9,
++
++      0x1E, 0x26, 0x1E, 0xDF,
++      0x9D, 0x1E, 0x4F, 0xE9,
++
++      0x35, 0x17, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x07, 0x07, 0x1E, 0xAF,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x9E, 0x3E, 0x4F, 0xE9,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x9C, 0x80, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x63, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x62, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x60, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x5D, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzsf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x8F, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x7B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x0F, 0x20,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0xA5, 0x37, 0x4F, 0xE9,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2F, 0x20,
++      0x00, 0xE0,
++      0xA3, 0x80, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0x35, 0x17, 0x4F, 0xE9,
++
++      0xA1, 0x1F, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x68, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x67, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x65, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x62, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g400_t2gz[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x78, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x69, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x25, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x9F, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xBE, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x7D, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gza[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x7C, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x6D, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x29, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x74, 0xC6,
++      0x3D, 0xCF, 0x74, 0xC2,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB4,
++      0x02, 0x44, 0x64, 0xB4,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x9B, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xBA, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x79, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzaf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x81, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x72, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x2E, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x0F, 0xCF, 0x74, 0xC6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB4,
++      0x02, 0x44, 0x64, 0xB4,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x55, 0xB6,
++      0x02, 0x45, 0x65, 0xB6,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x96, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xB5, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x74, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x7D, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x6E, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0F, 0xCF, 0x75, 0xC6,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x28, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x0F, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB4,
++      0x02, 0x44, 0x64, 0xB4,
++
++      0x2A, 0x45, 0x55, 0xB6,
++      0x1A, 0x45, 0x65, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x9A, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xBB, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x78, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzs[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x85, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x76, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x31, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x20,
++      0x1A, 0x20,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA7, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x92, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xB2, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x70, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzsa[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x8A, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x7B, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x36, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x0F, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x8D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xAD, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x6B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzsaf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x8E, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x7F, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x3A, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x0F, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x55, 0xB6,
++      0x02, 0x45, 0x65, 0xB6,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x89, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xA9, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x67, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzsf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x8A, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x7B, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x36, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x0F, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB6,
++      0x1A, 0x45, 0x65, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x8D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xAD, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x6B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgz[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x58, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x4A, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x1D, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xAF, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xD6, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x9D, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgza[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x5C, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x4E, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x27, 0xCF, 0x74, 0xC6,
++      0x3D, 0xCF, 0x74, 0xC2,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x20, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB4,
++      0x02, 0x44, 0x54, 0xB4,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xAB, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xD3, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x99, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzaf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x61, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x53, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x26, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x27, 0xCF, 0x74, 0xC6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB4,
++      0x02, 0x44, 0x54, 0xB4,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x4D, 0xB6,
++      0x02, 0x45, 0x55, 0xB6,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xA6, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xCD, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x94, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x5D, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x4F, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x27, 0xCF, 0x75, 0xC6,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x20, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x27, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB4,
++      0x02, 0x44, 0x54, 0xB4,
++
++      0x2A, 0x45, 0x4D, 0xB6,
++      0x1A, 0x45, 0x55, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xAA, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xD3, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x98, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzs[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x65, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x57, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x29, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA7, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xA2, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xCA, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x90, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzsa[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x6A, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x5C, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x2E, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x27, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0x9D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xC5, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x8B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzsaf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x6E, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x60, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x32, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x27, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x4D, 0xB6,
++      0x02, 0x45, 0x55, 0xB6,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0x99, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xC1, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x87, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzsf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x6A, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x5C, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x2E, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x27, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB6,
++      0x1A, 0x45, 0x55, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0x9D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xC5, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x8B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_warp.c git-nokia/drivers/gpu/drm-tungsten/mga_warp.c
+--- git/drivers/gpu/drm-tungsten/mga_warp.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_warp.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,198 @@
++/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
++ * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++#include "mga_ucode.h"
++
++#define MGA_WARP_CODE_ALIGN           256     /* in bytes */
++
++#define WARP_UCODE_SIZE( which )                                      \
++      ((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN)
++
++#define WARP_UCODE_INSTALL( which, where )                            \
++do {                                                                  \
++      DRM_DEBUG( " pcbase = 0x%08lx  vcbase = %p\n", pcbase, vcbase );\
++      dev_priv->warp_pipe_phys[where] = pcbase;                       \
++      memcpy( vcbase, which, sizeof(which) );                         \
++      pcbase += WARP_UCODE_SIZE( which );                             \
++      vcbase += WARP_UCODE_SIZE( which );                             \
++} while (0)
++
++static const unsigned int mga_warp_g400_microcode_size =
++             (WARP_UCODE_SIZE(warp_g400_tgz) +
++              WARP_UCODE_SIZE(warp_g400_tgza) +
++              WARP_UCODE_SIZE(warp_g400_tgzaf) +
++              WARP_UCODE_SIZE(warp_g400_tgzf) +
++              WARP_UCODE_SIZE(warp_g400_tgzs) +
++              WARP_UCODE_SIZE(warp_g400_tgzsa) +
++              WARP_UCODE_SIZE(warp_g400_tgzsaf) +
++              WARP_UCODE_SIZE(warp_g400_tgzsf) +
++              WARP_UCODE_SIZE(warp_g400_t2gz) +
++              WARP_UCODE_SIZE(warp_g400_t2gza) +
++              WARP_UCODE_SIZE(warp_g400_t2gzaf) +
++              WARP_UCODE_SIZE(warp_g400_t2gzf) +
++              WARP_UCODE_SIZE(warp_g400_t2gzs) +
++              WARP_UCODE_SIZE(warp_g400_t2gzsa) +
++              WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
++              WARP_UCODE_SIZE(warp_g400_t2gzsf));
++
++static const unsigned int mga_warp_g200_microcode_size =
++             (WARP_UCODE_SIZE(warp_g200_tgz) +
++              WARP_UCODE_SIZE(warp_g200_tgza) +
++              WARP_UCODE_SIZE(warp_g200_tgzaf) +
++              WARP_UCODE_SIZE(warp_g200_tgzf) +
++              WARP_UCODE_SIZE(warp_g200_tgzs) +
++              WARP_UCODE_SIZE(warp_g200_tgzsa) +
++              WARP_UCODE_SIZE(warp_g200_tgzsaf) +
++              WARP_UCODE_SIZE(warp_g200_tgzsf));
++
++
++unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
++{
++      switch (dev_priv->chipset) {
++      case MGA_CARD_TYPE_G400:
++      case MGA_CARD_TYPE_G550:
++              return PAGE_ALIGN(mga_warp_g400_microcode_size);
++      case MGA_CARD_TYPE_G200:
++              return PAGE_ALIGN(mga_warp_g200_microcode_size);
++      default:
++              DRM_ERROR("Unknown chipset value: 0x%x\n", dev_priv->chipset);
++              return 0;
++      }
++}
++
++static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
++{
++      unsigned char *vcbase = dev_priv->warp->handle;
++      unsigned long pcbase = dev_priv->warp->offset;
++
++      memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
++
++      WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
++      WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
++      WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA);
++      WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF);
++      WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS);
++      WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF);
++      WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA);
++      WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF);
++
++      WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ);
++      WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF);
++      WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA);
++      WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF);
++      WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS);
++      WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF);
++      WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA);
++      WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF);
++
++      return 0;
++}
++
++static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
++{
++      unsigned char *vcbase = dev_priv->warp->handle;
++      unsigned long pcbase = dev_priv->warp->offset;
++
++      memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
++
++      WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
++      WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
++      WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
++      WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
++      WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
++      WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
++      WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
++      WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
++
++      return 0;
++}
++
++int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
++{
++      const unsigned int size = mga_warp_microcode_size(dev_priv);
++
++      DRM_DEBUG("MGA ucode size = %d bytes\n", size);
++      if (size > dev_priv->warp->size) {
++              DRM_ERROR("microcode too large! (%u > %lu)\n",
++                        size, dev_priv->warp->size);
++              return -ENOMEM;
++      }
++
++      switch (dev_priv->chipset) {
++      case MGA_CARD_TYPE_G400:
++      case MGA_CARD_TYPE_G550:
++              return mga_warp_install_g400_microcode(dev_priv);
++      case MGA_CARD_TYPE_G200:
++              return mga_warp_install_g200_microcode(dev_priv);
++      default:
++              return -EINVAL;
++      }
++}
++
++#define WMISC_EXPECTED                (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
++
++int mga_warp_init(drm_mga_private_t * dev_priv)
++{
++      u32 wmisc;
++
++      /* FIXME: Get rid of these damned magic numbers...
++       */
++      switch (dev_priv->chipset) {
++      case MGA_CARD_TYPE_G400:
++      case MGA_CARD_TYPE_G550:
++              MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
++              MGA_WRITE(MGA_WGETMSB, 0x00000E00);
++              MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
++              MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
++              break;
++      case MGA_CARD_TYPE_G200:
++              MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
++              MGA_WRITE(MGA_WGETMSB, 0x1606);
++              MGA_WRITE(MGA_WVRTXSZ, 7);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
++                            MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
++      wmisc = MGA_READ(MGA_WMISC);
++      if (wmisc != WMISC_EXPECTED) {
++              DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
++                        wmisc, WMISC_EXPECTED);
++              return -EINVAL;
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_bo.c git-nokia/drivers/gpu/drm-tungsten/nouveau_bo.c
+--- git/drivers/gpu/drm-tungsten/nouveau_bo.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_bo.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,296 @@
++/*
++ * Copyright 2007 Dave Airlied
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++/*
++ * Authors: Dave Airlied <airlied@linux.ie>
++ *        Ben Skeggs   <darktama@iinet.net.au>
++ *        Jeremy Kolb  <jkolb@brandeis.edu>
++ */
++
++#include "drmP.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++static struct drm_ttm_backend *
++nouveau_bo_create_ttm_backend_entry(struct drm_device * dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      switch (dev_priv->gart_info.type) {
++      case NOUVEAU_GART_AGP:
++              return drm_agp_init_ttm(dev);
++      case NOUVEAU_GART_SGDMA:
++              return nouveau_sgdma_init_ttm(dev);
++      default:
++              DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
++              break;
++      }
++
++      return NULL;
++}
++
++static int
++nouveau_bo_fence_type(struct drm_buffer_object *bo,
++                    uint32_t *fclass, uint32_t *type)
++{
++      /* When we get called, *fclass is set to the requested fence class */
++
++      if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
++              *type = 3;
++      else
++              *type = 1;
++      return 0;
++
++}
++
++static int
++nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
++{
++      /* We'll do this from user space. */
++      return 0;
++}
++
++static int
++nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
++                       struct drm_mem_type_manager *man)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      switch (type) {
++      case DRM_BO_MEM_LOCAL:
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                           _DRM_FLAG_MEMTYPE_CACHED;
++              man->drm_bus_maptype = 0;
++              break;
++      case DRM_BO_MEM_VRAM:
++              man->flags = _DRM_FLAG_MEMTYPE_FIXED |
++                           _DRM_FLAG_MEMTYPE_MAPPABLE |
++                           _DRM_FLAG_NEEDS_IOREMAP;
++              man->io_addr = NULL;
++              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
++              man->io_offset = drm_get_resource_start(dev, 1);
++              man->io_size = drm_get_resource_len(dev, 1);
++              if (man->io_size > nouveau_mem_fb_amount(dev))
++                      man->io_size = nouveau_mem_fb_amount(dev);
++              break;
++      case DRM_BO_MEM_PRIV0:
++              /* Unmappable VRAM */
++              man->flags = _DRM_FLAG_MEMTYPE_CMA;
++              man->drm_bus_maptype = 0;
++              break;
++      case DRM_BO_MEM_TT:
++              switch (dev_priv->gart_info.type) {
++              case NOUVEAU_GART_AGP:
++                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                                   _DRM_FLAG_MEMTYPE_CSELECT |
++                                   _DRM_FLAG_NEEDS_IOREMAP;
++                      man->drm_bus_maptype = _DRM_AGP;
++                      break;
++              case NOUVEAU_GART_SGDMA:
++                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                                   _DRM_FLAG_MEMTYPE_CSELECT |
++                                   _DRM_FLAG_MEMTYPE_CMA;
++                      man->drm_bus_maptype = _DRM_SCATTER_GATHER;
++                      break;
++              default:
++                      DRM_ERROR("Unknown GART type: %d\n",
++                                dev_priv->gart_info.type);
++                      return -EINVAL;
++              }
++
++              man->io_offset  = dev_priv->gart_info.aper_base;
++              man->io_size    = dev_priv->gart_info.aper_size;
++              man->io_addr   = NULL;
++              break;
++      default:
++              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++static uint64_t
++nouveau_bo_evict_flags(struct drm_buffer_object *bo)
++{
++      switch (bo->mem.mem_type) {
++      case DRM_BO_MEM_LOCAL:
++      case DRM_BO_MEM_TT:
++              return DRM_BO_FLAG_MEM_LOCAL;
++      default:
++              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
++      }
++      return 0;
++}
++
++
++/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
++ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
++ */
++static int
++nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
++                   struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      uint32_t srch, dsth, page_count;
++
++      /* Can happen during init/takedown */
++      if (!dchan->chan)
++              return -EINVAL;
++
++      srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
++      dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
++      if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
++              dchan->m2mf_dma_source = srch;
++              dchan->m2mf_dma_destin = dsth;
++
++              BEGIN_RING(NvSubM2MF,
++                         NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
++              OUT_RING  (dchan->m2mf_dma_source);
++              OUT_RING  (dchan->m2mf_dma_destin);
++      }
++
++      page_count = new_mem->num_pages;
++      while (page_count) {
++              int line_count = (page_count > 2047) ? 2047 : page_count;
++
++              BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
++              OUT_RING  (old_mem->mm_node->start << PAGE_SHIFT);
++              OUT_RING  (new_mem->mm_node->start << PAGE_SHIFT);
++              OUT_RING  (PAGE_SIZE); /* src_pitch */
++              OUT_RING  (PAGE_SIZE); /* dst_pitch */
++              OUT_RING  (PAGE_SIZE); /* line_length */
++              OUT_RING  (line_count);
++              OUT_RING  ((1<<8)|(1<<0));
++              OUT_RING  (0);
++              BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
++              OUT_RING  (0);
++
++              page_count -= line_count;
++      }
++
++      return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
++                                       DRM_FENCE_TYPE_EXE, 0, new_mem);
++}
++
++/* Flip pages into the GART and move if we can. */
++static int
++nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
++                    struct drm_bo_mem_reg *new_mem)
++{
++        struct drm_device *dev = bo->dev;
++        struct drm_bo_mem_reg tmp_mem;
++        int ret;
++
++        tmp_mem = *new_mem;
++        tmp_mem.mm_node = NULL;
++        tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
++                                DRM_BO_FLAG_CACHED |
++                                DRM_BO_FLAG_FORCE_CACHING);
++
++        ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
++        if (ret)
++                return ret;
++
++        ret = drm_ttm_bind(bo->ttm, &tmp_mem);
++        if (ret)
++                goto out_cleanup;
++
++        ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
++        if (ret)
++                goto out_cleanup;
++
++        ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
++
++out_cleanup:
++        if (tmp_mem.mm_node) {
++                mutex_lock(&dev->struct_mutex);
++                if (tmp_mem.mm_node != bo->pinned_node)
++                        drm_mm_put_block(tmp_mem.mm_node);
++                tmp_mem.mm_node = NULL;
++                mutex_unlock(&dev->struct_mutex);
++        }
++
++        return ret;
++}
++
++static int
++nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
++              struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++      if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++              if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++      else
++      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++      else {
++              if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++
++      return 0;
++}
++
++static void
++nouveau_bo_flush_ttm(struct drm_ttm *ttm)
++{
++}
++
++static uint32_t nouveau_mem_prios[]  = {
++      DRM_BO_MEM_PRIV0,
++      DRM_BO_MEM_VRAM,
++      DRM_BO_MEM_TT,
++      DRM_BO_MEM_LOCAL
++};
++static uint32_t nouveau_busy_prios[] = {
++      DRM_BO_MEM_TT,
++      DRM_BO_MEM_PRIV0,
++      DRM_BO_MEM_VRAM,
++      DRM_BO_MEM_LOCAL
++};
++
++struct drm_bo_driver nouveau_bo_driver = {
++      .mem_type_prio = nouveau_mem_prios,
++      .mem_busy_prio = nouveau_busy_prios,
++      .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
++      .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
++      .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
++      .fence_type = nouveau_bo_fence_type,
++      .invalidate_caches = nouveau_bo_invalidate_caches,
++      .init_mem_type = nouveau_bo_init_mem_type,
++      .evict_flags = nouveau_bo_evict_flags,
++      .move = nouveau_bo_move,
++      .ttm_cache_flush= nouveau_bo_flush_ttm,
++      .command_stream_barrier = NULL
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_dma.c git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.c
+--- git/drivers/gpu/drm-tungsten/nouveau_dma.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,172 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++int
++nouveau_dma_channel_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      struct mem_block *pushbuf;
++      int grclass, ret, i;
++
++      DRM_DEBUG("\n");
++
++      pushbuf = nouveau_mem_alloc(dev, 0, 0x8000,
++                                  NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
++                                  (struct drm_file *)-2);
++      if (!pushbuf) {
++              DRM_ERROR("Failed to allocate DMA push buffer\n");
++              return -ENOMEM;
++      }
++
++      /* Allocate channel */
++      ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2,
++                               pushbuf, NvDmaFB, NvDmaTT);
++      if (ret) {
++              DRM_ERROR("Error allocating GPU channel: %d\n", ret);
++              return ret;
++      }
++      DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id);
++
++      /* Map push buffer */
++      drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev);
++      if (!dchan->chan->pushbuf_mem->map->handle) {
++              DRM_ERROR("Failed to ioremap push buffer\n");
++              return -EINVAL;
++      }
++      dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle;
++
++      /* Initialise DMA vars */
++      dchan->max  = (dchan->chan->pushbuf_mem->size >> 2) - 2;
++      dchan->put  = dchan->chan->pushbuf_base >> 2;
++      dchan->cur  = dchan->put;
++      dchan->free = dchan->max - dchan->cur;
++
++      /* Insert NOPS for NOUVEAU_DMA_SKIPS */
++      dchan->free -= NOUVEAU_DMA_SKIPS;
++      dchan->push_free = NOUVEAU_DMA_SKIPS;
++      for (i=0; i < NOUVEAU_DMA_SKIPS; i++)
++              OUT_RING(0);
++
++      /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
++      if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1,
++                                        &dchan->notify0_offset))) {
++              DRM_ERROR("Error allocating NvNotify0: %d\n", ret);
++              return ret;
++      }
++
++      /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
++      if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT;
++      else                             grclass = NV50_MEMORY_TO_MEMORY_FORMAT;
++      if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) {
++              DRM_ERROR("Error creating NvM2MF: %d\n", ret);
++              return ret;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF,
++                                        gpuobj, NULL))) {
++              DRM_ERROR("Error referencing NvM2MF: %d\n", ret);
++              return ret;
++      }
++      dchan->m2mf_dma_source = NvDmaFB;
++      dchan->m2mf_dma_destin = NvDmaFB;
++
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
++      OUT_RING  (NvM2MF);
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
++      OUT_RING  (NvNotify0);
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
++      OUT_RING  (dchan->m2mf_dma_source);
++      OUT_RING  (dchan->m2mf_dma_destin);
++      FIRE_RING();
++
++      return 0;
++}
++
++void
++nouveau_dma_channel_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++
++      DRM_DEBUG("\n");
++
++      if (dchan->chan) {
++              nouveau_fifo_free(dchan->chan);
++              dchan->chan = NULL;
++      }
++}
++
++#define READ_GET() ((NV_READ(dchan->chan->get) -                               \
++                  dchan->chan->pushbuf_base) >> 2)
++#define WRITE_PUT(val) do {                                                    \
++      NV_WRITE(dchan->chan->put,                                             \
++               ((val) << 2) + dchan->chan->pushbuf_base);                    \
++} while(0)
++
++int
++nouveau_dma_wait(struct drm_device *dev, int size)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++      uint32_t get;
++
++      while (dchan->free < size) {
++              get = READ_GET();
++
++              if (dchan->put >= get) {
++                      dchan->free = dchan->max - dchan->cur;
++
++                      if (dchan->free < size) {
++                              dchan->push_free = 1;
++                              OUT_RING(0x20000000|dchan->chan->pushbuf_base);
++                              if (get <= NOUVEAU_DMA_SKIPS) {
++                                      /*corner case - will be idle*/
++                                      if (dchan->put <= NOUVEAU_DMA_SKIPS)
++                                              WRITE_PUT(NOUVEAU_DMA_SKIPS + 1);
++
++                                      do {
++                                              get = READ_GET();
++                                      } while (get <= NOUVEAU_DMA_SKIPS);
++                              }
++
++                              WRITE_PUT(NOUVEAU_DMA_SKIPS);
++                              dchan->cur  = dchan->put = NOUVEAU_DMA_SKIPS;
++                              dchan->free = get - (NOUVEAU_DMA_SKIPS + 1);
++                      }
++              } else {
++                      dchan->free = get - dchan->cur - 1;
++              }
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_dma.h git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.h
+--- git/drivers/gpu/drm-tungsten/nouveau_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,96 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_DMA_H__
++#define __NOUVEAU_DMA_H__
++
++typedef enum {
++      NvSubM2MF       = 0,
++} nouveau_subchannel_id_t;
++
++typedef enum {
++      NvM2MF          = 0x80039001,
++      NvDmaFB         = 0x8003d001,
++      NvDmaTT         = 0x8003d002,
++      NvNotify0       = 0x8003d003
++} nouveau_object_handle_t;
++
++#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
++#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY                     0x00000180
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE                     0x00000184
++#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c
++
++#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
++
++#define BEGIN_RING(subc, mthd, cnt) do {                                       \
++      int push_size = (cnt) + 1;                                             \
++      if (dchan->push_free) {                                                \
++              DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free);  \
++              break;                                                         \
++      }                                                                      \
++      if (dchan->free < push_size) {                                         \
++              if (nouveau_dma_wait(dev, push_size)) {                        \
++                      DRM_ERROR("FIFO timeout\n");                           \
++                      break;                                                 \
++              }                                                              \
++      }                                                                      \
++      dchan->free -= push_size;                                              \
++      dchan->push_free = push_size;                                          \
++      OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd);                           \
++} while(0)
++
++#define OUT_RING(data) do {                                                    \
++      if (dchan->push_free == 0) {                                           \
++              DRM_ERROR("no space left in packet\n");                        \
++              break;                                                         \
++      }                                                                      \
++      dchan->pushbuf[dchan->cur++] = (data);                                 \
++      dchan->push_free--;                                                    \
++} while(0)
++
++#define FIRE_RING() do {                                                       \
++      if (dchan->push_free) {                                                \
++              DRM_ERROR("packet incomplete: %d\n", dchan->push_free);        \
++              break;                                                         \
++      }                                                                      \
++      if (dchan->cur != dchan->put) {                                        \
++              DRM_MEMORYBARRIER();                                           \
++              dchan->put = dchan->cur;                                       \
++              NV_WRITE(dchan->chan->put, dchan->put << 2);                   \
++      }                                                                      \
++} while(0)
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drm.h git-nokia/drivers/gpu/drm-tungsten/nouveau_drm.h
+--- git/drivers/gpu/drm-tungsten/nouveau_drm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drm.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,184 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_DRM_H__
++#define __NOUVEAU_DRM_H__
++
++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11
++
++struct drm_nouveau_channel_alloc {
++      uint32_t     fb_ctxdma_handle;
++      uint32_t     tt_ctxdma_handle;
++
++      int          channel;
++      uint32_t     put_base;
++      /* FIFO control regs */
++      drm_handle_t ctrl;
++      int          ctrl_size;
++      /* DMA command buffer */
++      drm_handle_t cmdbuf;
++      int          cmdbuf_size;
++      /* Notifier memory */
++      drm_handle_t notifier;
++      int          notifier_size;
++};
++
++struct drm_nouveau_channel_free {
++      int channel;
++};
++
++struct drm_nouveau_grobj_alloc {
++      int      channel;
++      uint32_t handle;
++      int      class;
++};
++
++#define NOUVEAU_MEM_ACCESS_RO 1
++#define NOUVEAU_MEM_ACCESS_WO 2
++#define NOUVEAU_MEM_ACCESS_RW 3
++struct drm_nouveau_notifierobj_alloc {
++      int      channel;
++      uint32_t handle;
++      int      count;
++
++      uint32_t offset;
++};
++
++struct drm_nouveau_gpuobj_free {
++      int      channel;
++      uint32_t handle;
++};
++
++/* This is needed to avoid a race condition.
++ * Otherwise you may be writing in the fetch area.
++ * Is this large enough, as it's only 32 bytes, and the maximum fetch size is 256 bytes?
++ */
++#define NOUVEAU_DMA_SKIPS 8
++
++#define NOUVEAU_MEM_FB                        0x00000001
++#define NOUVEAU_MEM_AGP                       0x00000002
++#define NOUVEAU_MEM_FB_ACCEPTABLE     0x00000004
++#define NOUVEAU_MEM_AGP_ACCEPTABLE    0x00000008
++#define NOUVEAU_MEM_PCI                       0x00000010
++#define NOUVEAU_MEM_PCI_ACCEPTABLE    0x00000020
++#define NOUVEAU_MEM_PINNED            0x00000040
++#define NOUVEAU_MEM_USER_BACKED               0x00000080
++#define NOUVEAU_MEM_MAPPED            0x00000100
++#define NOUVEAU_MEM_TILE              0x00000200
++#define NOUVEAU_MEM_TILE_ZETA         0x00000400
++#define NOUVEAU_MEM_INSTANCE          0x01000000 /* internal */
++#define NOUVEAU_MEM_NOTIFIER            0x02000000 /* internal */
++#define NOUVEAU_MEM_NOVM              0x04000000 /* internal */
++#define NOUVEAU_MEM_USER              0x08000000 /* internal */
++#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \
++                            NOUVEAU_MEM_NOTIFIER | \
++                            NOUVEAU_MEM_NOVM | \
++                            NOUVEAU_MEM_USER)
++
++struct drm_nouveau_mem_alloc {
++      int flags;
++      int alignment;
++      uint64_t size;  // in bytes
++      uint64_t offset;
++      drm_handle_t map_handle;
++};
++
++struct drm_nouveau_mem_free {
++      uint64_t offset;
++      int flags;
++};
++
++struct drm_nouveau_mem_tile {
++      uint64_t offset;
++      uint64_t delta;
++      uint64_t size;
++      int flags;
++};
++
++/* FIXME : maybe unify {GET,SET}PARAMs */
++#define NOUVEAU_GETPARAM_PCI_VENDOR      3
++#define NOUVEAU_GETPARAM_PCI_DEVICE      4
++#define NOUVEAU_GETPARAM_BUS_TYPE        5
++#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
++#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
++#define NOUVEAU_GETPARAM_FB_SIZE         8
++#define NOUVEAU_GETPARAM_AGP_SIZE        9
++#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
++#define NOUVEAU_GETPARAM_CHIPSET_ID      11
++struct drm_nouveau_getparam {
++      uint64_t param;
++      uint64_t value;
++};
++
++#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1
++#define NOUVEAU_SETPARAM_CMDBUF_SIZE     2
++struct drm_nouveau_setparam {
++      uint64_t param;
++      uint64_t value;
++};
++
++enum nouveau_card_type {
++      NV_UNKNOWN =0,
++      NV_04      =4,
++      NV_05      =5,
++      NV_10      =10,
++      NV_11      =11,
++      NV_17      =17,
++      NV_20      =20,
++      NV_30      =30,
++      NV_40      =40,
++      NV_44      =44,
++      NV_50      =50,
++      NV_LAST    =0xffff,
++};
++
++enum nouveau_bus_type {
++      NV_AGP     =0,
++      NV_PCI     =1,
++      NV_PCIE    =2,
++};
++
++#define NOUVEAU_MAX_SAREA_CLIPRECTS 16
++
++struct drm_nouveau_sarea {
++      /* the cliprects */
++      struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS];
++      unsigned int nbox;
++};
++
++#define DRM_NOUVEAU_CARD_INIT          0x00
++#define DRM_NOUVEAU_GETPARAM           0x01
++#define DRM_NOUVEAU_SETPARAM           0x02
++#define DRM_NOUVEAU_CHANNEL_ALLOC      0x03
++#define DRM_NOUVEAU_CHANNEL_FREE       0x04
++#define DRM_NOUVEAU_GROBJ_ALLOC        0x05
++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x06
++#define DRM_NOUVEAU_GPUOBJ_FREE        0x07
++#define DRM_NOUVEAU_MEM_ALLOC          0x08
++#define DRM_NOUVEAU_MEM_FREE           0x09
++#define DRM_NOUVEAU_MEM_TILE           0x0a
++#define DRM_NOUVEAU_SUSPEND            0x0b
++#define DRM_NOUVEAU_RESUME             0x0c
++
++#endif /* __NOUVEAU_DRM_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drv.c git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.c
+--- git/drivers/gpu/drm-tungsten/nouveau_drv.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,120 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      {
++              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
++              .class = PCI_BASE_CLASS_DISPLAY << 16,
++              .class_mask  = 0xff << 16,
++      },
++      {
++              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
++              .class = PCI_BASE_CLASS_DISPLAY << 16,
++              .class_mask  = 0xff << 16,
++      }
++};
++
++extern struct drm_ioctl_desc nouveau_ioctls[];
++extern int nouveau_max_ioctl;
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++              DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
++              DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .load = nouveau_load,
++      .firstopen = nouveau_firstopen,
++      .lastclose = nouveau_lastclose,
++      .unload = nouveau_unload,
++      .preclose = nouveau_preclose,
++      .irq_preinstall = nouveau_irq_preinstall,
++      .irq_postinstall = nouveau_irq_postinstall,
++      .irq_uninstall = nouveau_irq_uninstall,
++      .irq_handler = nouveau_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = nouveau_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = nouveau_compat_ioctl,
++#endif
++      },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++        .bo_driver = &nouveau_bo_driver,
++        .fence_driver = &nouveau_fence_driver,
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++#ifdef GIT_REVISION
++      .date = GIT_REVISION,
++#else
++      .date = DRIVER_DATE,
++#endif
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init nouveau_init(void)
++{
++      driver.num_ioctls = nouveau_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit nouveau_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(nouveau_init);
++module_exit(nouveau_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drv.h git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.h
+--- git/drivers/gpu/drm-tungsten/nouveau_drv.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,621 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_DRV_H__
++#define __NOUVEAU_DRV_H__
++
++#define DRIVER_AUTHOR         "Stephane Marchesin"
++#define DRIVER_EMAIL          "dri-devel@lists.sourceforge.net"
++
++#define DRIVER_NAME           "nouveau"
++#define DRIVER_DESC           "nVidia Riva/TNT/GeForce"
++#define DRIVER_DATE           "20060213"
++
++#define DRIVER_MAJOR          0
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     11
++
++#define NOUVEAU_FAMILY   0x0000FFFF
++#define NOUVEAU_FLAGS    0xFFFF0000
++
++#include "nouveau_drm.h"
++#include "nouveau_reg.h"
++
++struct mem_block {
++      struct mem_block *next;
++      struct mem_block *prev;
++      uint64_t start;
++      uint64_t size;
++      struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++      int flags;
++      drm_local_map_t *map;
++      drm_handle_t map_handle;
++};
++
++enum nouveau_flags {
++      NV_NFORCE   =0x10000000,
++      NV_NFORCE2  =0x20000000
++};
++
++#define NVOBJ_ENGINE_SW               0
++#define NVOBJ_ENGINE_GR               1
++#define NVOBJ_ENGINE_INT      0xdeadbeef
++
++#define NVOBJ_FLAG_ALLOW_NO_REFS      (1 << 0)
++#define NVOBJ_FLAG_ZERO_ALLOC         (1 << 1)
++#define NVOBJ_FLAG_ZERO_FREE          (1 << 2)
++#define NVOBJ_FLAG_FAKE                       (1 << 3)
++struct nouveau_gpuobj {
++      struct list_head list;
++
++      int im_channel;
++      struct mem_block *im_pramin;
++      struct mem_block *im_backing;
++      int im_bound;
++
++      uint32_t flags;
++      int refcount;
++
++      uint32_t engine;
++      uint32_t class;
++
++      void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
++      void *priv;
++};
++
++struct nouveau_gpuobj_ref {
++      struct list_head list;
++
++      struct nouveau_gpuobj *gpuobj;
++      uint32_t instance;
++
++      int channel;
++      int handle;
++};
++
++struct nouveau_channel
++{
++      struct drm_device *dev;
++      int id;
++
++      /* owner of this fifo */
++      struct drm_file *file_priv;
++      /* mapping of the fifo itself */
++      drm_local_map_t *map;
++      /* mapping of the regs controling the fifo */
++      drm_local_map_t *regs;
++
++      /* Fencing */
++      uint32_t next_sequence;
++
++      /* DMA push buffer */
++      struct nouveau_gpuobj_ref *pushbuf;
++      struct mem_block          *pushbuf_mem;
++      uint32_t                   pushbuf_base;
++
++      /* FIFO user control regs */
++      uint32_t user, user_size;
++      uint32_t put;
++      uint32_t get;
++      uint32_t ref_cnt;
++
++      /* Notifier memory */
++      struct mem_block *notifier_block;
++      struct mem_block *notifier_heap;
++      drm_local_map_t  *notifier_map;
++
++      /* PFIFO context */
++      struct nouveau_gpuobj_ref *ramfc;
++
++      /* PGRAPH context */
++      /* XXX may be merge 2 pointers as private data ??? */
++      struct nouveau_gpuobj_ref *ramin_grctx;
++      void *pgraph_ctx;
++
++      /* NV50 VM */
++      struct nouveau_gpuobj     *vm_pd;
++      struct nouveau_gpuobj_ref *vm_gart_pt;
++      struct nouveau_gpuobj_ref *vm_vram_pt;
++
++      /* Objects */
++      struct nouveau_gpuobj_ref *ramin; /* Private instmem */
++      struct mem_block          *ramin_heap; /* Private PRAMIN heap */
++      struct nouveau_gpuobj_ref *ramht; /* Hash table */
++      struct list_head           ramht_refs; /* Objects referenced by RAMHT */
++};
++
++struct nouveau_drm_channel {
++      struct nouveau_channel *chan;
++
++      /* DMA state */
++      int max, put, cur, free;
++      int push_free;
++      volatile uint32_t *pushbuf;
++
++      /* Notifiers */
++      uint32_t notify0_offset;
++
++      /* Buffer moves */
++      uint32_t m2mf_dma_source;
++      uint32_t m2mf_dma_destin;
++};
++
++struct nouveau_config {
++      struct {
++              int location;
++              int size;
++      } cmdbuf;
++};
++
++struct nouveau_instmem_engine {
++      void    *priv;
++
++      int     (*init)(struct drm_device *dev);
++      void    (*takedown)(struct drm_device *dev);
++
++      int     (*populate)(struct drm_device *, struct nouveau_gpuobj *,
++                          uint32_t *size);
++      void    (*clear)(struct drm_device *, struct nouveau_gpuobj *);
++      int     (*bind)(struct drm_device *, struct nouveau_gpuobj *);
++      int     (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
++};
++
++struct nouveau_mc_engine {
++      int  (*init)(struct drm_device *dev);
++      void (*takedown)(struct drm_device *dev);
++};
++
++struct nouveau_timer_engine {
++      int      (*init)(struct drm_device *dev);
++      void     (*takedown)(struct drm_device *dev);
++      uint64_t (*read)(struct drm_device *dev);
++};
++
++struct nouveau_fb_engine {
++      int  (*init)(struct drm_device *dev);
++      void (*takedown)(struct drm_device *dev);
++};
++
++struct nouveau_fifo_engine {
++      void *priv;
++
++      int  channels;
++
++      int  (*init)(struct drm_device *);
++      void (*takedown)(struct drm_device *);
++
++      int  (*channel_id)(struct drm_device *);
++
++      int  (*create_context)(struct nouveau_channel *);
++      void (*destroy_context)(struct nouveau_channel *);
++      int  (*load_context)(struct nouveau_channel *);
++      int  (*save_context)(struct nouveau_channel *);
++};
++
++struct nouveau_pgraph_engine {
++      int  (*init)(struct drm_device *);
++      void (*takedown)(struct drm_device *);
++
++      int  (*create_context)(struct nouveau_channel *);
++      void (*destroy_context)(struct nouveau_channel *);
++      int  (*load_context)(struct nouveau_channel *);
++      int  (*save_context)(struct nouveau_channel *);
++};
++
++struct nouveau_engine {
++      struct nouveau_instmem_engine instmem;
++      struct nouveau_mc_engine      mc;
++      struct nouveau_timer_engine   timer;
++      struct nouveau_fb_engine      fb;
++      struct nouveau_pgraph_engine  graph;
++      struct nouveau_fifo_engine    fifo;
++};
++
++#define NOUVEAU_MAX_CHANNEL_NR 128
++struct drm_nouveau_private {
++      enum {
++              NOUVEAU_CARD_INIT_DOWN,
++              NOUVEAU_CARD_INIT_DONE,
++              NOUVEAU_CARD_INIT_FAILED
++      } init_state;
++
++      int ttm;
++
++      /* the card type, takes NV_* as values */
++      int card_type;
++      /* exact chipset, derived from NV_PMC_BOOT_0 */
++      int chipset;
++      int flags;
++
++      drm_local_map_t *mmio;
++      drm_local_map_t *fb;
++      drm_local_map_t *ramin; /* NV40 onwards */
++
++      int fifo_alloc_count;
++      struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
++
++      struct nouveau_engine Engine;
++      struct nouveau_drm_channel channel;
++
++      /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
++      struct nouveau_gpuobj *ramht;
++      uint32_t ramin_rsvd_vram;
++      uint32_t ramht_offset;
++      uint32_t ramht_size;
++      uint32_t ramht_bits;
++      uint32_t ramfc_offset;
++      uint32_t ramfc_size;
++      uint32_t ramro_offset;
++      uint32_t ramro_size;
++
++      /* base physical adresses */
++      uint64_t fb_phys;
++      uint64_t fb_available_size;
++
++      struct {
++              enum {
++                      NOUVEAU_GART_NONE = 0,
++                      NOUVEAU_GART_AGP,
++                      NOUVEAU_GART_SGDMA
++              } type;
++              uint64_t aper_base;
++              uint64_t aper_size;
++
++              struct nouveau_gpuobj *sg_ctxdma;
++              struct page *sg_dummy_page;
++              dma_addr_t sg_dummy_bus;
++
++              /* nottm hack */
++              struct drm_ttm_backend *sg_be;
++              unsigned long sg_handle;
++      } gart_info;
++
++      /* G8x global VRAM page table */
++      struct nouveau_gpuobj *vm_vram_pt;
++
++      /* the mtrr covering the FB */
++      int fb_mtrr;
++
++      struct mem_block *agp_heap;
++      struct mem_block *fb_heap;
++      struct mem_block *fb_nomap_heap;
++      struct mem_block *ramin_heap;
++      struct mem_block *pci_heap;
++
++        /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
++        uint32_t ctx_table_size;
++      struct nouveau_gpuobj_ref *ctx_table;
++
++      struct nouveau_config config;
++
++      struct list_head gpuobj_list;
++
++      struct nouveau_suspend_resume {
++              uint32_t fifo_mode;
++              uint32_t graph_ctx_control;
++              uint32_t graph_state;
++              uint32_t *ramin_copy;
++              uint64_t ramin_size;
++      } susres;
++};
++
++#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do {         \
++      struct drm_nouveau_private *nv = dev->dev_private; \
++      if (nv->init_state != NOUVEAU_CARD_INIT_DONE) {    \
++              DRM_ERROR("called without init\n");        \
++              return -EINVAL;                            \
++      }                                                  \
++} while(0)
++
++#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do {  \
++      struct drm_nouveau_private *nv = dev->dev_private;   \
++      if (!nouveau_fifo_owner(dev, (cl), (id))) {          \
++              DRM_ERROR("pid %d doesn't own channel %d\n", \
++                        DRM_CURRENTPID, (id));             \
++              return -EPERM;                               \
++      }                                                    \
++      (ch) = nv->fifos[(id)];                              \
++} while(0)
++
++/* nouveau_state.c */
++extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
++extern int  nouveau_load(struct drm_device *, unsigned long flags);
++extern int  nouveau_firstopen(struct drm_device *);
++extern void nouveau_lastclose(struct drm_device *);
++extern int  nouveau_unload(struct drm_device *);
++extern int  nouveau_ioctl_getparam(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern void nouveau_wait_for_idle(struct drm_device *);
++extern int  nouveau_card_init(struct drm_device *);
++extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
++                                  struct drm_file *);
++extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
++                                struct drm_file *);
++extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
++                               struct drm_file *);
++
++/* nouveau_mem.c */
++extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
++                               uint64_t size);
++extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
++                                               uint64_t size, int align2,
++                                               struct drm_file *, int tail);
++extern void nouveau_mem_takedown(struct mem_block **heap);
++extern void nouveau_mem_free_block(struct mem_block *);
++extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
++extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
++extern int  nouveau_ioctl_mem_alloc(struct drm_device *, void *data,
++                                  struct drm_file *);
++extern int  nouveau_ioctl_mem_free(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern int  nouveau_ioctl_mem_tile(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
++                                         int alignment, uint64_t size,
++                                         int flags, struct drm_file *);
++extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
++extern int  nouveau_mem_init(struct drm_device *);
++extern int  nouveau_mem_init_ttm(struct drm_device *);
++extern void nouveau_mem_close(struct drm_device *);
++
++/* nouveau_notifier.c */
++extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
++extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
++extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
++                                 int cout, uint32_t *offset);
++extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
++                                       struct drm_file *);
++extern int  nouveau_ioctl_notifier_free(struct drm_device *, void *data,
++                                      struct drm_file *);
++
++/* nouveau_fifo.c */
++extern int  nouveau_fifo_init(struct drm_device *);
++extern int  nouveau_fifo_ctx_size(struct drm_device *);
++extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *);
++extern int  nouveau_fifo_owner(struct drm_device *, struct drm_file *,
++                             int channel);
++extern int  nouveau_fifo_alloc(struct drm_device *dev,
++                             struct nouveau_channel **chan,
++                             struct drm_file *file_priv,
++                             struct mem_block *pushbuf,
++                             uint32_t fb_ctxdma, uint32_t tt_ctxdma);
++extern void nouveau_fifo_free(struct nouveau_channel *);
++extern int  nouveau_channel_idle(struct nouveau_channel *chan);
++
++/* nouveau_object.c */
++extern int  nouveau_gpuobj_early_init(struct drm_device *);
++extern int  nouveau_gpuobj_init(struct drm_device *);
++extern void nouveau_gpuobj_takedown(struct drm_device *);
++extern void nouveau_gpuobj_late_takedown(struct drm_device *);
++extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
++                                     uint32_t vram_h, uint32_t tt_h);
++extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
++extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
++                            int size, int align, uint32_t flags,
++                            struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
++                                uint32_t handle, struct nouveau_gpuobj *,
++                                struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_ref_del(struct drm_device *,
++                                struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
++                                 struct nouveau_gpuobj_ref **ref_ret);
++extern int nouveau_gpuobj_new_ref(struct drm_device *,
++                                struct nouveau_channel *alloc_chan,
++                                struct nouveau_channel *ref_chan,
++                                uint32_t handle, int size, int align,
++                                uint32_t flags, struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_new_fake(struct drm_device *,
++                                 uint32_t p_offset, uint32_t b_offset,
++                                 uint32_t size, uint32_t flags,
++                                 struct nouveau_gpuobj **,
++                                 struct nouveau_gpuobj_ref**);
++extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
++                                uint64_t offset, uint64_t size, int access,
++                                int target, struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
++                                     uint64_t offset, uint64_t size,
++                                     int access, struct nouveau_gpuobj **,
++                                     uint32_t *o_ret);
++extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
++                               struct nouveau_gpuobj **);
++extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
++                                   struct drm_file *);
++extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
++                                   struct drm_file *);
++
++/* nouveau_irq.c */
++extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
++extern void        nouveau_irq_preinstall(struct drm_device *);
++extern int         nouveau_irq_postinstall(struct drm_device *);
++extern void        nouveau_irq_uninstall(struct drm_device *);
++
++/* nouveau_sgdma.c */
++extern int nouveau_sgdma_init(struct drm_device *);
++extern void nouveau_sgdma_takedown(struct drm_device *);
++extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
++                                uint32_t *page);
++extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
++extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
++extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);
++
++/* nouveau_dma.c */
++extern int  nouveau_dma_channel_init(struct drm_device *);
++extern void nouveau_dma_channel_takedown(struct drm_device *);
++extern int  nouveau_dma_wait(struct drm_device *, int size);
++
++/* nv04_fb.c */
++extern int  nv04_fb_init(struct drm_device *);
++extern void nv04_fb_takedown(struct drm_device *);
++
++/* nv10_fb.c */
++extern int  nv10_fb_init(struct drm_device *);
++extern void nv10_fb_takedown(struct drm_device *);
++
++/* nv40_fb.c */
++extern int  nv40_fb_init(struct drm_device *);
++extern void nv40_fb_takedown(struct drm_device *);
++
++/* nv04_fifo.c */
++extern int  nv04_fifo_channel_id(struct drm_device *);
++extern int  nv04_fifo_create_context(struct nouveau_channel *);
++extern void nv04_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv04_fifo_load_context(struct nouveau_channel *);
++extern int  nv04_fifo_save_context(struct nouveau_channel *);
++
++/* nv10_fifo.c */
++extern int  nv10_fifo_channel_id(struct drm_device *);
++extern int  nv10_fifo_create_context(struct nouveau_channel *);
++extern void nv10_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv10_fifo_load_context(struct nouveau_channel *);
++extern int  nv10_fifo_save_context(struct nouveau_channel *);
++
++/* nv40_fifo.c */
++extern int  nv40_fifo_init(struct drm_device *);
++extern int  nv40_fifo_create_context(struct nouveau_channel *);
++extern void nv40_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv40_fifo_load_context(struct nouveau_channel *);
++extern int  nv40_fifo_save_context(struct nouveau_channel *);
++
++/* nv50_fifo.c */
++extern int  nv50_fifo_init(struct drm_device *);
++extern void nv50_fifo_takedown(struct drm_device *);
++extern int  nv50_fifo_channel_id(struct drm_device *);
++extern int  nv50_fifo_create_context(struct nouveau_channel *);
++extern void nv50_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv50_fifo_load_context(struct nouveau_channel *);
++extern int  nv50_fifo_save_context(struct nouveau_channel *);
++
++/* nv04_graph.c */
++extern void nouveau_nv04_context_switch(struct drm_device *);
++extern int  nv04_graph_init(struct drm_device *);
++extern void nv04_graph_takedown(struct drm_device *);
++extern int  nv04_graph_create_context(struct nouveau_channel *);
++extern void nv04_graph_destroy_context(struct nouveau_channel *);
++extern int  nv04_graph_load_context(struct nouveau_channel *);
++extern int  nv04_graph_save_context(struct nouveau_channel *);
++
++/* nv10_graph.c */
++extern void nouveau_nv10_context_switch(struct drm_device *);
++extern int  nv10_graph_init(struct drm_device *);
++extern void nv10_graph_takedown(struct drm_device *);
++extern int  nv10_graph_create_context(struct nouveau_channel *);
++extern void nv10_graph_destroy_context(struct nouveau_channel *);
++extern int  nv10_graph_load_context(struct nouveau_channel *);
++extern int  nv10_graph_save_context(struct nouveau_channel *);
++
++/* nv20_graph.c */
++extern int  nv20_graph_create_context(struct nouveau_channel *);
++extern void nv20_graph_destroy_context(struct nouveau_channel *);
++extern int  nv20_graph_load_context(struct nouveau_channel *);
++extern int  nv20_graph_save_context(struct nouveau_channel *);
++extern int  nv20_graph_init(struct drm_device *);
++extern void nv20_graph_takedown(struct drm_device *);
++extern int  nv30_graph_init(struct drm_device *);
++
++/* nv40_graph.c */
++extern int  nv40_graph_init(struct drm_device *);
++extern void nv40_graph_takedown(struct drm_device *);
++extern int  nv40_graph_create_context(struct nouveau_channel *);
++extern void nv40_graph_destroy_context(struct nouveau_channel *);
++extern int  nv40_graph_load_context(struct nouveau_channel *);
++extern int  nv40_graph_save_context(struct nouveau_channel *);
++
++/* nv50_graph.c */
++extern int  nv50_graph_init(struct drm_device *);
++extern void nv50_graph_takedown(struct drm_device *);
++extern int  nv50_graph_create_context(struct nouveau_channel *);
++extern void nv50_graph_destroy_context(struct nouveau_channel *);
++extern int  nv50_graph_load_context(struct nouveau_channel *);
++extern int  nv50_graph_save_context(struct nouveau_channel *);
++
++/* nv04_instmem.c */
++extern int  nv04_instmem_init(struct drm_device *);
++extern void nv04_instmem_takedown(struct drm_device *);
++extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
++                                uint32_t *size);
++extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
++
++/* nv50_instmem.c */
++extern int  nv50_instmem_init(struct drm_device *);
++extern void nv50_instmem_takedown(struct drm_device *);
++extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
++                                uint32_t *size);
++extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
++
++/* nv04_mc.c */
++extern int  nv04_mc_init(struct drm_device *);
++extern void nv04_mc_takedown(struct drm_device *);
++
++/* nv40_mc.c */
++extern int  nv40_mc_init(struct drm_device *);
++extern void nv40_mc_takedown(struct drm_device *);
++
++/* nv50_mc.c */
++extern int  nv50_mc_init(struct drm_device *);
++extern void nv50_mc_takedown(struct drm_device *);
++
++/* nv04_timer.c */
++extern int  nv04_timer_init(struct drm_device *);
++extern uint64_t nv04_timer_read(struct drm_device *);
++extern void nv04_timer_takedown(struct drm_device *);
++
++extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
++                               unsigned long arg);
++
++/* nouveau_buffer.c */
++extern struct drm_bo_driver nouveau_bo_driver;
++
++/* nouveau_fence.c */
++extern struct drm_fence_driver nouveau_fence_driver;
++extern void nouveau_fence_handler(struct drm_device *dev, int channel);
++
++#if defined(__powerpc__)
++#define NV_READ(reg)        in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
++#define NV_WRITE(reg,val)   out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) )
++#else
++#define NV_READ(reg)        DRM_READ32(  dev_priv->mmio, (reg) )
++#define NV_WRITE(reg,val)   DRM_WRITE32( dev_priv->mmio, (reg), (val) )
++#endif
++
++/* PRAMIN access */
++#if defined(__powerpc__)
++#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o))
++#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v))
++#else
++#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o))
++#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v))
++#endif
++
++#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2))
++#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))
++
++#endif /* __NOUVEAU_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_fence.c git-nokia/drivers/gpu/drm-tungsten/nouveau_fence.c
+--- git/drivers/gpu/drm-tungsten/nouveau_fence.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_fence.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,119 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++static int
++nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
++
++      /* DRM's channel always uses IRQs to signal fences */
++      if (class == dev_priv->channel.chan->id)
++              return 1;
++
++      /* Other channels don't use IRQs at all yet */
++      return 0;
++}
++
++static int
++nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
++                 uint32_t *breadcrumb, uint32_t *native_type)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[class];
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++
++      DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
++
++      /* We can't emit fences on client channels, update sequence number
++       * and userspace will emit the fence
++       */
++      *breadcrumb  = ++chan->next_sequence;
++      *native_type = DRM_FENCE_TYPE_EXE;
++      if (chan != dchan->chan) {
++              DRM_DEBUG("user fence 0x%08x\n", *breadcrumb);
++              return 0;
++      }
++
++      DRM_DEBUG("emit 0x%08x\n", *breadcrumb);
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
++      OUT_RING  (*breadcrumb);
++      BEGIN_RING(NvSubM2MF, 0x0150, 1);
++      OUT_RING  (0);
++      FIRE_RING ();
++
++      return 0;
++}
++
++static void
++nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
++      struct nouveau_channel *chan = dev_priv->fifos[class];
++
++      DRM_DEBUG("class=%d\n", class);
++      DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);
++
++      if (waiting_types & DRM_FENCE_TYPE_EXE) {
++              uint32_t sequence = NV_READ(chan->ref_cnt);
++
++              DRM_DEBUG("got 0x%08x\n", sequence);
++              drm_fence_handler(dev, class, sequence, waiting_types, 0);
++      }
++}
++
++void
++nouveau_fence_handler(struct drm_device *dev, int channel)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[channel];
++
++      DRM_DEBUG("class=%d\n", channel);
++
++      write_lock(&fm->lock);
++      nouveau_fence_poll(dev, channel, fc->waiting_types);
++      write_unlock(&fm->lock);
++}
++
++struct drm_fence_driver nouveau_fence_driver = {
++      .num_classes    = 8,
++      .wrap_diff      = (1 << 30),
++      .flush_diff     = (1 << 29),
++      .sequence_mask  = 0xffffffffU,
++      .has_irq        = nouveau_fence_has_irq,
++      .emit           = nouveau_fence_emit,
++      .flush          = NULL,
++      .poll           = nouveau_fence_poll,
++      .needed_flush   = NULL,
++      .wait           = NULL
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_fifo.c git-nokia/drivers/gpu/drm-tungsten/nouveau_fifo.c
+--- git/drivers/gpu/drm-tungsten/nouveau_fifo.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_fifo.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,601 @@
++/*
++ * Copyright 2005-2006 Stephane Marchesin
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++
++/* returns the size of fifo context */
++int nouveau_fifo_ctx_size(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++
++      if (dev_priv->card_type >= NV_40)
++              return 128;
++      else if (dev_priv->card_type >= NV_17)
++              return 64;
++      else
++              return 32;
++}
++
++/***********************************
++ * functions doing the actual work
++ ***********************************/
++
++static int nouveau_fifo_instmem_configure(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV03_PFIFO_RAMHT,
++                      (0x03 << 24) /* search 128 */ |
++                      ((dev_priv->ramht_bits - 9) << 16) |
++                      (dev_priv->ramht_offset >> 8)
++                      );
++
++      NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
++
++      switch(dev_priv->card_type)
++      {
++              case NV_40:
++                      switch (dev_priv->chipset) {
++                      case 0x47:
++                      case 0x49:
++                      case 0x4b:
++                              NV_WRITE(0x2230, 1);
++                              break;
++                      default:
++                              break;
++                      }
++                      NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
++                      break;
++              case NV_44:
++                      NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
++                                      (2 << 16));
++                      break;
++              case NV_30:
++              case NV_20:
++              case NV_17:
++                      NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
++                                      (1 << 16) /* 64 Bytes entry*/);
++                      /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
++                      break;
++              case NV_11:
++              case NV_10:
++              case NV_04:
++                      NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
++                      break;
++      }
++
++      return 0;
++}
++
++int nouveau_fifo_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PFIFO);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PFIFO);
++
++      /* Enable PFIFO error reporting */
++      NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
++      NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
++
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++
++      ret = nouveau_fifo_instmem_configure(dev);
++      if (ret) {
++              DRM_ERROR("Failed to configure instance memory\n");
++              return ret;
++      }
++
++      /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */
++
++      DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");
++
++      /* All channels into PIO mode */
++      NV_WRITE(NV04_PFIFO_MODE, 0x00000000);
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++      /* Channel 0 active, PIO mode */
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
++      /* PUT and GET to 0 */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
++      /* No cmdbuf object */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
++      NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
++      NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
++                                    NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                                    NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
++#ifdef __BIG_ENDIAN
++                                    NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                                    0x00000000);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
++
++      /* FIXME on NV04 */
++      if (dev_priv->card_type >= NV_10) {
++              NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
++              NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
++              if (dev_priv->card_type >= NV_40)
++                      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
++              else
++                      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
++      } else {
++              NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
++              NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
++              NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
++      }
++
++      NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++      return 0;
++}
++
++static int
++nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct mem_block *pb = chan->pushbuf_mem;
++      struct nouveau_gpuobj *pushbuf = NULL;
++      int ret;
++
++      if (pb->flags & NOUVEAU_MEM_AGP) {
++              ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
++                                                NV_DMA_ACCESS_RO,
++                                                &pushbuf,
++                                                &chan->pushbuf_base);
++      } else
++      if (pb->flags & NOUVEAU_MEM_PCI) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           pb->start, pb->size,
++                                           NV_DMA_ACCESS_RO,
++                                           NV_DMA_TARGET_PCI_NONLINEAR,
++                                           &pushbuf);
++              chan->pushbuf_base = 0;
++      } else if (dev_priv->card_type != NV_04) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           pb->start, pb->size,
++                                           NV_DMA_ACCESS_RO,
++                                           NV_DMA_TARGET_VIDMEM, &pushbuf);
++              chan->pushbuf_base = 0;
++      } else {
++              /* NV04 cmdbuf hack, from original ddx.. not sure of it's
++               * exact reason for existing :)  PCI access to cmdbuf in
++               * VRAM.
++               */
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           pb->start +
++                                             drm_get_resource_start(dev, 1),
++                                           pb->size, NV_DMA_ACCESS_RO,
++                                           NV_DMA_TARGET_PCI, &pushbuf);
++              chan->pushbuf_base = 0;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
++                                        &chan->pushbuf))) {
++              DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
++              if (pushbuf != dev_priv->gart_info.sg_ctxdma)
++                      nouveau_gpuobj_del(dev, &pushbuf);
++              return ret;
++      }
++
++      return 0;
++}
++
++static struct mem_block *
++nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_config *config = &dev_priv->config;
++      struct mem_block *pb;
++      int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
++
++      /* Defaults for unconfigured values */
++      if (!config->cmdbuf.location)
++              config->cmdbuf.location = NOUVEAU_MEM_FB;
++      if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
++              config->cmdbuf.size = pb_min_size;
++
++      pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
++                             config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
++                             (struct drm_file *)-2);
++      if (!pb)
++              DRM_ERROR("Couldn't allocate DMA push buffer.\n");
++
++      return pb;
++}
++
++/* allocates and initializes a fifo for user space consumption */
++int
++nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
++                 struct drm_file *file_priv, struct mem_block *pushbuf,
++                 uint32_t vram_handle, uint32_t tt_handle)
++{
++      int ret;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_channel *chan;
++      int channel;
++
++      /*
++       * Alright, here is the full story
++       * Nvidia cards have multiple hw fifo contexts (praise them for that,
++       * no complicated crash-prone context switches)
++       * We allocate a new context for each app and let it write to it directly
++       * (woo, full userspace command submission !)
++       * When there are no more contexts, you lost
++       */
++      for (channel = 0; channel < engine->fifo.channels; channel++) {
++              if (dev_priv->fifos[channel] == NULL)
++                      break;
++      }
++
++      /* no more fifos. you lost. */
++      if (channel == engine->fifo.channels)
++              return -EINVAL;
++
++      dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
++                                            DRM_MEM_DRIVER);
++      if (!dev_priv->fifos[channel])
++              return -ENOMEM;
++      dev_priv->fifo_alloc_count++;
++      chan = dev_priv->fifos[channel];
++      chan->dev = dev;
++      chan->id = channel;
++      chan->file_priv = file_priv;
++      chan->pushbuf_mem = pushbuf;
++
++      DRM_INFO("Allocating FIFO number %d\n", channel);
++
++      /* Locate channel's user control regs */
++      if (dev_priv->card_type < NV_40) {
++              chan->user = NV03_USER(channel);
++              chan->user_size = NV03_USER_SIZE;
++              chan->put = NV03_USER_DMA_PUT(channel);
++              chan->get = NV03_USER_DMA_GET(channel);
++              chan->ref_cnt = NV03_USER_REF_CNT(channel);
++      } else
++      if (dev_priv->card_type < NV_50) {
++              chan->user = NV40_USER(channel);
++              chan->user_size = NV40_USER_SIZE;
++              chan->put = NV40_USER_DMA_PUT(channel);
++              chan->get = NV40_USER_DMA_GET(channel);
++              chan->ref_cnt = NV40_USER_REF_CNT(channel);
++      } else {
++              chan->user = NV50_USER(channel);
++              chan->user_size = NV50_USER_SIZE;
++              chan->put = NV50_USER_DMA_PUT(channel);
++              chan->get = NV50_USER_DMA_GET(channel);
++              chan->ref_cnt = NV50_USER_REF_CNT(channel);
++      }
++
++      /* Allocate space for per-channel fixed notifier memory */
++      ret = nouveau_notifier_init_channel(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* Setup channel's default objects */
++      ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* Create a dma object for the push buffer */
++      ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      nouveau_wait_for_idle(dev);
++
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      /* Create a graphics context for new channel */
++      ret = engine->graph.create_context(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* Construct inital RAMFC for new channel */
++      ret = engine->fifo.create_context(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* setup channel's default get/put values
++       * XXX: quite possibly extremely pointless..
++       */
++      NV_WRITE(chan->get, chan->pushbuf_base);
++      NV_WRITE(chan->put, chan->pushbuf_base);
++
++      /* If this is the first channel, setup PFIFO ourselves.  For any
++       * other case, the GPU will handle this when it switches contexts.
++       */
++      if (dev_priv->fifo_alloc_count == 1) {
++              ret = engine->fifo.load_context(chan);
++              if (ret) {
++                      nouveau_fifo_free(chan);
++                      return ret;
++              }
++
++              ret = engine->graph.load_context(chan);
++              if (ret) {
++                      nouveau_fifo_free(chan);
++                      return ret;
++              }
++      }
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 1);
++
++      DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
++      *chan_ret = chan;
++      return 0;
++}
++
++int
++nouveau_channel_idle(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint32_t caches;
++      int idle;
++
++      caches = NV_READ(NV03_PFIFO_CACHES);
++      NV_WRITE(NV03_PFIFO_CACHES, caches & ~1);
++
++      if (engine->fifo.channel_id(dev) != chan->id) {
++              struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
++
++              if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1))
++                      idle = 0;
++              else
++                      idle = 1;
++      } else {
++              idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) ==
++                      NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      }
++
++      NV_WRITE(NV03_PFIFO_CACHES, caches);
++      return idle;
++}
++
++/* stops a fifo */
++void nouveau_fifo_free(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint64_t t_start;
++
++      DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
++
++      /* Give the channel a chance to idle, wait 2s (hopefully) */
++      t_start = engine->timer.read(dev);
++      while (!nouveau_channel_idle(chan)) {
++              if (engine->timer.read(dev) - t_start > 2000000000ULL) {
++                      DRM_ERROR("Failed to idle channel %d before destroy."
++                                "Prepare for strangeness..\n", chan->id);
++                      break;
++              }
++      }
++
++      /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
++       *     from CACHE1 too?
++       */
++
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      // FIXME XXX needs more code
++
++      engine->fifo.destroy_context(chan);
++
++      /* Cleanup PGRAPH state */
++      engine->graph.destroy_context(chan);
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++
++      /* Deallocate push buffer */
++      nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
++      if (chan->pushbuf_mem) {
++              nouveau_mem_free(dev, chan->pushbuf_mem);
++              chan->pushbuf_mem = NULL;
++      }
++
++      /* Destroy objects belonging to the channel */
++      nouveau_gpuobj_channel_takedown(chan);
++
++      nouveau_notifier_takedown_channel(chan);
++
++      dev_priv->fifos[chan->id] = NULL;
++      dev_priv->fifo_alloc_count--;
++      drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
++}
++
++/* cleanups all the fifos from file_priv */
++void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int i;
++
++      DRM_DEBUG("clearing FIFO enables from file_priv\n");
++      for(i = 0; i < engine->fifo.channels; i++) {
++              struct nouveau_channel *chan = dev_priv->fifos[i];
++
++              if (chan && chan->file_priv == file_priv)
++                      nouveau_fifo_free(chan);
++      }
++}
++
++int
++nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
++                 int channel)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      if (channel >= engine->fifo.channels)
++              return 0;
++      if (dev_priv->fifos[channel] == NULL)
++              return 0;
++      return (dev_priv->fifos[channel]->file_priv == file_priv);
++}
++
++/***********************************
++ * ioctls wrapping the functions
++ ***********************************/
++
++static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
++                                  struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_channel_alloc *init = data;
++      struct drm_map_list *entry;
++      struct nouveau_channel *chan;
++      struct mem_block *pushbuf;
++      int res;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
++              return -EINVAL;
++
++      pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
++      if (!pushbuf)
++              return -ENOMEM;
++
++      res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
++                               init->fb_ctxdma_handle,
++                               init->tt_ctxdma_handle);
++      if (res)
++              return res;
++      init->channel  = chan->id;
++      init->put_base = chan->pushbuf_base;
++
++      /* make the fifo available to user space */
++      /* first, the fifo control regs */
++      init->ctrl = dev_priv->mmio->offset + chan->user;
++      init->ctrl_size = chan->user_size;
++      res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
++                       0, &chan->regs);
++      if (res != 0)
++              return res;
++
++      entry = drm_find_matching_map(dev, chan->regs);
++      if (!entry)
++              return -EINVAL;
++      init->ctrl = entry->user_token;
++
++      /* pass back FIFO map info to the caller */
++      init->cmdbuf      = chan->pushbuf_mem->map_handle;
++      init->cmdbuf_size = chan->pushbuf_mem->size;
++
++      /* and the notifier block */
++      init->notifier      = chan->notifier_block->map_handle;
++      init->notifier_size = chan->notifier_block->size;
++
++      return 0;
++}
++
++static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv)
++{
++      struct drm_nouveau_channel_free *cfree = data;
++      struct nouveau_channel *chan;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
++
++      nouveau_fifo_free(chan);
++      return 0;
++}
++
++/***********************************
++ * finally, the ioctl table
++ ***********************************/
++
++struct drm_ioctl_desc nouveau_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH),
++};
++
++int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_ioc32.c git-nokia/drivers/gpu/drm-tungsten/nouveau_ioc32.c
+--- git/drivers/gpu/drm-tungsten/nouveau_ioc32.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_ioc32.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,72 @@
++/**
++ * \file mga_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the MGA DRM.
++ *
++ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++
++#include "nouveau_drm.h"
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
++                       unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++#if 0
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
++              fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
++#endif
++      lock_kernel();    /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_irq.c git-nokia/drivers/gpu/drm-tungsten/nouveau_irq.c
+--- git/drivers/gpu/drm-tungsten/nouveau_irq.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_irq.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,568 @@
++/*
++ * Copyright (C) 2006 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Ben Skeggs <darktama@iinet.net.au>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_reg.h"
++#include "nouveau_swmthd.h"
++
++void
++nouveau_irq_preinstall(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Master disable */
++      NV_WRITE(NV03_PMC_INTR_EN_0, 0);
++}
++
++int
++nouveau_irq_postinstall(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Master enable */
++      NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
++
++      return 0;
++}
++
++void
++nouveau_irq_uninstall(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Master disable */
++      NV_WRITE(NV03_PMC_INTR_EN_0, 0);
++}
++
++static void
++nouveau_fifo_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint32_t status, reassign;
++
++      reassign = NV_READ(NV03_PFIFO_CACHES) & 1;
++      while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
++              uint32_t chid, get;
++
++              NV_WRITE(NV03_PFIFO_CACHES, 0);
++
++              chid = engine->fifo.channel_id(dev);
++              get  = NV_READ(NV03_PFIFO_CACHE1_GET);
++
++              if (status & NV_PFIFO_INTR_CACHE_ERROR) {
++                      uint32_t mthd, data;
++                      int ptr;
++
++                      ptr = get >> 2;
++                      if (dev_priv->card_type < NV_40) {
++                              mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr));
++                              data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr));
++                      } else {
++                              mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr));
++                              data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr));
++                      }
++
++                      DRM_INFO("PFIFO_CACHE_ERROR - "
++                               "Ch %d/%d Mthd 0x%04x Data 0x%08x\n",
++                               chid, (mthd >> 13) & 7, mthd & 0x1ffc, data);
++
++                      NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4);
++                      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1);
++
++                      status &= ~NV_PFIFO_INTR_CACHE_ERROR;
++                      NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
++              }
++
++              if (status & NV_PFIFO_INTR_DMA_PUSHER) {
++                      DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid);
++
++                      status &= ~NV_PFIFO_INTR_DMA_PUSHER;
++                      NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);
++
++                      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
++                      if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get)
++                              NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4);
++              }
++
++              if (status) {
++                      DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
++                      NV_WRITE(NV03_PFIFO_INTR_0, status);
++                      NV_WRITE(NV03_PMC_INTR_EN_0, 0);
++              }
++
++              NV_WRITE(NV03_PFIFO_CACHES, reassign);
++      }
++
++      NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
++}
++
++struct nouveau_bitfield_names {
++      uint32_t mask;
++      const char * name;
++};
++
++static struct nouveau_bitfield_names nouveau_nstatus_names[] =
++{
++      { NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
++      { NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
++      { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
++      { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
++};
++
++static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] =
++{
++      { NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
++      { NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
++      { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
++      { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
++};
++
++static struct nouveau_bitfield_names nouveau_nsource_names[] =
++{
++      { NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
++      { NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
++      { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
++      { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
++      { NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
++      { NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
++      { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
++      { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
++      { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
++      { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
++      { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
++      { NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
++      { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
++      { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
++      { NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
++      { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
++      { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
++      { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
++      { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
++};
++
++static void
++nouveau_print_bitfield_names(uint32_t value,
++                             const struct nouveau_bitfield_names *namelist,
++                             const int namelist_len)
++{
++      int i;
++      for(i=0; i<namelist_len; ++i) {
++              uint32_t mask = namelist[i].mask;
++              if(value & mask) {
++                      printk(" %s", namelist[i].name);
++                      value &= ~mask;
++              }
++      }
++      if(value)
++              printk(" (unknown bits 0x%08x)", value);
++}
++
++static int
++nouveau_graph_chid_from_grctx(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++      int i;
++
++      if (dev_priv->card_type < NV_40)
++              return dev_priv->Engine.fifo.channels;
++      else
++      if (dev_priv->card_type < NV_50)
++              inst = (NV_READ(0x40032c) & 0xfffff) << 4;
++      else
++              inst = NV_READ(0x40032c) & 0xfffff;
++
++      for (i = 0; i < dev_priv->Engine.fifo.channels; i++) {
++              struct nouveau_channel *chan = dev_priv->fifos[i];
++
++              if (!chan || !chan->ramin_grctx)
++                      continue;
++
++              if (dev_priv->card_type < NV_50) {
++                      if (inst == chan->ramin_grctx->instance)
++                              break;
++              } else {
++                      if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0))
++                              break;
++              }
++      }
++
++      return i;
++}
++
++static int
++nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int channel;
++
++      if (dev_priv->card_type < NV_10)
++              channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
++      else
++      if (dev_priv->card_type < NV_40)
++              channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
++      else
++              channel = nouveau_graph_chid_from_grctx(dev);
++
++      if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
++              DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);
++              return -EINVAL;
++      }
++
++      *channel_ret = channel;
++      return 0;
++}
++
++struct nouveau_pgraph_trap {
++      int channel;
++      int class;
++      int subc, mthd, size;
++      uint32_t data, data2;
++      uint32_t nsource, nstatus;
++};
++
++static void
++nouveau_graph_trap_info(struct drm_device *dev,
++                      struct nouveau_pgraph_trap *trap)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t address;
++
++      trap->nsource = trap->nstatus = 0;
++      if (dev_priv->card_type < NV_50) {
++              trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE);
++              trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
++      }
++
++      if (nouveau_graph_trapped_channel(dev, &trap->channel))
++              trap->channel = -1;
++      address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
++
++      trap->mthd = address & 0x1FFC;
++      trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
++      if (dev_priv->card_type < NV_10) {
++              trap->subc  = (address >> 13) & 0x7;
++      } else {
++              trap->subc  = (address >> 16) & 0x7;
++              trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
++      }
++
++      if (dev_priv->card_type < NV_10) {
++              trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF;
++      } else if (dev_priv->card_type < NV_40) {
++              trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF;
++      } else if (dev_priv->card_type < NV_50) {
++              trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF;
++      } else {
++              trap->class = NV_READ(0x400814);
++      }
++}
++
++static void
++nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
++                           struct nouveau_pgraph_trap *trap)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
++
++      DRM_INFO("%s - nSource:", id);
++      nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
++                                   ARRAY_SIZE(nouveau_nsource_names));
++      printk(", nStatus:");
++      if (dev_priv->card_type < NV_10)
++              nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names,
++                                   ARRAY_SIZE(nouveau_nstatus_names));
++      else
++              nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10,
++                                   ARRAY_SIZE(nouveau_nstatus_names_nv10));
++      printk("\n");
++
++      DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n",
++               id, trap->channel, trap->subc, trap->class, trap->mthd,
++               trap->data2, trap->data);
++}
++
++static inline void
++nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
++{
++      struct nouveau_pgraph_trap trap;
++      int unhandled = 0;
++
++      nouveau_graph_trap_info(dev, &trap);
++
++      if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
++              /* NV4 (nvidia TNT 1) reports software methods with
++               * PGRAPH NOTIFY ILLEGAL_MTHD
++               */
++              DRM_DEBUG("Got NV04 software method method %x for class %#x\n",
++                        trap.mthd, trap.class);
++
++              if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
++                      DRM_ERROR("Unable to execute NV04 software method %x "
++                                "for object class %x. Please report.\n",
++                                trap.mthd, trap.class);
++                      unhandled = 1;
++              }
++      } else {
++              unhandled = 1;
++      }
++
++      if (unhandled)
++              nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
++}
++
++static inline void
++nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
++{
++      struct nouveau_pgraph_trap trap;
++      int unhandled = 0;
++
++      nouveau_graph_trap_info(dev, &trap);
++      trap.nsource = nsource;
++
++      if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
++              if (trap.channel >= 0 && trap.mthd == 0x0150) {
++                      nouveau_fence_handler(dev, trap.channel);
++              } else
++              if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
++                      unhandled = 1;
++              }
++      } else {
++              unhandled = 1;
++      }
++
++      if (unhandled)
++              nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
++}
++
++static inline void
++nouveau_pgraph_intr_context_switch(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint32_t chid;
++
++      chid = engine->fifo.channel_id(dev);
++      DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid);
++
++      switch(dev_priv->card_type) {
++      case NV_04:
++      case NV_05:
++              nouveau_nv04_context_switch(dev);
++              break;
++      case NV_10:
++      case NV_11:
++      case NV_17:
++              nouveau_nv10_context_switch(dev);
++              break;
++      default:
++              DRM_ERROR("Context switch not implemented\n");
++              break;
++      }
++}
++
++static void
++nouveau_pgraph_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t status;
++
++      while ((status = NV_READ(NV03_PGRAPH_INTR))) {
++              uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE);
++
++              if (status & NV_PGRAPH_INTR_NOTIFY) {
++                      nouveau_pgraph_intr_notify(dev, nsource);
++
++                      status &= ~NV_PGRAPH_INTR_NOTIFY;
++                      NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
++              }
++
++              if (status & NV_PGRAPH_INTR_ERROR) {
++                      nouveau_pgraph_intr_error(dev, nsource);
++
++                      status &= ~NV_PGRAPH_INTR_ERROR;
++                      NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
++              }
++
++              if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
++                      nouveau_pgraph_intr_context_switch(dev);
++
++                      status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
++                      NV_WRITE(NV03_PGRAPH_INTR,
++                               NV_PGRAPH_INTR_CONTEXT_SWITCH);
++              }
++
++              if (status) {
++                      DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
++                      NV_WRITE(NV03_PGRAPH_INTR, status);
++              }
++
++              if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
++                      NV_WRITE(NV04_PGRAPH_FIFO, 1);
++      }
++
++      NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
++}
++
++static void
++nv50_pgraph_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t status;
++
++      status = NV_READ(NV03_PGRAPH_INTR);
++
++      if (status & 0x00000020) {
++              nouveau_pgraph_intr_error(dev,
++                                        NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
++
++              status &= ~0x00000020;
++              NV_WRITE(NV03_PGRAPH_INTR, 0x00000020);
++      }
++
++      if (status & 0x00100000) {
++              nouveau_pgraph_intr_error(dev,
++                                        NV03_PGRAPH_NSOURCE_DATA_ERROR);
++
++              status &= ~0x00100000;
++              NV_WRITE(NV03_PGRAPH_INTR, 0x00100000);
++      }
++
++      if (status & 0x00200000) {
++              nouveau_pgraph_intr_error(dev,
++                                        NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
++
++              status &= ~0x00200000;
++              NV_WRITE(NV03_PGRAPH_INTR, 0x00200000);
++      }
++
++      if (status) {
++              DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
++              NV_WRITE(NV03_PGRAPH_INTR, status);
++      }
++
++      {
++              const int isb = (1 << 16) | (1 << 0);
++
++              if ((NV_READ(0x400500) & isb) != isb)
++                      NV_WRITE(0x400500, NV_READ(0x400500) | isb);
++      }
++
++      NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
++}
++
++static void
++nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (crtc&1) {
++              NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
++      }
++
++      if (crtc&2) {
++              NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
++      }
++}
++
++static void
++nouveau_nv50_display_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR);
++
++      DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val);
++
++      NV_WRITE(NV50_DISPLAY_SUPERVISOR, val);
++}
++
++static void
++nouveau_nv50_i2c_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER));
++
++      /* This seems to be the way to acknowledge an interrupt. */
++      NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF);
++}
++
++irqreturn_t
++nouveau_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device*)arg;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t status;
++
++      status = NV_READ(NV03_PMC_INTR_0);
++      if (!status)
++              return IRQ_NONE;
++
++      if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
++              nouveau_fifo_irq_handler(dev);
++              status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
++              if (dev_priv->card_type >= NV_50)
++                      nv50_pgraph_irq_handler(dev);
++              else
++                      nouveau_pgraph_irq_handler(dev);
++
++              status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
++              nouveau_crtc_irq_handler(dev, (status>>24)&3);
++              status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) {
++              nouveau_nv50_display_irq_handler(dev);
++              status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) {
++              nouveau_nv50_i2c_irq_handler(dev);
++              status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING;
++      }
++
++      if (status)
++              DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status);
++
++      return IRQ_HANDLED;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_mem.c git-nokia/drivers/gpu/drm-tungsten/nouveau_mem.c
+--- git/drivers/gpu/drm-tungsten/nouveau_mem.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_mem.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,872 @@
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ * Copyright 2005 Stephane Marchesin
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "nouveau_drv.h"
++
++static struct mem_block *
++split_block(struct mem_block *p, uint64_t start, uint64_t size,
++          struct drm_file *file_priv)
++{
++      /* Maybe cut off the start of an existing block */
++      if (start > p->start) {
++              struct mem_block *newblock =
++                      drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start;
++              newblock->size = p->size - (start - p->start);
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size -= newblock->size;
++              p = newblock;
++      }
++
++      /* Maybe cut off the end of an existing block */
++      if (size < p->size) {
++              struct mem_block *newblock =
++                      drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start + size;
++              newblock->size = p->size - size;
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size = size;
++      }
++
++out:
++      /* Our block is in the middle */
++      p->file_priv = file_priv;
++      return p;
++}
++
++struct mem_block *
++nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
++                      int align2, struct drm_file *file_priv, int tail)
++{
++      struct mem_block *p;
++      uint64_t mask = (1 << align2) - 1;
++
++      if (!heap)
++              return NULL;
++
++      if (tail) {
++              list_for_each_prev(p, heap) {
++                      uint64_t start = ((p->start + p->size) - size) & ~mask;
++
++                      if (p->file_priv == 0 && start >= p->start &&
++                          start + size <= p->start + p->size)
++                              return split_block(p, start, size, file_priv);
++              }
++      } else {
++              list_for_each(p, heap) {
++                      uint64_t start = (p->start + mask) & ~mask;
++
++                      if (p->file_priv == 0 &&
++                          start + size <= p->start + p->size)
++                              return split_block(p, start, size, file_priv);
++              }
++      }
++
++      return NULL;
++}
++
++static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
++{
++      struct mem_block *p;
++
++      list_for_each(p, heap)
++              if (p->start == start)
++                      return p;
++
++      return NULL;
++}
++
++void nouveau_mem_free_block(struct mem_block *p)
++{
++      p->file_priv = NULL;
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      if (p->next->file_priv == 0) {
++              struct mem_block *q = p->next;
++              p->size += q->size;
++              p->next = q->next;
++              p->next->prev = p;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFS);
++      }
++
++      if (p->prev->file_priv == 0) {
++              struct mem_block *q = p->prev;
++              q->size += p->size;
++              q->next = p->next;
++              q->next->prev = q;
++              drm_free(p, sizeof(*q), DRM_MEM_BUFS);
++      }
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
++                        uint64_t size)
++{
++      struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
++
++      if (!blocks)
++              return -ENOMEM;
++
++      *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
++      if (!*heap) {
++              drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++
++      blocks->start = start;
++      blocks->size = size;
++      blocks->file_priv = NULL;
++      blocks->next = blocks->prev = *heap;
++
++      memset(*heap, 0, sizeof(**heap));
++      (*heap)->file_priv = (struct drm_file *) - 1;
++      (*heap)->next = (*heap)->prev = blocks;
++      return 0;
++}
++
++/*
++ * Free all blocks associated with the releasing file_priv
++ */
++void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
++{
++      struct mem_block *p;
++
++      if (!heap || !heap->next)
++              return;
++
++      list_for_each(p, heap) {
++              if (p->file_priv == file_priv)
++                      p->file_priv = NULL;
++      }
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      list_for_each(p, heap) {
++              while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
++                     (p->next!=heap)) {
++                      struct mem_block *q = p->next;
++                      p->size += q->size;
++                      p->next = q->next;
++                      p->next->prev = p;
++                      drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++              }
++      }
++}
++
++/*
++ * Cleanup everything
++ */
++void nouveau_mem_takedown(struct mem_block **heap)
++{
++      struct mem_block *p;
++
++      if (!*heap)
++              return;
++
++      for (p = (*heap)->next; p != *heap;) {
++              struct mem_block *q = p;
++              p = p->next;
++              drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++      }
++
++      drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
++      *heap = NULL;
++}
++
++void nouveau_mem_close(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      nouveau_mem_takedown(&dev_priv->agp_heap);
++      nouveau_mem_takedown(&dev_priv->fb_heap);
++      if (dev_priv->pci_heap)
++              nouveau_mem_takedown(&dev_priv->pci_heap);
++}
++
++/*XXX won't work on BSD because of pci_read_config_dword */
++static uint32_t
++nouveau_mem_fb_amount_igp(struct drm_device *dev)
++{
++#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct pci_dev *bridge;
++      uint32_t mem;
++
++      bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
++      if (!bridge) {
++              DRM_ERROR("no bridge device\n");
++              return 0;
++      }
++
++      if (dev_priv->flags&NV_NFORCE) {
++              pci_read_config_dword(bridge, 0x7C, &mem);
++              return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
++      } else
++      if(dev_priv->flags&NV_NFORCE2) {
++              pci_read_config_dword(bridge, 0x84, &mem);
++              return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
++      }
++
++      DRM_ERROR("impossible!\n");
++#else
++      DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
++#endif
++
++      return 0;
++}
++
++/* returns the amount of FB ram in bytes */
++uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      switch(dev_priv->card_type)
++      {
++              case NV_04:
++              case NV_05:
++                      if (NV_READ(NV03_BOOT_0) & 0x00000100) {
++                              return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
++                      } else
++                      switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
++                      {
++                              case NV04_BOOT_0_RAM_AMOUNT_32MB:
++                                      return 32*1024*1024;
++                              case NV04_BOOT_0_RAM_AMOUNT_16MB:
++                                      return 16*1024*1024;
++                              case NV04_BOOT_0_RAM_AMOUNT_8MB:
++                                      return 8*1024*1024;
++                              case NV04_BOOT_0_RAM_AMOUNT_4MB:
++                                      return 4*1024*1024;
++                      }
++                      break;
++              case NV_10:
++              case NV_11:
++              case NV_17:
++              case NV_20:
++              case NV_30:
++              case NV_40:
++              case NV_44:
++              case NV_50:
++              default:
++                      if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
++                              return nouveau_mem_fb_amount_igp(dev);
++                      } else {
++                              uint64_t mem;
++
++                              mem = (NV_READ(NV04_FIFO_DATA) &
++                                     NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
++                                    NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
++                              return mem*1024*1024;
++                      }
++                      break;
++      }
++
++      DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
++      return 0;
++}
++
++static void nouveau_mem_reset_agp(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
++
++      saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
++      saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);
++
++      /* clear busmaster bit */
++      NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
++      /* clear SBA and AGP bits */
++      NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
++
++      /* power cycle pgraph, if enabled */
++      pmc_enable = NV_READ(NV03_PMC_ENABLE);
++      if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
++              NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
++              NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                              NV_PMC_ENABLE_PGRAPH);
++      }
++
++      /* and restore (gives effect of resetting AGP) */
++      NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
++      NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
++}
++
++static int
++nouveau_mem_init_agp(struct drm_device *dev, int ttm)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_agp_info info;
++      struct drm_agp_mode mode;
++      int ret;
++
++      nouveau_mem_reset_agp(dev);
++
++      ret = drm_agp_acquire(dev);
++      if (ret) {
++              DRM_ERROR("Unable to acquire AGP: %d\n", ret);
++              return ret;
++      }
++
++      ret = drm_agp_info(dev, &info);
++      if (ret) {
++              DRM_ERROR("Unable to get AGP info: %d\n", ret);
++              return ret;
++      }
++
++      /* see agp.h for the AGPSTAT_* modes available */
++      mode.mode = info.mode;
++      ret = drm_agp_enable(dev, mode);
++      if (ret) {
++              DRM_ERROR("Unable to enable AGP: %d\n", ret);
++              return ret;
++      }
++
++      if (!ttm) {
++              struct drm_agp_buffer agp_req;
++              struct drm_agp_binding bind_req;
++
++              agp_req.size = info.aperture_size;
++              agp_req.type = 0;
++              ret = drm_agp_alloc(dev, &agp_req);
++              if (ret) {
++                      DRM_ERROR("Unable to alloc AGP: %d\n", ret);
++                              return ret;
++              }
++
++              bind_req.handle = agp_req.handle;
++              bind_req.offset = 0;
++              ret = drm_agp_bind(dev, &bind_req);
++              if (ret) {
++                      DRM_ERROR("Unable to bind AGP: %d\n", ret);
++                      return ret;
++              }
++      }
++
++      dev_priv->gart_info.type        = NOUVEAU_GART_AGP;
++      dev_priv->gart_info.aper_base   = info.aperture_base;
++      dev_priv->gart_info.aper_size   = info.aperture_size;
++      return 0;
++}
++
++#define HACK_OLD_MM
++int
++nouveau_mem_init_ttm(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t vram_size, bar1_size;
++      int ret;
++
++      dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
++      dev_priv->fb_phys = drm_get_resource_start(dev,1);
++      dev_priv->gart_info.type = NOUVEAU_GART_NONE;
++
++      drm_bo_driver_init(dev);
++
++      /* non-mappable vram */
++      dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
++      dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
++      vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
++      bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
++      if (bar1_size < vram_size) {
++              if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
++                                        bar1_size, vram_size - bar1_size, 1))) {
++                      DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
++                      return ret;
++              }
++              vram_size = bar1_size;
++      }
++
++      /* mappable vram */
++#ifdef HACK_OLD_MM
++      vram_size /= 4;
++#endif
++      if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
++              DRM_ERROR("Failed VRAM mm init: %d\n", ret);
++              return ret;
++      }
++
++      /* GART */
++#if !defined(__powerpc__) && !defined(__ia64__)
++      if (drm_device_is_agp(dev) && dev->agp) {
++              if ((ret = nouveau_mem_init_agp(dev, 1)))
++                      DRM_ERROR("Error initialising AGP: %d\n", ret);
++      }
++#endif
++
++      if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
++              if ((ret = nouveau_sgdma_init(dev)))
++                      DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
++      }
++
++      if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
++                                dev_priv->gart_info.aper_size >>
++                                PAGE_SHIFT, 1))) {
++              DRM_ERROR("Failed TT mm init: %d\n", ret);
++              return ret;
++      }
++
++#ifdef HACK_OLD_MM
++      vram_size <<= PAGE_SHIFT;
++      DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
++      if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
++              return -ENOMEM;
++#endif
++
++      return 0;
++}
++
++int nouveau_mem_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t fb_size;
++      int ret = 0;
++
++      dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
++      dev_priv->fb_phys = 0;
++      dev_priv->gart_info.type = NOUVEAU_GART_NONE;
++
++      /* setup a mtrr over the FB */
++      dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
++                                       nouveau_mem_fb_amount(dev),
++                                       DRM_MTRR_WC);
++
++      /* Init FB */
++      dev_priv->fb_phys=drm_get_resource_start(dev,1);
++      fb_size = nouveau_mem_fb_amount(dev);
++      /* On G80, limit VRAM to 512MiB temporarily due to limits in how
++       * we handle VRAM page tables.
++       */
++      if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
++              fb_size = (512 * 1024 * 1024);
++      /* On at least NV40, RAMIN is actually at the end of vram.
++       * We don't want to allocate this... */
++      if (dev_priv->card_type >= NV_40)
++              fb_size -= dev_priv->ramin_rsvd_vram;
++      dev_priv->fb_available_size = fb_size;
++      DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);
++
++      if (fb_size>256*1024*1024) {
++              /* On cards with > 256Mb, you can't map everything.
++               * So we create a second FB heap for that type of memory */
++              if (nouveau_mem_init_heap(&dev_priv->fb_heap,
++                                        0, 256*1024*1024))
++                      return -ENOMEM;
++              if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
++                                        256*1024*1024, fb_size-256*1024*1024))
++                      return -ENOMEM;
++      } else {
++              if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
++                      return -ENOMEM;
++              dev_priv->fb_nomap_heap=NULL;
++      }
++
++#if !defined(__powerpc__) && !defined(__ia64__)
++      /* Init AGP / NV50 PCIEGART */
++      if (drm_device_is_agp(dev) && dev->agp) {
++              if ((ret = nouveau_mem_init_agp(dev, 0)))
++                      DRM_ERROR("Error initialising AGP: %d\n", ret);
++      }
++#endif
++
++      /*Note: this is *not* just NV50 code, but only used on NV50 for now */
++      if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
++          dev_priv->card_type >= NV_50) {
++              ret = nouveau_sgdma_init(dev);
++              if (!ret) {
++                      ret = nouveau_sgdma_nottm_hack_init(dev);
++                      if (ret)
++                              nouveau_sgdma_takedown(dev);
++              }
++
++              if (ret)
++                      DRM_ERROR("Error initialising SG DMA: %d\n", ret);
++      }
++
++      if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
++              if (nouveau_mem_init_heap(&dev_priv->agp_heap,
++                                        0, dev_priv->gart_info.aper_size)) {
++                      if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
++                              nouveau_sgdma_nottm_hack_takedown(dev);
++                              nouveau_sgdma_takedown(dev);
++                      }
++              }
++      }
++
++      /* NV04-NV40 PCIEGART */
++      if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
++              struct drm_scatter_gather sgreq;
++
++              DRM_DEBUG("Allocating sg memory for PCI DMA\n");
++              sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone
++
++              if (drm_sg_alloc(dev, &sgreq)) {
++                      DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
++                                " pages for PCI DMA!",sgreq.size>>20);
++              } else {
++                      if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
++                                                dev->sg->pages * PAGE_SIZE)) {
++                              DRM_ERROR("Unable to initialize pci_heap!");
++                      }
++              }
++      }
++
++      /* G8x: Allocate shared page table to map real VRAM pages into */
++      if (dev_priv->card_type >= NV_50) {
++              unsigned size = ((512 * 1024 * 1024) / 65536) * 8;
++
++              ret = nouveau_gpuobj_new(dev, NULL, size, 0,
++                                       NVOBJ_FLAG_ZERO_ALLOC |
++                                       NVOBJ_FLAG_ALLOW_NO_REFS,
++                                       &dev_priv->vm_vram_pt);
++              if (ret) {
++                      DRM_ERROR("Error creating VRAM page table: %d\n", ret);
++                      return ret;
++              }
++      }
++
++
++      return 0;
++}
++
++struct mem_block *
++nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size,
++                int flags, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct mem_block *block;
++      int type, tail = !(flags & NOUVEAU_MEM_USER);
++
++      /*
++       * Make things easier on ourselves: all allocations are page-aligned.
++       * We need that to map allocated regions into the user space
++       */
++      if (alignment < PAGE_SHIFT)
++              alignment = PAGE_SHIFT;
++
++      /* Align allocation sizes to 64KiB blocks on G8x.  We use a 64KiB
++       * page size in the GPU VM.
++       */
++      if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
++              size = (size + 65535) & ~65535;
++              if (alignment < 16)
++                      alignment = 16;
++      }
++
++      /*
++       * Warn about 0 sized allocations, but let it go through. It'll return 1 page
++       */
++      if (size == 0)
++              DRM_INFO("warning : 0 byte allocation\n");
++
++      /*
++       * Keep alloc size a multiple of the page size to keep drm_addmap() happy
++       */
++      if (size & (~PAGE_MASK))
++              size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
++
++
++#define NOUVEAU_MEM_ALLOC_AGP {\
++              type=NOUVEAU_MEM_AGP;\
++                block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
++                                                alignment, file_priv, tail); \
++                if (block) goto alloc_ok;\
++              }
++
++#define NOUVEAU_MEM_ALLOC_PCI {\
++                type = NOUVEAU_MEM_PCI;\
++                block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
++                                              alignment, file_priv, tail); \
++                if ( block ) goto alloc_ok;\
++              }
++
++#define NOUVEAU_MEM_ALLOC_FB {\
++                type=NOUVEAU_MEM_FB;\
++                if (!(flags&NOUVEAU_MEM_MAPPED)) {\
++                        block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
++                                                        size, alignment, \
++                                                      file_priv, tail); \
++                        if (block) goto alloc_ok;\
++                }\
++                block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
++                                                alignment, file_priv, tail);\
++                if (block) goto alloc_ok;\
++              }
++
++
++      if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
++      if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
++      if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
++      if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
++      if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
++      if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI
++
++
++      return NULL;
++
++alloc_ok:
++      block->flags=type;
++
++      /* On G8x, map memory into VM */
++      if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
++          !(flags & NOUVEAU_MEM_NOVM)) {
++              struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
++              unsigned offset = block->start;
++              unsigned count = block->size / 65536;
++              unsigned tile = 0;
++
++              if (!pt) {
++                      DRM_ERROR("vm alloc without vm pt\n");
++                      nouveau_mem_free_block(block);
++                      return NULL;
++              }
++
++              /* The tiling stuff is *not* what NVIDIA does - but both the
++               * 2D and 3D engines seem happy with this simpler method.
++               * Should look into why NVIDIA do what they do at some point.
++               */
++              if (flags & NOUVEAU_MEM_TILE) {
++                      if (flags & NOUVEAU_MEM_TILE_ZETA)
++                              tile = 0x00002800;
++                      else
++                              tile = 0x00007000;
++              }
++
++              while (count--) {
++                      unsigned pte = offset / 65536;
++
++                      INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
++                      INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
++                      offset += 65536;
++              }
++      } else {
++              block->flags |= NOUVEAU_MEM_NOVM;
++      }       
++
++      if (flags&NOUVEAU_MEM_MAPPED)
++      {
++              struct drm_map_list *entry;
++              int ret = 0;
++              block->flags|=NOUVEAU_MEM_MAPPED;
++
++              if (type == NOUVEAU_MEM_AGP) {
++                      if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
++                      ret = drm_addmap(dev, block->start, block->size,
++                                       _DRM_AGP, 0, &block->map);
++                      else
++                      ret = drm_addmap(dev, block->start, block->size,
++                                       _DRM_SCATTER_GATHER, 0, &block->map);
++              }
++              else if (type == NOUVEAU_MEM_FB)
++                      ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
++                                       block->size, _DRM_FRAME_BUFFER,
++                                       0, &block->map);
++              else if (type == NOUVEAU_MEM_PCI)
++                      ret = drm_addmap(dev, block->start, block->size,
++                                       _DRM_SCATTER_GATHER, 0, &block->map);
++
++              if (ret) {
++                      nouveau_mem_free_block(block);
++                      return NULL;
++              }
++
++              entry = drm_find_matching_map(dev, block->map);
++              if (!entry) {
++                      nouveau_mem_free_block(block);
++                      return NULL;
++              }
++              block->map_handle = entry->user_token;
++      }
++
++      DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags);
++      return block;
++}
++
++void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
++
++      if (block->flags&NOUVEAU_MEM_MAPPED)
++              drm_rmmap(dev, block->map);
++
++      /* G8x: Remove pages from vm */
++      if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
++          !(block->flags & NOUVEAU_MEM_NOVM)) {
++              struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
++              unsigned offset = block->start;
++              unsigned count = block->size / 65536;
++
++              if (!pt) {
++                      DRM_ERROR("vm free without vm pt\n");
++                      goto out_free;
++              }
++
++              while (count--) {
++                      unsigned pte = offset / 65536;
++                      INSTANCE_WR(pt, (pte * 2) + 0, 0);
++                      INSTANCE_WR(pt, (pte * 2) + 1, 0);
++                      offset += 65536;
++              }
++      }
++
++out_free:
++      nouveau_mem_free_block(block);
++}
++
++/*
++ * Ioctls
++ */
++
++int
++nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_mem_alloc *alloc = data;
++      struct mem_block *block;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (alloc->flags & NOUVEAU_MEM_INTERNAL)
++              return -EINVAL;
++
++      block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
++                                alloc->flags | NOUVEAU_MEM_USER, file_priv);
++      if (!block)
++              return -ENOMEM;
++      alloc->map_handle=block->map_handle;
++      alloc->offset=block->start;
++      alloc->flags=block->flags;
++
++      if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
++              alloc->offset += 512*1024*1024;
++
++      return 0;
++}
++
++int
++nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_mem_free *memfree = data;
++      struct mem_block *block;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
++              memfree->offset -= 512*1024*1024;
++
++      block=NULL;
++      if (memfree->flags & NOUVEAU_MEM_FB)
++              block = find_block(dev_priv->fb_heap, memfree->offset);
++      else if (memfree->flags & NOUVEAU_MEM_AGP)
++              block = find_block(dev_priv->agp_heap, memfree->offset);
++      else if (memfree->flags & NOUVEAU_MEM_PCI)
++              block = find_block(dev_priv->pci_heap, memfree->offset);
++      if (!block)
++              return -EFAULT;
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      nouveau_mem_free(dev, block);
++      return 0;
++}
++
++int
++nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_mem_tile *memtile = data;
++      struct mem_block *block = NULL;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (dev_priv->card_type < NV_50)
++              return -EINVAL;
++      
++      if (memtile->flags & NOUVEAU_MEM_FB) {
++              memtile->offset -= 512*1024*1024;
++              block = find_block(dev_priv->fb_heap, memtile->offset);
++      }
++
++      if (!block)
++              return -EINVAL;
++
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      {
++              struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
++              unsigned offset = block->start + memtile->delta;
++              unsigned count = memtile->size / 65536;
++              unsigned tile = 0;
++
++              if (memtile->flags & NOUVEAU_MEM_TILE) {
++                      if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
++                              tile = 0x00002800;
++                      else
++                              tile = 0x00007000;
++              }
++
++              while (count--) {
++                      unsigned pte = offset / 65536;
++
++                      INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
++                      INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
++                      offset += 65536;
++              }
++      }
++
++      return 0;
++}
++
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_notifier.c git-nokia/drivers/gpu/drm-tungsten/nouveau_notifier.c
+--- git/drivers/gpu/drm-tungsten/nouveau_notifier.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_notifier.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,165 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++int
++nouveau_notifier_init_channel(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      int flags, ret;
++
++      flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED |
++               NOUVEAU_MEM_FB_ACCEPTABLE);
++
++      chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
++                                               (struct drm_file *)-2);
++      if (!chan->notifier_block)
++              return -ENOMEM;
++      DRM_DEBUG("Allocated notifier block in 0x%08x\n",
++                chan->notifier_block->flags);
++
++      ret = nouveau_mem_init_heap(&chan->notifier_heap,
++                                  0, chan->notifier_block->size);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++void
++nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++
++      if (chan->notifier_block) {
++              nouveau_mem_free(dev, chan->notifier_block);
++              chan->notifier_block = NULL;
++      }
++
++      nouveau_mem_takedown(&chan->notifier_heap);
++}
++
++static void
++nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
++                           struct nouveau_gpuobj *gpuobj)
++{
++      DRM_DEBUG("\n");
++
++      if (gpuobj->priv)
++              nouveau_mem_free_block(gpuobj->priv);
++}
++
++int
++nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
++                     int count, uint32_t *b_offset)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *nobj = NULL;
++      struct mem_block *mem;
++      uint32_t offset;
++      int target, ret;
++
++      if (!chan->notifier_heap) {
++              DRM_ERROR("Channel %d doesn't have a notifier heap!\n",
++                        chan->id);
++              return -EINVAL;
++      }
++
++      mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
++                                    (struct drm_file *)-2, 0);
++      if (!mem) {
++              DRM_ERROR("Channel %d notifier block full\n", chan->id);
++              return -ENOMEM;
++      }
++      mem->flags = NOUVEAU_MEM_NOTIFIER;
++
++      offset = chan->notifier_block->start;
++      if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
++              target = NV_DMA_TARGET_VIDMEM;
++      } else
++      if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
++              if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
++                  dev_priv->card_type < NV_50) {
++                      ret = nouveau_sgdma_get_page(dev, offset, &offset);
++                      if (ret)
++                              return ret;
++                      target = NV_DMA_TARGET_PCI;
++              } else {
++                      target = NV_DMA_TARGET_AGP;
++              }
++      } else
++      if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
++              target = NV_DMA_TARGET_PCI_NONLINEAR;
++      } else {
++              DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
++                        chan->notifier_block->flags);
++              return -EINVAL;
++      }
++      offset += mem->start;
++
++      if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                        offset, mem->size,
++                                        NV_DMA_ACCESS_RW, target, &nobj))) {
++              nouveau_mem_free_block(mem);
++              DRM_ERROR("Error creating notifier ctxdma: %d\n", ret);
++              return ret;
++      }
++      nobj->dtor   = nouveau_notifier_gpuobj_dtor;
++      nobj->priv   = mem;
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) {
++              nouveau_gpuobj_del(dev, &nobj);
++              nouveau_mem_free_block(mem);
++              DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      *b_offset = mem->start;
++      return 0;
++}
++
++int
++nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv)
++{
++      struct drm_nouveau_notifierobj_alloc *na = data;
++      struct nouveau_channel *chan;
++      int ret;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
++
++      ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset);
++      if (ret)
++              return ret;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_object.c git-nokia/drivers/gpu/drm-tungsten/nouveau_object.c
+--- git/drivers/gpu/drm-tungsten/nouveau_object.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_object.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1178 @@
++/*
++ * Copyright (C) 2006 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Ben Skeggs <darktama@iinet.net.au>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++/* NVidia uses context objects to drive drawing operations.
++
++   Context objects can be selected into 8 subchannels in the FIFO,
++   and then used via DMA command buffers.
++
++   A context object is referenced by a user defined handle (CARD32). The HW
++   looks up graphics objects in a hash table in the instance RAM.
++
++   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
++   the handle, the second one a bitfield, that contains the address of the
++   object in instance RAM.
++
++   The format of the second CARD32 seems to be:
++
++   NV4 to NV30:
++
++   15: 0  instance_addr >> 4
++   17:16  engine (here uses 1 = graphics)
++   28:24  channel id (here uses 0)
++   31   valid (use 1)
++
++   NV40:
++
++   15: 0  instance_addr >> 4   (maybe 19-0)
++   21:20  engine (here uses 1 = graphics)
++   I'm unsure about the other bits, but using 0 seems to work.
++
++   The key into the hash table depends on the object handle and channel id and
++   is given as:
++*/
++static uint32_t
++nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      uint32_t hash = 0;
++      int i;
++
++      DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle);
++
++      for (i=32;i>0;i-=dev_priv->ramht_bits) {
++              hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
++              handle >>= dev_priv->ramht_bits;
++      }
++      if (dev_priv->card_type < NV_50)
++              hash ^= channel << (dev_priv->ramht_bits - 4);
++      hash <<= 3;
++
++      DRM_DEBUG("hash=0x%08x\n", hash);
++      return hash;
++}
++
++static int
++nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
++                        uint32_t offset)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4);
++
++      if (dev_priv->card_type < NV_40)
++              return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
++      return (ctx != 0);
++}
++
++static int
++nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
++      struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
++      struct nouveau_gpuobj *gpuobj = ref->gpuobj;
++      uint32_t ctx, co, ho;
++
++      if (!ramht) {
++              DRM_ERROR("No hash table!\n");
++              return -EINVAL;
++      }
++
++      if (dev_priv->card_type < NV_40) {
++              ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
++                    (ref->channel   << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++                    (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
++      } else
++      if (dev_priv->card_type < NV_50) {
++              ctx = (ref->instance >> 4) |
++                    (ref->channel   << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++                    (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
++      } else {
++              ctx = (ref->instance  >> 4) |
++                    (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
++      }
++
++      co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
++      do {
++              if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
++                      DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++                                ref->channel, co, ref->handle, ctx);
++                      INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
++                      INSTANCE_WR(ramht, (co + 4)/4, ctx);
++
++                      list_add_tail(&ref->list, &chan->ramht_refs);
++                      return 0;
++              }
++              DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
++                        ref->channel, co, INSTANCE_RD(ramht, co/4));
++
++              co += 8;
++              if (co >= dev_priv->ramht_size) {
++                      DRM_INFO("no space left after collision\n");
++                      co = 0;
++                      /* exit as it seems to cause crash with nouveau_demo and
++                       * 0xdead0001 object */
++                      break;
++              }
++      } while (co != ho);
++
++      DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
++      return -ENOMEM;
++}
++
++static void
++nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
++      struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
++      uint32_t co, ho;
++
++      if (!ramht) {
++              DRM_ERROR("No hash table!\n");
++              return;
++      }
++
++      co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
++      do {
++              if (nouveau_ramht_entry_valid(dev, ramht, co) &&
++                  (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
++                      DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++                                ref->channel, co, ref->handle,
++                                INSTANCE_RD(ramht, (co + 4)));
++                      INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
++                      INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
++
++                      list_del(&ref->list);
++                      return;
++              }
++
++              co += 8;
++              if (co >= dev_priv->ramht_size)
++                      co = 0;
++      } while (co != ho);
++
++      DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
++                ref->channel, ref->handle);
++}
++
++int
++nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
++                 int size, int align, uint32_t flags,
++                 struct nouveau_gpuobj **gpuobj_ret)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_gpuobj *gpuobj;
++      struct mem_block *pramin = NULL;
++      int ret;
++
++      DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
++                chan ? chan->id : -1, size, align, flags);
++
++      if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
++              return -EINVAL;
++
++      gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
++      if (!gpuobj)
++              return -ENOMEM;
++      DRM_DEBUG("gpuobj %p\n", gpuobj);
++      gpuobj->flags = flags;
++      gpuobj->im_channel = chan ? chan->id : -1;
++
++      list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++      /* Choose between global instmem heap, and per-channel private
++       * instmem heap.  On <NV50 allow requests for private instmem
++       * to be satisfied from global heap if no per-channel area
++       * available.
++       */
++      if (chan) {
++              if (chan->ramin_heap) {
++                      DRM_DEBUG("private heap\n");
++                      pramin = chan->ramin_heap;
++              } else
++              if (dev_priv->card_type < NV_50) {
++                      DRM_DEBUG("global heap fallback\n");
++                      pramin = dev_priv->ramin_heap;
++              }
++      } else {
++              DRM_DEBUG("global heap\n");
++              pramin = dev_priv->ramin_heap;
++      }
++
++      if (!pramin) {
++              DRM_ERROR("No PRAMIN heap!\n");
++              return -EINVAL;
++      }
++
++      if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return ret;
++      }
++
++      /* Allocate a chunk of the PRAMIN aperture */
++      gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
++                                                  drm_order(align),
++                                                  (struct drm_file *)-2, 0);
++      if (!gpuobj->im_pramin) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return -ENOMEM;
++      }
++      gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
++
++      if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return ret;
++      }
++
++      if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++              int i;
++
++              for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++                      INSTANCE_WR(gpuobj, i/4, 0);
++      }
++
++      *gpuobj_ret = gpuobj;
++      return 0;
++}
++
++int
++nouveau_gpuobj_early_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      INIT_LIST_HEAD(&dev_priv->gpuobj_list);
++
++      return 0;
++}
++
++int
++nouveau_gpuobj_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      if (dev_priv->card_type < NV_50) {
++              if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
++                                                 ~0, dev_priv->ramht_size,
++                                                 NVOBJ_FLAG_ZERO_ALLOC |
++                                                 NVOBJ_FLAG_ALLOW_NO_REFS,
++                                                 &dev_priv->ramht, NULL)))
++                      return ret;
++      }
++
++      return 0;
++}
++
++void
++nouveau_gpuobj_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      nouveau_gpuobj_del(dev, &dev_priv->ramht);
++}
++
++void
++nouveau_gpuobj_late_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      struct list_head *entry, *tmp;
++
++      DRM_DEBUG("\n");
++
++      list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
++              gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
++
++              DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
++                        gpuobj, gpuobj->refcount);
++              gpuobj->refcount = 0;
++              nouveau_gpuobj_del(dev, &gpuobj);
++      }
++}
++
++int
++nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_gpuobj *gpuobj;
++
++      DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
++
++      if (!dev_priv || !pgpuobj || !(*pgpuobj))
++              return -EINVAL;
++      gpuobj = *pgpuobj;
++
++      if (gpuobj->refcount != 0) {
++              DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
++              return -EINVAL;
++      }
++
++      if (gpuobj->dtor)
++              gpuobj->dtor(dev, gpuobj);
++
++      if (gpuobj->im_backing) {
++              if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++                      drm_free(gpuobj->im_backing,
++                               sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
++              else
++                      engine->instmem.clear(dev, gpuobj);
++      }
++
++      if (gpuobj->im_pramin) {
++              if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++                      drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
++                               DRM_MEM_DRIVER);
++              else
++                      nouveau_mem_free_block(gpuobj->im_pramin);
++      }
++
++      list_del(&gpuobj->list);
++
++      *pgpuobj = NULL;
++      drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
++      return 0;
++}
++
++static int
++nouveau_gpuobj_instance_get(struct drm_device *dev,
++                          struct nouveau_channel *chan,
++                          struct nouveau_gpuobj *gpuobj, uint32_t *inst)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *cpramin;
++
++      /* <NV50 use PRAMIN address everywhere */
++      if (dev_priv->card_type < NV_50) {
++              *inst = gpuobj->im_pramin->start;
++              return 0;
++      }
++
++      if (chan && gpuobj->im_channel != chan->id) {
++              DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
++                        gpuobj->im_channel, chan->id);
++              return -EINVAL;
++      }
++
++      /* NV50 channel-local instance */
++      if (chan > 0) {
++              cpramin = chan->ramin->gpuobj;
++              *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
++              return 0;
++      }
++
++      /* NV50 global (VRAM) instance */
++      if (gpuobj->im_channel < 0) {
++              /* ...from global heap */
++              if (!gpuobj->im_backing) {
++                      DRM_ERROR("AII, no VRAM backing gpuobj\n");
++                      return -EINVAL;
++              }
++              *inst = gpuobj->im_backing->start;
++              return 0;
++      } else {
++              /* ...from local heap */
++              cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
++              *inst = cpramin->im_backing->start +
++                      (gpuobj->im_pramin->start - cpramin->im_pramin->start);
++              return 0;
++      }
++
++      return -EINVAL;
++}
++
++int
++nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
++                     uint32_t handle, struct nouveau_gpuobj *gpuobj,
++                     struct nouveau_gpuobj_ref **ref_ret)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj_ref *ref;
++      uint32_t instance;
++      int ret;
++
++      DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
++                chan ? chan->id : -1, handle, gpuobj);
++
++      if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
++              return -EINVAL;
++
++      if (!chan && !ref_ret)
++              return -EINVAL;
++
++      ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
++      if (ret)
++              return ret;
++
++      ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
++      if (!ref)
++              return -ENOMEM;
++      ref->gpuobj   = gpuobj;
++      ref->channel  = chan ? chan->id : -1;
++      ref->instance = instance;
++
++      if (!ref_ret) {
++              ref->handle = handle;
++
++              ret = nouveau_ramht_insert(dev, ref);
++              if (ret) {
++                      drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
++                      return ret;
++              }
++      } else {
++              ref->handle = ~0;
++              *ref_ret = ref;
++      }
++
++      ref->gpuobj->refcount++;
++      return 0;
++}
++
++int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
++{
++      struct nouveau_gpuobj_ref *ref;
++
++      DRM_DEBUG("ref %p\n", pref ? *pref : NULL);
++
++      if (!dev || !pref || *pref == NULL)
++              return -EINVAL;
++      ref = *pref;
++
++      if (ref->handle != ~0)
++              nouveau_ramht_remove(dev, ref);
++
++      if (ref->gpuobj) {
++              ref->gpuobj->refcount--;
++
++              if (ref->gpuobj->refcount == 0) {
++                      if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
++                              nouveau_gpuobj_del(dev, &ref->gpuobj);
++              }
++      }
++
++      *pref = NULL;
++      drm_free(ref, sizeof(ref), DRM_MEM_DRIVER);
++      return 0;
++}
++
++int
++nouveau_gpuobj_new_ref(struct drm_device *dev,
++                     struct nouveau_channel *oc, struct nouveau_channel *rc,
++                     uint32_t handle, int size, int align, uint32_t flags,
++                     struct nouveau_gpuobj_ref **ref)
++{
++      struct nouveau_gpuobj *gpuobj = NULL;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
++              return ret;
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return ret;
++      }
++
++      return 0;
++}
++
++int
++nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
++                      struct nouveau_gpuobj_ref **ref_ret)
++{
++      struct nouveau_gpuobj_ref *ref;
++      struct list_head *entry, *tmp;
++
++      list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++              ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++              if (ref->handle == handle) {
++                      if (ref_ret)
++                              *ref_ret = ref;
++                      return 0;
++              }
++      }
++
++      return -EINVAL;
++}
++
++int
++nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
++                      uint32_t b_offset, uint32_t size,
++                      uint32_t flags, struct nouveau_gpuobj **pgpuobj,
++                      struct nouveau_gpuobj_ref **pref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      int i;
++
++      DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
++                p_offset, b_offset, size, flags);
++
++      gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
++      if (!gpuobj)
++              return -ENOMEM;
++      DRM_DEBUG("gpuobj %p\n", gpuobj);
++      gpuobj->im_channel = -1;
++      gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;
++
++      list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++      if (p_offset != ~0) {
++              gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
++                                             DRM_MEM_DRIVER);
++              if (!gpuobj->im_pramin) {
++                      nouveau_gpuobj_del(dev, &gpuobj);
++                      return -ENOMEM;
++              }
++              gpuobj->im_pramin->start = p_offset;
++              gpuobj->im_pramin->size  = size;
++      }
++
++      if (b_offset != ~0) {
++              gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
++                                             DRM_MEM_DRIVER);
++              if (!gpuobj->im_backing) {
++                      nouveau_gpuobj_del(dev, &gpuobj);
++                      return -ENOMEM;
++              }
++              gpuobj->im_backing->start = b_offset;
++              gpuobj->im_backing->size  = size;
++      }
++
++      if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++              for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++                      INSTANCE_WR(gpuobj, i/4, 0);
++      }
++
++      if (pref) {
++              if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) {
++                      nouveau_gpuobj_del(dev, &gpuobj);
++                      return i;
++              }
++      }
++
++      if (pgpuobj)
++              *pgpuobj = gpuobj;
++      return 0;
++}
++
++
++static int
++nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /*XXX: dodgy hack for now */
++      if (dev_priv->card_type >= NV_50)
++              return 24;
++      if (dev_priv->card_type >= NV_40)
++              return 32;
++      return 16;
++}
++
++/*
++   DMA objects are used to reference a piece of memory in the
++   framebuffer, PCI or AGP address space. Each object is 16 bytes big
++   and looks as follows:
++
++   entry[0]
++   11:0  class (seems like I can always use 0 here)
++   12    page table present?
++   13    page entry linear?
++   15:14 access: 0 rw, 1 ro, 2 wo
++   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
++   31:20 dma adjust (bits 0-11 of the address)
++   entry[1]
++   dma limit (size of transfer)
++   entry[X]
++   1     0 readonly, 1 readwrite
++   31:12 dma frame address of the page (bits 12-31 of the address)
++   entry[N]
++   page table terminator, same value as the first pte, as does nvidia
++   rivatv uses 0xffffffff
++
++   Non linear page tables need a list of frame addresses afterwards,
++   the rivatv project has some info on this.
++
++   The method below creates a DMA object in instance RAM and returns a handle
++   to it that can be used to set up context objects.
++*/
++int
++nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
++                     uint64_t offset, uint64_t size, int access,
++                     int target, struct nouveau_gpuobj **gpuobj)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++      uint32_t is_scatter_gather = 0;
++
++      /* Total number of pages covered by the request.
++       */
++      const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
++
++
++      DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
++                chan->id, class, offset, size);
++      DRM_DEBUG("access=%d target=%d\n", access, target);
++
++      switch (target) {
++        case NV_DMA_TARGET_AGP:
++                 offset += dev_priv->gart_info.aper_base;
++                 break;
++        case NV_DMA_TARGET_PCI_NONLINEAR:
++                /*assume the "offset" is a virtual memory address*/
++                is_scatter_gather = 1;
++                /*put back the right value*/
++                target = NV_DMA_TARGET_PCI;
++                break;
++        default:
++                break;
++        }
++
++      ret = nouveau_gpuobj_new(dev, chan,
++                               is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),
++                               16,
++                               NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
++                               gpuobj);
++      if (ret) {
++              DRM_ERROR("Error creating gpuobj: %d\n", ret);
++              return ret;
++      }
++
++      if (dev_priv->card_type < NV_50) {
++              uint32_t frame, adjust, pte_flags = 0;
++              adjust = offset &  0x00000fff;
++              if (access != NV_DMA_ACCESS_RO)
++                              pte_flags |= (1<<1);
++
++              if ( ! is_scatter_gather )
++                      {
++                      frame  = offset & ~0x00000fff;
++
++                      INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
++                                      (adjust << 20) |
++                                       (access << 14) |
++                                       (target << 16) |
++                                        class));
++                      INSTANCE_WR(*gpuobj, 1, size - 1);
++                      INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
++                      INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
++                      }
++              else
++                      {
++                      /* Initial page entry in the scatter-gather area that
++                       * corresponds to the base offset
++                       */
++                      unsigned int idx = offset / PAGE_SIZE;
++
++                      uint32_t instance_offset;
++                      unsigned int i;
++
++                      if ((idx + page_count) > dev->sg->pages) {
++                              DRM_ERROR("Requested page range exceedes "
++                                        "allocated scatter-gather range!");
++                              return -E2BIG;
++                      }
++
++                      DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size);
++                      INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
++                                (adjust << 20) |
++                                (access << 14) |
++                                (target << 16) |
++                                class));
++                      INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1);
++
++
++                      /*write starting at the third dword*/
++                      instance_offset = 2;
++
++                      /*for each PAGE, get its bus address, fill in the page table entry, and advance*/
++                      for (i = 0; i < page_count; i++) {
++                              if (dev->sg->busaddr[idx] == 0) {
++                                      dev->sg->busaddr[idx] =
++                                              pci_map_page(dev->pdev,
++                                                           dev->sg->pagelist[idx],
++                                                           0,
++                                                           PAGE_SIZE,
++                                                           DMA_BIDIRECTIONAL);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++                                      /* Not 100% sure this is the right kdev in all cases. */
++                                      if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) {
++#else
++                                      if (dma_mapping_error(dev->sg->busaddr[idx])) {
++#endif
++                                              return -ENOMEM;
++                                      }
++                              }
++
++                              frame = (uint32_t) dev->sg->busaddr[idx];
++                              INSTANCE_WR(*gpuobj, instance_offset,
++                                          frame | pte_flags);
++
++                              idx++;
++                              instance_offset ++;
++                      }
++                      }
++      } else {
++              uint32_t flags0, flags5;
++
++              if (target == NV_DMA_TARGET_VIDMEM) {
++                      flags0 = 0x00190000;
++                      flags5 = 0x00010000;
++              } else {
++                      flags0 = 0x7fc00000;
++                      flags5 = 0x00080000;
++              }
++
++              INSTANCE_WR(*gpuobj, 0, flags0 | class);
++              INSTANCE_WR(*gpuobj, 1, offset + size - 1);
++              INSTANCE_WR(*gpuobj, 2, offset);
++              INSTANCE_WR(*gpuobj, 5, flags5);
++      }
++
++      (*gpuobj)->engine = NVOBJ_ENGINE_SW;
++      (*gpuobj)->class  = class;
++      return 0;
++}
++
++int
++nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
++                          uint64_t offset, uint64_t size, int access,
++                          struct nouveau_gpuobj **gpuobj,
++                          uint32_t *o_ret)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
++          (dev_priv->card_type >= NV_50 &&
++           dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           offset, size, access,
++                                           NV_DMA_TARGET_AGP, gpuobj);
++              if (o_ret)
++                      *o_ret = 0;
++      } else
++      if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
++              *gpuobj = dev_priv->gart_info.sg_ctxdma;
++              if (offset & ~0xffffffffULL) {
++                      DRM_ERROR("obj offset exceeds 32-bits\n");
++                      return -EINVAL;
++              }
++              if (o_ret)
++                      *o_ret = (uint32_t)offset;
++              ret = (*gpuobj != NULL) ? 0 : -EINVAL;
++      } else {
++              DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
++              return -EINVAL;
++      }
++
++      return ret;
++}
++
++/* Context objects in the instance RAM have the following structure.
++ * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
++
++   NV4 - NV30:
++
++   entry[0]
++   11:0 class
++   12   chroma key enable
++   13   user clip enable
++   14   swizzle enable
++   17:15 patch config:
++       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
++   18   synchronize enable
++   19   endian: 1 big, 0 little
++   21:20 dither mode
++   23    single step enable
++   24    patch status: 0 invalid, 1 valid
++   25    context_surface 0: 1 valid
++   26    context surface 1: 1 valid
++   27    context pattern: 1 valid
++   28    context rop: 1 valid
++   29,30 context beta, beta4
++   entry[1]
++   7:0   mono format
++   15:8  color format
++   31:16 notify instance address
++   entry[2]
++   15:0  dma 0 instance address
++   31:16 dma 1 instance address
++   entry[3]
++   dma method traps
++
++   NV40:
++   No idea what the exact format is. Here's what can be deduced:
++
++   entry[0]:
++   11:0  class  (maybe uses more bits here?)
++   17    user clip enable
++   21:19 patch config
++   25    patch status valid ?
++   entry[1]:
++   15:0  DMA notifier  (maybe 20:0)
++   entry[2]:
++   15:0  DMA 0 instance (maybe 20:0)
++   24    big endian
++   entry[3]:
++   15:0  DMA 1 instance (maybe 20:0)
++   entry[4]:
++   entry[5]:
++   set to 0?
++*/
++int
++nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
++                    struct nouveau_gpuobj **gpuobj)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class);
++
++      ret = nouveau_gpuobj_new(dev, chan,
++                               nouveau_gpuobj_class_instmem_size(dev, class),
++                               16,
++                               NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
++                               gpuobj);
++      if (ret) {
++              DRM_ERROR("Error creating gpuobj: %d\n", ret);
++              return ret;
++      }
++
++      if (dev_priv->card_type >= NV_50) {
++              INSTANCE_WR(*gpuobj, 0, class);
++              INSTANCE_WR(*gpuobj, 5, 0x00010000);
++      } else {
++      switch (class) {
++      case NV_CLASS_NULL:
++              INSTANCE_WR(*gpuobj, 0, 0x00001030);
++              INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF);
++              break;
++      default:
++              if (dev_priv->card_type >= NV_40) {
++                      INSTANCE_WR(*gpuobj, 0, class);
++#ifdef __BIG_ENDIAN
++                      INSTANCE_WR(*gpuobj, 2, 0x01000000);
++#endif
++              } else {
++#ifdef __BIG_ENDIAN
++                      INSTANCE_WR(*gpuobj, 0, class | 0x00080000);
++#else
++                      INSTANCE_WR(*gpuobj, 0, class);
++#endif
++              }
++      }
++      }
++
++      (*gpuobj)->engine = NVOBJ_ENGINE_GR;
++      (*gpuobj)->class  = class;
++      return 0;
++}
++
++static int
++nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *pramin = NULL;
++      int size, base, ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      /* Base amount for object storage (4KiB enough?) */
++      size = 0x1000;
++      base = 0;
++
++      /* PGRAPH context */
++
++      if (dev_priv->card_type == NV_50) {
++              /* Various fixed table thingos */
++              size += 0x1400; /* mostly unknown stuff */
++              size += 0x4000; /* vm pd */
++              base  = 0x6000;
++              /* RAMHT, not sure about setting size yet, 32KiB to be safe */
++              size += 0x8000;
++              /* RAMFC */
++              size += 0x1000;
++              /* PGRAPH context */
++              size += 0x70000;
++      }
++
++      DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
++                chan->id, size, base);
++      ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
++                                   &chan->ramin);
++      if (ret) {
++              DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
++              return ret;
++      }
++      pramin = chan->ramin->gpuobj;
++
++      ret = nouveau_mem_init_heap(&chan->ramin_heap,
++                                  pramin->im_pramin->start + base, size);
++      if (ret) {
++              DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
++              nouveau_gpuobj_ref_del(dev, &chan->ramin);
++              return ret;
++      }
++
++      return 0;
++}
++
++int
++nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
++                          uint32_t vram_h, uint32_t tt_h)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *vram = NULL, *tt = NULL;
++      int ret, i;
++
++      INIT_LIST_HEAD(&chan->ramht_refs);
++
++      DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
++
++      /* Reserve a block of PRAMIN for the channel
++       *XXX: maybe on <NV50 too at some point
++       */
++      if (0 || dev_priv->card_type == NV_50) {
++              ret = nouveau_gpuobj_channel_init_pramin(chan);
++              if (ret)
++                      return ret;
++      }
++
++      /* NV50 VM
++       *  - Allocate per-channel page-directory
++       *  - Point offset 0-512MiB at shared PCIEGART table
++       *  - Point offset 512-1024MiB at shared VRAM table
++       */
++      if (dev_priv->card_type >= NV_50) {
++              uint32_t vm_offset;
++
++              vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
++              vm_offset += chan->ramin->gpuobj->im_pramin->start;
++              if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
++                                                 0, &chan->vm_pd, NULL)))
++                      return ret;
++              for (i=0; i<0x4000; i+=8) {
++                      INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000);
++                      INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
++              }
++
++              if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
++                                                dev_priv->gart_info.sg_ctxdma,
++                                                &chan->vm_gart_pt)))
++                      return ret;
++              INSTANCE_WR(chan->vm_pd, (0+0)/4,
++                          chan->vm_gart_pt->instance | 0x03);
++              INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);
++
++              if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
++                                                dev_priv->vm_vram_pt,
++                                                &chan->vm_vram_pt)))
++                      return ret;
++              INSTANCE_WR(chan->vm_pd, (8+0)/4,
++                          chan->vm_vram_pt->instance | 0x61);
++              INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000);
++      }
++
++      /* RAMHT */
++      if (dev_priv->card_type < NV_50) {
++              ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
++                                           &chan->ramht);
++              if (ret)
++                      return ret;
++      } else {
++              ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
++                                           0x8000, 16,
++                                           NVOBJ_FLAG_ZERO_ALLOC,
++                                           &chan->ramht);
++              if (ret)
++                      return ret;
++      }
++
++      /* VRAM ctxdma */
++      if (dev_priv->card_type >= NV_50) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           0, 0x100000000ULL,
++                                           NV_DMA_ACCESS_RW,
++                                           NV_DMA_TARGET_AGP, &vram);
++              if (ret) {
++                      DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
++                      return ret;
++              }
++      } else
++      if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                        0, dev_priv->fb_available_size,
++                                        NV_DMA_ACCESS_RW,
++                                        NV_DMA_TARGET_VIDMEM, &vram))) {
++              DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) {
++              DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      /* TT memory ctxdma */
++      if (dev_priv->card_type >= NV_50) {
++              tt = vram;
++      } else
++      if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
++              ret = nouveau_gpuobj_gart_dma_new(chan, 0,
++                                                dev_priv->gart_info.aper_size,
++                                                NV_DMA_ACCESS_RW, &tt, NULL);
++      } else
++      if (dev_priv->pci_heap) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           0, dev->sg->pages * PAGE_SIZE,
++                                           NV_DMA_ACCESS_RW,
++                                           NV_DMA_TARGET_PCI_NONLINEAR, &tt);
++      } else {
++              DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
++              ret = -EINVAL;
++      }
++
++      if (ret) {
++              DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
++      if (ret) {
++              DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      return 0;
++}
++
++void
++nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct list_head *entry, *tmp;
++      struct nouveau_gpuobj_ref *ref;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++              ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++              nouveau_gpuobj_ref_del(dev, &ref);
++      }
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramht);
++
++      nouveau_gpuobj_del(dev, &chan->vm_pd);
++      nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
++      nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt);
++
++      if (chan->ramin_heap)
++              nouveau_mem_takedown(&chan->ramin_heap);
++      if (chan->ramin)
++              nouveau_gpuobj_ref_del(dev, &chan->ramin);
++
++}
++
++int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++      struct nouveau_channel *chan;
++      struct drm_nouveau_grobj_alloc *init = data;
++      struct nouveau_gpuobj *gr = NULL;
++      int ret;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
++
++      //FIXME: check args, only allow trusted objects to be created
++
++      if (init->handle == ~0)
++              return -EINVAL;
++
++      if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
++              return -EEXIST;
++
++      ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
++      if (ret) {
++              DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
++                        ret, init->channel, init->handle);
++              return ret;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
++              DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)",
++                        ret, init->channel, init->handle);
++              nouveau_gpuobj_del(dev, &gr);
++              return ret;
++      }
++
++      return 0;
++}
++
++int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++      struct drm_nouveau_gpuobj_free *objfree = data;
++      struct nouveau_gpuobj_ref *ref;
++      struct nouveau_channel *chan;
++      int ret;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
++
++      if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
++              return ret;
++      nouveau_gpuobj_ref_del(dev, &ref);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_reg.h git-nokia/drivers/gpu/drm-tungsten/nouveau_reg.h
+--- git/drivers/gpu/drm-tungsten/nouveau_reg.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_reg.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,593 @@
++
++
++#define NV03_BOOT_0                                        0x00100000
++#    define NV03_BOOT_0_RAM_AMOUNT                         0x00000003
++#    define NV03_BOOT_0_RAM_AMOUNT_8MB                     0x00000000
++#    define NV03_BOOT_0_RAM_AMOUNT_2MB                     0x00000001
++#    define NV03_BOOT_0_RAM_AMOUNT_4MB                     0x00000002
++#    define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM               0x00000003
++#    define NV04_BOOT_0_RAM_AMOUNT_32MB                    0x00000000
++#    define NV04_BOOT_0_RAM_AMOUNT_4MB                     0x00000001
++#    define NV04_BOOT_0_RAM_AMOUNT_8MB                     0x00000002
++#    define NV04_BOOT_0_RAM_AMOUNT_16MB                    0x00000003
++
++#define NV04_FIFO_DATA                                     0x0010020c
++#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK              0xfff00000
++#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT             20
++
++#define NV_RAMIN                                           0x00700000
++
++#define NV_RAMHT_HANDLE_OFFSET                             0
++#define NV_RAMHT_CONTEXT_OFFSET                            4
++#    define NV_RAMHT_CONTEXT_VALID                         (1<<31)
++#    define NV_RAMHT_CONTEXT_CHANNEL_SHIFT                 24
++#    define NV_RAMHT_CONTEXT_ENGINE_SHIFT                  16
++#        define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE           0
++#        define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS           1
++#    define NV_RAMHT_CONTEXT_INSTANCE_SHIFT                0
++#    define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT               23
++#    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
++#    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
++
++/* DMA object defines */
++#define NV_DMA_ACCESS_RW 0
++#define NV_DMA_ACCESS_RO 1
++#define NV_DMA_ACCESS_WO 2
++#define NV_DMA_TARGET_VIDMEM 0
++#define NV_DMA_TARGET_PCI    2
++#define NV_DMA_TARGET_AGP    3
++/*The following is not a real value used by nvidia cards, it's changed by nouveau_object_dma_create*/
++#define NV_DMA_TARGET_PCI_NONLINEAR   8
++
++/* Some object classes we care about in the drm */
++#define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
++#define NV_CLASS_DMA_TO_MEMORY                             0x00000003
++#define NV_CLASS_NULL                                      0x00000030
++#define NV_CLASS_DMA_IN_MEMORY                             0x0000003D
++
++#define NV03_USER(i)                             (0x00800000+(i*NV03_USER_SIZE))
++#define NV03_USER__SIZE                                                       16
++#define NV10_USER__SIZE                                                       32
++#define NV03_USER_SIZE                                                0x00010000
++#define NV03_USER_DMA_PUT(i)                     (0x00800040+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_PUT__SIZE                                               16
++#define NV10_USER_DMA_PUT__SIZE                                               32
++#define NV03_USER_DMA_GET(i)                     (0x00800044+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_GET__SIZE                                               16
++#define NV10_USER_DMA_GET__SIZE                                               32
++#define NV03_USER_REF_CNT(i)                     (0x00800048+(i*NV03_USER_SIZE))
++#define NV03_USER_REF_CNT__SIZE                                               16
++#define NV10_USER_REF_CNT__SIZE                                               32
++
++#define NV40_USER(i)                             (0x00c00000+(i*NV40_USER_SIZE))
++#define NV40_USER_SIZE                                                0x00001000
++#define NV40_USER_DMA_PUT(i)                     (0x00c00040+(i*NV40_USER_SIZE))
++#define NV40_USER_DMA_PUT__SIZE                                               32
++#define NV40_USER_DMA_GET(i)                     (0x00c00044+(i*NV40_USER_SIZE))
++#define NV40_USER_DMA_GET__SIZE                                               32
++#define NV40_USER_REF_CNT(i)                     (0x00c00048+(i*NV40_USER_SIZE))
++#define NV40_USER_REF_CNT__SIZE                                               32
++
++#define NV50_USER(i)                             (0x00c00000+(i*NV50_USER_SIZE))
++#define NV50_USER_SIZE                                                0x00002000
++#define NV50_USER_DMA_PUT(i)                     (0x00c00040+(i*NV50_USER_SIZE))
++#define NV50_USER_DMA_PUT__SIZE                                              128
++#define NV50_USER_DMA_GET(i)                     (0x00c00044+(i*NV50_USER_SIZE))
++#define NV50_USER_DMA_GET__SIZE                                              128
++/*XXX: I don't think this actually exists.. */
++#define NV50_USER_REF_CNT(i)                     (0x00c00048+(i*NV50_USER_SIZE))
++#define NV50_USER_REF_CNT__SIZE                                              128
++
++#define NV03_FIFO_SIZE                                     0x8000UL
++
++#define NV03_PMC_BOOT_0                                    0x00000000
++#define NV03_PMC_BOOT_1                                    0x00000004
++#define NV03_PMC_INTR_0                                    0x00000100
++#    define NV_PMC_INTR_0_PFIFO_PENDING                       (1<< 8)
++#    define NV_PMC_INTR_0_PGRAPH_PENDING                      (1<<12)
++#    define NV_PMC_INTR_0_NV50_I2C_PENDING                  (1<<21)
++#    define NV_PMC_INTR_0_CRTC0_PENDING                       (1<<24)
++#    define NV_PMC_INTR_0_CRTC1_PENDING                       (1<<25)
++#    define NV_PMC_INTR_0_NV50_DISPLAY_PENDING           (1<<26)
++#    define NV_PMC_INTR_0_CRTCn_PENDING                       (3<<24)
++#define NV03_PMC_INTR_EN_0                                 0x00000140
++#    define NV_PMC_INTR_EN_0_MASTER_ENABLE                    (1<< 0)
++#define NV03_PMC_ENABLE                                    0x00000200
++#    define NV_PMC_ENABLE_PFIFO                               (1<< 8)
++#    define NV_PMC_ENABLE_PGRAPH                              (1<<12)
++/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
++ * the card will hang early on in the X init process.
++ */
++#    define NV_PMC_ENABLE_UNK13                               (1<<13)
++#define NV40_PMC_1700                                      0x00001700
++#define NV40_PMC_1704                                      0x00001704
++#define NV40_PMC_1708                                      0x00001708
++#define NV40_PMC_170C                                      0x0000170C
++
++/* probably PMC ? */
++#define NV50_PUNK_BAR0_PRAMIN                              0x00001700
++#define NV50_PUNK_BAR_CFG_BASE                             0x00001704
++#define NV50_PUNK_BAR_CFG_BASE_VALID                          (1<<30)
++#define NV50_PUNK_BAR1_CTXDMA                              0x00001708
++#define NV50_PUNK_BAR1_CTXDMA_VALID                           (1<<31)
++#define NV50_PUNK_BAR3_CTXDMA                              0x0000170C
++#define NV50_PUNK_BAR3_CTXDMA_VALID                           (1<<31)
++#define NV50_PUNK_UNK1710                                  0x00001710
++
++#define NV04_PBUS_PCI_NV_1                                 0x00001804
++#define NV04_PBUS_PCI_NV_19                                0x0000184C
++
++#define NV04_PTIMER_INTR_0                                 0x00009100
++#define NV04_PTIMER_INTR_EN_0                              0x00009140
++#define NV04_PTIMER_NUMERATOR                              0x00009200
++#define NV04_PTIMER_DENOMINATOR                            0x00009210
++#define NV04_PTIMER_TIME_0                                 0x00009400
++#define NV04_PTIMER_TIME_1                                 0x00009410
++#define NV04_PTIMER_ALARM_0                                0x00009420
++
++#define NV50_I2C_CONTROLLER                           0x0000E054
++
++#define NV04_PFB_CFG0                                      0x00100200
++#define NV04_PFB_CFG1                                      0x00100204
++#define NV40_PFB_020C                                      0x0010020C
++#define NV10_PFB_TILE(i)                                   (0x00100240 + (i*16))
++#define NV10_PFB_TILE__SIZE                                8
++#define NV10_PFB_TLIMIT(i)                                 (0x00100244 + (i*16))
++#define NV10_PFB_TSIZE(i)                                  (0x00100248 + (i*16))
++#define NV10_PFB_TSTATUS(i)                                (0x0010024C + (i*16))
++#define NV10_PFB_CLOSE_PAGE2                               0x0010033C
++#define NV40_PFB_TILE(i)                                   (0x00100600 + (i*16))
++#define NV40_PFB_TILE__SIZE_0                              12
++#define NV40_PFB_TILE__SIZE_1                              15
++#define NV40_PFB_TLIMIT(i)                                 (0x00100604 + (i*16))
++#define NV40_PFB_TSIZE(i)                                  (0x00100608 + (i*16))
++#define NV40_PFB_TSTATUS(i)                                (0x0010060C + (i*16))
++#define NV40_PFB_UNK_800                                      0x00100800
++
++#define NV04_PGRAPH_DEBUG_0                                0x00400080
++#define NV04_PGRAPH_DEBUG_1                                0x00400084
++#define NV04_PGRAPH_DEBUG_2                                0x00400088
++#define NV04_PGRAPH_DEBUG_3                                0x0040008c
++#define NV10_PGRAPH_DEBUG_4                                0x00400090
++#define NV03_PGRAPH_INTR                                   0x00400100
++#define NV03_PGRAPH_NSTATUS                                0x00400104
++#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
++#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
++#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
++#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
++#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
++#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
++#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
++#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
++#define NV03_PGRAPH_NSOURCE                                0x00400108
++#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                  (1<< 0)
++#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                    (1<< 1)
++#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR              (1<< 2)
++#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION               (1<< 3)
++#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                   (1<< 4)
++#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                    (1<< 5)
++#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                  (1<< 6)
++#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION              (1<< 7)
++#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION              (1<< 8)
++#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION              (1<< 9)
++#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
++#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
++#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
++#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
++#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
++#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
++#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
++#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
++#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
++#define NV03_PGRAPH_INTR_EN                                0x00400140
++#define NV40_PGRAPH_INTR_EN                                0x0040013C
++#    define NV_PGRAPH_INTR_NOTIFY                             (1<< 0)
++#    define NV_PGRAPH_INTR_MISSING_HW                         (1<< 4)
++#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
++#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
++#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
++#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
++#define NV10_PGRAPH_CTX_USER                               0x00400148
++#define NV10_PGRAPH_CTX_SWITCH1                            0x0040014C
++#define NV10_PGRAPH_CTX_SWITCH2                            0x00400150
++#define NV10_PGRAPH_CTX_SWITCH3                            0x00400154
++#define NV10_PGRAPH_CTX_SWITCH4                            0x00400158
++#define NV10_PGRAPH_CTX_SWITCH5                            0x0040015C
++#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
++#define NV10_PGRAPH_CTX_CACHE1                             0x00400160
++#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
++#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
++#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
++#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
++#define NV04_PGRAPH_CTX_USER                               0x00400174
++#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
++#define NV10_PGRAPH_CTX_CACHE2                             0x00400180
++#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
++#define NV03_PGRAPH_CTX_USER                               0x00400194
++#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
++#define NV10_PGRAPH_CTX_CACHE3                             0x004001A0
++#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
++#define NV10_PGRAPH_CTX_CACHE4                             0x004001C0
++#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
++#define NV10_PGRAPH_CTX_CACHE5                             0x004001E0
++#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
++#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
++#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
++#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
++#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
++#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
++#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
++#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
++#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
++#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
++#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK                   0x000FFFFF
++#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
++#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
++#define NV03_PGRAPH_X_MISC                                 0x00400500
++#define NV03_PGRAPH_Y_MISC                                 0x00400504
++#define NV04_PGRAPH_VALID1                                 0x00400508
++#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
++#define NV04_PGRAPH_MISC24_0                               0x00400510
++#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
++#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
++#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
++#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
++#define NV03_PGRAPH_CLIPX_0                                0x00400524
++#define NV03_PGRAPH_CLIPX_1                                0x00400528
++#define NV03_PGRAPH_CLIPY_0                                0x0040052C
++#define NV03_PGRAPH_CLIPY_1                                0x00400530
++#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
++#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
++#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
++#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
++#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
++#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
++#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
++#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
++#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
++#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
++#define NV04_PGRAPH_MISC24_1                               0x00400570
++#define NV04_PGRAPH_MISC24_2                               0x00400574
++#define NV04_PGRAPH_VALID2                                 0x00400578
++#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
++#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
++#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
++#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
++#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
++#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
++#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
++#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
++#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
++#define NV04_PGRAPH_FORMAT_0                               0x004005A8
++#define NV04_PGRAPH_FORMAT_1                               0x004005AC
++#define NV04_PGRAPH_FILTER_0                               0x004005B0
++#define NV04_PGRAPH_FILTER_1                               0x004005B4
++#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
++#define NV04_PGRAPH_ROP3                                   0x00400604
++#define NV04_PGRAPH_BETA_AND                               0x00400608
++#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
++#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
++#define NV04_PGRAPH_FORMATS                                0x00400618
++#define NV10_PGRAPH_DEBUG_2                                0x00400620
++#define NV04_PGRAPH_BOFFSET0                               0x00400640
++#define NV04_PGRAPH_BOFFSET1                               0x00400644
++#define NV04_PGRAPH_BOFFSET2                               0x00400648
++#define NV04_PGRAPH_BOFFSET3                               0x0040064C
++#define NV04_PGRAPH_BOFFSET4                               0x00400650
++#define NV04_PGRAPH_BOFFSET5                               0x00400654
++#define NV04_PGRAPH_BBASE0                                 0x00400658
++#define NV04_PGRAPH_BBASE1                                 0x0040065C
++#define NV04_PGRAPH_BBASE2                                 0x00400660
++#define NV04_PGRAPH_BBASE3                                 0x00400664
++#define NV04_PGRAPH_BBASE4                                 0x00400668
++#define NV04_PGRAPH_BBASE5                                 0x0040066C
++#define NV04_PGRAPH_BPITCH0                                0x00400670
++#define NV04_PGRAPH_BPITCH1                                0x00400674
++#define NV04_PGRAPH_BPITCH2                                0x00400678
++#define NV04_PGRAPH_BPITCH3                                0x0040067C
++#define NV04_PGRAPH_BPITCH4                                0x00400680
++#define NV04_PGRAPH_BLIMIT0                                0x00400684
++#define NV04_PGRAPH_BLIMIT1                                0x00400688
++#define NV04_PGRAPH_BLIMIT2                                0x0040068C
++#define NV04_PGRAPH_BLIMIT3                                0x00400690
++#define NV04_PGRAPH_BLIMIT4                                0x00400694
++#define NV04_PGRAPH_BLIMIT5                                0x00400698
++#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
++#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
++#define NV03_PGRAPH_STATUS                                 0x004006B0
++#define NV04_PGRAPH_STATUS                                 0x00400700
++#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
++#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
++#define NV04_PGRAPH_SURFACE                                0x0040070C
++#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
++#define NV04_PGRAPH_STATE                                  0x00400710
++#define NV10_PGRAPH_SURFACE                                0x00400710
++#define NV04_PGRAPH_NOTIFY                                 0x00400714
++#define NV10_PGRAPH_STATE                                  0x00400714
++#define NV10_PGRAPH_NOTIFY                                 0x00400718
++
++#define NV04_PGRAPH_FIFO                                   0x00400720
++
++#define NV04_PGRAPH_BPIXEL                                 0x00400724
++#define NV10_PGRAPH_RDI_INDEX                              0x00400750
++#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
++#define NV10_PGRAPH_RDI_DATA                               0x00400754
++#define NV04_PGRAPH_DMA_PITCH                              0x00400760
++#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
++#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
++#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
++#define NV10_PGRAPH_DMA_PITCH                              0x00400770
++#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
++#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
++#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
++#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
++#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
++#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
++#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
++#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
++#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
++#define NV04_PGRAPH_PATTERN                                0x00400808
++#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
++#define NV04_PGRAPH_CHROMA                                 0x00400814
++#define NV04_PGRAPH_CONTROL0                               0x00400818
++#define NV04_PGRAPH_CONTROL1                               0x0040081C
++#define NV04_PGRAPH_CONTROL2                               0x00400820
++#define NV04_PGRAPH_BLEND                                  0x00400824
++#define NV04_PGRAPH_STORED_FMT                             0x00400830
++#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
++#define NV40_PGRAPH_TILE0(i)                               (0x00400900 + (i*16))
++#define NV40_PGRAPH_TLIMIT0(i)                             (0x00400904 + (i*16))
++#define NV40_PGRAPH_TSIZE0(i)                              (0x00400908 + (i*16))
++#define NV40_PGRAPH_TSTATUS0(i)                            (0x0040090C + (i*16))
++#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
++#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
++#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
++#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
++#define NV04_PGRAPH_U_RAM                                  0x00400D00
++#define NV47_PGRAPH_TILE0(i)                               (0x00400D00 + (i*16))
++#define NV47_PGRAPH_TLIMIT0(i)                             (0x00400D04 + (i*16))
++#define NV47_PGRAPH_TSIZE0(i)                              (0x00400D08 + (i*16))
++#define NV47_PGRAPH_TSTATUS0(i)                            (0x00400D0C + (i*16))
++#define NV04_PGRAPH_V_RAM                                  0x00400D40
++#define NV04_PGRAPH_W_RAM                                  0x00400D80
++#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
++#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
++#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
++#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
++#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
++#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
++#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
++#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
++#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
++#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
++#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
++#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
++#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
++#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
++#define NV10_PGRAPH_XFMODE0                                0x00400F40
++#define NV10_PGRAPH_XFMODE1                                0x00400F44
++#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
++#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
++#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
++#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
++#define NV04_PGRAPH_DMA_START_0                            0x00401000
++#define NV04_PGRAPH_DMA_START_1                            0x00401004
++#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
++#define NV04_PGRAPH_DMA_MISC                               0x0040100C
++#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
++#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
++#define NV04_PGRAPH_DMA_RM                                 0x00401030
++#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
++#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
++#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
++#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
++#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
++#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
++#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
++#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
++#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
++#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
++#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
++#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
++#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
++#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
++#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
++#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
++#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
++#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
++#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
++#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
++#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
++#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
++
++
++/* It's a guess that this works on NV03. Confirmed on NV04, though */
++#define NV04_PFIFO_DELAY_0                                 0x00002040
++#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
++#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
++#define NV03_PFIFO_INTR_0                                  0x00002100
++#define NV03_PFIFO_INTR_EN_0                               0x00002140
++#    define NV_PFIFO_INTR_CACHE_ERROR                         (1<< 0)
++#    define NV_PFIFO_INTR_RUNOUT                              (1<< 4)
++#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                     (1<< 8)
++#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
++#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
++#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
++#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
++#define NV03_PFIFO_RAMHT                                   0x00002210
++#define NV03_PFIFO_RAMFC                                   0x00002214
++#define NV03_PFIFO_RAMRO                                   0x00002218
++#define NV40_PFIFO_RAMFC                                   0x00002220
++#define NV03_PFIFO_CACHES                                  0x00002500
++#define NV04_PFIFO_MODE                                    0x00002504
++#define NV04_PFIFO_DMA                                     0x00002508
++#define NV04_PFIFO_SIZE                                    0x0000250c
++#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
++#define NV50_PFIFO_CTX_TABLE__SIZE                                128
++#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
++#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
++#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
++#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
++#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
++#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
++#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
++#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
++#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
++#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
++#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
++#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
++#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
++#define NV03_PFIFO_CACHE1_PUT                              0x00003210
++#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
++#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
++#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
++#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
++#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
++#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
++#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
++#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
++#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
++#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
++#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
++#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
++#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
++#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
++#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
++#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
++#define NV04_PFIFO_CACHE1_HASH                             0x00003258
++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
++#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
++#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
++#define NV03_PFIFO_CACHE1_GET                              0x00003270
++#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
++#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
++#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
++#define NV40_PFIFO_UNK32E4                                 0x000032E4
++#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
++#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
++#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
++#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
++
++#define NV_CRTC0_INTSTAT                                   0x00600100
++#define NV_CRTC0_INTEN                                     0x00600140
++#define NV_CRTC1_INTSTAT                                   0x00602100
++#define NV_CRTC1_INTEN                                     0x00602140
++#    define NV_CRTC_INTR_VBLANK                                (1<<0)
++
++/* This name is a partial guess. */
++#define NV50_DISPLAY_SUPERVISOR                     0x00610024
++
++/* Fifo commands. These are not regs, neither masks */
++#define NV03_FIFO_CMD_JUMP                                 0x20000000
++#define NV03_FIFO_CMD_JUMP_OFFSET_MASK                     0x1ffffffc
++#define NV03_FIFO_CMD_REWIND                               (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
++
++/* RAMFC offsets */
++#define NV04_RAMFC_DMA_PUT                                       0x00
++#define NV04_RAMFC_DMA_GET                                       0x04
++#define NV04_RAMFC_DMA_INSTANCE                                  0x08
++#define NV04_RAMFC_DMA_STATE                                     0x0C
++#define NV04_RAMFC_DMA_FETCH                                     0x10
++#define NV04_RAMFC_ENGINE                                        0x14
++#define NV04_RAMFC_PULL1_ENGINE                                  0x18
++
++#define NV10_RAMFC_DMA_PUT                                       0x00
++#define NV10_RAMFC_DMA_GET                                       0x04
++#define NV10_RAMFC_REF_CNT                                       0x08
++#define NV10_RAMFC_DMA_INSTANCE                                  0x0C
++#define NV10_RAMFC_DMA_STATE                                     0x10
++#define NV10_RAMFC_DMA_FETCH                                     0x14
++#define NV10_RAMFC_ENGINE                                        0x18
++#define NV10_RAMFC_PULL1_ENGINE                                  0x1C
++#define NV10_RAMFC_ACQUIRE_VALUE                                 0x20
++#define NV10_RAMFC_ACQUIRE_TIMESTAMP                             0x24
++#define NV10_RAMFC_ACQUIRE_TIMEOUT                               0x28
++#define NV10_RAMFC_SEMAPHORE                                     0x2C
++#define NV10_RAMFC_DMA_SUBROUTINE                                0x30
++
++#define NV40_RAMFC_DMA_PUT                                       0x00
++#define NV40_RAMFC_DMA_GET                                       0x04
++#define NV40_RAMFC_REF_CNT                                       0x08
++#define NV40_RAMFC_DMA_INSTANCE                                  0x0C
++#define NV40_RAMFC_DMA_DCOUNT /* ? */                            0x10
++#define NV40_RAMFC_DMA_STATE                                     0x14
++#define NV40_RAMFC_DMA_FETCH                                     0x18
++#define NV40_RAMFC_ENGINE                                        0x1C
++#define NV40_RAMFC_PULL1_ENGINE                                  0x20
++#define NV40_RAMFC_ACQUIRE_VALUE                                 0x24
++#define NV40_RAMFC_ACQUIRE_TIMESTAMP                             0x28
++#define NV40_RAMFC_ACQUIRE_TIMEOUT                               0x2C
++#define NV40_RAMFC_SEMAPHORE                                     0x30
++#define NV40_RAMFC_DMA_SUBROUTINE                                0x34
++#define NV40_RAMFC_GRCTX_INSTANCE /* guess */                    0x38
++#define NV40_RAMFC_DMA_TIMESLICE                                 0x3C
++#define NV40_RAMFC_UNK_40                                        0x40
++#define NV40_RAMFC_UNK_44                                        0x44
++#define NV40_RAMFC_UNK_48                                        0x48
++#define NV40_RAMFC_UNK_4C                                        0x4C
++#define NV40_RAMFC_UNK_50                                        0x50
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_sgdma.c git-nokia/drivers/gpu/drm-tungsten/nouveau_sgdma.c
+--- git/drivers/gpu/drm-tungsten/nouveau_sgdma.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_sgdma.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,349 @@
++#include "drmP.h"
++#include "nouveau_drv.h"
++
++#define NV_CTXDMA_PAGE_SHIFT 12
++#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
++#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
++
++struct nouveau_sgdma_be {
++      struct drm_ttm_backend backend;
++      struct drm_device *dev;
++
++      int         pages;
++      int         pages_populated;
++      dma_addr_t *pagelist;
++      int         is_bound;
++
++      unsigned int pte_start;
++};
++
++static int
++nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
++{
++      return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
++}
++
++static int
++nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
++                     struct page **pages, struct page *dummy_read_page)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      int p, d, o;
++
++      DRM_DEBUG("num_pages = %ld\n", num_pages);
++
++      if (nvbe->pagelist)
++              return -EINVAL;
++      nvbe->pages    = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
++      nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
++                                 DRM_MEM_PAGES);
++
++      nvbe->pages_populated = d = 0;
++      for (p = 0; p < num_pages; p++) {
++              for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
++                      struct page *page = pages[p];
++                      if (!page)
++                              page = dummy_read_page;
++                      nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
++                                                       page, o,
++                                                       NV_CTXDMA_PAGE_SIZE,
++                                                       PCI_DMA_BIDIRECTIONAL);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++                      if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) {
++#else
++                      if (pci_dma_mapping_error(nvbe->pagelist[d])) {
++#endif
++                              be->func->clear(be);
++                              DRM_ERROR("pci_map_page failed\n");
++                              return -EINVAL;
++                      }
++                      nvbe->pages_populated = ++d;
++              }
++      }
++
++      return 0;
++}
++
++static void
++nouveau_sgdma_clear(struct drm_ttm_backend *be)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      int d;
++
++      DRM_DEBUG("\n");
++
++      if (nvbe && nvbe->pagelist) {
++              if (nvbe->is_bound)
++                      be->func->unbind(be);
++
++              for (d = 0; d < nvbe->pages_populated; d++) {
++                      pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
++                                     NV_CTXDMA_PAGE_SIZE,
++                                     PCI_DMA_BIDIRECTIONAL);
++              }
++              drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
++                       DRM_MEM_PAGES);
++      }
++}
++
++static int
++nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++      uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
++      uint32_t i;
++
++      DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
++                offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1);
++
++      if (offset & NV_CTXDMA_PAGE_MASK)
++              return -EINVAL;
++      nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
++      if (dev_priv->card_type < NV_50)
++              nvbe->pte_start += 2; /* skip ctxdma header */
++
++      for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
++              uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];
++
++              if (pteval & NV_CTXDMA_PAGE_MASK) {
++                      DRM_ERROR("Bad pteval 0x%llx\n", pteval);
++                      return -EINVAL;
++              }
++
++              if (dev_priv->card_type < NV_50) {
++                      INSTANCE_WR(gpuobj, i, pteval | 3);
++              } else {
++                      INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
++                      INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
++              }
++      }
++
++      nvbe->is_bound  = 1;
++      return 0;
++}
++
++static int
++nouveau_sgdma_unbind(struct drm_ttm_backend *be)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      if (nvbe->is_bound) {
++              struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++              unsigned int pte;
++
++              pte = nvbe->pte_start;
++              while (pte < (nvbe->pte_start + nvbe->pages)) {
++                      uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
++
++                      if (dev_priv->card_type < NV_50) {
++                              INSTANCE_WR(gpuobj, pte, pteval | 3);
++                      } else {
++                              INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
++                              INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
++                      }
++
++                      pte++;
++              }
++
++              nvbe->is_bound = 0;
++      }
++
++      return 0;
++}
++
++static void
++nouveau_sgdma_destroy(struct drm_ttm_backend *be)
++{
++      DRM_DEBUG("\n");
++      if (be) {
++              struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++              if (nvbe) {
++                      if (nvbe->pagelist)
++                              be->func->clear(be);
++                      drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
++              }
++      }
++}
++
++static struct drm_ttm_backend_func nouveau_sgdma_backend = {
++      .needs_ub_cache_adjust  = nouveau_sgdma_needs_ub_cache_adjust,
++      .populate               = nouveau_sgdma_populate,
++      .clear                  = nouveau_sgdma_clear,
++      .bind                   = nouveau_sgdma_bind,
++      .unbind                 = nouveau_sgdma_unbind,
++      .destroy                = nouveau_sgdma_destroy
++};
++
++struct drm_ttm_backend *
++nouveau_sgdma_init_ttm(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_sgdma_be *nvbe;
++
++      if (!dev_priv->gart_info.sg_ctxdma)
++              return NULL;
++
++      nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
++      if (!nvbe)
++              return NULL;
++
++      nvbe->dev = dev;
++
++      nvbe->backend.func      = &nouveau_sgdma_backend;
++
++      return &nvbe->backend;
++}
++
++int
++nouveau_sgdma_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      uint32_t aper_size, obj_size;
++      int i, ret;
++
++      if (dev_priv->card_type < NV_50) {
++              aper_size = (64 * 1024 * 1024);
++              obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
++              obj_size += 8; /* ctxdma header */
++      } else {
++              /* 1 entire VM page table */
++              aper_size = (512 * 1024 * 1024);
++              obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
++      }
++
++      if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
++                                    NVOBJ_FLAG_ALLOW_NO_REFS |
++                                    NVOBJ_FLAG_ZERO_ALLOC |
++                                    NVOBJ_FLAG_ZERO_FREE, &gpuobj)))  {
++              DRM_ERROR("Error creating sgdma object: %d\n", ret);
++              return ret;
++      }
++
++      dev_priv->gart_info.sg_dummy_page =
++              alloc_page(GFP_KERNEL|__GFP_DMA32);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++      set_page_locked(dev_priv->gart_info.sg_dummy_page);
++#else
++      SetPageLocked(dev_priv->gart_info.sg_dummy_page);
++#endif
++      dev_priv->gart_info.sg_dummy_bus =
++              pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
++                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++
++      if (dev_priv->card_type < NV_50) {
++              /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
++               * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
++               * on those cards? */
++              INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
++                                     (1 << 12) /* PT present */ |
++                                     (0 << 13) /* PT *not* linear */ |
++                                     (NV_DMA_ACCESS_RW  << 14) |
++                                     (NV_DMA_TARGET_PCI << 16));
++              INSTANCE_WR(gpuobj, 1, aper_size - 1);
++              for (i=2; i<2+(aper_size>>12); i++) {
++                      INSTANCE_WR(gpuobj, i,
++                                  dev_priv->gart_info.sg_dummy_bus | 3);
++              }
++      } else {
++              for (i=0; i<obj_size; i+=8) {
++                      INSTANCE_WR(gpuobj, (i+0)/4,
++                                  dev_priv->gart_info.sg_dummy_bus | 0x21);
++                      INSTANCE_WR(gpuobj, (i+4)/4, 0);
++              }
++      }
++
++      dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
++      dev_priv->gart_info.aper_base = 0;
++      dev_priv->gart_info.aper_size = aper_size;
++      dev_priv->gart_info.sg_ctxdma = gpuobj;
++      return 0;
++}
++
++void
++nouveau_sgdma_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (dev_priv->gart_info.sg_dummy_page) {
++              pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
++                             NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++              unlock_page(dev_priv->gart_info.sg_dummy_page);
++              __free_page(dev_priv->gart_info.sg_dummy_page);
++              dev_priv->gart_info.sg_dummy_page = NULL;
++              dev_priv->gart_info.sg_dummy_bus = 0;
++      }
++
++      nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
++}
++
++int
++nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_ttm_backend *be;
++      struct drm_scatter_gather sgreq;
++      struct drm_mm_node mm_node;
++      struct drm_bo_mem_reg mem;
++      int ret;
++
++      dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
++      if (!dev_priv->gart_info.sg_be)
++              return -ENOMEM;
++      be = dev_priv->gart_info.sg_be;
++
++      /* Hack the aperture size down to the amount of system memory
++       * we're going to bind into it.
++       */
++      if (dev_priv->gart_info.aper_size > 32*1024*1024)
++              dev_priv->gart_info.aper_size = 32*1024*1024;
++
++      sgreq.size = dev_priv->gart_info.aper_size;
++      if ((ret = drm_sg_alloc(dev, &sgreq))) {
++              DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
++              return ret;
++      }
++      dev_priv->gart_info.sg_handle = sgreq.handle;
++
++      if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
++              DRM_ERROR("failed populate: %d\n", ret);
++              return ret;
++      }
++
++      mm_node.start = 0;
++      mem.mm_node = &mm_node;
++
++      if ((ret = be->func->bind(be, &mem))) {
++              DRM_ERROR("failed bind: %d\n", ret);
++              return ret;
++      }
++
++      return 0;
++}
++
++void
++nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
++{
++}
++
++int
++nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++      int pte;
++
++      pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
++      if (dev_priv->card_type < NV_50) {
++              *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
++              return 0;
++      }
++
++      DRM_ERROR("Unimplemented on NV50\n");
++      return -EINVAL;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_state.c git-nokia/drivers/gpu/drm-tungsten/nouveau_state.c
+--- git/drivers/gpu/drm-tungsten/nouveau_state.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_state.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,871 @@
++/*
++ * Copyright 2005 Stephane Marchesin
++ * Copyright 2008 Stuart Bennett
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++static int nouveau_init_card_mappings(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      /* resource 0 is mmio regs */
++      /* resource 1 is linear FB */
++      /* resource 2 is RAMIN (mmio regs + 0x1000000) */
++      /* resource 6 is bios */
++
++      /* map the mmio regs */
++      ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
++                            drm_get_resource_len(dev, 0),
++                            _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
++      if (ret) {
++              DRM_ERROR("Unable to initialize the mmio mapping (%d). "
++                        "Please report your setup to " DRIVER_EMAIL "\n",
++                        ret);
++              return -EINVAL;
++      }
++      DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);
++
++      /* map larger RAMIN aperture on NV40 cards */
++      dev_priv->ramin = NULL;
++      if (dev_priv->card_type >= NV_40) {
++              int ramin_resource = 2;
++              if (drm_get_resource_len(dev, ramin_resource) == 0)
++                      ramin_resource = 3;
++
++              ret = drm_addmap(dev,
++                               drm_get_resource_start(dev, ramin_resource),
++                               drm_get_resource_len(dev, ramin_resource),
++                               _DRM_REGISTERS, _DRM_READ_ONLY,
++                               &dev_priv->ramin);
++              if (ret) {
++                      DRM_ERROR("Failed to init RAMIN mapping, "
++                                "limited instance memory available\n");
++                      dev_priv->ramin = NULL;
++              }
++      }
++
++      /* On older cards (or if the above failed), create a map covering
++       * the BAR0 PRAMIN aperture */
++      if (!dev_priv->ramin) {
++              ret = drm_addmap(dev,
++                               drm_get_resource_start(dev, 0) + NV_RAMIN,
++                               (1*1024*1024),
++                               _DRM_REGISTERS, _DRM_READ_ONLY,
++                               &dev_priv->ramin);
++              if (ret) {
++                      DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret);
++                      return ret;
++              }
++      }
++
++      return 0;
++}
++
++static int nouveau_stub_init(struct drm_device *dev) { return 0; }
++static void nouveau_stub_takedown(struct drm_device *dev) {}
++
++static int nouveau_init_engine_ptrs(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      switch (dev_priv->chipset & 0xf0) {
++      case 0x00:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv04_fb_init;
++              engine->fb.takedown     = nv04_fb_takedown;
++              engine->graph.init      = nv04_graph_init;
++              engine->graph.takedown  = nv04_graph_takedown;
++              engine->graph.create_context    = nv04_graph_create_context;
++              engine->graph.destroy_context   = nv04_graph_destroy_context;
++              engine->graph.load_context      = nv04_graph_load_context;
++              engine->graph.save_context      = nv04_graph_save_context;
++              engine->fifo.channels   = 16;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv04_fifo_channel_id;
++              engine->fifo.create_context     = nv04_fifo_create_context;
++              engine->fifo.destroy_context    = nv04_fifo_destroy_context;
++              engine->fifo.load_context       = nv04_fifo_load_context;
++              engine->fifo.save_context       = nv04_fifo_save_context;
++              break;
++      case 0x10:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv10_fb_init;
++              engine->fb.takedown     = nv10_fb_takedown;
++              engine->graph.init      = nv10_graph_init;
++              engine->graph.takedown  = nv10_graph_takedown;
++              engine->graph.create_context    = nv10_graph_create_context;
++              engine->graph.destroy_context   = nv10_graph_destroy_context;
++              engine->graph.load_context      = nv10_graph_load_context;
++              engine->graph.save_context      = nv10_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv10_fifo_create_context;
++              engine->fifo.destroy_context    = nv10_fifo_destroy_context;
++              engine->fifo.load_context       = nv10_fifo_load_context;
++              engine->fifo.save_context       = nv10_fifo_save_context;
++              break;
++      case 0x20:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv10_fb_init;
++              engine->fb.takedown     = nv10_fb_takedown;
++              engine->graph.init      = nv20_graph_init;
++              engine->graph.takedown  = nv20_graph_takedown;
++              engine->graph.create_context    = nv20_graph_create_context;
++              engine->graph.destroy_context   = nv20_graph_destroy_context;
++              engine->graph.load_context      = nv20_graph_load_context;
++              engine->graph.save_context      = nv20_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv10_fifo_create_context;
++              engine->fifo.destroy_context    = nv10_fifo_destroy_context;
++              engine->fifo.load_context       = nv10_fifo_load_context;
++              engine->fifo.save_context       = nv10_fifo_save_context;
++              break;
++      case 0x30:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv10_fb_init;
++              engine->fb.takedown     = nv10_fb_takedown;
++              engine->graph.init      = nv30_graph_init;
++              engine->graph.takedown  = nv20_graph_takedown;
++              engine->graph.create_context    = nv20_graph_create_context;
++              engine->graph.destroy_context   = nv20_graph_destroy_context;
++              engine->graph.load_context      = nv20_graph_load_context;
++              engine->graph.save_context      = nv20_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv10_fifo_create_context;
++              engine->fifo.destroy_context    = nv10_fifo_destroy_context;
++              engine->fifo.load_context       = nv10_fifo_load_context;
++              engine->fifo.save_context       = nv10_fifo_save_context;
++              break;
++      case 0x40:
++      case 0x60:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv40_mc_init;
++              engine->mc.takedown     = nv40_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv40_fb_init;
++              engine->fb.takedown     = nv40_fb_takedown;
++              engine->graph.init      = nv40_graph_init;
++              engine->graph.takedown  = nv40_graph_takedown;
++              engine->graph.create_context    = nv40_graph_create_context;
++              engine->graph.destroy_context   = nv40_graph_destroy_context;
++              engine->graph.load_context      = nv40_graph_load_context;
++              engine->graph.save_context      = nv40_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nv40_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv40_fifo_create_context;
++              engine->fifo.destroy_context    = nv40_fifo_destroy_context;
++              engine->fifo.load_context       = nv40_fifo_load_context;
++              engine->fifo.save_context       = nv40_fifo_save_context;
++              break;
++      case 0x50:
++      case 0x80: /* gotta love NVIDIA's consistency.. */
++      case 0x90:
++      case 0xA0:
++              engine->instmem.init    = nv50_instmem_init;
++              engine->instmem.takedown= nv50_instmem_takedown;
++              engine->instmem.populate        = nv50_instmem_populate;
++              engine->instmem.clear           = nv50_instmem_clear;
++              engine->instmem.bind            = nv50_instmem_bind;
++              engine->instmem.unbind          = nv50_instmem_unbind;
++              engine->mc.init         = nv50_mc_init;
++              engine->mc.takedown     = nv50_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nouveau_stub_init;
++              engine->fb.takedown     = nouveau_stub_takedown;
++              engine->graph.init      = nv50_graph_init;
++              engine->graph.takedown  = nv50_graph_takedown;
++              engine->graph.create_context    = nv50_graph_create_context;
++              engine->graph.destroy_context   = nv50_graph_destroy_context;
++              engine->graph.load_context      = nv50_graph_load_context;
++              engine->graph.save_context      = nv50_graph_save_context;
++              engine->fifo.channels   = 128;
++              engine->fifo.init       = nv50_fifo_init;
++              engine->fifo.takedown   = nv50_fifo_takedown;
++              engine->fifo.channel_id         = nv50_fifo_channel_id;
++              engine->fifo.create_context     = nv50_fifo_create_context;
++              engine->fifo.destroy_context    = nv50_fifo_destroy_context;
++              engine->fifo.load_context       = nv50_fifo_load_context;
++              engine->fifo.save_context       = nv50_fifo_save_context;
++              break;
++      default:
++              DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset);
++              return 1;
++      }
++
++      return 0;
++}
++
++int
++nouveau_card_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine;
++      int ret;
++
++      DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
++
++      if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
++              return 0;
++      dev_priv->ttm = 0;
++
++      /* Determine exact chipset we're running on */
++      if (dev_priv->card_type < NV_10)
++              dev_priv->chipset = dev_priv->card_type;
++      else
++              dev_priv->chipset =
++                      (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20;
++
++      /* Initialise internal driver API hooks */
++      ret = nouveau_init_engine_ptrs(dev);
++      if (ret) return ret;
++      engine = &dev_priv->Engine;
++      dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
++
++      ret = nouveau_gpuobj_early_init(dev);
++      if (ret) return ret;
++
++      /* Initialise instance memory, must happen before mem_init so we
++       * know exactly how much VRAM we're able to use for "normal"
++       * purposes.
++       */
++      ret = engine->instmem.init(dev);
++      if (ret) return ret;
++
++      /* Setup the memory manager */
++      if (dev_priv->ttm) {
++              ret = nouveau_mem_init_ttm(dev);
++              if (ret) return ret;
++      } else {
++              ret = nouveau_mem_init(dev);
++              if (ret) return ret;
++      }
++
++      ret = nouveau_gpuobj_init(dev);
++      if (ret) return ret;
++
++      /* Parse BIOS tables / Run init tables? */
++
++      /* PMC */
++      ret = engine->mc.init(dev);
++      if (ret) return ret;
++
++      /* PTIMER */
++      ret = engine->timer.init(dev);
++      if (ret) return ret;
++
++      /* PFB */
++      ret = engine->fb.init(dev);
++      if (ret) return ret;
++
++      /* PGRAPH */
++      ret = engine->graph.init(dev);
++      if (ret) return ret;
++
++      /* PFIFO */
++      ret = engine->fifo.init(dev);
++      if (ret) return ret;
++
++      /* this calls irq_preinstall, registers the irq handler and
++       * calls irq_postinstall
++       */
++      ret = drm_irq_install(dev);
++      if (ret) return ret;
++
++      /* what about PVIDEO/PCRTC/PRAMDAC etc? */
++
++      ret = nouveau_dma_channel_init(dev);
++      if (ret) return ret;
++
++      dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
++      return 0;
++}
++
++static void nouveau_card_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
++
++      if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
++              nouveau_dma_channel_takedown(dev);
++
++              engine->fifo.takedown(dev);
++              engine->graph.takedown(dev);
++              engine->fb.takedown(dev);
++              engine->timer.takedown(dev);
++              engine->mc.takedown(dev);
++
++              nouveau_sgdma_nottm_hack_takedown(dev);
++              nouveau_sgdma_takedown(dev);
++
++              nouveau_gpuobj_takedown(dev);
++              nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt);
++
++              nouveau_mem_close(dev);
++              engine->instmem.takedown(dev);
++
++              drm_irq_uninstall(dev);
++
++              nouveau_gpuobj_late_takedown(dev);
++
++              dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
++      }
++}
++
++/* here a client dies, release the stuff that was allocated for its
++ * file_priv */
++void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      nouveau_fifo_cleanup(dev, file_priv);
++      nouveau_mem_release(file_priv,dev_priv->fb_heap);
++      nouveau_mem_release(file_priv,dev_priv->agp_heap);
++      nouveau_mem_release(file_priv,dev_priv->pci_heap);
++}
++
++/* first module load, setup the mmio/fb mapping */
++int nouveau_firstopen(struct drm_device *dev)
++{
++#if defined(__powerpc__)
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct device_node *dn;
++#endif
++      int ret;
++      /* Map any PCI resources we need on the card */
++      ret = nouveau_init_card_mappings(dev);
++      if (ret) return ret;
++
++#if defined(__powerpc__)
++      /* Put the card in BE mode if it's not */
++      if (NV_READ(NV03_PMC_BOOT_1))
++              NV_WRITE(NV03_PMC_BOOT_1,0x00000001);
++
++      DRM_MEMORYBARRIER();
++#endif
++
++#if defined(__linux__) && defined(__powerpc__)
++      /* if we have an OF card, copy vbios to RAMIN */
++      dn = pci_device_to_OF_node(dev->pdev);
++      if (dn)
++      {
++              int size;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++              const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size);
++#else
++              const uint32_t *bios = get_property(dn, "NVDA,BMP", &size);
++#endif
++              if (bios)
++              {
++                      int i;
++                      for(i=0;i<size;i+=4)
++                              NV_WI32(i, bios[i/4]);
++                      DRM_INFO("OF bios successfully copied (%d bytes)\n",size);
++              }
++              else
++                      DRM_INFO("Unable to get the OF bios\n");
++      }
++      else
++              DRM_INFO("Unable to get the OF node\n");
++#endif
++      return 0;
++}
++
++#define NV40_CHIPSET_MASK 0x00000baf
++#define NV44_CHIPSET_MASK 0x00005450
++
++int nouveau_load(struct drm_device *dev, unsigned long flags)
++{
++      struct drm_nouveau_private *dev_priv;
++      void __iomem *regs;
++      uint32_t reg0,reg1;
++      uint8_t architecture = 0;
++
++      dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
++      if (!dev_priv)
++              return -ENOMEM;
++
++      dev_priv->flags = flags & NOUVEAU_FLAGS;
++      dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
++
++      DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class);
++
++      /* Time to determine the card architecture */
++      regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);
++      if (!regs) {
++              DRM_ERROR("Could not ioremap to determine register\n");
++              return -ENOMEM;
++      }
++
++      reg0 = readl(regs+NV03_PMC_BOOT_0);
++      reg1 = readl(regs+NV03_PMC_BOOT_1);
++#if defined(__powerpc__)
++      if (reg1)
++              reg0=___swab32(reg0);
++#endif
++
++      /* We're dealing with >=NV10 */
++      if ((reg0 & 0x0f000000) > 0 ) {
++              /* Bit 27-20 contain the architecture in hex */
++              architecture = (reg0 & 0xff00000) >> 20;
++      /* NV04 or NV05 */
++      } else if ((reg0 & 0xff00fff0) == 0x20004000) {
++              architecture = 0x04;
++      }
++
++      iounmap(regs);
++
++      if (architecture >= 0x80) {
++              dev_priv->card_type = NV_50;
++      } else if (architecture >= 0x60) {
++              /* FIXME we need to figure out who's who for NV6x */
++              dev_priv->card_type = NV_44;
++      } else if (architecture >= 0x50) {
++              dev_priv->card_type = NV_50;
++      } else if (architecture >= 0x40) {
++              uint8_t subarch = architecture & 0xf;
++              /* Selection criteria borrowed from NV40EXA */
++              if (NV40_CHIPSET_MASK & (1 << subarch)) {
++                      dev_priv->card_type = NV_40;
++              } else if (NV44_CHIPSET_MASK & (1 << subarch)) {
++                      dev_priv->card_type = NV_44;
++              } else {
++                      dev_priv->card_type = NV_UNKNOWN;
++              }
++      } else if (architecture >= 0x30) {
++              dev_priv->card_type = NV_30;
++      } else if (architecture >= 0x20) {
++              dev_priv->card_type = NV_20;
++      } else if (architecture >= 0x17) {
++              dev_priv->card_type = NV_17;
++      } else if (architecture >= 0x11) {
++              dev_priv->card_type = NV_11;
++      } else if (architecture >= 0x10) {
++              dev_priv->card_type = NV_10;
++      } else if (architecture >= 0x04) {
++              dev_priv->card_type = NV_04;
++      } else {
++              dev_priv->card_type = NV_UNKNOWN;
++      }
++
++      DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0);
++
++      if (dev_priv->card_type == NV_UNKNOWN) {
++              return -EINVAL;
++      }
++
++      /* Special flags */
++      if (dev->pci_device == 0x01a0) {
++              dev_priv->flags |= NV_NFORCE;
++      } else if (dev->pci_device == 0x01f0) {
++              dev_priv->flags |= NV_NFORCE2;
++      }
++
++      dev->dev_private = (void *)dev_priv;
++
++      return 0;
++}
++
++void nouveau_lastclose(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* In the case of an error dev_priv may not be allocated yet */
++      if (dev_priv && dev_priv->card_type) {
++              nouveau_card_takedown(dev);
++
++              if(dev_priv->fb_mtrr>0)
++              {
++                      drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC);
++                      dev_priv->fb_mtrr=0;
++              }
++      }
++}
++
++int nouveau_unload(struct drm_device *dev)
++{
++      drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++      return 0;
++}
++
++int
++nouveau_ioctl_card_init(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      return nouveau_card_init(dev);
++}
++
++int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_getparam *getparam = data;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      switch (getparam->param) {
++      case NOUVEAU_GETPARAM_CHIPSET_ID:
++              getparam->value = dev_priv->chipset;
++              break;
++      case NOUVEAU_GETPARAM_PCI_VENDOR:
++              getparam->value=dev->pci_vendor;
++              break;
++      case NOUVEAU_GETPARAM_PCI_DEVICE:
++              getparam->value=dev->pci_device;
++              break;
++      case NOUVEAU_GETPARAM_BUS_TYPE:
++              if (drm_device_is_agp(dev))
++                      getparam->value=NV_AGP;
++              else if (drm_device_is_pcie(dev))
++                      getparam->value=NV_PCIE;
++              else
++                      getparam->value=NV_PCI;
++              break;
++      case NOUVEAU_GETPARAM_FB_PHYSICAL:
++              getparam->value=dev_priv->fb_phys;
++              break;
++      case NOUVEAU_GETPARAM_AGP_PHYSICAL:
++              getparam->value=dev_priv->gart_info.aper_base;
++              break;
++      case NOUVEAU_GETPARAM_PCI_PHYSICAL:
++              if ( dev -> sg )
++                      getparam->value=(unsigned long)dev->sg->virtual;
++              else
++                   {
++                   DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n");
++                   return -EINVAL;
++                   }
++              break;
++      case NOUVEAU_GETPARAM_FB_SIZE:
++              getparam->value=dev_priv->fb_available_size;
++              break;
++      case NOUVEAU_GETPARAM_AGP_SIZE:
++              getparam->value=dev_priv->gart_info.aper_size;
++              break;
++      default:
++              DRM_ERROR("unknown parameter %lld\n", getparam->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_setparam *setparam = data;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      switch (setparam->param) {
++      case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
++              switch (setparam->value) {
++              case NOUVEAU_MEM_AGP:
++              case NOUVEAU_MEM_FB:
++              case NOUVEAU_MEM_PCI:
++              case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE:
++                      break;
++              default:
++                      DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n",
++                                      setparam->value);
++                      return -EINVAL;
++              }
++              dev_priv->config.cmdbuf.location = setparam->value;
++              break;
++      case NOUVEAU_SETPARAM_CMDBUF_SIZE:
++              dev_priv->config.cmdbuf.size = setparam->value;
++              break;
++      default:
++              DRM_ERROR("unknown parameter %lld\n", setparam->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* waits for idle */
++void nouveau_wait_for_idle(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      switch(dev_priv->card_type) {
++      case NV_50:
++              break;
++      default: {
++              /* This stuff is more or less a copy of what is seen
++               * in nv28 kmmio dump.
++               */
++              uint64_t started = dev_priv->Engine.timer.read(dev);
++              uint64_t stopped = started;
++              uint32_t status;
++              do {
++                      uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE);
++                      (void)pmc_e;
++                      status = NV_READ(NV04_PGRAPH_STATUS);
++                      if (!status)
++                              break;
++                      stopped = dev_priv->Engine.timer.read(dev);
++              /* It'll never wrap anyway... */
++              } while (stopped - started < 1000000000ULL);
++              if (status)
++                      DRM_ERROR("timed out with status 0x%08x\n",
++                                status);
++      }
++      }
++}
++
++static int nouveau_suspend(struct drm_device *dev)
++{
++      struct mem_block *p;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_suspend_resume *susres = &dev_priv->susres;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int i;
++
++      drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
++      susres->ramin_size = 0;
++      list_for_each(p, dev_priv->ramin_heap)
++              if (p->file_priv && (p->start + p->size) > susres->ramin_size)
++                      susres->ramin_size = p->start + p->size;
++      if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) {
++              DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n");
++              return -ENOMEM;
++      }
++
++      for (i = 0; i < engine->fifo.channels; i++) {
++              uint64_t t_start = engine->timer.read(dev);
++
++              if (dev_priv->fifos[i] == NULL)
++                      continue;
++
++              /* Give the channel a chance to idle, wait 2s (hopefully) */
++              while (!nouveau_channel_idle(dev_priv->fifos[i]))
++                      if (engine->timer.read(dev) - t_start > 2000000000ULL) {
++                              DRM_ERROR("Failed to idle channel %d before"
++                                        "suspend.", dev_priv->fifos[i]->id);
++                              return -EBUSY;
++                      }
++      }
++      nouveau_wait_for_idle(dev);
++
++      NV_WRITE(NV04_PGRAPH_FIFO, 0);
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      susres->fifo_mode = NV_READ(NV04_PFIFO_MODE);
++
++      if (dev_priv->card_type >= NV_10) {
++              susres->graph_state = NV_READ(NV10_PGRAPH_STATE);
++              susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL);
++      } else {
++              susres->graph_state = NV_READ(NV04_PGRAPH_STATE);
++              susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL);
++      }
++
++      engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
++      engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
++      nouveau_wait_for_idle(dev);
++
++      for (i = 0; i < susres->ramin_size / 4; i++)
++              susres->ramin_copy[i] = NV_RI32(i << 2);
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++      NV_WRITE(NV04_PGRAPH_FIFO, 1);
++
++      return 0;
++}
++
++static int nouveau_resume(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_suspend_resume *susres = &dev_priv->susres;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int i;
++
++      if (!susres->ramin_copy)
++              return -EINVAL;
++
++      DRM_DEBUG("Doing resume\n");
++
++      if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
++              struct drm_agp_info info;
++              struct drm_agp_mode mode;
++
++              /* agp bridge drivers don't re-enable agp on resume. lame. */
++              if ((i = drm_agp_info(dev, &info))) {
++                      DRM_ERROR("Unable to get AGP info: %d\n", i);
++                      return i;
++              }
++              mode.mode = info.mode;
++              if ((i = drm_agp_enable(dev, mode))) {
++                      DRM_ERROR("Unable to enable AGP: %d\n", i);
++                      return i;
++              }
++      }
++
++      for (i = 0; i < susres->ramin_size / 4; i++)
++              NV_WI32(i << 2, susres->ramin_copy[i]);
++
++      engine->mc.init(dev);
++      engine->timer.init(dev);
++      engine->fb.init(dev);
++      engine->graph.init(dev);
++      engine->fifo.init(dev);
++
++      NV_WRITE(NV04_PGRAPH_FIFO, 0);
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      /* PMC power cycling PFIFO in init clobbers some of the stuff stored in
++       * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful
++       */
++      for (i = 0; i < susres->ramin_size / 4; i++)
++              NV_WI32(i << 2, susres->ramin_copy[i]);
++
++      engine->fifo.load_context(dev_priv->fifos[0]);
++      NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode);
++
++      engine->graph.load_context(dev_priv->fifos[0]);
++      nouveau_wait_for_idle(dev);
++
++      if (dev_priv->card_type >= NV_10) {
++              NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state);
++              NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
++      } else {
++              NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state);
++              NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
++      }
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++      NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
++
++      if (dev->irq_enabled)
++              nouveau_irq_postinstall(dev);
++
++      drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
++      susres->ramin_copy = NULL;
++      susres->ramin_size = 0;
++
++      return 0;
++}
++
++int nouveau_ioctl_suspend(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv)
++{
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      return nouveau_suspend(dev);
++}
++
++int nouveau_ioctl_resume(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv)
++{
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      return nouveau_resume(dev);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_swmthd.c git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.c
+--- git/drivers/gpu/drm-tungsten/nouveau_swmthd.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,191 @@
++/*
++ * Copyright (C) 2007 Arthur Huillet.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Arthur Huillet <arthur.huillet AT free DOT fr>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_reg.h"
++
++/*TODO: add a "card_type" attribute*/
++typedef struct{
++      uint32_t oclass; /* object class for this software method */
++      uint32_t mthd; /* method number */
++      void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */
++ } nouveau_software_method_t;
++
++
++ /* This function handles the NV04 setcontext software methods.
++One function for all because they are very similar.*/
++static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF;
++      uint32_t value_to_set = 0, bit_to_set = 0;
++
++      switch ( oclass ) {
++              case 0x4a:
++                      switch ( mthd ) {
++                              case 0x188 :
++                              case 0x18c :
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x198 :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                              default : ;
++                              };
++                      break;
++              case 0x5c:
++                      switch ( mthd ) {
++                              case 0x184:
++                                      bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
++                                      break;
++                              case 0x188:
++                              case 0x18c:
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x198:
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                      };
++                      break;
++              case 0x5f:
++                      switch ( mthd ) {
++                              case 0x184 :
++                                      bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/
++                                      break;
++                              case 0x188 :
++                                      bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
++                                      break;
++                              case 0x18c :
++                              case 0x190 :
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x19c :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                      };
++                      break;
++              case 0x61:
++                      switch ( mthd ) {
++                              case 0x188 :
++                                      bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
++                                      break;
++                              case 0x18c :
++                              case 0x190 :
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x19c :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                      };
++                      break;
++              case 0x77:
++                      switch ( mthd ) {
++                              case 0x198 :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x304 :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG
++                                      break;
++                      };
++                      break;
++              default :;
++              };
++
++      value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set;
++
++      /*RAMIN*/
++      nouveau_wait_for_idle(dev);
++      NV_WRITE(0x00700000 | inst_loc << 4, value_to_set);
++
++      /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/
++      NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, value_to_set);
++
++      /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/
++      NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set);
++}
++
++ nouveau_software_method_t nouveau_sw_methods[] = {
++      /*NV04 context software methods*/
++      { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method },
++      { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x190, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x77, 0x198, nouveau_NV04_setcontext_sw_method },
++      { 0x77, 0x304, nouveau_NV04_setcontext_sw_method },
++      /*terminator*/
++      { 0x0, 0x0, NULL, },
++ };
++
++ int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) {
++      int i = 0;
++      while ( nouveau_sw_methods[ i ] . method_code != NULL )
++              {
++              if ( nouveau_sw_methods[ i ] . oclass == oclass && nouveau_sw_methods[ i ] . mthd == method )
++                      {
++                      nouveau_sw_methods[ i ] . method_code(dev, oclass, method);
++                      return 0;
++                      }
++              i ++;
++              }
++
++       return 1;
++ }
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_swmthd.h git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.h
+--- git/drivers/gpu/drm-tungsten/nouveau_swmthd.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,33 @@
++/*
++ * Copyright (C) 2007 Arthur Huillet.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Arthur Huillet <arthur.huillet AT free DOT fr>
++ */
++
++int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_fb.c git-nokia/drivers/gpu/drm-tungsten/nv04_fb.c
+--- git/drivers/gpu/drm-tungsten/nv04_fb.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_fb.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,23 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_fb_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
++       * nvidia reading PFB_CFG_0, then writing back its original value.
++       * (which was 0x701114 in this case)
++       */
++      NV_WRITE(NV04_PFB_CFG0, 0x1114);
++
++      return 0;
++}
++
++void
++nv04_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv04_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv04_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,138 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
++                                       NV04_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
++                                       NV04_RAMFC_##offset/4)
++#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
++#define NV04_RAMFC__SIZE 32
++
++int
++nv04_fifo_channel_id(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++                      NV03_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv04_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
++                                              NV04_RAMFC__SIZE,
++                                              NVOBJ_FLAG_ZERO_ALLOC |
++                                              NVOBJ_FLAG_ZERO_FREE,
++                                              NULL, &chan->ramfc)))
++              return ret;
++
++      /* Setup initial state */
++      RAMFC_WR(DMA_PUT, chan->pushbuf_base);
++      RAMFC_WR(DMA_GET, chan->pushbuf_base);
++      RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
++      RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++                           NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                           NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++                           NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                           0));
++
++      /* enable the fifo dma operation */
++      NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id));
++      return 0;
++}
++
++void
++nv04_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv04_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++               NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
++
++      tmp = RAMFC_RD(DMA_INSTANCE);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE));
++
++      /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
++      tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++      return 0;
++}
++
++int
++nv04_fifo_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
++      tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE);
++      RAMFC_WR(DMA_INSTANCE, tmp);
++
++      RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
++      RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
++      RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));
++      RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1));
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_graph.c git-nokia/drivers/gpu/drm-tungsten/nv04_graph.c
+--- git/drivers/gpu/drm-tungsten/nv04_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,516 @@
++/*
++ * Copyright 2007 Stephane Marchesin
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++
++static uint32_t nv04_graph_ctx_regs [] = {
++      NV04_PGRAPH_CTX_SWITCH1,
++      NV04_PGRAPH_CTX_SWITCH2,
++      NV04_PGRAPH_CTX_SWITCH3,
++      NV04_PGRAPH_CTX_SWITCH4,
++      NV04_PGRAPH_CTX_CACHE1,
++      NV04_PGRAPH_CTX_CACHE2,
++      NV04_PGRAPH_CTX_CACHE3,
++      NV04_PGRAPH_CTX_CACHE4,
++      0x00400184,
++      0x004001a4,
++      0x004001c4,
++      0x004001e4,
++      0x00400188,
++      0x004001a8,
++      0x004001c8,
++      0x004001e8,
++      0x0040018c,
++      0x004001ac,
++      0x004001cc,
++      0x004001ec,
++      0x00400190,
++      0x004001b0,
++      0x004001d0,
++      0x004001f0,
++      0x00400194,
++      0x004001b4,
++      0x004001d4,
++      0x004001f4,
++      0x00400198,
++      0x004001b8,
++      0x004001d8,
++      0x004001f8,
++      0x0040019c,
++      0x004001bc,
++      0x004001dc,
++      0x004001fc,
++      0x00400174,
++      NV04_PGRAPH_DMA_START_0,
++      NV04_PGRAPH_DMA_START_1,
++      NV04_PGRAPH_DMA_LENGTH,
++      NV04_PGRAPH_DMA_MISC,
++      NV04_PGRAPH_DMA_PITCH,
++      NV04_PGRAPH_BOFFSET0,
++      NV04_PGRAPH_BBASE0,
++      NV04_PGRAPH_BLIMIT0,
++      NV04_PGRAPH_BOFFSET1,
++      NV04_PGRAPH_BBASE1,
++      NV04_PGRAPH_BLIMIT1,
++      NV04_PGRAPH_BOFFSET2,
++      NV04_PGRAPH_BBASE2,
++      NV04_PGRAPH_BLIMIT2,
++      NV04_PGRAPH_BOFFSET3,
++      NV04_PGRAPH_BBASE3,
++      NV04_PGRAPH_BLIMIT3,
++      NV04_PGRAPH_BOFFSET4,
++      NV04_PGRAPH_BBASE4,
++      NV04_PGRAPH_BLIMIT4,
++      NV04_PGRAPH_BOFFSET5,
++      NV04_PGRAPH_BBASE5,
++      NV04_PGRAPH_BLIMIT5,
++      NV04_PGRAPH_BPITCH0,
++      NV04_PGRAPH_BPITCH1,
++      NV04_PGRAPH_BPITCH2,
++      NV04_PGRAPH_BPITCH3,
++      NV04_PGRAPH_BPITCH4,
++      NV04_PGRAPH_SURFACE,
++      NV04_PGRAPH_STATE,
++      NV04_PGRAPH_BSWIZZLE2,
++      NV04_PGRAPH_BSWIZZLE5,
++      NV04_PGRAPH_BPIXEL,
++      NV04_PGRAPH_NOTIFY,
++      NV04_PGRAPH_PATT_COLOR0,
++      NV04_PGRAPH_PATT_COLOR1,
++      NV04_PGRAPH_PATT_COLORRAM+0x00,
++      NV04_PGRAPH_PATT_COLORRAM+0x01,
++      NV04_PGRAPH_PATT_COLORRAM+0x02,
++      NV04_PGRAPH_PATT_COLORRAM+0x03,
++      NV04_PGRAPH_PATT_COLORRAM+0x04,
++      NV04_PGRAPH_PATT_COLORRAM+0x05,
++      NV04_PGRAPH_PATT_COLORRAM+0x06,
++      NV04_PGRAPH_PATT_COLORRAM+0x07,
++      NV04_PGRAPH_PATT_COLORRAM+0x08,
++      NV04_PGRAPH_PATT_COLORRAM+0x09,
++      NV04_PGRAPH_PATT_COLORRAM+0x0A,
++      NV04_PGRAPH_PATT_COLORRAM+0x0B,
++      NV04_PGRAPH_PATT_COLORRAM+0x0C,
++      NV04_PGRAPH_PATT_COLORRAM+0x0D,
++      NV04_PGRAPH_PATT_COLORRAM+0x0E,
++      NV04_PGRAPH_PATT_COLORRAM+0x0F,
++      NV04_PGRAPH_PATT_COLORRAM+0x10,
++      NV04_PGRAPH_PATT_COLORRAM+0x11,
++      NV04_PGRAPH_PATT_COLORRAM+0x12,
++      NV04_PGRAPH_PATT_COLORRAM+0x13,
++      NV04_PGRAPH_PATT_COLORRAM+0x14,
++      NV04_PGRAPH_PATT_COLORRAM+0x15,
++      NV04_PGRAPH_PATT_COLORRAM+0x16,
++      NV04_PGRAPH_PATT_COLORRAM+0x17,
++      NV04_PGRAPH_PATT_COLORRAM+0x18,
++      NV04_PGRAPH_PATT_COLORRAM+0x19,
++      NV04_PGRAPH_PATT_COLORRAM+0x1A,
++      NV04_PGRAPH_PATT_COLORRAM+0x1B,
++      NV04_PGRAPH_PATT_COLORRAM+0x1C,
++      NV04_PGRAPH_PATT_COLORRAM+0x1D,
++      NV04_PGRAPH_PATT_COLORRAM+0x1E,
++      NV04_PGRAPH_PATT_COLORRAM+0x1F,
++      NV04_PGRAPH_PATT_COLORRAM+0x20,
++      NV04_PGRAPH_PATT_COLORRAM+0x21,
++      NV04_PGRAPH_PATT_COLORRAM+0x22,
++      NV04_PGRAPH_PATT_COLORRAM+0x23,
++      NV04_PGRAPH_PATT_COLORRAM+0x24,
++      NV04_PGRAPH_PATT_COLORRAM+0x25,
++      NV04_PGRAPH_PATT_COLORRAM+0x26,
++      NV04_PGRAPH_PATT_COLORRAM+0x27,
++      NV04_PGRAPH_PATT_COLORRAM+0x28,
++      NV04_PGRAPH_PATT_COLORRAM+0x29,
++      NV04_PGRAPH_PATT_COLORRAM+0x2A,
++      NV04_PGRAPH_PATT_COLORRAM+0x2B,
++      NV04_PGRAPH_PATT_COLORRAM+0x2C,
++      NV04_PGRAPH_PATT_COLORRAM+0x2D,
++      NV04_PGRAPH_PATT_COLORRAM+0x2E,
++      NV04_PGRAPH_PATT_COLORRAM+0x2F,
++      NV04_PGRAPH_PATT_COLORRAM+0x30,
++      NV04_PGRAPH_PATT_COLORRAM+0x31,
++      NV04_PGRAPH_PATT_COLORRAM+0x32,
++      NV04_PGRAPH_PATT_COLORRAM+0x33,
++      NV04_PGRAPH_PATT_COLORRAM+0x34,
++      NV04_PGRAPH_PATT_COLORRAM+0x35,
++      NV04_PGRAPH_PATT_COLORRAM+0x36,
++      NV04_PGRAPH_PATT_COLORRAM+0x37,
++      NV04_PGRAPH_PATT_COLORRAM+0x38,
++      NV04_PGRAPH_PATT_COLORRAM+0x39,
++      NV04_PGRAPH_PATT_COLORRAM+0x3A,
++      NV04_PGRAPH_PATT_COLORRAM+0x3B,
++      NV04_PGRAPH_PATT_COLORRAM+0x3C,
++      NV04_PGRAPH_PATT_COLORRAM+0x3D,
++      NV04_PGRAPH_PATT_COLORRAM+0x3E,
++      NV04_PGRAPH_PATT_COLORRAM+0x3F,
++      NV04_PGRAPH_PATTERN,
++      0x0040080c,
++      NV04_PGRAPH_PATTERN_SHAPE,
++      0x00400600,
++      NV04_PGRAPH_ROP3,
++      NV04_PGRAPH_CHROMA,
++      NV04_PGRAPH_BETA_AND,
++      NV04_PGRAPH_BETA_PREMULT,
++      NV04_PGRAPH_CONTROL0,
++      NV04_PGRAPH_CONTROL1,
++      NV04_PGRAPH_CONTROL2,
++      NV04_PGRAPH_BLEND,
++      NV04_PGRAPH_STORED_FMT,
++      NV04_PGRAPH_SOURCE_COLOR,
++      0x00400560,
++      0x00400568,
++      0x00400564,
++      0x0040056c,
++      0x00400400,
++      0x00400480,
++      0x00400404,
++      0x00400484,
++      0x00400408,
++      0x00400488,
++      0x0040040c,
++      0x0040048c,
++      0x00400410,
++      0x00400490,
++      0x00400414,
++      0x00400494,
++      0x00400418,
++      0x00400498,
++      0x0040041c,
++      0x0040049c,
++      0x00400420,
++      0x004004a0,
++      0x00400424,
++      0x004004a4,
++      0x00400428,
++      0x004004a8,
++      0x0040042c,
++      0x004004ac,
++      0x00400430,
++      0x004004b0,
++      0x00400434,
++      0x004004b4,
++      0x00400438,
++      0x004004b8,
++      0x0040043c,
++      0x004004bc,
++      0x00400440,
++      0x004004c0,
++      0x00400444,
++      0x004004c4,
++      0x00400448,
++      0x004004c8,
++      0x0040044c,
++      0x004004cc,
++      0x00400450,
++      0x004004d0,
++      0x00400454,
++      0x004004d4,
++      0x00400458,
++      0x004004d8,
++      0x0040045c,
++      0x004004dc,
++      0x00400460,
++      0x004004e0,
++      0x00400464,
++      0x004004e4,
++      0x00400468,
++      0x004004e8,
++      0x0040046c,
++      0x004004ec,
++      0x00400470,
++      0x004004f0,
++      0x00400474,
++      0x004004f4,
++      0x00400478,
++      0x004004f8,
++      0x0040047c,
++      0x004004fc,
++      0x0040053c,
++      0x00400544,
++      0x00400540,
++      0x00400548,
++      0x00400560,
++      0x00400568,
++      0x00400564,
++      0x0040056c,
++      0x00400534,
++      0x00400538,
++      0x00400514,
++      0x00400518,
++      0x0040051c,
++      0x00400520,
++      0x00400524,
++      0x00400528,
++      0x0040052c,
++      0x00400530,
++      0x00400d00,
++      0x00400d40,
++      0x00400d80,
++      0x00400d04,
++      0x00400d44,
++      0x00400d84,
++      0x00400d08,
++      0x00400d48,
++      0x00400d88,
++      0x00400d0c,
++      0x00400d4c,
++      0x00400d8c,
++      0x00400d10,
++      0x00400d50,
++      0x00400d90,
++      0x00400d14,
++      0x00400d54,
++      0x00400d94,
++      0x00400d18,
++      0x00400d58,
++      0x00400d98,
++      0x00400d1c,
++      0x00400d5c,
++      0x00400d9c,
++      0x00400d20,
++      0x00400d60,
++      0x00400da0,
++      0x00400d24,
++      0x00400d64,
++      0x00400da4,
++      0x00400d28,
++      0x00400d68,
++      0x00400da8,
++      0x00400d2c,
++      0x00400d6c,
++      0x00400dac,
++      0x00400d30,
++      0x00400d70,
++      0x00400db0,
++      0x00400d34,
++      0x00400d74,
++      0x00400db4,
++      0x00400d38,
++      0x00400d78,
++      0x00400db8,
++      0x00400d3c,
++      0x00400d7c,
++      0x00400dbc,
++      0x00400590,
++      0x00400594,
++      0x00400598,
++      0x0040059c,
++      0x004005a8,
++      0x004005ac,
++      0x004005b0,
++      0x004005b4,
++      0x004005c0,
++      0x004005c4,
++      0x004005c8,
++      0x004005cc,
++      0x004005d0,
++      0x004005d4,
++      0x004005d8,
++      0x004005dc,
++      0x004005e0,
++      NV04_PGRAPH_PASSTHRU_0,
++      NV04_PGRAPH_PASSTHRU_1,
++      NV04_PGRAPH_PASSTHRU_2,
++      NV04_PGRAPH_DVD_COLORFMT,
++      NV04_PGRAPH_SCALED_FORMAT,
++      NV04_PGRAPH_MISC24_0,
++      NV04_PGRAPH_MISC24_1,
++      NV04_PGRAPH_MISC24_2,
++      0x00400500,
++      0x00400504,
++      NV04_PGRAPH_VALID1,
++      NV04_PGRAPH_VALID2
++
++
++};
++
++struct graph_state {
++      int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])];
++};
++
++void nouveau_nv04_context_switch(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_channel *next, *last;
++      int chid;
++
++      if (!dev) {
++              DRM_DEBUG("Invalid drm_device\n");
++              return;
++      }
++      dev_priv = dev->dev_private;
++      if (!dev_priv) {
++              DRM_DEBUG("Invalid drm_nouveau_private\n");
++              return;
++      }
++      if (!dev_priv->fifos) {
++              DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
++              return;
++      }
++
++      chid = engine->fifo.channel_id(dev);
++      next = dev_priv->fifos[chid];
++
++      if (!next) {
++              DRM_DEBUG("Invalid next channel\n");
++              return;
++      }
++
++      chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
++      last = dev_priv->fifos[chid];
++
++      if (!last) {
++              DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
++                        next->id);
++      } else {
++              DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",
++                       last->id, next->id);
++      }
++
++/*    NV_WRITE(NV03_PFIFO_CACHES, 0x0);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/
++      NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++
++      if (last)
++              nv04_graph_save_context(last);
++
++      nouveau_wait_for_idle(dev);
++
++      NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000);
++      NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24));
++
++      nouveau_wait_for_idle(dev);
++
++      nv04_graph_load_context(next);
++
++      NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24);
++      NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF);
++
++/*    NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/
++      NV_WRITE(NV04_PGRAPH_FIFO,0x1);
++}
++
++int nv04_graph_create_context(struct nouveau_channel *chan) {
++      struct graph_state* pgraph_ctx;
++      DRM_DEBUG("nv04_graph_context_create %d\n", chan->id);
++
++      chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
++                                            DRM_MEM_DRIVER);
++
++      if (pgraph_ctx == NULL)
++              return -ENOMEM;
++
++      //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
++      pgraph_ctx->nv04[0] = 0x0001ffff;
++      /* is it really needed ??? */
++      //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
++      //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
++
++      return 0;
++}
++
++void nv04_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++
++      drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
++      chan->pgraph_ctx = NULL;
++}
++
++int nv04_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
++              NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
++
++      return 0;
++}
++
++int nv04_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
++              pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]);
++
++      return 0;
++}
++
++int nv04_graph_init(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      /* Enable PGRAPH interrupts */
++      NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_VALID1, 0);
++      NV_WRITE(NV04_PGRAPH_VALID2, 0);
++      /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
++      /*1231C000 blob, 001 haiku*/
++      //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100);
++      /*0x72111100 blob , 01 haiku*/
++      /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071);
++      /*haiku same*/
++
++      /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
++      /*haiku and blob 10d4*/
++
++      NV_WRITE(NV04_PGRAPH_STATE        , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_CTX_CONTROL  , 0x10010100);
++      NV_WRITE(NV04_PGRAPH_FIFO         , 0x00000001);
++
++      /* These don't belong here, they're part of a per-channel context */
++      NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
++
++      return 0;
++}
++
++void nv04_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_instmem.c git-nokia/drivers/gpu/drm-tungsten/nv04_instmem.c
+--- git/drivers/gpu/drm-tungsten/nv04_instmem.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_instmem.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,159 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++static void
++nv04_instmem_determine_amount(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      /* Figure out how much instance memory we need */
++      if (dev_priv->card_type >= NV_40) {
++              /* We'll want more instance memory than this on some NV4x cards.
++               * There's a 16MB aperture to play with that maps onto the end
++               * of vram.  For now, only reserve a small piece until we know
++               * more about what each chipset requires.
++               */
++              dev_priv->ramin_rsvd_vram = (1*1024* 1024);
++      } else {
++              /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
++               *     exist in vram on those cards as well?
++               */
++              dev_priv->ramin_rsvd_vram = (512*1024);
++      }
++      DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
++
++      /* Clear all of it, except the BIOS image that's in the first 64KiB */
++      for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4)
++              NV_WI32(i, 0x00000000);
++}
++
++static void
++nv04_instmem_configure_fixed_tables(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      /* FIFO hash table (RAMHT)
++       *   use 4k hash table at RAMIN+0x10000
++       *   TODO: extend the hash table
++       */
++      dev_priv->ramht_offset = 0x10000;
++      dev_priv->ramht_bits   = 9;
++      dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
++      DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
++                                                dev_priv->ramht_size);
++
++      /* FIFO runout table (RAMRO) - 512k at 0x11200 */
++      dev_priv->ramro_offset = 0x11200;
++      dev_priv->ramro_size   = 512;
++      DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
++                                                dev_priv->ramro_size);
++
++      /* FIFO context table (RAMFC)
++       *   NV40  : Not sure exactly how to position RAMFC on some cards,
++       *           0x30002 seems to position it at RAMIN+0x20000 on these
++       *           cards.  RAMFC is 4kb (32 fifos, 128byte entries).
++       *   Others: Position RAMFC at RAMIN+0x11400
++       */
++      switch(dev_priv->card_type)
++      {
++              case NV_40:
++              case NV_44:
++                      dev_priv->ramfc_offset = 0x20000;
++                      dev_priv->ramfc_size   = engine->fifo.channels *
++                                               nouveau_fifo_ctx_size(dev);
++                      break;
++              case NV_30:
++              case NV_20:
++              case NV_17:
++              case NV_11:
++              case NV_10:
++              case NV_04:
++              default:
++                      dev_priv->ramfc_offset = 0x11400;
++                      dev_priv->ramfc_size   = engine->fifo.channels *
++                                               nouveau_fifo_ctx_size(dev);
++                      break;
++      }
++      DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
++                                                dev_priv->ramfc_size);
++}
++
++int nv04_instmem_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t offset;
++      int ret = 0;
++
++      nv04_instmem_determine_amount(dev);
++      nv04_instmem_configure_fixed_tables(dev);
++
++      /* Create a heap to manage RAMIN allocations, we don't allocate
++       * the space that was reserved for RAMHT/FC/RO.
++       */
++      offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
++
++      /* On my NV4E, there's *something* clobbering the 16KiB just after
++       * where we setup these fixed tables.  No idea what it is just yet,
++       * so reserve this space on all NV4X cards for now.
++       */
++      if (dev_priv->card_type >= NV_40)
++              offset += 16*1024;
++
++      ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
++                                  offset, dev_priv->ramin_rsvd_vram - offset);
++      if (ret) {
++              dev_priv->ramin_heap = NULL;
++              DRM_ERROR("Failed to init RAMIN heap\n");
++      }
++
++      return ret;
++}
++
++void
++nv04_instmem_takedown(struct drm_device *dev)
++{
++}
++
++int
++nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
++{
++      if (gpuobj->im_backing)
++              return -EINVAL;
++
++      return 0;
++}
++
++void
++nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (gpuobj && gpuobj->im_backing) {
++              if (gpuobj->im_bound)
++                      dev_priv->Engine.instmem.unbind(dev, gpuobj);
++              gpuobj->im_backing = NULL;
++      }
++}
++
++int
++nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      if (!gpuobj->im_pramin || gpuobj->im_bound)
++              return -EINVAL;
++
++      gpuobj->im_bound = 1;
++      return 0;
++}
++
++int
++nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      if (gpuobj->im_bound == 0)
++              return -EINVAL;
++
++      gpuobj->im_bound = 0;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_mc.c git-nokia/drivers/gpu/drm-tungsten/nv04_mc.c
+--- git/drivers/gpu/drm-tungsten/nv04_mc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_mc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,22 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_mc_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Power up everything, resetting each individual unit will
++       * be done later if needed.
++       */
++      NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++      return 0;
++}
++
++void
++nv04_mc_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_timer.c git-nokia/drivers/gpu/drm-tungsten/nv04_timer.c
+--- git/drivers/gpu/drm-tungsten/nv04_timer.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_timer.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,53 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_timer_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000);
++      NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF);
++
++      /* Just use the pre-existing values when possible for now; these regs
++       * are not written in nv (driver writer missed a /4 on the address), and
++       * writing 8 and 3 to the correct regs breaks the timings on the LVDS
++       * hardware sequencing microcode.
++       * A correct solution (involving calculations with the GPU PLL) can
++       * be done when kernel modesetting lands
++       */
++      if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) {
++              NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008);
++              NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003);
++      }
++
++      return 0;
++}
++
++uint64_t
++nv04_timer_read(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t low;
++      /* From kmmio dumps on nv28 this looks like how the blob does this.
++       * It reads the high dword twice, before and after.
++       * The only explanation seems to be that the 64-bit timer counter
++       * advances between high and low dword reads and may corrupt the
++       * result. Not confirmed.
++       */
++      uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1);
++      uint32_t high1;
++      do {
++              high1 = high2;
++              low = NV_READ(NV04_PTIMER_TIME_0);
++              high2 = NV_READ(NV04_PTIMER_TIME_1);
++      } while(high1 != high2);
++      return (((uint64_t)high2) << 32) | (uint64_t)low;
++}
++
++void
++nv04_timer_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_fb.c git-nokia/drivers/gpu/drm-tungsten/nv10_fb.c
+--- git/drivers/gpu/drm-tungsten/nv10_fb.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_fb.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,25 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv10_fb_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t fb_bar_size;
++      int i;
++
++      fb_bar_size = drm_get_resource_len(dev, 0) - 1;
++      for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(NV10_PFB_TILE(i), 0);
++              NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
++      }
++
++      return 0;
++}
++
++void
++nv10_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv10_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv10_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,169 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++
++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
++                                       NV10_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
++                                       NV10_RAMFC_##offset/4)
++#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
++#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
++
++int
++nv10_fifo_channel_id(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++                      NV10_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv10_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
++                                              NV10_RAMFC__SIZE,
++                                              NVOBJ_FLAG_ZERO_ALLOC |
++                                              NVOBJ_FLAG_ZERO_FREE,
++                                              NULL, &chan->ramfc)))
++              return ret;
++
++      /* Fill entries that are seen filled in dumps of nvidia driver just
++       * after channel's is put into DMA mode
++       */
++      RAMFC_WR(DMA_PUT       , chan->pushbuf_base);
++      RAMFC_WR(DMA_GET       , chan->pushbuf_base);
++      RAMFC_WR(DMA_INSTANCE  , chan->pushbuf->instance >> 4);
++      RAMFC_WR(DMA_FETCH     , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++                               NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                               0);
++
++      /* enable the fifo dma operation */
++      NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
++      return 0;
++}
++
++void
++nv10_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv10_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++               NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET          , RAMFC_RD(DMA_GET));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT          , RAMFC_RD(DMA_PUT));
++      NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT          , RAMFC_RD(REF_CNT));
++
++      tmp = RAMFC_RD(DMA_INSTANCE);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE     , tmp & 0xFFFF);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT       , tmp >> 16);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE        , RAMFC_RD(DMA_STATE));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH        , RAMFC_RD(DMA_FETCH));
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE           , RAMFC_RD(ENGINE));
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1            , RAMFC_RD(PULL1_ENGINE));
++
++      if (dev_priv->chipset >= 0x17) {
++              NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE,
++                       RAMFC_RD(ACQUIRE_VALUE));
++              NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP,
++                       RAMFC_RD(ACQUIRE_TIMESTAMP));
++              NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT,
++                       RAMFC_RD(ACQUIRE_TIMEOUT));
++              NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE,
++                       RAMFC_RD(SEMAPHORE));
++              NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE,
++                       RAMFC_RD(DMA_SUBROUTINE));
++      }
++
++      /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
++      tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++      return 0;
++}
++
++int
++nv10_fifo_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      RAMFC_WR(DMA_PUT          , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      RAMFC_WR(DMA_GET          , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++      RAMFC_WR(REF_CNT          , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
++
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
++      tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
++      RAMFC_WR(DMA_INSTANCE     , tmp);
++
++      RAMFC_WR(DMA_STATE        , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
++      RAMFC_WR(DMA_FETCH        , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
++      RAMFC_WR(ENGINE           , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
++      RAMFC_WR(PULL1_ENGINE     , NV_READ(NV04_PFIFO_CACHE1_PULL1));
++
++      if (dev_priv->chipset >= 0x17) {
++              RAMFC_WR(ACQUIRE_VALUE,
++                       NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
++              RAMFC_WR(ACQUIRE_TIMESTAMP,
++                       NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
++              RAMFC_WR(ACQUIRE_TIMEOUT,
++                       NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
++              RAMFC_WR(SEMAPHORE,
++                       NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
++              RAMFC_WR(DMA_SUBROUTINE,
++                       NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_graph.c git-nokia/drivers/gpu/drm-tungsten/nv10_graph.c
+--- git/drivers/gpu/drm-tungsten/nv10_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,914 @@
++/*
++ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++
++#define NV10_FIFO_NUMBER 32
++
++struct pipe_state {
++      uint32_t pipe_0x0000[0x040/4];
++      uint32_t pipe_0x0040[0x010/4];
++      uint32_t pipe_0x0200[0x0c0/4];
++      uint32_t pipe_0x4400[0x080/4];
++      uint32_t pipe_0x6400[0x3b0/4];
++      uint32_t pipe_0x6800[0x2f0/4];
++      uint32_t pipe_0x6c00[0x030/4];
++      uint32_t pipe_0x7000[0x130/4];
++      uint32_t pipe_0x7400[0x0c0/4];
++      uint32_t pipe_0x7800[0x0c0/4];
++};
++
++static int nv10_graph_ctx_regs [] = {
++NV10_PGRAPH_CTX_SWITCH1,
++NV10_PGRAPH_CTX_SWITCH2,
++NV10_PGRAPH_CTX_SWITCH3,
++NV10_PGRAPH_CTX_SWITCH4,
++NV10_PGRAPH_CTX_SWITCH5,
++NV10_PGRAPH_CTX_CACHE1,       /* 8 values from 0x400160 to 0x40017c */
++NV10_PGRAPH_CTX_CACHE2,       /* 8 values from 0x400180 to 0x40019c */
++NV10_PGRAPH_CTX_CACHE3,       /* 8 values from 0x4001a0 to 0x4001bc */
++NV10_PGRAPH_CTX_CACHE4,       /* 8 values from 0x4001c0 to 0x4001dc */
++NV10_PGRAPH_CTX_CACHE5,       /* 8 values from 0x4001e0 to 0x4001fc */
++0x00400164,
++0x00400184,
++0x004001a4,
++0x004001c4,
++0x004001e4,
++0x00400168,
++0x00400188,
++0x004001a8,
++0x004001c8,
++0x004001e8,
++0x0040016c,
++0x0040018c,
++0x004001ac,
++0x004001cc,
++0x004001ec,
++0x00400170,
++0x00400190,
++0x004001b0,
++0x004001d0,
++0x004001f0,
++0x00400174,
++0x00400194,
++0x004001b4,
++0x004001d4,
++0x004001f4,
++0x00400178,
++0x00400198,
++0x004001b8,
++0x004001d8,
++0x004001f8,
++0x0040017c,
++0x0040019c,
++0x004001bc,
++0x004001dc,
++0x004001fc,
++NV10_PGRAPH_CTX_USER,
++NV04_PGRAPH_DMA_START_0,
++NV04_PGRAPH_DMA_START_1,
++NV04_PGRAPH_DMA_LENGTH,
++NV04_PGRAPH_DMA_MISC,
++NV10_PGRAPH_DMA_PITCH,
++NV04_PGRAPH_BOFFSET0,
++NV04_PGRAPH_BBASE0,
++NV04_PGRAPH_BLIMIT0,
++NV04_PGRAPH_BOFFSET1,
++NV04_PGRAPH_BBASE1,
++NV04_PGRAPH_BLIMIT1,
++NV04_PGRAPH_BOFFSET2,
++NV04_PGRAPH_BBASE2,
++NV04_PGRAPH_BLIMIT2,
++NV04_PGRAPH_BOFFSET3,
++NV04_PGRAPH_BBASE3,
++NV04_PGRAPH_BLIMIT3,
++NV04_PGRAPH_BOFFSET4,
++NV04_PGRAPH_BBASE4,
++NV04_PGRAPH_BLIMIT4,
++NV04_PGRAPH_BOFFSET5,
++NV04_PGRAPH_BBASE5,
++NV04_PGRAPH_BLIMIT5,
++NV04_PGRAPH_BPITCH0,
++NV04_PGRAPH_BPITCH1,
++NV04_PGRAPH_BPITCH2,
++NV04_PGRAPH_BPITCH3,
++NV04_PGRAPH_BPITCH4,
++NV10_PGRAPH_SURFACE,
++NV10_PGRAPH_STATE,
++NV04_PGRAPH_BSWIZZLE2,
++NV04_PGRAPH_BSWIZZLE5,
++NV04_PGRAPH_BPIXEL,
++NV10_PGRAPH_NOTIFY,
++NV04_PGRAPH_PATT_COLOR0,
++NV04_PGRAPH_PATT_COLOR1,
++NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
++0x00400904,
++0x00400908,
++0x0040090c,
++0x00400910,
++0x00400914,
++0x00400918,
++0x0040091c,
++0x00400920,
++0x00400924,
++0x00400928,
++0x0040092c,
++0x00400930,
++0x00400934,
++0x00400938,
++0x0040093c,
++0x00400940,
++0x00400944,
++0x00400948,
++0x0040094c,
++0x00400950,
++0x00400954,
++0x00400958,
++0x0040095c,
++0x00400960,
++0x00400964,
++0x00400968,
++0x0040096c,
++0x00400970,
++0x00400974,
++0x00400978,
++0x0040097c,
++0x00400980,
++0x00400984,
++0x00400988,
++0x0040098c,
++0x00400990,
++0x00400994,
++0x00400998,
++0x0040099c,
++0x004009a0,
++0x004009a4,
++0x004009a8,
++0x004009ac,
++0x004009b0,
++0x004009b4,
++0x004009b8,
++0x004009bc,
++0x004009c0,
++0x004009c4,
++0x004009c8,
++0x004009cc,
++0x004009d0,
++0x004009d4,
++0x004009d8,
++0x004009dc,
++0x004009e0,
++0x004009e4,
++0x004009e8,
++0x004009ec,
++0x004009f0,
++0x004009f4,
++0x004009f8,
++0x004009fc,
++NV04_PGRAPH_PATTERN,  /* 2 values from 0x400808 to 0x40080c */
++0x0040080c,
++NV04_PGRAPH_PATTERN_SHAPE,
++NV03_PGRAPH_MONO_COLOR0,
++NV04_PGRAPH_ROP3,
++NV04_PGRAPH_CHROMA,
++NV04_PGRAPH_BETA_AND,
++NV04_PGRAPH_BETA_PREMULT,
++0x00400e70,
++0x00400e74,
++0x00400e78,
++0x00400e7c,
++0x00400e80,
++0x00400e84,
++0x00400e88,
++0x00400e8c,
++0x00400ea0,
++0x00400ea4,
++0x00400ea8,
++0x00400e90,
++0x00400e94,
++0x00400e98,
++0x00400e9c,
++NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00 to 0x400f1c */
++NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20 to 0x400f3c */
++0x00400f04,
++0x00400f24,
++0x00400f08,
++0x00400f28,
++0x00400f0c,
++0x00400f2c,
++0x00400f10,
++0x00400f30,
++0x00400f14,
++0x00400f34,
++0x00400f18,
++0x00400f38,
++0x00400f1c,
++0x00400f3c,
++NV10_PGRAPH_XFMODE0,
++NV10_PGRAPH_XFMODE1,
++NV10_PGRAPH_GLOBALSTATE0,
++NV10_PGRAPH_GLOBALSTATE1,
++NV04_PGRAPH_STORED_FMT,
++NV04_PGRAPH_SOURCE_COLOR,
++NV03_PGRAPH_ABS_X_RAM,        /* 32 values from 0x400400 to 0x40047c */
++NV03_PGRAPH_ABS_Y_RAM,        /* 32 values from 0x400480 to 0x4004fc */
++0x00400404,
++0x00400484,
++0x00400408,
++0x00400488,
++0x0040040c,
++0x0040048c,
++0x00400410,
++0x00400490,
++0x00400414,
++0x00400494,
++0x00400418,
++0x00400498,
++0x0040041c,
++0x0040049c,
++0x00400420,
++0x004004a0,
++0x00400424,
++0x004004a4,
++0x00400428,
++0x004004a8,
++0x0040042c,
++0x004004ac,
++0x00400430,
++0x004004b0,
++0x00400434,
++0x004004b4,
++0x00400438,
++0x004004b8,
++0x0040043c,
++0x004004bc,
++0x00400440,
++0x004004c0,
++0x00400444,
++0x004004c4,
++0x00400448,
++0x004004c8,
++0x0040044c,
++0x004004cc,
++0x00400450,
++0x004004d0,
++0x00400454,
++0x004004d4,
++0x00400458,
++0x004004d8,
++0x0040045c,
++0x004004dc,
++0x00400460,
++0x004004e0,
++0x00400464,
++0x004004e4,
++0x00400468,
++0x004004e8,
++0x0040046c,
++0x004004ec,
++0x00400470,
++0x004004f0,
++0x00400474,
++0x004004f4,
++0x00400478,
++0x004004f8,
++0x0040047c,
++0x004004fc,
++NV03_PGRAPH_ABS_UCLIP_XMIN,
++NV03_PGRAPH_ABS_UCLIP_XMAX,
++NV03_PGRAPH_ABS_UCLIP_YMIN,
++NV03_PGRAPH_ABS_UCLIP_YMAX,
++0x00400550,
++0x00400558,
++0x00400554,
++0x0040055c,
++NV03_PGRAPH_ABS_UCLIPA_XMIN,
++NV03_PGRAPH_ABS_UCLIPA_XMAX,
++NV03_PGRAPH_ABS_UCLIPA_YMIN,
++NV03_PGRAPH_ABS_UCLIPA_YMAX,
++NV03_PGRAPH_ABS_ICLIP_XMAX,
++NV03_PGRAPH_ABS_ICLIP_YMAX,
++NV03_PGRAPH_XY_LOGIC_MISC0,
++NV03_PGRAPH_XY_LOGIC_MISC1,
++NV03_PGRAPH_XY_LOGIC_MISC2,
++NV03_PGRAPH_XY_LOGIC_MISC3,
++NV03_PGRAPH_CLIPX_0,
++NV03_PGRAPH_CLIPX_1,
++NV03_PGRAPH_CLIPY_0,
++NV03_PGRAPH_CLIPY_1,
++NV10_PGRAPH_COMBINER0_IN_ALPHA,
++NV10_PGRAPH_COMBINER1_IN_ALPHA,
++NV10_PGRAPH_COMBINER0_IN_RGB,
++NV10_PGRAPH_COMBINER1_IN_RGB,
++NV10_PGRAPH_COMBINER_COLOR0,
++NV10_PGRAPH_COMBINER_COLOR1,
++NV10_PGRAPH_COMBINER0_OUT_ALPHA,
++NV10_PGRAPH_COMBINER1_OUT_ALPHA,
++NV10_PGRAPH_COMBINER0_OUT_RGB,
++NV10_PGRAPH_COMBINER1_OUT_RGB,
++NV10_PGRAPH_COMBINER_FINAL0,
++NV10_PGRAPH_COMBINER_FINAL1,
++0x00400e00,
++0x00400e04,
++0x00400e08,
++0x00400e0c,
++0x00400e10,
++0x00400e14,
++0x00400e18,
++0x00400e1c,
++0x00400e20,
++0x00400e24,
++0x00400e28,
++0x00400e2c,
++0x00400e30,
++0x00400e34,
++0x00400e38,
++0x00400e3c,
++NV04_PGRAPH_PASSTHRU_0,
++NV04_PGRAPH_PASSTHRU_1,
++NV04_PGRAPH_PASSTHRU_2,
++NV10_PGRAPH_DIMX_TEXTURE,
++NV10_PGRAPH_WDIMX_TEXTURE,
++NV10_PGRAPH_DVD_COLORFMT,
++NV10_PGRAPH_SCALED_FORMAT,
++NV04_PGRAPH_MISC24_0,
++NV04_PGRAPH_MISC24_1,
++NV04_PGRAPH_MISC24_2,
++NV03_PGRAPH_X_MISC,
++NV03_PGRAPH_Y_MISC,
++NV04_PGRAPH_VALID1,
++NV04_PGRAPH_VALID2,
++};
++
++static int nv17_graph_ctx_regs [] = {
++NV10_PGRAPH_DEBUG_4,
++0x004006b0,
++0x00400eac,
++0x00400eb0,
++0x00400eb4,
++0x00400eb8,
++0x00400ebc,
++0x00400ec0,
++0x00400ec4,
++0x00400ec8,
++0x00400ecc,
++0x00400ed0,
++0x00400ed4,
++0x00400ed8,
++0x00400edc,
++0x00400ee0,
++0x00400a00,
++0x00400a04,
++};
++
++struct graph_state {
++      int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])];
++      int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])];
++      struct pipe_state pipe_state;
++};
++
++static void nv10_graph_save_pipe(struct nouveau_channel *chan) {
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++      int i;
++#define PIPE_SAVE(addr) \
++      do { \
++              NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
++              for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
++                      fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \
++      } while (0)
++
++      PIPE_SAVE(0x4400);
++      PIPE_SAVE(0x0200);
++      PIPE_SAVE(0x6400);
++      PIPE_SAVE(0x6800);
++      PIPE_SAVE(0x6c00);
++      PIPE_SAVE(0x7000);
++      PIPE_SAVE(0x7400);
++      PIPE_SAVE(0x7800);
++      PIPE_SAVE(0x0040);
++      PIPE_SAVE(0x0000);
++
++#undef PIPE_SAVE
++}
++
++static void nv10_graph_load_pipe(struct nouveau_channel *chan) {
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++      int i;
++      uint32_t xfmode0, xfmode1;
++#define PIPE_RESTORE(addr) \
++      do { \
++              NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
++              for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
++                      NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
++      } while (0)
++
++
++      nouveau_wait_for_idle(dev);
++      /* XXX check haiku comments */
++      xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0);
++      xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1);
++      NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000);
++      NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
++      for (i = 0; i < 4; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++      for (i = 0; i < 4; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
++      for (i = 0; i < 3; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
++      for (i = 0; i < 3; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
++      NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008);
++
++
++      PIPE_RESTORE(0x0200);
++      nouveau_wait_for_idle(dev);
++
++      /* restore XFMODE */
++      NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0);
++      NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1);
++      PIPE_RESTORE(0x6400);
++      PIPE_RESTORE(0x6800);
++      PIPE_RESTORE(0x6c00);
++      PIPE_RESTORE(0x7000);
++      PIPE_RESTORE(0x7400);
++      PIPE_RESTORE(0x7800);
++      PIPE_RESTORE(0x4400);
++      PIPE_RESTORE(0x0000);
++      PIPE_RESTORE(0x0040);
++      nouveau_wait_for_idle(dev);
++
++#undef PIPE_RESTORE
++}
++
++static void nv10_graph_create_pipe(struct nouveau_channel *chan) {
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++      uint32_t *fifo_pipe_state_addr;
++      int i;
++#define PIPE_INIT(addr) \
++      do { \
++              fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
++      } while (0)
++#define PIPE_INIT_END(addr) \
++      do { \
++              if (fifo_pipe_state_addr != \
++                              sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \
++                      DRM_ERROR("incomplete pipe init for 0x%x :  %p/%p\n", addr, fifo_pipe_state_addr, \
++                                      sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \
++      } while (0)
++#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
++
++      PIPE_INIT(0x0200);
++      for (i = 0; i < 48; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x0200);
++
++      PIPE_INIT(0x6400);
++      for (i = 0; i < 211; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f000000);
++      NV_WRITE_PIPE_INIT(0x3f000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      PIPE_INIT_END(0x6400);
++
++      PIPE_INIT(0x6800);
++      for (i = 0; i < 162; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      for (i = 0; i < 25; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x6800);
++
++      PIPE_INIT(0x6c00);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0xbf800000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x6c00);
++
++      PIPE_INIT(0x7000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      for (i = 0; i < 35; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x7000);
++
++      PIPE_INIT(0x7400);
++      for (i = 0; i < 48; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x7400);
++
++      PIPE_INIT(0x7800);
++      for (i = 0; i < 48; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x7800);
++
++      PIPE_INIT(0x4400);
++      for (i = 0; i < 32; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x4400);
++
++      PIPE_INIT(0x0000);
++      for (i = 0; i < 16; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x0000);
++
++      PIPE_INIT(0x0040);
++      for (i = 0; i < 4; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x0040);
++
++#undef PIPE_INIT
++#undef PIPE_INIT_END
++#undef NV_WRITE_PIPE_INIT
++}
++
++static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
++{
++      int i;
++      for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) {
++              if (nv10_graph_ctx_regs[i] == reg)
++                      return i;
++      }
++      DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg);
++      return -1;
++}
++
++static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
++{
++      int i;
++      for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) {
++              if (nv17_graph_ctx_regs[i] == reg)
++                      return i;
++      }
++      DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg);
++      return -1;
++}
++
++int nv10_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
++              NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
++      if (dev_priv->chipset>=0x17) {
++              for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
++                      NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]);
++      }
++
++      nv10_graph_load_pipe(chan);
++
++      return 0;
++}
++
++int nv10_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
++              pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]);
++      if (dev_priv->chipset>=0x17) {
++              for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
++                      pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]);
++      }
++
++      nv10_graph_save_pipe(chan);
++
++      return 0;
++}
++
++void nouveau_nv10_context_switch(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv;
++      struct nouveau_engine *engine;
++      struct nouveau_channel *next, *last;
++      int chid;
++
++      if (!dev) {
++              DRM_DEBUG("Invalid drm_device\n");
++              return;
++      }
++      dev_priv = dev->dev_private;
++      if (!dev_priv) {
++              DRM_DEBUG("Invalid drm_nouveau_private\n");
++              return;
++      }
++      if (!dev_priv->fifos) {
++              DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
++              return;
++      }
++      engine = &dev_priv->Engine;
++
++      chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) &
++              (engine->fifo.channels - 1);
++      next = dev_priv->fifos[chid];
++
++      if (!next) {
++              DRM_ERROR("Invalid next channel\n");
++              return;
++      }
++
++      chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & 
++              (engine->fifo.channels - 1);
++      last = dev_priv->fifos[chid];
++
++      if (!last) {
++              DRM_INFO("WARNING: Invalid last channel, switch to %x\n",
++                        next->id);
++      } else {
++              DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
++                       last->id, next->id);
++      }
++
++      NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++      if (last) {
++              nouveau_wait_for_idle(dev);
++              nv10_graph_save_context(last);
++      }
++
++      nouveau_wait_for_idle(dev);
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
++
++      nouveau_wait_for_idle(dev);
++
++      nv10_graph_load_context(next);
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO,0x1);
++}
++
++#define NV_WRITE_CTX(reg, val) do { \
++      int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
++      if (offset > 0) \
++              pgraph_ctx->nv10[offset] = val; \
++      } while (0)
++
++#define NV17_WRITE_CTX(reg, val) do { \
++      int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
++      if (offset > 0) \
++              pgraph_ctx->nv17[offset] = val; \
++      } while (0)
++
++int nv10_graph_create_context(struct nouveau_channel *chan) {
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx;
++
++      DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);
++
++      chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
++                                            DRM_MEM_DRIVER);
++
++      if (pgraph_ctx == NULL)
++              return -ENOMEM;
++
++      /* mmio trace suggest that should be done in ddx with methods/objects */
++#if 0
++      uint32_t tmp, vramsz;
++      /* per channel init from ddx */
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      /*XXX the original ddx code, does this in 2 steps :
++       * tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++       * NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++       * tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++       * NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++       */
++      tmp |= 0x00020100;
++      NV_WRITE_CTX(NV10_PGRAPH_SURFACE, tmp);
++
++      vramsz = drm_get_resource_len(dev, 0) - 1;
++      NV_WRITE_CTX(NV04_PGRAPH_BOFFSET0, 0);
++      NV_WRITE_CTX(NV04_PGRAPH_BOFFSET1, 0);
++      NV_WRITE_CTX(NV04_PGRAPH_BLIMIT0 , vramsz);
++      NV_WRITE_CTX(NV04_PGRAPH_BLIMIT1 , vramsz);
++
++      NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
++      NV_WRITE_CTX(NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
++
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++#endif
++
++      NV_WRITE_CTX(0x00400e88, 0x08000000);
++      NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
++      NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
++      NV_WRITE_CTX(0x00400e10, 0x00001000);
++      NV_WRITE_CTX(0x00400e14, 0x00001000);
++      NV_WRITE_CTX(0x00400e30, 0x00080008);
++      NV_WRITE_CTX(0x00400e34, 0x00080008);
++      if (dev_priv->chipset>=0x17) {
++              /* is it really needed ??? */
++              NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
++              NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
++              NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
++              NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
++              NV17_WRITE_CTX(0x00400ec0, 0x00000080);
++              NV17_WRITE_CTX(0x00400ed0, 0x00000080);
++      }
++      NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
++
++      nv10_graph_create_pipe(chan);
++      return 0;
++}
++
++void nv10_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int chid;
++
++      drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
++      chan->pgraph_ctx = NULL;
++
++      chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
++
++      /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ????
++       */
++#if 0
++      /* does this avoid a potential context switch while we are written graph
++       * reg, or we should mask graph interrupt ???
++       */
++      NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++      if (chid == chan->id) {
++              DRM_INFO("cleanning a channel with graph in current context\n");
++              nouveau_wait_for_idle(dev);
++              DRM_INFO("reseting current graph context\n");
++              /* can't be call here because of dynamic mem alloc */
++              //nv10_graph_create_context(chan);
++              nv10_graph_load_context(chan);
++      }
++      NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
++#else
++      if (chid == chan->id) {
++              DRM_INFO("cleanning a channel with graph in current context\n");
++      }
++#endif
++}
++
++int nv10_graph_init(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
++      //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
++      NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
++                                    (1<<29) |
++                                    (1<<31));
++      if (dev_priv->chipset>=0x17) {
++              NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
++              NV_WRITE(0x004006b0, 0x40000020);
++      }
++      else
++              NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
++
++      /* copy tile info from PFB */
++      for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
++              NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
++              NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
++              NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
++      }
++
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      return 0;
++}
++
++void nv10_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv20_graph.c git-nokia/drivers/gpu/drm-tungsten/nv20_graph.c
+--- git/drivers/gpu/drm-tungsten/nv20_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv20_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,913 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++/*
++ * NV20
++ * -----
++ * There are 3 families :
++ * NV20 is 0x10de:0x020*
++ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
++ * NV2A is 0x10de:0x02A0
++ *
++ * NV30
++ * -----
++ * There are 3 families :
++ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
++ * NV34 is 0x10de:0x032*
++ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
++ *
++ * Not seen in the wild, no dumps (probably NV35) :
++ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
++ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
++ *
++ */
++
++#define NV20_GRCTX_SIZE (3580*4)
++#define NV25_GRCTX_SIZE (3529*4)
++#define NV2A_GRCTX_SIZE (3500*4)
++
++#define NV30_31_GRCTX_SIZE (24392)
++#define NV34_GRCTX_SIZE    (18140)
++#define NV35_36_GRCTX_SIZE (22396)
++
++static void nv20_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++/*
++write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements:
+++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
+++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++
+++0x00740c1c: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000
+++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000
+++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303
+++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000
+++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000
+++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008
+++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
+++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
+++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000
++
+++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000
+++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++...
++*/
++      INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101);
++      INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111);
++      INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008);
++      for (i = 0; i < 16; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff);
++      INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001);
++      INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000);
++      INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001);
++      INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000);
++      INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000);
++
++/*
++...
+++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++...
+++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++...
++*/
++      for (i = 0; i < 0x880; i += 0x10) {
++              INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9);
++              INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c);
++              INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b);
++      }
++
++/*
++write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements:
+++0x00742fbc: 3f800000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x281c/4), 0x3f800000);
++
++/*
++write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements:
+++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000
+++0x0074301c: 00000000 bf800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000);
++      INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000);
++      INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000);
++      INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000);
++      INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000);
++      INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000);
++      INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000);
++
++/*
++write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements:
+++0x00742fcc: 00000000 3f800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000);
++
++/*
++write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements:
+++0x0074302c: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements:
+++0x00743c9c: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements:
+++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000);
++
++/*
++write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements:
+++0x00743c6c: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements:
+++0x00743ccc: 00000000 000003f8 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8);
++
++/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */
++      INSTANCE_WR(ctx, 0x3540/4, 0x002fe000);
++
++/*
++write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements:
+++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c
++*/
++      for (i = 0; i < 8; ++i)
++              INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c);
++}
++
++static void nv2a_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x33c/4, 0xffff0000);
++      for(i = 0x3a0; i< 0x3a8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x47c/4, 0x00000101);
++      INSTANCE_WR(ctx, 0x490/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x4a8/4, 0x44400000);
++      for(i = 0x4d4; i< 0x4e4; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x4f4; i< 0x504; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080000);
++      for(i = 0x50c; i< 0x51c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x51c; i< 0x52c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x000105b8);
++      for(i = 0x52c; i< 0x53c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for(i = 0x55c; i< 0x59c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x5fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x604/4, 0x00004000);
++      INSTANCE_WR(ctx, 0x610/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x618/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x61c/4, 0x00010000);
++
++      for (i=0x1a9c; i <= 0x22fc/4; i += 32) {
++              INSTANCE_WR(ctx, i/4    , 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++
++      INSTANCE_WR(ctx, 0x269c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26dc/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x26ec/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x2700/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x3024/4, 0x000fe000);
++      INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8);
++      INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000);
++      for(i = 0x341c; i< 0x343c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x001c527c);
++}
++
++static void nv25_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++/*
++write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements:
+++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
+++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++
+++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000
+++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000
+++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000
+++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303
+++0x00740c3c: 00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000
+++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8
++
+++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000
+++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000
+++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
+++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000
+++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d1c: 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101);
++      INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111);
++      INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080);
++      INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001);
++      INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000);
++      INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000);
++      INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008);
++      INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008);
++      INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008);
++      INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008);
++      for (i=0; i<16; ++i)
++              INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff);
++
++/*
++write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements:
+++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0
+++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000
+++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000
+++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++...
+++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++...
+++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000
+++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080);
++      INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000);
++      INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040);
++      INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080);
++      INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0);
++      INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001);
++      INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000);
++      INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001);
++      INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000);
++      INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000);
++      for (i=0; i < 0x880/4; i+=4) {
++              INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9);
++              INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c);
++              INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b);
++      }
++
++/*
++write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements:
+++0x00742e24: 3f800000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x2704/4), 0x3f800000);
++
++/*
++write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements:
+++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000
+++0x00742e84: 00000000 bf800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000);
++      INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000);
++      INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000);
++      INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000);
++      INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000);
++      INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000);
++      INSTANCE_WR(ctx, (0x2744/4)+9, 0xbf800000);
++
++/*
++write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 (0x4) elements:
+++0x00742e34: 00000000 3f800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000);
++
++/*
++write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements:
+++0x00742e94: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements:
+++0x00743804: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements:
+++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000);
++
++/*
++write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements:
+++0x007437d4: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements:
+++0x00743824: 00000000 000003f8 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8);
++
++/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */
++      INSTANCE_WR(ctx, 0x3468/4, 0x002fe000);
++
++/*
++write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements:
+++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c
++*/
++      for (i=0; i<8; ++i)
++              INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c);
++}
++
++static void nv30_31_graph_context_init(struct drm_device *dev,
++                                       struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x410/4, 0x00000101);
++      INSTANCE_WR(ctx, 0x424/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x428/4, 0x00000060);
++      INSTANCE_WR(ctx, 0x444/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x448/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x44c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x460/4, 0x44400000);
++      INSTANCE_WR(ctx, 0x48c/4, 0xffff0000);
++      for(i = 0x4e0; i< 0x4e8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x4ec/4, 0x00011100);
++      for(i = 0x508; i< 0x548; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x58c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x590/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x594/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x598/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000);
++      for(i = 0x600; i< 0x640; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00010588);
++      for(i = 0x640; i< 0x680; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x6c0; i< 0x700; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0008aae4);
++      for(i = 0x700; i< 0x740; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x740; i< 0x780; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x85c/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x860/4, 0x00010000);
++      for(i = 0x864; i< 0x874; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00040004);
++      for(i = 0x1f18; i<= 0x3088 ; i+= 16) {
++              INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++      for(i = 0x30b8; i< 0x30c8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x344c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3808/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x381c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3848/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x384c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3850/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x3858/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x385c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3864/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);
++}
++
++static void nv34_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x40c/4, 0x01000101);
++      INSTANCE_WR(ctx, 0x420/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x424/4, 0x00000060);
++      INSTANCE_WR(ctx, 0x440/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x448/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
++      INSTANCE_WR(ctx, 0x480/4, 0xffff0000);
++      for(i = 0x4d4; i< 0x4dc; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x4e0/4, 0x00011100);
++      for(i = 0x4fc; i< 0x53c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x57c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x580/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x584/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x588/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000);
++      for(i = 0x5f0; i< 0x630; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00010588);
++      for(i = 0x630; i< 0x670; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x6b0; i< 0x6f0; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0008aae4);
++      for(i = 0x6f0; i< 0x730; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x730; i< 0x770; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x850/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x854/4, 0x00010000);
++      for(i = 0x858; i< 0x868; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00040004);
++      for(i = 0x15ac; i<= 0x271c ; i+= 16) {
++              INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++      for(i = 0x274c; i< 0x275c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2edc/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x2eec/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000);
++}
++
++static void nv35_36_graph_context_init(struct drm_device *dev,
++                                       struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x40c/4, 0x00000101);
++      INSTANCE_WR(ctx, 0x420/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x424/4, 0x00000060);
++      INSTANCE_WR(ctx, 0x440/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x448/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
++      INSTANCE_WR(ctx, 0x488/4, 0xffff0000);
++      for(i = 0x4dc; i< 0x4e4; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x4e8/4, 0x00011100);
++      for(i = 0x504; i< 0x544; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x588/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x58c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x590/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x594/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000);
++      for(i = 0x604; i< 0x644; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00010588);
++      for(i = 0x644; i< 0x684; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x6c4; i< 0x704; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0008aae4);
++      for(i = 0x704; i< 0x744; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x744; i< 0x784; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x860/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x864/4, 0x00010000);
++      for(i = 0x868; i< 0x878; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00040004);
++      for(i = 0x1f1c; i<= 0x308c ; i+= 16) {
++              INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++      for(i = 0x30bc; i< 0x30cc; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x3450/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x380c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3820/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x384c/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x3850/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3854/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x385c/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x3860/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3868/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);
++}
++
++int nv20_graph_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
++      unsigned int ctx_size;
++      unsigned int idoffs = 0x28/4;
++      int ret;
++
++      switch (dev_priv->chipset) {
++      case 0x20:
++              ctx_size = NV20_GRCTX_SIZE;
++              ctx_init = nv20_graph_context_init;
++              idoffs = 0;
++              break;
++      case 0x25:
++      case 0x28:
++              ctx_size = NV25_GRCTX_SIZE;
++              ctx_init = nv25_graph_context_init;
++              break;
++      case 0x2a:
++              ctx_size = NV2A_GRCTX_SIZE;
++              ctx_init = nv2a_graph_context_init;
++              idoffs = 0;
++              break;
++      case 0x30:
++      case 0x31:
++              ctx_size = NV30_31_GRCTX_SIZE;
++              ctx_init = nv30_31_graph_context_init;
++              break;
++      case 0x34:
++              ctx_size = NV34_GRCTX_SIZE;
++              ctx_init = nv34_graph_context_init;
++              break;
++      case 0x35:
++      case 0x36:
++              ctx_size = NV35_36_GRCTX_SIZE;
++              ctx_init = nv35_36_graph_context_init;
++              break;
++      default:
++              ctx_size = 0;
++              ctx_init = nv35_36_graph_context_init;
++              DRM_ERROR("Please contact the devs if you want your NV%x"
++                        " card to work\n", dev_priv->chipset);
++              return -ENOSYS;
++              break;
++      }
++
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
++                                        NVOBJ_FLAG_ZERO_ALLOC,
++                                        &chan->ramin_grctx)))
++              return ret;
++
++      /* Initialise default context values */
++      ctx_init(dev, chan->ramin_grctx->gpuobj);
++
++      /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
++      INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1);
++                                                           /* CTX_USER */
++
++      INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id,
++                      chan->ramin_grctx->instance >> 4);
++
++      return 0;
++}
++
++void nv20_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (chan->ramin_grctx)
++              nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
++
++      INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0);
++}
++
++int nv20_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
++               NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++
++      nouveau_wait_for_idle(dev);
++      return 0;
++}
++
++int nv20_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
++               NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
++
++      nouveau_wait_for_idle(dev);
++      return 0;
++}
++
++static void nv20_graph_rdi(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i, writecount = 32;
++      uint32_t rdi_index = 0x2c80000;
++
++      if (dev_priv->chipset == 0x20) {
++              rdi_index = 0x3d0000;
++              writecount = 15;
++      }
++
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index);
++      for (i = 0; i < writecount; i++)
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);
++
++      nouveau_wait_for_idle(dev);
++}
++
++int nv20_graph_init(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv =
++              (struct drm_nouveau_private *)dev->dev_private;
++      uint32_t tmp, vramsz;
++      int ret, i;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      if (!dev_priv->ctx_table) {
++              /* Create Context Pointer Table */
++              dev_priv->ctx_table_size = 32 * 4;
++              if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
++                                                dev_priv->ctx_table_size, 16,
++                                                NVOBJ_FLAG_ZERO_ALLOC,
++                                                &dev_priv->ctx_table)))
++                      return ret;
++      }
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
++               dev_priv->ctx_table->instance >> 4);
++
++      nv20_graph_rdi(dev);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
++      NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
++      NV_WRITE(0x40009C           , 0x00000040);
++
++      if (dev_priv->chipset >= 0x25) {
++              NV_WRITE(0x400890, 0x00080000);
++              NV_WRITE(0x400610, 0x304B1FB6);
++              NV_WRITE(0x400B80, 0x18B82880);
++              NV_WRITE(0x400B84, 0x44000000);
++              NV_WRITE(0x400098, 0x40000080);
++              NV_WRITE(0x400B88, 0x000000ff);
++      } else {
++              NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
++              NV_WRITE(0x400094, 0x00000005);
++              NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
++              NV_WRITE(0x400B84, 0x24000000);
++              NV_WRITE(0x400098, 0x00000040);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
++      }
++
++      /* copy tile info from PFB */
++      for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
++                      /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
++              NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
++                      /* which is NV40_PGRAPH_TSIZE0(i) ?? */
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
++              NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
++                      /* which is NV40_PGRAPH_TILE0(i) ?? */
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
++      }
++      for (i = 0; i < 8; i++) {
++              NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4));
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));
++      }
++      NV_WRITE(0x4009a0, NV_READ(0x100324));
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
++      NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++      /* begin RAM config */
++      vramsz = drm_get_resource_len(dev, 0) - 1;
++      NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++      NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
++      NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0));
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
++      NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1));
++      NV_WRITE(0x400820, 0);
++      NV_WRITE(0x400824, 0);
++      NV_WRITE(0x400864, vramsz-1);
++      NV_WRITE(0x400868, vramsz-1);
++
++      /* interesting.. the below overwrites some of the tile setup above.. */
++      NV_WRITE(0x400B20, 0x00000000);
++      NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++
++      return 0;
++}
++
++void nv20_graph_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
++}
++
++int nv30_graph_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++//    uint32_t vramsz, tmp;
++      int ret, i;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      if (!dev_priv->ctx_table) {
++              /* Create Context Pointer Table */
++              dev_priv->ctx_table_size = 32 * 4;
++              if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
++                                                dev_priv->ctx_table_size, 16,
++                                                NVOBJ_FLAG_ZERO_ALLOC,
++                                                &dev_priv->ctx_table)))
++                      return ret;
++      }
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
++                      dev_priv->ctx_table->instance >> 4);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
++      NV_WRITE(0x400890, 0x01b463ff);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475);
++      NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
++      NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
++      NV_WRITE(0x400B80, 0x1003d888);
++      NV_WRITE(0x400B84, 0x0c000000);
++      NV_WRITE(0x400098, 0x00000000);
++      NV_WRITE(0x40009C, 0x0005ad00);
++      NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2
++      NV_WRITE(0x4000a0, 0x00000000);
++      NV_WRITE(0x4000a4, 0x00000008);
++      NV_WRITE(0x4008a8, 0xb784a400);
++      NV_WRITE(0x400ba0, 0x002f8685);
++      NV_WRITE(0x400ba4, 0x00231f3f);
++      NV_WRITE(0x4008a4, 0x40000020);
++
++      if (dev_priv->chipset == 0x34) {
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002);
++      }
++
++      NV_WRITE(0x4000c0, 0x00000016);
++
++      /* copy tile info from PFB */
++      for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
++                      /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
++              NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
++                      /* which is NV40_PGRAPH_TSIZE0(i) ?? */
++              NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
++                      /* which is NV40_PGRAPH_TILE0(i) ?? */
++      }
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(0x0040075c             , 0x00000001);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      /* begin RAM config */
++//    vramsz = drm_get_resource_len(dev, 0) - 1;
++      NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++      NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++      if (dev_priv->chipset != 0x34) {
++              NV_WRITE(0x400750, 0x00EA0000);
++              NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x400750, 0x00EA0004);
++              NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1));
++      }
++
++#if 0
++      NV_WRITE(0x400820, 0);
++      NV_WRITE(0x400824, 0);
++      NV_WRITE(0x400864, vramsz-1);
++      NV_WRITE(0x400868, vramsz-1);
++
++      NV_WRITE(0x400B20, 0x00000000);
++      NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++      /* per-context state, doesn't belong here */
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++#endif
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_fb.c git-nokia/drivers/gpu/drm-tungsten/nv40_fb.c
+--- git/drivers/gpu/drm-tungsten/nv40_fb.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_fb.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,62 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv40_fb_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t fb_bar_size, tmp;
++      int num_tiles;
++      int i;
++
++      /* This is strictly a NV4x register (don't know about NV5x). */
++      /* The blob sets these to all kinds of values, and they mess up our setup. */
++      /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
++      /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
++      /* Any idea what this is? */
++      NV_WRITE(NV40_PFB_UNK_800, 0x1);
++
++      switch (dev_priv->chipset) {
++      case 0x40:
++      case 0x45:
++              tmp = NV_READ(NV10_PFB_CLOSE_PAGE2);
++              NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15));
++              num_tiles = NV10_PFB_TILE__SIZE;
++              break;
++      case 0x46: /* G72 */
++      case 0x47: /* G70 */
++      case 0x49: /* G71 */
++      case 0x4b: /* G73 */
++      case 0x4c: /* C51 (G7X version) */
++              num_tiles = NV40_PFB_TILE__SIZE_1;
++              break;
++      default:
++              num_tiles = NV40_PFB_TILE__SIZE_0;
++              break;
++      }
++
++      fb_bar_size = drm_get_resource_len(dev, 0) - 1;
++      switch (dev_priv->chipset) {
++      case 0x40:
++              for (i=0; i<num_tiles; i++) {
++                      NV_WRITE(NV10_PFB_TILE(i), 0);
++                      NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
++              }
++              break;
++      default:
++              for (i=0; i<num_tiles; i++) {
++                      NV_WRITE(NV40_PFB_TILE(i), 0);
++                      NV_WRITE(NV40_PFB_TLIMIT(i), fb_bar_size);
++              }
++              break;
++      }
++
++      return 0;
++}
++
++void
++nv40_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv40_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv40_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,209 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++
++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
++                                       NV40_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
++                                       NV40_RAMFC_##offset/4)
++#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE))
++#define NV40_RAMFC__SIZE 128
++
++int
++nv40_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
++                                              NV40_RAMFC__SIZE,
++                                              NVOBJ_FLAG_ZERO_ALLOC |
++                                              NVOBJ_FLAG_ZERO_FREE,
++                                              NULL, &chan->ramfc)))
++              return ret;
++
++      /* Fill entries that are seen filled in dumps of nvidia driver just
++       * after the channel is put into DMA mode
++       */
++      RAMFC_WR(DMA_PUT       , chan->pushbuf_base);
++      RAMFC_WR(DMA_GET       , chan->pushbuf_base);
++      RAMFC_WR(DMA_INSTANCE  , chan->pushbuf->instance >> 4);
++      RAMFC_WR(DMA_FETCH     , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++                               NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                               0x30000000 /* no idea.. */);
++      RAMFC_WR(DMA_SUBROUTINE, 0);
++      RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4);
++      RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
++
++      /* enable the fifo dma operation */
++      NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
++      return 0;
++}
++
++void
++nv40_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++      if (chan->ramfc)
++              nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv40_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp, tmp2;
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET          , RAMFC_RD(DMA_GET));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT          , RAMFC_RD(DMA_PUT));
++      NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT          , RAMFC_RD(REF_CNT));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE     , RAMFC_RD(DMA_INSTANCE));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT       , RAMFC_RD(DMA_DCOUNT));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE        , RAMFC_RD(DMA_STATE));
++
++      /* No idea what 0x2058 is.. */
++      tmp   = RAMFC_RD(DMA_FETCH);
++      tmp2  = NV_READ(0x2058) & 0xFFF;
++      tmp2 |= (tmp & 0x30000000);
++      NV_WRITE(0x2058, tmp2);
++      tmp  &= ~0x30000000;
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH        , tmp);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE           , RAMFC_RD(ENGINE));
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1            , RAMFC_RD(PULL1_ENGINE));
++      NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE    , RAMFC_RD(ACQUIRE_VALUE));
++      NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP));
++      NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT  , RAMFC_RD(ACQUIRE_TIMEOUT));
++      NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE        , RAMFC_RD(SEMAPHORE));
++      NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE   , RAMFC_RD(DMA_SUBROUTINE));
++      NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE          , RAMFC_RD(GRCTX_INSTANCE));
++      NV_WRITE(0x32e4, RAMFC_RD(UNK_40));
++      /* NVIDIA does this next line twice... */
++      NV_WRITE(0x32e8, RAMFC_RD(UNK_44));
++      NV_WRITE(0x2088, RAMFC_RD(UNK_4C));
++      NV_WRITE(0x3300, RAMFC_RD(UNK_50));
++
++      /* not sure what part is PUT, and which is GET.. never seen a non-zero
++       * value appear in a mmio-trace yet..
++       */
++#if 0
++      tmp = NV_READ(UNK_84);
++      NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???);
++      NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???);
++#endif
++
++      /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
++      tmp  = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
++      tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF;
++      NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);
++
++      /* Set channel active, and in DMA mode */
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++               NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++      /* Reset DMA_CTL_AT_INFO to INVALID */
++      tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++      return 0;
++}
++
++int
++nv40_fifo_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      RAMFC_WR(DMA_PUT          , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      RAMFC_WR(DMA_GET          , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++      RAMFC_WR(REF_CNT          , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
++      RAMFC_WR(DMA_INSTANCE     , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE));
++      RAMFC_WR(DMA_DCOUNT       , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT));
++      RAMFC_WR(DMA_STATE        , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
++
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH);
++      tmp |= NV_READ(0x2058) & 0x30000000;
++      RAMFC_WR(DMA_FETCH        , tmp);
++
++      RAMFC_WR(ENGINE           , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
++      RAMFC_WR(PULL1_ENGINE     , NV_READ(NV04_PFIFO_CACHE1_PULL1));
++      RAMFC_WR(ACQUIRE_VALUE    , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
++      tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
++      RAMFC_WR(ACQUIRE_TIMESTAMP, tmp);
++      RAMFC_WR(ACQUIRE_TIMEOUT  , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
++      RAMFC_WR(SEMAPHORE        , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
++
++      /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
++       * more involved depending on the value of 0x3228?
++       */
++      RAMFC_WR(DMA_SUBROUTINE   , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++
++      RAMFC_WR(GRCTX_INSTANCE   , NV_READ(NV40_PFIFO_GRCTX_INSTANCE));
++
++      /* No idea what the below is for exactly, ripped from a mmio-trace */
++      RAMFC_WR(UNK_40           , NV_READ(NV40_PFIFO_UNK32E4));
++
++      /* NVIDIA does this next line twice.. bug? */
++      RAMFC_WR(UNK_44           , NV_READ(0x32e8));
++      RAMFC_WR(UNK_4C           , NV_READ(0x2088));
++      RAMFC_WR(UNK_50           , NV_READ(0x3300));
++
++#if 0 /* no real idea which is PUT/GET in UNK_48.. */
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_GET);
++      tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16);
++      RAMFC_WR(UNK_48           , tmp);
++#endif
++
++      return 0;
++}
++
++int
++nv40_fifo_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_fifo_init(dev)))
++              return ret;
++
++      NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_graph.c git-nokia/drivers/gpu/drm-tungsten/nv40_graph.c
+--- git/drivers/gpu/drm-tungsten/nv40_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2193 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++/*TODO: decipher what each offset in the context represents. The below
++ *      contexts are taken from dumps just after the 3D object is
++ *      created.
++ */
++static void
++nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      /* Always has the "instance address" of itself at offset 0 */
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      /* unknown */
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00180/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00184/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00188/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0018c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001c0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00480/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00498/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x004b4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004d0/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00504/4, 0x00011100);
++      for (i=0x00520; i<=0x0055c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x00594/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x005b4/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0060c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00610/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00618/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00628/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00);
++      /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */
++      /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */
++      for (i=0x006C0; i<=0x006fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */
++      for (i=0x00700; i<=0x0073c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */
++      /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */
++      for (i=0x00780; i<=0x007bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */
++      for (i=0x007c0; i<=0x007fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */
++      for (i=0x00800; i<=0x0083c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */
++      /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */
++      for (i=0x00880; i<=0x008bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      /* unknown */
++      for (i=0x00910; i<=0x0091c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00920; i<=0x0092c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00940; i<=0x0094c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00960; i<=0x0096c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00980/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x009b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00);
++      INSTANCE_WR(ctx, 0x009d4/4, 0x00020000);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00aac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af8/4, 0x80800001);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c00/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c04/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c08/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c44/4, 0x00000001);
++      for (i=0x03008; i<=0x03080; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x05288; i<=0x08570; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x08628; i<=0x08e18; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0bd28; i<=0x0f010; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0f0c8; i<=0x0f8b8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x127c8; i<=0x15ab0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x15b68; i<=0x16358; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x19268; i<=0x1c550; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x1c608; i<=0x1cdf8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x1fd08; i<=0x22ff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x230a8; i<=0x23898; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x267a8; i<=0x29a90; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x29b48; i<=0x2a338; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
++      for (i = 0x00000178; i <= 0x00000180; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
++      for (i = 0x00000194; i <= 0x000001b0; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
++      for (i = 0x00000350; i <= 0x0000035c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00000430/4, 0x00011100);
++      for (i = 0x0000044c; i <= 0x00000488; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00000538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00);
++      for (i = 0x000005dc; i <= 0x00000618; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i = 0x0000061c; i <= 0x00000658; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i = 0x0000069c; i <= 0x000006d8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i = 0x000006dc; i <= 0x00000718; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i = 0x0000071c; i <= 0x00000758; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i = 0x0000079c; i <= 0x000007d8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i = 0x0000082c; i <= 0x00000838; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i = 0x0000083c; i <= 0x00000848; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i = 0x0000085c; i <= 0x00000868; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i = 0x0000087c; i <= 0x00000888; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00);
++      INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000);
++      INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff);
++      for (i = 0x00000ad4; i <= 0x00000ae4; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001);
++      for (i = 0x00002ee8; i <= 0x00002f60; i += 8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x00005168; i <= 0x00007358; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x00007368; i <= 0x00007758; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x0000a068; i <= 0x0000c258; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x0000c268; i <= 0x0000c658; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x0000ef68; i <= 0x00011158; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x00011168; i <= 0x00011558; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x00013e68; i <= 0x00016058; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x00016068; i <= 0x00016458; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++};
++
++static void
++nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00194/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00198/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0019c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001a4/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001a8/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001ac/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
++      for (i=0x0044c; i<=0x00488; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00);
++      for (i=0x005dc; i<=0x00618; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x0061c; i<=0x00658; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x0069c; i<=0x006d8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006dc; i<=0x00718; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x0071c; i<=0x00758; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x0079c; i<=0x007d8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x0082c; i<=0x00838; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x0083c; i<=0x00848; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x0085c; i<=0x00868; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x0087c; i<=0x00888; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x0089c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x00020000);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
++      for (i=0x02ec0; i<=0x02f38; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x04c80; i<=0x06e70; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x06e80; i<=0x07270; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x096c0; i<=0x0b8b0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0b8c0; i<=0x0bcb0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0e100; i<=0x102f0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x10300; i<=0x106f0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++};
++
++static void
++nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0004c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00138/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00144/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00184/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0018c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00190/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00194/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00198/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001a4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x0036c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00370/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00374/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00378/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003a4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x003b8/4, 0x00003010);
++      INSTANCE_WR(ctx, 0x003dc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003e4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003e8/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00400/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00404/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00408/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0040c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00410/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00414/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00418/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004b0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004b4/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x004d0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x004d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004ec/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00500/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00504/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00508/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0050c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00510/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00514/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00518/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00520/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00524/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00528/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00530/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00534/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00538/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0053c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00011100);
++      for (i=0x00578; i<0x005b4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x005ec/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x00608/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00658/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00664/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00674/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x006c8/4, 0x00ffff00);
++      for (i=0x0070c; i<=0x00748; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x0074c; i<=0x00788; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x007cc; i<=0x00808; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x0080c; i<=0x00848; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x0084c; i<=0x00888; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x008cc; i<=0x00908; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x0095c; i<=0x00968; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x0096c; i<=0x00978; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x0098c; i<=0x00998; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x009ac; i<=0x009b8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00a00/4, 0x00000421);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x00a28/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00a60/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00aec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b30/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00b38/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bec/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000);
++      for (i=0x017f8; i<=0x01870; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x035b8; i<=0x057a8; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x057b8; i<=0x05ba8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x07f38; i<=0x0a128; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0a138; i<=0x0a528; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0c8b8; i<=0x0eaa8; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0eab8; i<=0x0eea8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++/* This may only work on 7800 AGP cards, will include a warning */
++static void
++nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00000178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00000180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
++      for (i=0x00000194; i<=0x000001b0; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00000350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
++      for (i=0x000003c0; i<=0x000003fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00000454/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00000458/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x00000474/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000490/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000);
++      for (i=0x000004a4; i<=0x000004e0; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00000500/4, 0x00011100);
++      for (i=0x0000051c; i<=0x00000558; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00000590/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00000608/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00);
++      for (i=0x000006b0; i<=0x000006ec; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x000006f0; i<=0x0000072c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00000770; i<=0x000007ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x000007b0; i<=0x000007ec; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x000007f0; i<=0x0000082c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00000870; i<=0x000008ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x00000910/4, 0x00000202);
++      INSTANCE_WR(ctx, 0x00000914/4, 0x00000202);
++      INSTANCE_WR(ctx, 0x00000918/4, 0x00000202);
++      INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202);
++      for (i=0x00000930; i<=0x0000095c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00000970/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00);
++      INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001);
++      for (i=0x00000b10; i<=0x00000b8c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff);
++      for (i=0x00000bdc; i<=0x00000bf8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000);
++      for (i=0x00003000; i<=0x00003078; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00004dc0; i<=0x00006fb0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x00006fc0; i<=0x000073b0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00009800; i<=0x0000b9f0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0000ba00; i<=0x00010430; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00010440; i<=0x00010830; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00012c80; i<=0x00014e70; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x00014e80; i<=0x00015270; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x000176c0; i<=0x000198b0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x000198c0; i<=0x00019cb0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0001c100; i<=0x0001e2f0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0001e300; i<=0x0001e6f0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
++      for (i=0x00750; i<=0x0078c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00790; i<=0x007cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00810; i<=0x0084c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x00850; i<=0x0088c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00890; i<=0x008cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00910; i<=0x0094c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x009a0; i<=0x009ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x009b0; i<=0x009bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x009d0; i<=0x009dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x009f0; i<=0x009fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
++      INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
++      INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
++      for(i=0x030a0; i<=0x03118; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x098a0; i<=0x0ba90; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x0baa0; i<=0x0be90; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x0e2e0; i<=0x0fff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x10008; i<=0x104d0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x104e0; i<=0x108d0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x12d20; i<=0x14f10; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x14f20; i<=0x15310; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x17760; i<=0x19950; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x19960; i<=0x19d50; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x1c1a0; i<=0x1e390; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x1e3a0; i<=0x1e790; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x20be0; i<=0x22dd0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x22de0; i<=0x231d0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00003010);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
++      for (i=0x0044c; i<=0x00488; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00);
++      for (i=0x005d8; i<=0x00614; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00618; i<=0x00654; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00698; i<=0x006d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006d8; i<=0x00714; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00718; i<=0x00754; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00798; i<=0x007d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x00828; i<=0x00834; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00838; i<=0x00844; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00858; i<=0x00864; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00878; i<=0x00884; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00898/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008cc/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
++      for (i=0x016c0; i<=0x01738; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x03840; i<=0x05670; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x05680; i<=0x05a70; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x07e00; i<=0x09ff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0a000; i<=0x0a3f0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0c780; i<=0x0e970; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0e980; i<=0x0ed70; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
++      for (i=0x00750; i<=0x0078c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00790; i<=0x007cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00810; i<=0x0084c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x00850; i<=0x0088c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00890; i<=0x008cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00910; i<=0x0094c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x009a0; i<=0x009ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x009b0; i<=0x009bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x009d0; i<=0x009dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x009f0; i<=0x009fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
++      INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
++      INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
++      for(i=0x030a0; i<=0x03118; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x098a0; i<=0x0ba90; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x0baa0; i<=0x0be90; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x0e2e0; i<=0x0fff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x10008; i<=0x104d0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x104e0; i<=0x108d0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x12d20; i<=0x14f10; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x14f20; i<=0x15310; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x17760; i<=0x19950; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x19960; i<=0x19d50; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f4/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0040c/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x0041c/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x0042c/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00434/4, 0x00011100);
++      for (i=0x00450; i<0x0048c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00498/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c4/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004cc/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004fc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00530/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00534/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00538/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x0053c/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x0054c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00550/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00554/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00564/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0059c/4, 0x00ffff00);
++      for (i=0x005e0; i<=0x0061c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00620; i<=0x0065c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x006a0; i<=0x006dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006e0; i<=0x0071c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00720; i<=0x0075c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x007a0; i<=0x007dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x00830; i<=0x0083c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00840; i<=0x0084c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00860; i<=0x0086c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00880; i<=0x0088c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x008a0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x008d8/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008dc/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008ec/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008f0/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008fc/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00934/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a0c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a74/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a80/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00a9c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00ad8/4, 0x00000001);
++      for (i=0x016a0; i<0x01718; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x03460; i<0x05650; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x05660; i<0x05a50; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
++      for (i=0x0044c; i<=0x00488; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00);
++      for (i=0x005d8; i<=0x00614; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00618; i<=0x00654; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00698; i<=0x006d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006d8; i<=0x00714; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00718; i<=0x00754; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00798; i<=0x007d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x00828; i<=0x00834; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00838; i<=0x00844; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00858; i<=0x00864; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00878; i<=0x00884; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00898/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008cc/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00a94/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001);
++      for (i=0x01668; i<=0x016e0; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x03428; i<=0x05618; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x05628; i<=0x05a18; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++int
++nv40_graph_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
++      int ret;
++
++      /* These functions populate the graphics context with a whole heap
++       * of default state.  All these functions are very similar, with
++       * a minimal amount of chipset-specific changes.  However, as we're
++       * currently dependent on the context programs used by the NVIDIA
++       * binary driver these functions must match the layout expected by
++       * them.  Hopefully at some point this will all change.
++       */
++      switch (dev_priv->chipset) {
++      case 0x40:
++              ctx_init = nv40_graph_context_init;
++              break;
++      case 0x41:
++      case 0x42:
++              ctx_init = nv41_graph_context_init;
++              break;
++      case 0x43:
++              ctx_init = nv43_graph_context_init;
++              break;
++      case 0x46:
++              ctx_init = nv46_graph_context_init;
++              break;
++      case 0x47:
++              ctx_init = nv47_graph_context_init;
++              break;
++      case 0x49:
++              ctx_init = nv49_graph_context_init;
++              break;
++      case 0x44:
++      case 0x4a:
++              ctx_init = nv4a_graph_context_init;
++              break;
++      case 0x4b:
++              ctx_init = nv4b_graph_context_init;
++              break;
++      case 0x4c:
++      case 0x67:
++              ctx_init = nv4c_graph_context_init;
++              break;
++      case 0x4e:
++              ctx_init = nv4e_graph_context_init;
++              break;
++      default:
++              ctx_init = nv40_graph_context_init;
++              break;
++      }
++
++      /* Allocate a 175KiB block of PRAMIN to store the context.  This
++       * is massive overkill for a lot of chipsets, but it should be safe
++       * until we're able to implement this properly (will happen at more
++       * or less the same time we're able to write our own context programs.
++       */
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
++                                        NVOBJ_FLAG_ZERO_ALLOC,
++                                        &chan->ramin_grctx)))
++              return ret;
++
++      /* Initialise default context values */
++      ctx_init(dev, chan->ramin_grctx->gpuobj);
++
++      return 0;
++}
++
++void
++nv40_graph_destroy_context(struct nouveau_channel *chan)
++{
++      nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
++}
++
++static int
++nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t old_cp, tv = 1000, tmp;
++      int i;
++
++      old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++
++      tmp  = NV_READ(NV40_PGRAPH_CTXCTL_0310);
++      tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
++                    NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
++      NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
++
++      tmp  = NV_READ(NV40_PGRAPH_CTXCTL_0304);
++      tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
++      NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
++
++      nouveau_wait_for_idle(dev);
++
++      for (i = 0; i < tv; i++) {
++              if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
++                      break;
++      }
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
++
++      if (i == tv) {
++              uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
++              DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
++              DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
++                        ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
++                        ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
++              DRM_ERROR("0x40030C = 0x%08x\n",
++                        NV_READ(NV40_PGRAPH_CTXCTL_030C));
++              return -EBUSY;
++      }
++
++      return 0;
++}
++
++/* Save current context (from PGRAPH) into the channel's context */
++int
++nv40_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      uint32_t inst;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      return nv40_graph_transfer_context(dev, inst, 1);
++}
++
++/* Restore the context for a specific channel into PGRAPH */
++int
++nv40_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++      int ret;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      ret = nv40_graph_transfer_context(dev, inst, 0);
++      if (ret)
++              return ret;
++
++      /* 0x40032C, no idea of its exact function.  Could simply be a
++       * record of the currently active PGRAPH context.  It's currently
++       * unknown as to what bit 24 does.  The nv ddx has it set, so we will
++       * set it here too.
++       */
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR,
++               (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) |
++                NV40_PGRAPH_CTXCTL_CUR_LOADED);
++      /* 0x32E0 records the instance address of the active FIFO's PGRAPH
++       * context.  If at any time this doesn't match 0x40032C, you will
++       * receive PGRAPH_INTR_CONTEXT_SWITCH
++       */
++      NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst);
++      return 0;
++}
++
++/* These blocks of "magic numbers" are actually a microcode that the GPU uses
++ * to control how graphics contexts get saved and restored between PRAMIN
++ * and PGRAPH during a context switch.  We're currently using values seen
++ * in mmio-traces of the binary driver.
++ */
++static uint32_t nv40_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406,
++      0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216,
++      0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100,
++      0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029,
++      0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6,
++      0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b,
++      0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a,
++      0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
++      0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008,
++      0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901,
++      0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038,
++      0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
++      0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684,
++      0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68,
++      0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006,
++      0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284,
++      0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
++      0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
++      0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
++      ~0
++};
++
++static uint32_t nv41_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
++      0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800,
++      0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
++      0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
++      0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
++      0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
++      0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480,
++      0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a,
++      0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001,
++      0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
++      0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv43_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
++      0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
++      0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a,
++      0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965,
++      0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
++      0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
++      0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
++      0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350,
++      0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006,
++      0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
++      0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
++      0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
++      0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
++      ~0
++};
++
++static uint32_t nv44_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
++      0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5,
++      0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b,
++      0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d,
++      0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6,
++      0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158,
++      0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9,
++      0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0,
++      0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f,
++      0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec,
++      0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a,
++      0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691,
++      0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc,
++      0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
++      0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901,
++      0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
++      0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08,
++      0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8,
++      0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001,
++      0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029,
++      0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a,
++      0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000,
++      0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007,
++      0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a,
++      0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv46_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
++      0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a,
++      0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691,
++      0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x00200022,
++      0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
++      0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910,
++      0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, 0x00108a14,
++      0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80,
++      0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709,
++      0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
++      0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320,
++      0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081,
++      0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001,
++      0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
++      0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv47_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606,
++      0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12,
++      0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a,
++      0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
++      0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901,
++      0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19,
++      0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00,
++      0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
++      0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000,
++      0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a,
++      0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006,
++      0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318,
++      0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002,
++      0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f,
++      0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880,
++      0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c,
++      0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006,
++      0x0060000e, ~0
++};
++
++//this is used for nv49 and nv4b
++static uint32_t nv49_4b_ctx_prog[] ={
++      0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020,
++      0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000,
++      0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e,
++      0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000,
++      0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a,
++      0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210,
++      0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280,
++      0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140,
++      0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118,
++      0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d,
++      0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
++      0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800,
++      0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00,
++      0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e,
++      0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600,
++      0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a,
++      0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88,
++      0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f,
++      0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280,
++      0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68,
++      0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e,
++      0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b,
++      0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e,
++      0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60,
++      0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e,
++      0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005,
++      0x00700006, 0x0060000e, ~0
++};
++
++
++static uint32_t nv4a_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
++      0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
++      0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
++      0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
++      0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00,
++      0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
++      0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
++      0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a,
++      0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100,
++      0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a,
++      0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004,
++      0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080,
++      0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88,
++      0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000,
++      0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv4c_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
++      0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6,
++      0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682,
++      0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
++      0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0,
++      0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a,
++      0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
++      0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
++      0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
++      0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
++      0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a,
++      0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080,
++      0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168,
++      0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68,
++      0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000,
++      0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306,
++      0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv4e_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
++      0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
++      0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
++      0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
++      0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
++      0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06,
++      0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
++      0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06,
++      0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006,
++      0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
++      0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
++      0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
++      0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
++      ~0
++};
++
++/*
++ * G70                0x47
++ * G71                0x49
++ * NV45               0x48
++ * G72[M]     0x46
++ * G73                0x4b
++ * C51_G7X    0x4c
++ * C51                0x4e
++ */
++int
++nv40_graph_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv =
++              (struct drm_nouveau_private *)dev->dev_private;
++      uint32_t *ctx_prog;
++      uint32_t vramsz, tmp;
++      int i, j;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      switch (dev_priv->chipset) {
++      case 0x40: ctx_prog = nv40_ctx_prog; break;
++      case 0x41:
++      case 0x42: ctx_prog = nv41_ctx_prog; break;
++      case 0x43: ctx_prog = nv43_ctx_prog; break;
++      case 0x44: ctx_prog = nv44_ctx_prog; break;
++      case 0x46: ctx_prog = nv46_ctx_prog; break;
++      case 0x47: ctx_prog = nv47_ctx_prog; break;
++      case 0x49: ctx_prog = nv49_4b_ctx_prog; break;
++      case 0x4a: ctx_prog = nv4a_ctx_prog; break;
++      case 0x4b: ctx_prog = nv49_4b_ctx_prog; break;
++      case 0x4c:
++      case 0x67: ctx_prog = nv4c_ctx_prog; break;
++      case 0x4e: ctx_prog = nv4e_ctx_prog; break;
++      default:
++              DRM_ERROR("Context program for 0x%02x unavailable\n",
++                        dev_priv->chipset);
++              ctx_prog = NULL;
++              break;
++      }
++
++      /* Load the context program onto the card */
++      if (ctx_prog) {
++              DRM_DEBUG("Loading context program\n");
++              i = 0;
++
++              NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
++              while (ctx_prog[i] != ~0) {
++                      NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]);
++                      i++;
++              }
++      }
++
++      /* No context present currently */
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xe0de8055);
++      NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
++      NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      j = NV_READ(0x1540) & 0xff;
++      if (j) {
++              for (i=0; !(j&1); j>>=1, i++);
++              NV_WRITE(0x405000, i);
++      }
++
++      if (dev_priv->chipset == 0x40) {
++              NV_WRITE(0x4009b0, 0x83280fff);
++              NV_WRITE(0x4009b4, 0x000000a0);
++      } else {
++              NV_WRITE(0x400820, 0x83280eff);
++              NV_WRITE(0x400824, 0x000000a0);
++      }
++
++      switch (dev_priv->chipset) {
++      case 0x40:
++      case 0x45:
++              NV_WRITE(0x4009b8, 0x0078e366);
++              NV_WRITE(0x4009bc, 0x0000014c);
++              break;
++      case 0x41:
++      case 0x42: /* pciid also 0x00Cx */
++//    case 0x0120: //XXX (pciid)
++              NV_WRITE(0x400828, 0x007596ff);
++              NV_WRITE(0x40082c, 0x00000108);
++              break;
++      case 0x43:
++              NV_WRITE(0x400828, 0x0072cb77);
++              NV_WRITE(0x40082c, 0x00000108);
++              break;
++      case 0x44:
++      case 0x46: /* G72 */
++      case 0x4a:
++      case 0x4c: /* G7x-based C51 */
++      case 0x4e:
++              NV_WRITE(0x400860, 0);
++              NV_WRITE(0x400864, 0);
++              break;
++      case 0x47: /* G70 */
++      case 0x49: /* G71 */
++      case 0x4b: /* G73 */
++              NV_WRITE(0x400828, 0x07830610);
++              NV_WRITE(0x40082c, 0x0000016A);
++              break;
++      default:
++              break;
++      }
++
++      NV_WRITE(0x400b38, 0x2ffff800);
++      NV_WRITE(0x400b3c, 0x00006000);
++
++      /* copy tile info from PFB */
++      switch (dev_priv->chipset) {
++      case 0x40: /* vanilla NV40 */
++              for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++                      tmp = NV_READ(NV10_PFB_TILE(i));
++                      NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++                      tmp = NV_READ(NV10_PFB_TLIMIT(i));
++                      NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++                      tmp = NV_READ(NV10_PFB_TSIZE(i));
++                      NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++                      tmp = NV_READ(NV10_PFB_TSTATUS(i));
++                      NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++              }
++              break;
++      case 0x44:
++      case 0x4a:
++      case 0x4e: /* NV44-based cores don't have 0x406900? */
++              for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
++                      tmp = NV_READ(NV40_PFB_TILE(i));
++                      NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TLIMIT(i));
++                      NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSIZE(i));
++                      NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSTATUS(i));
++                      NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++              }
++              break;
++      case 0x46:
++      case 0x47:
++      case 0x49:
++      case 0x4b: /* G7X-based cores */
++              for (i=0; i<NV40_PFB_TILE__SIZE_1; i++) {
++                      tmp = NV_READ(NV40_PFB_TILE(i));
++                      NV_WRITE(NV47_PGRAPH_TILE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TLIMIT(i));
++                      NV_WRITE(NV47_PGRAPH_TLIMIT0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSIZE(i));
++                      NV_WRITE(NV47_PGRAPH_TSIZE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSTATUS(i));
++                      NV_WRITE(NV47_PGRAPH_TSTATUS0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++              }
++              break;
++      default: /* everything else */
++              for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
++                      tmp = NV_READ(NV40_PFB_TILE(i));
++                      NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TLIMIT(i));
++                      NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSIZE(i));
++                      NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSTATUS(i));
++                      NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++              }
++              break;
++      }
++
++      /* begin RAM config */
++      vramsz = drm_get_resource_len(dev, 0) - 1;
++      switch (dev_priv->chipset) {
++      case 0x40:
++              NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++              NV_WRITE(0x4069A4, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x4069A8, NV_READ(NV04_PFB_CFG1));
++              NV_WRITE(0x400820, 0);
++              NV_WRITE(0x400824, 0);
++              NV_WRITE(0x400864, vramsz);
++              NV_WRITE(0x400868, vramsz);
++              break;
++      default:
++              switch (dev_priv->chipset) {
++              case 0x46:
++              case 0x47:
++              case 0x49:
++              case 0x4b:
++                      NV_WRITE(0x400DF0, NV_READ(NV04_PFB_CFG0));
++                      NV_WRITE(0x400DF4, NV_READ(NV04_PFB_CFG1));
++                      break;
++              default:
++                      NV_WRITE(0x4009F0, NV_READ(NV04_PFB_CFG0));
++                      NV_WRITE(0x4009F4, NV_READ(NV04_PFB_CFG1));
++                      break;
++              }
++              NV_WRITE(0x4069F0, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x4069F4, NV_READ(NV04_PFB_CFG1));
++              NV_WRITE(0x400840, 0);
++              NV_WRITE(0x400844, 0);
++              NV_WRITE(0x4008A0, vramsz);
++              NV_WRITE(0x4008A4, vramsz);
++              break;
++      }
++
++      /* per-context state, doesn't belong here */
++      NV_WRITE(0x400B20, 0x00000000);
++      NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++
++      return 0;
++}
++
++void nv40_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_mc.c git-nokia/drivers/gpu/drm-tungsten/nv40_mc.c
+--- git/drivers/gpu/drm-tungsten/nv40_mc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_mc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,38 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv40_mc_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      /* Power up everything, resetting each individual unit will
++       * be done later if needed.
++       */
++      NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++      switch (dev_priv->chipset) {
++      case 0x44:
++      case 0x46: /* G72 */
++      case 0x4e:
++      case 0x4c: /* C51_G7X */
++              tmp = NV_READ(NV40_PFB_020C);
++              NV_WRITE(NV40_PMC_1700, tmp);
++              NV_WRITE(NV40_PMC_1704, 0);
++              NV_WRITE(NV40_PMC_1708, 0);
++              NV_WRITE(NV40_PMC_170C, tmp);
++              break;
++      default:
++              break;
++      }
++
++      return 0;
++}
++
++void
++nv40_mc_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv50_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv50_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,343 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++struct nv50_fifo_priv {
++      struct nouveau_gpuobj_ref *thingo[2];
++      int cur_thingo;
++};
++
++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
++
++static void
++nv50_fifo_init_thingo(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
++      struct nouveau_gpuobj_ref *cur;
++      int i, nr;
++
++      DRM_DEBUG("\n");
++
++      cur = priv->thingo[priv->cur_thingo];
++      priv->cur_thingo = !priv->cur_thingo;
++
++      /* We never schedule channel 0 or 127 */
++      for (i = 1, nr = 0; i < 127; i++) {
++              if (dev_priv->fifos[i]) {
++                      INSTANCE_WR(cur->gpuobj, nr++, i);
++              }
++      }
++      NV_WRITE(0x32f4, cur->instance >> 12);
++      NV_WRITE(0x32ec, nr);
++      NV_WRITE(0x2500, 0x101);
++}
++
++static int
++nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[channel];
++      uint32_t inst;
++
++      DRM_DEBUG("ch%d\n", channel);
++
++      if (!chan->ramfc)
++              return -EINVAL;
++
++      if (IS_G80) inst = chan->ramfc->instance >> 12;
++      else        inst = chan->ramfc->instance >> 8;
++      NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
++               inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
++
++      if (!nt) nv50_fifo_init_thingo(dev);
++      return 0;
++}
++
++static void
++nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++
++      DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
++
++      if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
++      else        inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
++      NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst);
++
++      if (!nt) nv50_fifo_init_thingo(dev);
++}
++
++static void
++nv50_fifo_init_reset(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  pmc_e);
++}
++
++static void
++nv50_fifo_init_intr(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
++      NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
++}
++
++static void
++nv50_fifo_init_context_table(struct drm_device *dev)
++{
++      int i;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++)
++              nv50_fifo_channel_disable(dev, i, 1);
++      nv50_fifo_init_thingo(dev);
++}
++
++static void
++nv50_fifo_init_regs__nv(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(0x250c, 0x6f3cfc34);
++}
++
++static void
++nv50_fifo_init_regs(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(0x2500, 0);
++      NV_WRITE(0x3250, 0);
++      NV_WRITE(0x3220, 0);
++      NV_WRITE(0x3204, 0);
++      NV_WRITE(0x3210, 0);
++      NV_WRITE(0x3270, 0);
++
++      /* Enable dummy channels setup by nv50_instmem.c */
++      nv50_fifo_channel_enable(dev, 0, 1);
++      nv50_fifo_channel_enable(dev, 127, 1);
++}
++
++int
++nv50_fifo_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nv50_fifo_priv *priv;
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
++      if (!priv)
++              return -ENOMEM;
++      dev_priv->Engine.fifo.priv = priv;
++
++      nv50_fifo_init_reset(dev);
++      nv50_fifo_init_intr(dev);
++
++      ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
++                                   NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
++      if (ret) {
++              DRM_ERROR("error creating thingo0: %d\n", ret);
++              return ret;
++      }
++
++      ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
++                                   NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
++      if (ret) {
++              DRM_ERROR("error creating thingo1: %d\n", ret);
++              return ret;
++      }
++
++      nv50_fifo_init_context_table(dev);
++      nv50_fifo_init_regs__nv(dev);
++      nv50_fifo_init_regs(dev);
++
++      return 0;
++}
++
++void
++nv50_fifo_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
++
++      DRM_DEBUG("\n");
++
++      if (!priv)
++              return;
++
++      nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
++      nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
++
++      dev_priv->Engine.fifo.priv = NULL;
++      drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
++}
++
++int
++nv50_fifo_channel_id(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++                      NV50_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv50_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ramfc = NULL;
++      int ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      if (IS_G80) {
++              uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;
++              uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start;
++              ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset,
++                                            0x100, NVOBJ_FLAG_ZERO_ALLOC |
++                                            NVOBJ_FLAG_ZERO_FREE, &ramfc,
++                                            &chan->ramfc);
++              if (ret)
++                      return ret;
++      } else {
++              ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
++                                           NVOBJ_FLAG_ZERO_ALLOC |
++                                           NVOBJ_FLAG_ZERO_FREE,
++                                           &chan->ramfc);
++              if (ret)
++                      return ret;
++              ramfc = chan->ramfc->gpuobj;
++      }
++
++      INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4);
++      INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
++      INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? */
++      INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff);
++      INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff);
++      INSTANCE_WR(ramfc, 0x10/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x08/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x40/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0);
++      INSTANCE_WR(ramfc, 0x54/4, 0x000f0000);
++      INSTANCE_WR(ramfc, 0x7c/4, 0x30000001);
++      INSTANCE_WR(ramfc, 0x78/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1);
++
++      if (!IS_G80) {
++              INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id);
++              INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance);
++
++              INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */
++              INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);
++      }
++
++      ret = nv50_fifo_channel_enable(dev, chan->id, 0);
++      if (ret) {
++              DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret);
++              nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++              return ret;
++      }
++
++      return 0;
++}
++
++void
++nv50_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      nv50_fifo_channel_disable(dev, chan->id, 0);
++
++      /* Dummy channel, also used on ch 127 */
++      if (chan->id == 0)
++              nv50_fifo_channel_disable(dev, 127, 0);
++
++      if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id)
++              NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127);
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv50_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      /*XXX: incomplete, only touches the regs that NV does */
++
++      NV_WRITE(0x3244, 0);
++      NV_WRITE(0x3240, 0);
++
++      NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4));
++      NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4));
++      NV_WRITE(0x3254, 1);
++      NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4));
++
++      if (!IS_G80) {
++              NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4));
++              NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4));
++      }
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
++      return 0;
++}
++
++int
++nv50_fifo_save_context(struct nouveau_channel *chan)
++{
++      DRM_DEBUG("ch%d\n", chan->id);
++      DRM_ERROR("stub!\n");
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_graph.c git-nokia/drivers/gpu/drm-tungsten/nv50_graph.c
+--- git/drivers/gpu/drm-tungsten/nv50_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,8286 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
++
++static void
++nv50_graph_init_reset(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  pmc_e);
++}
++
++static void
++nv50_graph_init_intr(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff);
++      NV_WRITE(0x400138, 0xffffffff);
++      NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff);
++}
++
++static void
++nv50_graph_init_regs__nv(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(0x400804, 0xc0000000);
++      NV_WRITE(0x406800, 0xc0000000);
++      NV_WRITE(0x400c04, 0xc0000000);
++      NV_WRITE(0x401804, 0xc0000000);
++      NV_WRITE(0x405018, 0xc0000000);
++      NV_WRITE(0x402000, 0xc0000000);
++
++      NV_WRITE(0x400108, 0xffffffff);
++
++      NV_WRITE(0x400824, 0x00004000);
++      NV_WRITE(0x400500, 0x00010001);
++}
++
++static void
++nv50_graph_init_regs(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */);
++}
++
++static uint32_t nv50_ctx_voodoo[] = {
++      0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
++      0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
++      0x00700009, 0x00417e4d, 0x00401e44, 0x00401e05, 0x00401e0d, 0x00415a06,
++      0x00600005, 0x004015c5, 0x00600011, 0x00401c0b, 0x0090ffff, 0x0091ffff,
++      0x00200020, 0x00600008, 0x0050004c, 0x00600009, 0x00415a45, 0x0041754d,
++      0x0070009d, 0x004022cf, 0x0070009f, 0x0050009f, 0x00401fc0, 0x00200080,
++      0x00600008, 0x00401f4f, 0x00401fc0, 0x004025cc, 0x00700081, 0x00200000,
++      0x00600006, 0x00700000, 0x00111bfc, 0x00700080, 0x00700083, 0x00200047,
++      0x00600006, 0x0011020a, 0x002005c0, 0x00600007, 0x00300000, 0x00c000ff,
++      0x00c800ff, 0x00416507, 0x00202627, 0x008000ff, 0x00403c8c, 0x005000cb, 
++      0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, 0x00170202, 0x0011020a,
++      0x00200032, 0x0010020d, 0x001b0242, 0x00120302, 0x00140402, 0x00180500,
++      0x00130509, 0x00150550, 0x00110605, 0x001e0607, 0x00110700, 0x00110900,
++      0x00110902, 0x00110a00, 0x00160b02, 0x00110b28, 0x00140b2b, 0x00110c01,
++      0x00111400, 0x00111405, 0x00111407, 0x00111409, 0x0011140b, 0x002000ea,
++      0x00101500, 0x0040640f, 0x0040644b, 0x00213700, 0x00600007, 0x00200440,
++      0x008800ff, 0x0070008f, 0x0040648c, 0x005000cb, 0x00000000, 0x001118f8,
++      0x0020002b, 0x00101a05, 0x00131c00, 0x00111c04, 0x00141c20, 0x00111c25,
++      0x00131c40, 0x00111c44, 0x00141c60, 0x00111c65, 0x00131c80, 0x00111c84,
++      0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00111cc4, 0x00141ce0, 0x00111ce5,
++      0x00131d00, 0x00111d04, 0x00141d20, 0x00111d25, 0x00131d40, 0x00111d44,
++      0x00141d60, 0x00111d65, 0x00131f00, 0x00191f40, 0x00409ee0, 0x00200217,
++      0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
++      0x00122100, 0x00122103, 0x00162200, 0x0040960f, 0x0040964b, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040968c, 0x005000cb,
++      0x00000000, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
++      0x0011238b, 0x00192394, 0x0040b0e1, 0x00200285, 0x00600006, 0x00200044,
++      0x00102480, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, 0x00122503,
++      0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, 0x00122780,
++      0x0011278b, 0x00192794, 0x0040cce2, 0x002002f3, 0x00600006, 0x00200044,
++      0x00102880, 0x001128c6, 0x001528c9, 0x0040c00f, 0x0040c04b, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040c08c, 0x005000cb,
++      0x00000000, 0x001928d0, 0x00122900, 0x00122903, 0x00162a00, 0x00122a07,
++      0x00112a80, 0x00112b00, 0x00112b02, 0x00122b80, 0x00112b8b, 0x00192b94, 
++      0x0040dee3, 0x00200361, 0x00600006, 0x00200044, 0x00102c80, 0x00112cc6,
++      0x00152cc9, 0x00192cd0, 0x00122d00, 0x00122d03, 0x00162e00, 0x00122e07,
++      0x00112e80, 0x00112f00, 0x00112f02, 0x00122f80, 0x00112f8b, 0x00192f94,
++      0x0040fae4, 0x002003cf, 0x00600006, 0x00200044, 0x00103080, 0x0040ec0f,
++      0x0040ec4b, 0x00213700, 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 
++      0x0040ec8c, 0x005000cb, 0x00000000, 0x001130c6, 0x001530c9, 0x001930d0,
++      0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
++      0x00113302, 0x00123380, 0x0011338b, 0x00193394, 0x00410ce5, 0x0020043d,
++      0x00600006, 0x00200044, 0x00103480, 0x001134c6, 0x001534c9, 0x001934d0,
++      0x00123500, 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700,
++      0x00113702, 0x00123780, 0x0011378b, 0x00193794, 0x004128e6, 0x002004ab,
++      0x00600006, 0x00200044, 0x00103880, 0x00411a0f, 0x00411a4b, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x00411a8c, 0x005000cb,
++      0x00000000, 0x001138c6, 0x001538c9, 0x001938d0, 0x00123900, 0x00123903,
++      0x00163a00, 0x00123a07, 0x00113a80, 0x00113b00, 0x00113b02, 0x00123b80,
++      0x00113b8b, 0x00193b94, 0x00413ae7, 0x00200519, 0x00600006, 0x00200044,
++      0x00103c80, 0x00113cc6, 0x00153cc9, 0x00193cd0, 0x00123d00, 0x00123d03,
++      0x00163e00, 0x00123e07, 0x00113e80, 0x00113f00, 0x00113f02, 0x00123f80,
++      0x00113f8b, 0x00193f94, 0x00000000, 0x0041410f, 0x005000cb, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x005000cb, 0x00414487, 0x0060000a,
++      0x00000000, 0x00415300, 0x007000a0, 0x00700080, 0x002005c0, 0x00600007,
++      0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, 0x00200000,
++      0x00600006, 0x00111bfe, 0x0041754d, 0x00700000, 0x00200000, 0x00600006,
++      0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, 0x00600004,
++      0x0050004a, 0x00415f88, 0x0060000b, 0x00200000, 0x00600006, 0x00700000,
++      0x0041750b, 0x00111bfd, 0x00402e4d, 0x00202627, 0x008000fd, 0x005000cb,
++      0x00c00002, 0x002005c0, 0x00600007, 0x0020015f, 0x00800002, 0x005000cb,
++      0x00c01802, 0x002024c8, 0x00800002, 0x005000cb, 0x00403a4d, 0x0060000b,
++      0x0041734d, 0x00700001, 0x00700003, 0x00417906, 0x00417a05, 0x0060000d,
++      0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, 0x0070001c,
++      0x0060000c, ~0
++};
++
++static uint32_t nv84_ctx_voodoo[] = {
++      0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
++      0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
++      0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06,
++      0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
++      0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801,
++      0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
++      0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d,
++      0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
++      0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
++      0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007,
++      0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
++      0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007,
++      0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff,
++      0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
++      0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
++      0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
++      0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
++      0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
++      0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c,
++      0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
++      0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
++      0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4,
++      0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed,
++      0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
++      0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
++      0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1,
++      0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c,
++      0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500,
++      0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
++      0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb,
++      0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0,
++      0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00,
++      0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3,
++      0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c,
++      0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00,
++      0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02,
++      0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389,
++      0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0,
++      0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
++      0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5,
++      0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c,
++      0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500,
++      0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
++      0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f,
++      0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb,
++      0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080,
++      0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb,
++      0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000,
++      0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d,
++      0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000,
++      0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916,
++      0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160,
++      0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb,
++      0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003,
++      0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006,
++      0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0
++};
++ 
++static uint32_t nv86_ctx_voodoo[] = {
++      0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
++      0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
++      0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906,
++      0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
++      0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801,
++      0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
++      0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d,
++      0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
++      0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
++      0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007,
++      0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
++      0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007,
++      0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff,
++      0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
++      0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
++      0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
++      0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
++      0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
++      0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
++      0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c,
++      0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
++      0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
++      0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044,
++      0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103,
++      0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
++      0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb,
++      0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387,
++      0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280,
++      0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000,
++      0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000,
++      0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081,
++      0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006,
++      0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd,
++      0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002,
++      0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d,
++      0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905,
++      0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e,
++      0x0060000c, ~0
++};
++
++static uint32_t nv92_ctx_voodoo[] = {
++      0x0070008E, 0x0070009C, 0x00200020, 0x00600008, 0x0050004C, 0x00400E89,
++      0x00200000, 0x00600007, 0x00300000, 0x00C000FF, 0x00200000, 0x008000FF,
++      0x00700009, 0x0041924D, 0x00402944, 0x00402905, 0x0040290D, 0x00416E06,
++      0x00600005, 0x004015C5, 0x00600011, 0x0040270B, 0x004021C5, 0x00700000,
++      0x00700081, 0x00600004, 0x0050004A, 0x00219600, 0x00600007, 0x00C02701,
++      0x0020002E, 0x00800001, 0x005000CB, 0x0090FFFF, 0x0091FFFF, 0x00200020,
++      0x00600008, 0x0050004C, 0x00600009, 0x00416E45, 0x0041894D, 0x0070009D,
++      0x00402DCF, 0x0070009F, 0x0050009F, 0x00402AC0, 0x00200080, 0x00600008,
++      0x00402A4F, 0x00402AC0, 0x004030CC, 0x00700081, 0x00200000, 0x00600006,
++      0x00700000, 0x00111BFC, 0x00700083, 0x00300000, 0x00219600, 0x00600007,
++      0x00C00A01, 0x0020001E, 0x00800001, 0x005000CB, 0x00C000FF, 0x00700080,
++      0x00700083, 0x00200047, 0x00600006, 0x0011020A, 0x00200540, 0x00600007,
++      0x00300000, 0x00C000FF, 0x00C800FF, 0x00417907, 0x00202DD2, 0x008000FF,
++      0x0040508C, 0x005000CB, 0x00A0023F, 0x00200040, 0x00600006, 0x0070000F,
++      0x00170202, 0x0011020A, 0x00200032, 0x0010020D, 0x001C0242, 0x00120302,
++      0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000F,
++      0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110A00, 0x00160B02,
++      0x00120B28, 0x00140B2B, 0x00110C01, 0x00111400, 0x00111405, 0x00111407,
++      0x00111409, 0x0011140B, 0x002000CB, 0x00101500, 0x0040790F, 0x0040794B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040798C,
++      0x005000CB, 0x00000000, 0x00141A05, 0x00131A0C, 0x00131C00, 0x00121C04,
++      0x00141C20, 0x00111C25, 0x00131C40, 0x00121C44, 0x00141C60, 0x00111C65,
++      0x00131C80, 0x00121C84, 0x00141CA0, 0x00111CA5, 0x00131CC0, 0x00121CC4,
++      0x00141CE0, 0x00111CE5, 0x00131F00, 0x00191F40, 0x0040A1E0, 0x002001C9,
++      0x00600006, 0x00200044, 0x00102080, 0x001120C6, 0x001520C9, 0x001920D0,
++      0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
++      0x00112302, 0x00122380, 0x0011238B, 0x00112394, 0x0011239C, 0x0040BEE1,
++      0x00200230, 0x00600006, 0x00200044, 0x00102480, 0x0040AF0F, 0x0040AF4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040AF8C,
++      0x005000CB, 0x00000000, 0x001124C6, 0x001524C9, 0x001924D0, 0x00122500,
++      0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
++      0x00122780, 0x0011278B, 0x00112794, 0x0011279C, 0x0040D1E2, 0x00200297,
++      0x00600006, 0x00200044, 0x00102880, 0x001128C6, 0x001528C9, 0x001928D0,
++      0x00122900, 0x00122903, 0x00162A00, 0x00122A07, 0x00112A80, 0x00112B00,
++      0x00112B02, 0x00122B80, 0x00112B8B, 0x00112B94, 0x00112B9C, 0x0040EEE3,
++      0x002002FE, 0x00600006, 0x00200044, 0x00102C80, 0x0040DF0F, 0x0040DF4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040DF8C,
++      0x005000CB, 0x00000000, 0x00112CC6, 0x00152CC9, 0x00192CD0, 0x00122D00,
++      0x00122D03, 0x00162E00, 0x00122E07, 0x00112E80, 0x00112F00, 0x00112F02,
++      0x00122F80, 0x00112F8B, 0x00112F94, 0x00112F9C, 0x004101E4, 0x00200365,
++      0x00600006, 0x00200044, 0x00103080, 0x001130C6, 0x001530C9, 0x001930D0,
++      0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
++      0x00113302, 0x00123380, 0x0011338B, 0x00113394, 0x0011339C, 0x00411EE5,
++      0x002003CC, 0x00600006, 0x00200044, 0x00103480, 0x00410F0F, 0x00410F4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00410F8C,
++      0x005000CB, 0x00000000, 0x001134C6, 0x001534C9, 0x001934D0, 0x00123500,
++      0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
++      0x00123780, 0x0011378B, 0x00113794, 0x0011379C, 0x004131E6, 0x00200433,
++      0x00600006, 0x00200044, 0x00103880, 0x001138C6, 0x001538C9, 0x001938D0,
++      0x00123900, 0x00123903, 0x00163A00, 0x00123A07, 0x00113A80, 0x00113B00,
++      0x00113B02, 0x00123B80, 0x00113B8B, 0x00113B94, 0x00113B9C, 0x00414EE7,
++      0x0020049A, 0x00600006, 0x00200044, 0x00103C80, 0x00413F0F, 0x00413F4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00413F8C,
++      0x005000CB, 0x00000000, 0x00113CC6, 0x00153CC9, 0x00193CD0, 0x00123D00,
++      0x00123D03, 0x00163E00, 0x00123E07, 0x00113E80, 0x00113F00, 0x00113F02,
++      0x00123F80, 0x00113F8B, 0x00113F94, 0x00113F9C, 0x00000000, 0x0041550F,
++      0x005000CB, 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x005000CB,
++      0x00415887, 0x0060000A, 0x00000000, 0x00416700, 0x007000A0, 0x00700080,
++      0x00200540, 0x00600007, 0x00200004, 0x00C000FF, 0x008000FF, 0x005000CB,
++      0x00700000, 0x00200000, 0x00600006, 0x00111BFE, 0x0041894D, 0x00700000,
++      0x00200000, 0x00600006, 0x00111BFE, 0x00700080, 0x0070001D, 0x0040114D,
++      0x00700081, 0x00600004, 0x0050004A, 0x00417388, 0x0060000B, 0x00200000,
++      0x00600006, 0x00700000, 0x0041890B, 0x00111BFD, 0x0040424D, 0x00202DD2,
++      0x008000FD, 0x005000CB, 0x00C00002, 0x00200540, 0x00600007, 0x00200160,
++      0x00800002, 0x005000CB, 0x00C01802, 0x00202C72, 0x00800002, 0x005000CB,
++      0x00404E4D, 0x0060000B, 0x0041874D, 0x00700001, 0x00700003, 0x00418D06,
++      0x00418E05, 0x0060000D, 0x00700005, 0x0070000D, 0x00700006, 0x0070000B,
++      0x0070000E, 0x0070001C, 0x0060000C, ~0
++};
++
++static uint32_t nvaa_ctx_voodoo[] = {
++      0x0070009c, 0x00300000, 0x0044f109, 0x00402d09, 0x0040e551, 0x00400a44,
++      0x00400a05, 0x00400a0d, 0x0070008e, 0x0040124d, 0x0070009d, 0x0045004d,
++      0x00700097, 0x00450121, 0x004446a1, 0x0044764d, 0x0044824d, 0x0070001d,
++      0x00401806, 0x00600005, 0x00444445, 0x0044308b, 0x00401845, 0x0040234d,
++      0x00700081, 0x00401ccf, 0x0070009f, 0x0050009f, 0x0044dc4d, 0x00700017,
++      0x0040230b, 0x00447d4d, 0x00450221, 0x004456a1, 0x007000a0, 0x00700001,
++      0x00700003, 0x00402706, 0x00402805, 0x0060000d, 0x00700005, 0x0070000d,
++      0x00700006, 0x00700002, 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c,
++      0x00000000, 0x0090ffff, 0x0091ffff, 0x0044d44d, 0x00600009, 0x0048004d,
++      0x00700096, 0x00403acf, 0x0070009f, 0x0050009f, 0x0040e551, 0x004036c0,
++      0x00200080, 0x00600008, 0x0040364f, 0x004036c0, 0x00403ecc, 0x00403651,
++      0x00700016, 0x0048004d, 0x00600011, 0x0048004d, 0x0044364d, 0x0070008e,
++      0x00700081, 0x0044704d, 0x00447d4d, 0x00700083, 0x00300000, 0x00212740,
++      0x00600007, 0x00c00b01, 0x00200022, 0x00800001, 0x005000cb, 0x00c000ff,
++      0x00445e4d, 0x0048004d, 0x0044ce08, 0x0044734d, 0x00448b4d, 0x00445e4d,
++      0x0044e24d, 0x0044764d, 0x0044824d, 0x0048004d, 0x00700083, 0x0045034d,
++      0x00a0023f, 0x00200040, 0x00600006, 0x0044fc4d, 0x00448d4d, 0x002001d0,
++      0x0044b860, 0x00200280, 0x0038ffff, 0x0044cc4d, 0x00300000, 0x005000cb,
++      0x00451c4d, 0x005000cb, 0x0044d007, 0x0048004d, 0x0044794d, 0x00111bfc,
++      0x0048004d, 0x0044794d, 0x00111bfd, 0x0048004d, 0x0044794d, 0x00111bfe,
++      0x0048004d, 0x00200000, 0x00700000, 0x00600006, 0x0048004d, 0x00200001,
++      0x00600006, 0x0044fc4d, 0x0011020a, 0x0048004d, 0x00300000, 0x00c3ffff,
++      0x00200000, 0x00600007, 0x00700000, 0x00200008, 0x008000ff, 0x005000cb,
++      0x0048004d, 0x00000000, 0x0048004d, 0x00000000, 0x00170202, 0x00200032,
++      0x0010020d, 0x001e0242, 0x001102c0, 0x00120302, 0x00150402, 0x00180500,
++      0x00130509, 0x00150550, 0x00110605, 0x00200013, 0x00100607, 0x00110700,
++      0x00110900, 0x00120902, 0x00110a00, 0x00160b02, 0x00120b28, 0x00140b2b,
++      0x00110c01, 0x00110d01, 0x00111400, 0x00111405, 0x00111407, 0x00111409,
++      0x0011140b, 0x002000d4, 0x00101500, 0x00141a05, 0x00131a0c, 0x00131c00,
++      0x00131c04, 0x00141c20, 0x00131c25, 0x00131f00, 0x00131f04, 0x00111f08,
++      0x00111f0b, 0x00200015, 0x00101f40, 0x0048004d, 0x00600006, 0x00451c4d,
++      0x00112020, 0x00112022, 0x00200085, 0x00102040, 0x001120c8, 0x001420ca,
++      0x001b20cf, 0x00122100, 0x00122103, 0x00162140, 0x00122147, 0x00122153,
++      0x001121a0, 0x001221c0, 0x001121cb, 0x001121d4, 0x001521d8, 0x0048004d,
++      0x00000000, 0x0048004d, 0x0060000b, 0x0048004d, 0x0060000a, 0x0048004d,
++      0x0060000b, 0x0040d24d, 0x00200020, 0x00600008, 0x0050004c, 0x0048004d,
++      0x002003e8, 0x00600008, 0x0050004c, 0x0048004d, 0x00600004, 0x0050004a,
++      0x0048004d, 0x00c000ff, 0x00c800ff, 0x0048004d, 0x00c000ff, 0x00c800ff,
++      0x0048004d, 0x00700016, 0x0070008e, 0x00700082, 0x00500041, 0x0044d84d,
++      0x00700095, 0x005000d1, 0x00600016, 0x00500052, 0x00700002, 0x00700015,
++      0x0040284d, 0x0070008e, 0x0044d44d, 0x00200000, 0x00600007, 0x00300000,
++      0x00c000ff, 0x00200000, 0x008000ff, 0x00700009, 0x0070000e, 0x0048004d,
++      0x00700080, 0x00480017, 0x00700000, 0x0048004d, 0x0048004d, 0x0048004d,
++      0x0048004d, 0x0070008e, 0x0044d44d, 0x00700083, 0x0044df4d, 0x00450c4d,
++      0x0070000f, 0x00410b8c, 0x005000cb, 0x0048004d, 0x00200280, 0x00600007,
++      0x00452307, 0x00451187, 0x0048004d, 0x00000000, 0x00202070, 0x0044fc4d,
++      0x008000ff, 0x0048004d, 0x00210600, 0x00600007, 0x00200428, 0x0044fc4d,
++      0x008800ff, 0x0048004d, 0x0048000f, 0x0048004b, 0x0045164d, 0x0070008f,
++      0x0048008c, 0x005000cb, 0x0048004d, 0x00202070, 0x0044fc4d, 0x008000fd,
++      0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200161, 0x0044fc4d,
++      0x00800002, 0x005000cb, 0x00c00002, 0x00201f0e, 0x0044fc4d, 0x00800002,
++      0x005000cb, 0x0048004d, ~0
++};
++
++static int
++nv50_graph_init_ctxctl(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t *voodoo = NULL;
++
++      DRM_DEBUG("\n");
++
++      switch (dev_priv->chipset) {
++      case 0x50:
++              voodoo = nv50_ctx_voodoo;
++              break;
++      case 0x84:
++              voodoo = nv84_ctx_voodoo;
++              break;
++      case 0x86:
++              voodoo = nv86_ctx_voodoo;
++              break;
++      case 0x92:
++              voodoo = nv92_ctx_voodoo;
++              break;
++      case 0xaa:
++              voodoo = nvaa_ctx_voodoo;
++              break;
++      default:
++              DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset);
++              return -EINVAL;
++      }
++
++      NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
++      while (*voodoo != ~0) {
++              NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo);
++              voodoo++;
++      }
++
++      NV_WRITE(0x400320, 4);
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
++
++      return 0;
++}
++
++int
++nv50_graph_init(struct drm_device *dev)
++{
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      nv50_graph_init_reset(dev);
++      nv50_graph_init_intr(dev);
++      nv50_graph_init_regs__nv(dev);
++      nv50_graph_init_regs(dev);
++
++      ret = nv50_graph_init_ctxctl(dev);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++void
++nv50_graph_takedown(struct drm_device *dev)
++{
++      DRM_DEBUG("\n");
++}
++
++static void
++nv50_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++      INSTANCE_WR(ctx, 0x00120/4, 0xff400040);
++      INSTANCE_WR(ctx, 0x00124/4, 0xfff00080);
++      INSTANCE_WR(ctx, 0x00128/4, 0xfff70090);
++      INSTANCE_WR(ctx, 0x0012c/4, 0xffe806a8);
++      INSTANCE_WR(ctx, 0x001d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x001d8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00214/4, 0x0000fe0c);
++      INSTANCE_WR(ctx, 0x00228/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00254/4, 0x0001fd87);
++      INSTANCE_WR(ctx, 0x00268/4, 0x00001018);
++      INSTANCE_WR(ctx, 0x0026c/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002a4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x002a8/4, 0x0001005f);
++      INSTANCE_WR(ctx, 0x002b0/4, 0x00000600);
++      INSTANCE_WR(ctx, 0x002b4/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x002c8/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x002e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x002e8/4, 0x00300080);
++      INSTANCE_WR(ctx, 0x002ec/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00308/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0030c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00318/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0031c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00334/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00338/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0033c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00350/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00354/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00360/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x0000000a);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00420/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00438/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00444/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00450/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00484/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0048c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004a8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x004c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004c8/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x004cc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00558/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000012);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00598/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005b8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x005d4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005fc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00600/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00628/4, 0x00000200);
++      INSTANCE_WR(ctx, 0x00630/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00634/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x00638/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00644/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00648/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x0064c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0065c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00660/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00668/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00678/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00680/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00688/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00690/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00698/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x006a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x006ac/4, 0x00000f80);
++      INSTANCE_WR(ctx, 0x006f4/4, 0x007f0080);
++      INSTANCE_WR(ctx, 0x00730/4, 0x007f0080);
++      INSTANCE_WR(ctx, 0x00754/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x00758/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00760/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00760/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00760/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x00760/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x00778/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x0077c/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00784/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00784/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00784/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x00784/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x0079c/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x007a0/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x007a8/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x007c0/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x007c4/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x007cc/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x007e4/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x007e8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x007f0/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x00808/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x0080c/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00814/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00814/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00814/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x00814/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x0082c/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x00834/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x00840/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x00844/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x0085c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00860/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00864/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00874/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00878/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x0089c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x008a4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x008ac/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x008b4/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x008b8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x008dc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x008f8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x0091c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00924/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00934/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00938/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00960/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x0096c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00984/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00984/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00984/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00984/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x009e4/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x009e8/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00a14/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00a1c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00a2c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00a30/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00a5c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00a64/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00a6c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00a70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00a94/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00a9c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00aac/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00ab0/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00ad4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00adc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ae4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00aec/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00af0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00b18/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x00b24/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00b9c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00ba0/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00bd0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00be4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00c14/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00c24/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00c28/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00c4c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00c50/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00c54/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00c64/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00c68/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00c9c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00ca4/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00ca8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00cd0/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00d54/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00d58/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00d84/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d88/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00d8c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d9c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00da0/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00dc4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00dcc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00dd4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00ddc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00de0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00e04/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e08/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00e0c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e1c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00e20/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00e44/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00e4c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00e54/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00e5c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00e60/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00e88/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x00e94/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00f0c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00f10/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00f3c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00f40/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00f44/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00f54/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00f58/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00f7c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00f84/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00f8c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00f94/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00f98/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00fbc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fc0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00fc4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fd4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00fd8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00ffc/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01004/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0100c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01014/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01018/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01040/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x0104c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01064/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x01064/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x01064/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x01064/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x010c4/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x010c8/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x010f4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x010f8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x010fc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0110c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01110/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x01134/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x0113c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01144/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0114c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01150/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01174/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01178/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x0117c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0118c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01190/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x011b4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x011bc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x011c4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x011cc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x011d0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x011f8/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x01204/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01244/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x01244/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x01264/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x0127c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x01280/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x012ac/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x012b0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x012b4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x012c4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x012c8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x012ec/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x012f4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x012fc/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01304/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01308/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0132c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01330/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x01334/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01344/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01348/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x0136c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01374/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0137c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01384/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01388/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x013b0/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x013bc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x01434/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x01438/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x01444/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x01444/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01444/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01444/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x01444/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x01444/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01444/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01464/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01468/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x0146c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0147c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01480/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x014a4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x014ac/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x014b4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x014bc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x014c0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x014e4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x014e8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x014ec/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x014fc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01500/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x01524/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x0152c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01534/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0153c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01540/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01568/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x01574/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x015ec/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x015f0/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02b40/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x02b60/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02b80/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02ba0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02bc0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02be0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c60/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02c80/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02ca0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02cc0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c5e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0c600/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x44f80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x44fa0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x44fc0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x45000/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x45040/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x45060/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x45080/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x450e0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x45100/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x45160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4c9a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4cc80/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4ce00/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4ce20/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4ce60/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4cee0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4cf20/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x4d080/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4d0a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4d0c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4d1e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4d260/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4d480/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4d4a0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x4d4c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d4e0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d500/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d520/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d940/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d960/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d980/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d9a0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d9c0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d9e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da40/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da60/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da80/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4daa0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4dac0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4dae0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4db00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4db20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4db40/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x4db80/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01784/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01824/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x01a04/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x01bc4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01be4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c24/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c44/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x01c84/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x01e24/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x042e4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x04324/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e84/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x15524/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x15764/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15784/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x157c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x157e4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15804/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15824/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x15864/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x15924/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15964/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15984/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x159a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x159c4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x159e4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x15ac4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15b04/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15b24/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15b44/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15be4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x15c24/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15c44/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x15cc4/4, 0x04444480);
++      INSTANCE_WR(ctx, 0x16444/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x164e4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x16544/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x16584/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x165a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x165c4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x165e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16604/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16624/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x185a4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x185c4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x18664/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x187e4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x18804/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x16708/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x16768/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x16948/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x16a28/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16a48/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x16aa8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16d08/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x16de8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ee8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x16f08/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17108/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x171a8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x171c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x171e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x17268/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x17288/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x17508/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17528/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17548/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17568/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17588/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x175a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x175c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x175e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17608/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17628/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17648/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17668/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17688/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x176a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x176c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x176e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17708/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x17be8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x17c08/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x17c68/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17ca8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17cc8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17ce8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17d08/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x18108/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x18128/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x18608/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x18648/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18668/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18688/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x186a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x186c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x186e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18728/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x18768/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x188a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x188c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x188e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18908/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18ec8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18ee8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18f28/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x18fa8/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x18fc8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18fe8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19028/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19048/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x19088/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x190a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x190c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19108/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x19188/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x191a8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x19288/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x192a8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x199c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19a28/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1a148/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x1a168/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x1a1c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a4a8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1a508/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1a588/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1a5a8/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1aa68/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x1aaa8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1aae8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ab08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ab48/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1aba8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1abe8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ac08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ac48/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1ac68/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ac88/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x1acc8/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x25528/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x25548/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x25588/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x255a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x255c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x25608/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x25648/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x256c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x256e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25708/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25728/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25748/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25768/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25788/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x257a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x257c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x257e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25808/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25828/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25848/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25868/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25888/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x258a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25d48/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x25d68/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x25dc8/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x0180c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0184c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x019ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01a0c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01a6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01b4c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01c6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01c8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01ccc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01f4c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0216c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0218c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x021ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x021cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x021ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0220c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0222c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0224c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0226c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0228c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0230c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0232c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0234c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0268c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x026cc/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x027ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x027ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0282c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x029cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02acc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02bcc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02cac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02ccc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02cec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02d0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02d2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02d6c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x02dac/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0306c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0308c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x030ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x030cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x030ec/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0310c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0312c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x031ac/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x031cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03e4c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03e8c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0402c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0404c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x040ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0418c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x042ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x042cc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0430c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0458c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x047ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x047cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x047ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0480c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0482c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0484c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0486c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0488c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x048ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x048cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x048ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0490c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0492c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0494c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0496c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0498c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x04ccc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x04d0c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x04dec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0500c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0510c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0520c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x052ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0530c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0532c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0534c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0536c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x053ac/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x053ec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x056ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x056cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x056ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0570c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0572c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0574c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0576c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x057ec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0580c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0648c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x064cc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0666c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0668c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x066ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x067cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x068ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0690c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0694c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x06bcc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x06dec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06eac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06ecc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06eec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06fac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06fcc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0730c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0734c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0742c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0746c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x074ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0764c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0774c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0784c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x078ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0790c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0792c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0794c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0796c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0798c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x079ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x079ec/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x07a2c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x07cec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07d0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07d2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07d4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07d6c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07d8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07dac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07e2c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x07e4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08acc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x08b0c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x08cac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08ccc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x08d2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08e0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x08f2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08f4c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x08f8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0920c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0942c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0944c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0946c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0948c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x094ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x094cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x094ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0950c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0952c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0954c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0956c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0958c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x095ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x095cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x095ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0960c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0994c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0998c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x09a6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09aac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09aec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09c8c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x09d8c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x09e8c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x09f2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09f4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09f6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09f8c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x09fac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09fcc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x09fec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a02c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0a06c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0a32c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a34c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0a36c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a38c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a3ac/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0a3cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a3ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a46c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0a48c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b10c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0b14c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0b2ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b30c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0b36c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b44c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0b56c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b58c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0b5cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b84c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0ba6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0ba8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0baac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bacc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0baec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bbac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bbcc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bbec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bc0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bc2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bc4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bf8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0bfcc/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0c0ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c0ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c12c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c2cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0c3cc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0c4cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0c56c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c58c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c5ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c5cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0c5ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c60c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0c62c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c66c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0c6ac/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0c96c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c98c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0c9ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c9cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c9ec/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0ca0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ca2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0caac/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0cacc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0d74c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0d78c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0d92c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0d94c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0d9ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0da8c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0dbac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0dbcc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0dc0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0de8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0e0ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e0cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e0ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e10c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e12c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e14c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e16c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e18c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e1ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e1cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e1ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e20c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e22c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e24c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e26c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e28c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e5cc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0e60c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0e6ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0e72c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0e76c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0e90c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0ea0c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0eb0c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0ebac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ebcc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ebec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ec0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0ec2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ec4c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0ec6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ecac/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0ecec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0efac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0efcc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0efec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f00c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f02c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0f04c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f06c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f0ec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0f10c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01730/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a10/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a30/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01ad0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b90/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bb0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02050/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02070/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x02090/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020b0/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020f0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x02110/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x02250/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16710/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16950/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ad0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16af0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b10/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b30/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b50/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cf0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16db0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f90/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17010/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17050/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17150/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x17230/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17250/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17290/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172b0/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172d0/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x17430/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17450/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17470/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17490/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174d0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17530/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17550/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17570/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17590/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17610/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17630/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17730/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17750/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17850/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17910/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bf0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cd0/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182b0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182d0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182f0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18310/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18330/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18350/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18370/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18390/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184b0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184f0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18510/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18530/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18550/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18570/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18590/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18610/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18630/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18650/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18670/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18690/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187f0/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18810/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18830/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18870/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18930/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189d0/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a50/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cb0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cd0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cf0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e90/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x19190/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x19210/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x19270/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19350/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19410/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19470/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194b0/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19510/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19530/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x19730/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19750/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197b0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19830/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19950/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19990/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199b0/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199d0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199f0/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a10/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a50/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a90/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d90/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e90/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19eb0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ed0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19ef0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f10/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3d0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a410/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a430/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a450/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a470/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a510/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a530/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea70/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecb0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee50/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee70/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee90/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eeb0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f050/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f110/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f330/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f350/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4b0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f510/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f590/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5f0/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f610/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f630/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7d0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f810/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f830/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f850/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f890/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f910/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f930/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f950/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f970/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f990/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa90/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fab0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbb0/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fdd0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fed0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff50/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fff0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30030/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x30070/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30610/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30630/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30650/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30670/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30690/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306b0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306d0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306f0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30710/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30810/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30830/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30850/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30870/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30890/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308f0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30910/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30930/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30950/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30970/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30990/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a10/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a50/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b50/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b90/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c30/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c50/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c90/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d30/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30db0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30dd0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fb0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ff0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31010/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x31030/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x31050/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310d0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311d0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314f0/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x31570/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x31610/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31630/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31730/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d0/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x31810/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31830/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31850/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31870/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31890/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a90/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ab0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ad0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b10/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b90/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cf0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d10/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d30/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d50/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d70/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31db0/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x01734/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019f4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a14/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a34/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01ad4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b34/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b74/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b94/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bb4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02054/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02074/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x02094/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020b4/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020f4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x02114/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x02254/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166f4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16714/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16954/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ad4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16af4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b14/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b34/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b54/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c74/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16db4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f54/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f74/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f94/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17014/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17054/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17154/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x17234/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17254/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17294/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172b4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172d4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x17434/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17474/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17494/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17534/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17554/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17574/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17594/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17614/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17634/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17734/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17754/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17854/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178b4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17914/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a74/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b74/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bf4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c14/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c54/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c94/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cd4/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d14/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d54/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182b4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182d4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182f4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18314/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18334/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18354/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18374/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18394/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183b4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184b4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184d4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184f4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18514/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18534/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18554/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18574/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18594/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185b4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185d4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185f4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18614/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18634/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18654/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18674/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18694/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186b4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187f4/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18814/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18834/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18874/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188d4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188f4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18934/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189d4/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a54/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a74/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c94/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cb4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cd4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cf4/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d74/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e74/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e94/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x19194/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x19214/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x19274/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19354/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19414/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19474/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194b4/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194d4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194f4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19514/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19534/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x19734/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19754/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19774/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197b4/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19834/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19994/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199b4/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199d4/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199f4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a14/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a54/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a94/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d74/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d94/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e34/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e94/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19eb4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ed4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19ef4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f14/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3d4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3f4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a414/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a434/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a454/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a474/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a514/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a534/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea74/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecb4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee34/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee54/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee74/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee94/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eeb4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f054/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f114/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2f4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f314/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f334/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f354/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f374/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3b4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4b4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f514/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f594/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5f4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f614/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f634/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f794/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f814/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f834/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f854/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f894/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f934/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f974/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f994/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa94/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fab4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbb4/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc14/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc74/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fdd4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fed4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff54/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff74/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fff4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30034/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x30074/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30614/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30634/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30654/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30674/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30694/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306b4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306d4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306f4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30714/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30814/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30834/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30854/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30874/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30894/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308b4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308d4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308f4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30914/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30934/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30954/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30974/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30994/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309b4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309d4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309f4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a14/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a54/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b54/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b74/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b94/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c34/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c54/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c94/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d34/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30db4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30dd4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f14/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fb4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ff4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31014/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x31034/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x31054/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310d4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311d4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314f4/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x31574/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315d4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x31614/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31634/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31734/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31774/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d4/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x31814/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31834/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31854/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31874/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31894/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a94/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ab4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ad4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b14/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b94/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d14/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d34/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d54/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d74/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31db4/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x01738/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a18/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a38/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01ad8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b38/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b78/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b98/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02058/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02078/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x02098/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020b8/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020d8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020f8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x02118/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021d8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x02258/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16718/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16958/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ad8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16af8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b18/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b38/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b58/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16db8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f58/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f98/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17018/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17058/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17158/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x17238/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17258/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17298/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172b8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172d8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x17438/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17458/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17478/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17498/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174d8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17538/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17558/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17578/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17598/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17618/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17638/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17738/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17758/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17858/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178b8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17918/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b78/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bf8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c58/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cd8/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d58/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182b8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182d8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182f8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18318/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18338/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18358/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18378/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18398/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183b8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184b8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184d8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184f8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18518/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18538/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18558/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18578/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18598/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185b8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185d8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185f8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18618/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18638/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18658/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18678/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18698/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186b8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186f8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187f8/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18818/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18838/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18878/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188d8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188f8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18938/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189d8/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a58/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a78/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cb8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cd8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cf8/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e78/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e98/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x19198/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x19218/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x19278/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19358/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19418/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19478/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194b8/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194d8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194f8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19518/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19538/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x19738/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19758/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197b8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19838/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19998/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199b8/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199d8/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199f8/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a18/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a58/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a98/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d78/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d98/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e38/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e98/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19eb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ed8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19ef8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f18/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3d8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a418/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a438/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a458/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a478/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a518/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a538/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea78/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecb8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee38/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee58/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee78/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee98/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eeb8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f058/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f118/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2f8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f318/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f338/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f358/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f378/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3b8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4b8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f518/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f598/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5f8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f618/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f638/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f798/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7d8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f818/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f838/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f858/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f898/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f918/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f938/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f978/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f998/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa98/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fab8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbb8/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc18/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc78/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fdd8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fed8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff58/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fff8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30038/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x30078/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30618/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30638/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30658/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30678/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30698/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306b8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306d8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306f8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30718/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30818/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30838/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30858/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30878/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30898/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308b8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308d8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308f8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30918/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30938/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30958/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30978/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30998/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309b8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309d8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309f8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a18/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a58/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b58/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b98/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c38/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c58/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c98/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d38/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30db8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30dd8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ff8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31018/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x31038/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x31058/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310d8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311d8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311f8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314f8/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x31578/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x31618/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31638/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31738/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d8/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x31818/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31838/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31858/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31878/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31898/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a98/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ab8/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ad8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b18/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b98/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d18/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d38/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d58/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d78/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31db8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x0173c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a3c/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01adc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b3c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b7c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b9c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bbc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0205c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0207c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0209c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020bc/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020dc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020fc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0211c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021dc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0225c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1671c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1695c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16adc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16afc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b1c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b3c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b5c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16dbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f5c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f9c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fdc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ffc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1701c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1705c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1715c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1723c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1725c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1729c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172bc/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172dc/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x1743c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1745c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1747c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1749c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174dc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1753c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1755c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1757c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1759c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1761c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1763c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1773c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x1775c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1785c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178bc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1791c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a7c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b7c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bfc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c5c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cdc/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d5c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182bc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182dc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182fc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1831c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1833c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1835c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1837c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1839c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183bc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184bc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184dc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184fc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1851c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1853c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1855c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1857c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1859c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185bc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185dc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185fc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1861c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1863c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1865c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1867c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1869c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186bc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187fc/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x1881c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1883c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1887c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188dc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188fc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1893c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189dc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a5c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a7c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cbc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cdc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cfc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d7c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e7c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e9c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1919c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x1921c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1927c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1935c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1941c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1947c/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194bc/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194dc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194fc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1951c/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x1953c/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x1973c/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1975c/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1977c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197bc/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1983c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1995c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1999c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199dc/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199fc/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a1c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a5c/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a9c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d7c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d9c/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e3c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e9c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ebc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19edc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19efc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3dc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a41c/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a43c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a45c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a47c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a51c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a53c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea7c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecbc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee3c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee5c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee7c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee9c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eebc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efdc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f05c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f11c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2fc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f31c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f33c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f35c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f37c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3bc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4bc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f51c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f59c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5fc/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f61c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f63c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f79c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7dc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f81c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f83c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f85c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f89c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f91c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f93c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f95c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f97c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f99c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa9c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fabc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbbc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc1c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc7c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fddc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fedc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff5c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fffc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3003c/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x3007c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3061c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3063c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3065c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3067c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3069c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306bc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306dc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306fc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3071c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x3081c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x3083c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3085c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3087c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3089c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308bc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308dc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308fc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3091c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3093c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3095c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3097c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3099c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309bc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309dc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309fc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a1c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a5c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b5c/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b7c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b9c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bdc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c3c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c5c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c9c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d3c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30dbc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30ddc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fbc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ffc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3101c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3103c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3105c/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310dc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311dc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314fc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x3157c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315dc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x3161c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3163c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3173c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3177c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317dc/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x3181c/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x3183c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x3185c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x3187c/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x3189c/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a9c/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31abc/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31adc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b1c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b9c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d1c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d3c/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d5c/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d7c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31dbc/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x4dc00/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4dc40/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc60/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc80/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dca0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd00/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd60/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd80/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dda0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dde0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4de00/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df80/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dfa0/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfc0/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfe0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e040/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e0a0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0c0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e120/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e140/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e2a0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e380/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3a0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3e0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e400/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e420/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e440/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e460/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e4a0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e560/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e580/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5e0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e700/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e7a0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8e0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e900/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e920/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e940/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e960/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e980/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55e00/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc24/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc44/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc64/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc84/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dce4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd44/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd64/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd84/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddc4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4dde4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df64/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df84/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfa4/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfc4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e024/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e084/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0a4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e104/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e124/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e284/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e364/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e384/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3a4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3c4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e404/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e424/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e444/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e484/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e544/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e564/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5c4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6e4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e784/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8c4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e904/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e924/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e944/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e964/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e984/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55de4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e24/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc28/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc48/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc68/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc88/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dce8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd48/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd68/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd88/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddc8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4dde8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df68/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df88/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfa8/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfc8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e028/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e088/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0a8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e108/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e128/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e288/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e368/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e388/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3a8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3e8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e408/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e428/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e448/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e488/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e548/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e568/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5a8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5c8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6e8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e788/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8c8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8e8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e908/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e928/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e948/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e968/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e988/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55de8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e28/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc2c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc4c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc6c/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc8c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd4c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd6c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddcc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddec/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df6c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df8c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfac/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfcc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e02c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e08c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0ac/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e10c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e12c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e28c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e36c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e38c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3ac/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3cc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e40c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e42c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e44c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e48c/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e54c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e56c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5ac/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6ec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e78c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e90c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e92c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e94c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e96c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e98c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9cc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55dec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc30/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc50/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc70/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc90/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcf0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd50/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd70/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddd0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddf0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df90/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfb0/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfd0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e090/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0b0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e110/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e130/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e290/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e370/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e390/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3b0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3d0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e410/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e430/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e450/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e490/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e550/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e570/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5b0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5d0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6f0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e790/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8d0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e910/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e930/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e950/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e970/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e990/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55df0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc34/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc54/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc74/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc94/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd54/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd74/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd94/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddd4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddf4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df74/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df94/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfb4/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfd4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e034/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e094/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0b4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e114/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e134/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e294/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e374/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e394/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3b4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3d4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e414/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e434/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e494/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e554/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e574/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5b4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5d4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6f4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e794/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e934/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e974/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e994/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9d4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55df4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc38/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc58/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc78/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc98/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd58/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd78/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddd8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddf8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df78/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df98/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfb8/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfd8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e038/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e098/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0b8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e118/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e138/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e298/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e378/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e398/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3b8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3d8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e418/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e438/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e458/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e498/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e558/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e578/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5b8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5d8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6f8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e798/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8d8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e918/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e938/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e978/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e998/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55df8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc3c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc5c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc7c/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc9c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd5c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd7c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dddc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddfc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df7c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df9c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfbc/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfdc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e03c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e09c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0bc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e11c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e13c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e29c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e37c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e39c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3bc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3dc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e41c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e43c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e45c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e49c/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e55c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e57c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5bc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5dc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6fc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e79c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8dc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e91c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e93c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e95c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e97c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e99c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9dc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55dfc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00858/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00760/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00774/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00784/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00798/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007bc/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007e0/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00804/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00814/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00828/4, 0x00000000);
++}
++
++static void
++nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++      INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x001d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x001d8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00218/4, 0x0000fe0c);
++      INSTANCE_WR(ctx, 0x0022c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00258/4, 0x00000187);
++      INSTANCE_WR(ctx, 0x0026c/4, 0x00001018);
++      INSTANCE_WR(ctx, 0x00270/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002ac/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x002b0/4, 0x044d00df);
++      INSTANCE_WR(ctx, 0x002b8/4, 0x00000600);
++      INSTANCE_WR(ctx, 0x002d0/4, 0x01000000);
++      INSTANCE_WR(ctx, 0x002d4/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002dc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x002f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x002f8/4, 0x000e0080);
++      INSTANCE_WR(ctx, 0x002fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00318/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0031c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00328/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0032c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00344/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00348/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00360/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00364/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00378/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0037c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00384/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0038c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00394/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00398/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x003a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003a8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003c0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003dc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00404/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00408/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x0040c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00420/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0042c/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x00434/4, 0x00000029);
++      INSTANCE_WR(ctx, 0x00438/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x00440/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00444/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00448/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x00454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0045c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00464/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00468/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004b4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x004e4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004ec/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004f0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x004f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00500/4, 0x00000012);
++      INSTANCE_WR(ctx, 0x00504/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00508/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x0050c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00520/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00530/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00534/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00560/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x00564/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00570/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0057c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00588/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0058c/4, 0x00000e00);
++      INSTANCE_WR(ctx, 0x00590/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x0059c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x00000200);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x005e0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x005f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x005fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0060c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00614/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0061c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00624/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00630/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00634/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0063c/4, 0x00000f80);
++      INSTANCE_WR(ctx, 0x00684/4, 0x007f0080);
++      INSTANCE_WR(ctx, 0x006c0/4, 0x007f0080);
++
++      INSTANCE_WR(ctx, 0x006e4/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x006e8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x006f0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x006f4/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x006f8/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x006fc/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00700/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x0070c/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x00710/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00718/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0071c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00720/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00724/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00728/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x00734/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x00738/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00740/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00744/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00748/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x0074c/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00750/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x0075c/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x00760/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00768/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0076c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00770/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00774/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00778/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x00784/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x0078c/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x00798/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x0079c/4, 0x00000022);
++
++      INSTANCE_WR(ctx, 0x007b4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007b8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x007bc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x007d0/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x007f4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x007fc/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00804/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0080c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00810/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00834/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00838/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x0083c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0084c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00850/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00874/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x0087c/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00884/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0088c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00890/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x008b8/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x008c4/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x008dc/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00904/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00908/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x0090c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00910/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00918/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00924/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00928/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00930/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00934/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x0093c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00940/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00950/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00954/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00958/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00968/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0096c/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00990/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00998/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x009a8/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x009d0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x009d4/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x009d8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x009e8/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x009ec/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00a20/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00a28/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00a2c/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00a60/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00a78/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00a7c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00a80/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00a84/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00aa4/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00aa8/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00aac/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00ab0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00ab4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00ac0/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00ac4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00ac8/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00acc/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00ad0/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00ad8/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00adc/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00aec/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00af0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00af4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00b04/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00b08/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00b2c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00b34/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00b44/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00b48/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00b6c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00b70/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00b74/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00b88/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00bbc/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00bc4/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00bc8/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00c14/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00c18/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00c20/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00c3c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00c40/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00c44/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00c48/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00c4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00c50/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00c5c/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00c60/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00c64/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00c68/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00c6c/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00c74/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00c78/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00c88/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00c90/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00ca0/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00ca4/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00cc8/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00cd0/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00ce0/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00ce4/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00d08/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d0c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00d10/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d20/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00d24/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00d48/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00d50/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00d58/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00d60/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00d8c/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00d98/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00db0/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00db4/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00db8/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00dbc/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00dd8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ddc/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00de0/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00de4/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00de8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00dec/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00df8/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00dfc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00e00/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00e04/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00e08/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00e10/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00e14/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00e24/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e28/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00e2c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e3c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00e40/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00e64/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00e6c/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00e74/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00e7c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00e80/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00ea4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00ea8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00ebc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00ec0/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00ee4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00eec/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00efc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00f00/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00f28/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00f34/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00f4c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00f50/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00f54/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00f58/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00f74/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00f78/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00f7c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00f80/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00f84/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00f88/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00f94/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00f98/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00f9c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00fa0/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00fa4/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00fac/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00fb0/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00fc0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fc4/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00fc8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fd8/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00fdc/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x01000/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01008/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x01010/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01018/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x0101c/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x01040/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01044/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x01048/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01058/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0105c/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x01080/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01088/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x01090/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01098/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x0109c/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x010c4/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x010d0/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x010e8/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x010ec/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x010f0/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x010f4/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x01110/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01114/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x01118/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x0111c/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x01120/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01124/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01130/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x01134/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x01138/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x0113c/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x01140/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x01148/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x0114c/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x01230/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01284/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0130c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01324/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x0134c/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x014ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x014f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01504/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x0150c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01510/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01530/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x0156c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x015d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01630/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0164c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01650/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01670/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01690/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x016c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x016e4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01724/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01744/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0176c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01784/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x0178c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x017cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01924/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01a4c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01b30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b50/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b90/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x01bb0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01bd0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01c70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01c90/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01cac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01ccc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01cec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d10/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01d2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01dac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01dcc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01dec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01e0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01e2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01e4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0218c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x021cc/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x022ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x022ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0232c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x024cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x025cc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x026cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x027ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x027cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x027ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0280c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0282c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0284c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0286c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x028ac/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x028ec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x02bac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02bcc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02bec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c2c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02c4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02cec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02d0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0398c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x039cc/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x03b6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03b8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x03bec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03ccc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03dec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03e04/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x03e0c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x03e44/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03e4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x040cc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x042ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0430c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0432c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0434c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0436c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0438c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x043ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x043cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x043ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0440c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0442c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0444c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0446c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0448c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x044ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x044cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0480c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0484c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0492c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0496c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x049a4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x049ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04b4c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04c4c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x04d4c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e8c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x04eac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04ecc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x04eec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04f2c/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x04f6c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0522c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0524c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0526c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0528c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052ac/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x052cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0536c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0538c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x083a0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x083c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x083e0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x08400/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08420/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08440/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x084a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x084c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x084e0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08500/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08520/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x11e40/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x11e60/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x15044/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x152e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15304/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15324/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15344/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x15384/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x15444/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15484/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x154a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x154c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x154e4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x15504/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x155e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15624/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15644/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15664/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15704/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x15744/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15764/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x157e4/4, 0x04444480);
++      INSTANCE_WR(ctx, 0x15f64/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x16004/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x16064/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x160a4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x160c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x160e4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x16104/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16124/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16144/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x161b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x161c8/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x161d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16228/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x16408/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x16410/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x164e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16508/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x16568/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16590/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x165b0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x165d0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x165f0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16610/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16730/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x167b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x167c8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x16870/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x168a8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x169a8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x169c8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x16a10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16a30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16a50/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16a70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16a90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ab0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16ad0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16b10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x16bc8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16c10/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x16c68/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16c70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16c88/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x16ca8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x16cf0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x16d10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16d28/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x16d48/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x16d50/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x16d70/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x16d90/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x16de8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ef0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f30/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16f50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f90/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17008/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17010/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17028/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17048/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17050/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17068/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17070/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17088/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17090/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x170a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x170b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x170c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x170d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x170e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x170f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17108/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17128/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17148/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17168/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17188/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171f0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17208/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x17210/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17310/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x17370/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17390/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17410/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x174d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17570/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17670/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x176e8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x176f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17708/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x17710/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17750/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17768/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x177a8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x177c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x177d0/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x177e8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17808/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17810/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17828/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x17850/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17bc4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17be4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17c28/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x17c48/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x17c84/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17c88/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x17db0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17dd0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17df0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e04/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17e10/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e24/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17e30/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e50/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e70/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e90/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17eb0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17fb0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17fd0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x17ff0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18010/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18030/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18050/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18070/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18090/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x180b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x180d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x180f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18110/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18130/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18150/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18168/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x18170/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18190/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x181a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x181b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x181c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x181d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x181e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x181f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x18208/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18228/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18248/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18288/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x182c8/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x182f0/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18310/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18330/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x183d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x183f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18408/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18428/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18430/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x18448/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18468/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x184d0/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x18550/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18570/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x186b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18750/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x187b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x187d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x187f0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18870/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18970/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18990/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x18aa8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x18b08/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x18b48/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18b68/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18b88/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x18bc8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18be8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c28/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x18c90/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x18cc8/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x18ce8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18d08/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x18d10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18d28/4, 0x0000007f);
++      INSTANCE_WR(ctx, 0x18d68/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18d70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18d88/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x18db0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18dc8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x18dd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18de8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18e08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18e48/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x18e50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18ec8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18ee8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x18ef0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18f30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18fb0/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x18fc8/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x18fe8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x18ff0/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x19010/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x19030/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19050/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19070/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x192d0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x192f0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19350/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x194f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19530/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19550/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19570/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19590/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x195b0/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x195f0/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19630/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19708/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19768/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x198f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19910/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19930/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x199d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19a30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19a50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19a70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19a90/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e88/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x19ea8/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x19f08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19f30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19f50/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19f70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19f90/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x19fb0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x19fd0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a070/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a090/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a110/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a1e8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1a248/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1a2c8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1a2e8/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1a808/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x1a848/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1a888/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a8a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a8e8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1a948/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1a988/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a9a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a9e8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1aa08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1aa28/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x1aa68/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x2d2c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2d2e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2d328/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x2d348/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2d368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2d3a8/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x2d3e8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x2d468/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d488/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d4a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d4c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d4e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d508/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d528/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d548/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d568/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d588/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d5a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d5c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d5e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d608/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d628/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d648/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2dae8/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x2db08/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x2db68/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x2e5b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2e5d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2e810/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2e990/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2e9b0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2e9d0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2e9f0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ea10/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eb30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ebb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ec70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee50/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2ee70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2eeb0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2eed0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ef10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f010/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f070/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f0f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f110/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f150/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f170/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f190/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f330/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f350/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f390/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f410/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f430/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f450/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f470/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f490/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f4b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f4d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f4f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5f0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f610/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2f710/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x2f770/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f810/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f970/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2faf0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fb10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fb50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fb90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fbd0/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x2fc10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x301b0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x301d0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x301f0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30210/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30230/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30250/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30270/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30290/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x302b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x303b0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x303d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x303f0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30410/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30430/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30450/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30470/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30490/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x304b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x304d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x304f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30510/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30530/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30550/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30570/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30590/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x305b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x305d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x305f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x306f0/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30710/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30730/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x307d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x307f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30830/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x308d0/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x30950/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30970/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30ab0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30b50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30b90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30bb0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30bd0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30bf0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x30c70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30d70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30d90/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x31090/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x31110/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x31170/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x311b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x311d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31250/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x312f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31330/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x313b0/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x313f0/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31410/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31430/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31450/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31470/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x316d0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x316f0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31710/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31750/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x318f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31930/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31950/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31970/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31990/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x319b0/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x319f0/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x4a7e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4a800/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4a820/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4a840/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x4a880/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4a8c0/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x4a8e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4a900/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x4a960/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4a980/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x4a9e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x52220/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x52500/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x526a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x526c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x52700/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x52780/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x527c0/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x52920/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x52940/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x52960/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x52a80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x52b00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x52d40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x52d60/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x52d80/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x52da0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x52dc0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x52de0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53200/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53220/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53240/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53260/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53280/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x532a0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x532c0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x532e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53300/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53320/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53340/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53360/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53380/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x533a0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x533c0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x533e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53400/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x53460/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x53500/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53524/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53540/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53544/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53560/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53564/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53580/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53584/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x535a0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x535e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53600/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53644/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53660/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53684/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x536a0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x536a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x536c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53824/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53840/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53844/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53860/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53864/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53880/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53884/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x538a0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x538e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53900/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53944/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53960/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53984/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x539a0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x539a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x539c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53b04/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53b20/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53be4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c00/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c04/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c20/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c24/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c40/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c44/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c60/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c64/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53c80/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53c84/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53ca0/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53ca4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53cc0/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53cc4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53ce0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53d04/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x53d20/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x53dc4/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53de0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53de4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x53e00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x53e24/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53e40/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53e44/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x53e60/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x53f64/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x53f80/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x54004/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x54020/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x54144/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x54160/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x54164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54180/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54184/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x541c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x541c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54200/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54204/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54220/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54244/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x54260/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x5b6a4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x5b6c0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x5b6e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x5b700/4, 0x00000001);
++}
++
++static void
++nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x10C/4, 0x30);
++      INSTANCE_WR(ctx, 0x1D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
++      INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
++      INSTANCE_WR(ctx, 0x22C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x258/4, 0x187);
++      INSTANCE_WR(ctx, 0x26C/4, 0x1018);
++      INSTANCE_WR(ctx, 0x270/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2AC/4, 0x4);
++      INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF);
++      INSTANCE_WR(ctx, 0x2B8/4, 0x600);
++      INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
++      INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x2F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F8/4, 0x80);
++      INSTANCE_WR(ctx, 0x2FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x318/4, 0x2);
++      INSTANCE_WR(ctx, 0x31C/4, 0x1);
++      INSTANCE_WR(ctx, 0x328/4, 0x1);
++      INSTANCE_WR(ctx, 0x32C/4, 0x100);
++      INSTANCE_WR(ctx, 0x344/4, 0x2);
++      INSTANCE_WR(ctx, 0x348/4, 0x1);
++      INSTANCE_WR(ctx, 0x34C/4, 0x1);
++      INSTANCE_WR(ctx, 0x35C/4, 0x1);
++      INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x36C/4, 0x1);
++      INSTANCE_WR(ctx, 0x370/4, 0x1);
++      INSTANCE_WR(ctx, 0x378/4, 0x1);
++      INSTANCE_WR(ctx, 0x37C/4, 0x1);
++      INSTANCE_WR(ctx, 0x380/4, 0x1);
++      INSTANCE_WR(ctx, 0x384/4, 0x4);
++      INSTANCE_WR(ctx, 0x388/4, 0x1);
++      INSTANCE_WR(ctx, 0x38C/4, 0x1);
++      INSTANCE_WR(ctx, 0x390/4, 0x1);
++      INSTANCE_WR(ctx, 0x394/4, 0x7);
++      INSTANCE_WR(ctx, 0x398/4, 0x1);
++      INSTANCE_WR(ctx, 0x39C/4, 0x7);
++      INSTANCE_WR(ctx, 0x3A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3C0/4, 0x100);
++      INSTANCE_WR(ctx, 0x3C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3D4/4, 0x100);
++      INSTANCE_WR(ctx, 0x3D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3DC/4, 0x100);
++      INSTANCE_WR(ctx, 0x3E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3F0/4, 0x100);
++      INSTANCE_WR(ctx, 0x404/4, 0x4);
++      INSTANCE_WR(ctx, 0x408/4, 0x70);
++      INSTANCE_WR(ctx, 0x40C/4, 0x80);
++      INSTANCE_WR(ctx, 0x420/4, 0xC);
++      INSTANCE_WR(ctx, 0x428/4, 0x8);
++      INSTANCE_WR(ctx, 0x42C/4, 0x14);
++      INSTANCE_WR(ctx, 0x434/4, 0x29);
++      INSTANCE_WR(ctx, 0x438/4, 0x27);
++      INSTANCE_WR(ctx, 0x43C/4, 0x26);
++      INSTANCE_WR(ctx, 0x440/4, 0x8);
++      INSTANCE_WR(ctx, 0x444/4, 0x4);
++      INSTANCE_WR(ctx, 0x448/4, 0x27);
++      INSTANCE_WR(ctx, 0x454/4, 0x1);
++      INSTANCE_WR(ctx, 0x458/4, 0x2);
++      INSTANCE_WR(ctx, 0x45C/4, 0x3);
++      INSTANCE_WR(ctx, 0x460/4, 0x4);
++      INSTANCE_WR(ctx, 0x464/4, 0x5);
++      INSTANCE_WR(ctx, 0x468/4, 0x6);
++      INSTANCE_WR(ctx, 0x46C/4, 0x7);
++      INSTANCE_WR(ctx, 0x470/4, 0x1);
++      INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x4E4/4, 0x80);
++      INSTANCE_WR(ctx, 0x4E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x4EC/4, 0x4);
++      INSTANCE_WR(ctx, 0x4F0/4, 0x3);
++      INSTANCE_WR(ctx, 0x4F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x500/4, 0x12);
++      INSTANCE_WR(ctx, 0x504/4, 0x10);
++      INSTANCE_WR(ctx, 0x508/4, 0xC);
++      INSTANCE_WR(ctx, 0x50C/4, 0x1);
++      INSTANCE_WR(ctx, 0x51C/4, 0x4);
++      INSTANCE_WR(ctx, 0x520/4, 0x2);
++      INSTANCE_WR(ctx, 0x524/4, 0x4);
++      INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x55C/4, 0x4);
++      INSTANCE_WR(ctx, 0x560/4, 0x14);
++      INSTANCE_WR(ctx, 0x564/4, 0x1);
++      INSTANCE_WR(ctx, 0x570/4, 0x2);
++      INSTANCE_WR(ctx, 0x57C/4, 0x1);
++      INSTANCE_WR(ctx, 0x584/4, 0x2);
++      INSTANCE_WR(ctx, 0x588/4, 0x1000);
++      INSTANCE_WR(ctx, 0x58C/4, 0xE00);
++      INSTANCE_WR(ctx, 0x590/4, 0x1000);
++      INSTANCE_WR(ctx, 0x594/4, 0x1E00);
++      INSTANCE_WR(ctx, 0x59C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BC/4, 0x200);
++      INSTANCE_WR(ctx, 0x5C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5C8/4, 0x70);
++      INSTANCE_WR(ctx, 0x5CC/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC/4, 0x70);
++      INSTANCE_WR(ctx, 0x5E0/4, 0x80);
++      INSTANCE_WR(ctx, 0x5F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5F4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x5FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x60C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x614/4, 0x2);
++      INSTANCE_WR(ctx, 0x61C/4, 0x1);
++      INSTANCE_WR(ctx, 0x624/4, 0x1);
++      INSTANCE_WR(ctx, 0x62C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x630/4, 0xCF);
++      INSTANCE_WR(ctx, 0x634/4, 0x1);
++      INSTANCE_WR(ctx, 0x63C/4, 0xF80);
++      INSTANCE_WR(ctx, 0x684/4, 0x7F0080);
++      INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080);
++      INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x6E8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x6F0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x6F4/4, 0x1F);
++      INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x700/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x710/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x718/4, 0x1000);
++      INSTANCE_WR(ctx, 0x71C/4, 0x1F);
++      INSTANCE_WR(ctx, 0x720/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x724/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x728/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x734/4, 0x10040);
++      INSTANCE_WR(ctx, 0x73C/4, 0x22);
++      INSTANCE_WR(ctx, 0x748/4, 0x10040);
++      INSTANCE_WR(ctx, 0x74C/4, 0x22);
++      INSTANCE_WR(ctx, 0x764/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x768/4, 0x160000);
++      INSTANCE_WR(ctx, 0x76C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x780/4, 0x8C0000);
++      INSTANCE_WR(ctx, 0x7A4/4, 0x10401);
++      INSTANCE_WR(ctx, 0x7AC/4, 0x78);
++      INSTANCE_WR(ctx, 0x7B4/4, 0xBF);
++      INSTANCE_WR(ctx, 0x7BC/4, 0x1210);
++      INSTANCE_WR(ctx, 0x7C0/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x7E4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7E8/4, 0x160000);
++      INSTANCE_WR(ctx, 0x7EC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x800/4, 0x8C0000);
++      INSTANCE_WR(ctx, 0x824/4, 0x10401);
++      INSTANCE_WR(ctx, 0x82C/4, 0x78);
++      INSTANCE_WR(ctx, 0x834/4, 0xBF);
++      INSTANCE_WR(ctx, 0x83C/4, 0x1210);
++      INSTANCE_WR(ctx, 0x840/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x868/4, 0x27070);
++      INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x88C/4, 0x120407);
++      INSTANCE_WR(ctx, 0x890/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x894/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x898/4, 0x30201);
++      INSTANCE_WR(ctx, 0x8B4/4, 0x40);
++      INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x8BC/4, 0x141210);
++      INSTANCE_WR(ctx, 0x8C0/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x8C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x8C8/4, 0x3);
++      INSTANCE_WR(ctx, 0x8D4/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x8D8/4, 0x100);
++      INSTANCE_WR(ctx, 0x8DC/4, 0x3800);
++      INSTANCE_WR(ctx, 0x8E0/4, 0x404040);
++      INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x8EC/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x7BA0/4, 0x21);
++      INSTANCE_WR(ctx, 0x7BC0/4, 0x1);
++      INSTANCE_WR(ctx, 0x7BE0/4, 0x2);
++      INSTANCE_WR(ctx, 0x7C00/4, 0x100);
++      INSTANCE_WR(ctx, 0x7C20/4, 0x100);
++      INSTANCE_WR(ctx, 0x7C40/4, 0x1);
++      INSTANCE_WR(ctx, 0x7CA0/4, 0x1);
++      INSTANCE_WR(ctx, 0x7CC0/4, 0x2);
++      INSTANCE_WR(ctx, 0x7CE0/4, 0x100);
++      INSTANCE_WR(ctx, 0x7D00/4, 0x100);
++      INSTANCE_WR(ctx, 0x7D20/4, 0x1);
++      INSTANCE_WR(ctx, 0x11640/4, 0x4);
++      INSTANCE_WR(ctx, 0x11660/4, 0x4);
++      INSTANCE_WR(ctx, 0x49FE0/4, 0x4);
++      INSTANCE_WR(ctx, 0x4A000/4, 0x4);
++      INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x4A040/4, 0x3);
++      INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x4A0E0/4, 0x1);
++      INSTANCE_WR(ctx, 0x4A100/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x4A180/4, 0x27);
++      INSTANCE_WR(ctx, 0x4A1E0/4, 0x1);
++      INSTANCE_WR(ctx, 0x51A20/4, 0x1);
++      INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x51F00/4, 0x80);
++      INSTANCE_WR(ctx, 0x51F80/4, 0x80);
++      INSTANCE_WR(ctx, 0x51FC0/4, 0x3F);
++      INSTANCE_WR(ctx, 0x52120/4, 0x2);
++      INSTANCE_WR(ctx, 0x52140/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x52160/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x52280/4, 0x4);
++      INSTANCE_WR(ctx, 0x52300/4, 0x4);
++      INSTANCE_WR(ctx, 0x52540/4, 0x1);
++      INSTANCE_WR(ctx, 0x52560/4, 0x1001);
++      INSTANCE_WR(ctx, 0x52580/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52C00/4, 0x10);
++      INSTANCE_WR(ctx, 0x52C60/4, 0x3);
++      INSTANCE_WR(ctx, 0xA84/4, 0xF);
++      INSTANCE_WR(ctx, 0xB24/4, 0x20);
++      INSTANCE_WR(ctx, 0xD04/4, 0x1A);
++      INSTANCE_WR(ctx, 0xEC4/4, 0x4);
++      INSTANCE_WR(ctx, 0xEE4/4, 0x4);
++      INSTANCE_WR(ctx, 0xF24/4, 0x4);
++      INSTANCE_WR(ctx, 0xF44/4, 0x8);
++      INSTANCE_WR(ctx, 0xF84/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x1124/4, 0xF);
++      INSTANCE_WR(ctx, 0x3604/4, 0xF);
++      INSTANCE_WR(ctx, 0x3644/4, 0x1);
++      INSTANCE_WR(ctx, 0x41A4/4, 0xF);
++      INSTANCE_WR(ctx, 0x14844/4, 0xF);
++      INSTANCE_WR(ctx, 0x14AE4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14B04/4, 0x100);
++      INSTANCE_WR(ctx, 0x14B24/4, 0x100);
++      INSTANCE_WR(ctx, 0x14B44/4, 0x11);
++      INSTANCE_WR(ctx, 0x14B84/4, 0x8);
++      INSTANCE_WR(ctx, 0x14C44/4, 0x1);
++      INSTANCE_WR(ctx, 0x14C84/4, 0x1);
++      INSTANCE_WR(ctx, 0x14CA4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14CC4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14CE4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x14D04/4, 0x2);
++      INSTANCE_WR(ctx, 0x14DE4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14E24/4, 0x1);
++      INSTANCE_WR(ctx, 0x14E44/4, 0x1);
++      INSTANCE_WR(ctx, 0x14E64/4, 0x1);
++      INSTANCE_WR(ctx, 0x14F04/4, 0x4);
++      INSTANCE_WR(ctx, 0x14F44/4, 0x1);
++      INSTANCE_WR(ctx, 0x14F64/4, 0x15);
++      INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480);
++      INSTANCE_WR(ctx, 0x15764/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x15804/4, 0x100);
++      INSTANCE_WR(ctx, 0x15864/4, 0x10001);
++      INSTANCE_WR(ctx, 0x158A4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x158C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x158E4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x15904/4, 0x1);
++      INSTANCE_WR(ctx, 0x15924/4, 0x4);
++      INSTANCE_WR(ctx, 0x15944/4, 0x2);
++      INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15C68/4, 0x4);
++      INSTANCE_WR(ctx, 0x15C88/4, 0x1A);
++      INSTANCE_WR(ctx, 0x15CE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x16028/4, 0xF);
++      INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16148/4, 0x11);
++      INSTANCE_WR(ctx, 0x16348/4, 0x4);
++      INSTANCE_WR(ctx, 0x163E8/4, 0x2);
++      INSTANCE_WR(ctx, 0x16408/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x16428/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x164A8/4, 0x5);
++      INSTANCE_WR(ctx, 0x164C8/4, 0x52);
++      INSTANCE_WR(ctx, 0x16568/4, 0x1);
++      INSTANCE_WR(ctx, 0x16788/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16808/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16828/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16848/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16868/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16888/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16908/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16928/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16948/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16968/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16988/4, 0x10);
++      INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x16E88/4, 0x5);
++      INSTANCE_WR(ctx, 0x16EE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16FA8/4, 0x3);
++      INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x173C8/4, 0x1A);
++      INSTANCE_WR(ctx, 0x17408/4, 0x3);
++      INSTANCE_WR(ctx, 0x178E8/4, 0x102);
++      INSTANCE_WR(ctx, 0x17928/4, 0x4);
++      INSTANCE_WR(ctx, 0x17948/4, 0x4);
++      INSTANCE_WR(ctx, 0x17968/4, 0x4);
++      INSTANCE_WR(ctx, 0x17988/4, 0x4);
++      INSTANCE_WR(ctx, 0x179A8/4, 0x4);
++      INSTANCE_WR(ctx, 0x179C8/4, 0x4);
++      INSTANCE_WR(ctx, 0x17A08/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17A48/4, 0x102);
++      INSTANCE_WR(ctx, 0x17B88/4, 0x4);
++      INSTANCE_WR(ctx, 0x17BA8/4, 0x4);
++      INSTANCE_WR(ctx, 0x17BC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x17BE8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18228/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18288/4, 0x804);
++      INSTANCE_WR(ctx, 0x182C8/4, 0x4);
++      INSTANCE_WR(ctx, 0x182E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18308/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18348/4, 0x4);
++      INSTANCE_WR(ctx, 0x18368/4, 0x4);
++      INSTANCE_WR(ctx, 0x183A8/4, 0x10);
++      INSTANCE_WR(ctx, 0x18448/4, 0x804);
++      INSTANCE_WR(ctx, 0x18468/4, 0x1);
++      INSTANCE_WR(ctx, 0x18488/4, 0x1A);
++      INSTANCE_WR(ctx, 0x184A8/4, 0x7F);
++      INSTANCE_WR(ctx, 0x184E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x18508/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18548/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18568/4, 0x4);
++      INSTANCE_WR(ctx, 0x18588/4, 0x4);
++      INSTANCE_WR(ctx, 0x185C8/4, 0x10);
++      INSTANCE_WR(ctx, 0x18648/4, 0x1);
++      INSTANCE_WR(ctx, 0x18668/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18748/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x18768/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18E88/4, 0x1);
++      INSTANCE_WR(ctx, 0x18EE8/4, 0x10);
++      INSTANCE_WR(ctx, 0x19608/4, 0x88);
++      INSTANCE_WR(ctx, 0x19628/4, 0x88);
++      INSTANCE_WR(ctx, 0x19688/4, 0x4);
++      INSTANCE_WR(ctx, 0x19968/4, 0x26);
++      INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x19A48/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19A68/4, 0x10);
++      INSTANCE_WR(ctx, 0x19F88/4, 0x52);
++      INSTANCE_WR(ctx, 0x19FC8/4, 0x26);
++      INSTANCE_WR(ctx, 0x1A008/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A028/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A068/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x1A108/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A128/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A168/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A188/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x24A48/4, 0x4);
++      INSTANCE_WR(ctx, 0x24A68/4, 0x4);
++      INSTANCE_WR(ctx, 0x24AA8/4, 0x80);
++      INSTANCE_WR(ctx, 0x24AC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x24AE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x24B28/4, 0x27);
++      INSTANCE_WR(ctx, 0x24B68/4, 0x26);
++      INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C08/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C28/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C48/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C68/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C88/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D08/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D28/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D48/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D68/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D88/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0xB0C/4, 0x2);
++      INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0xCEC/4, 0x1);
++      INSTANCE_WR(ctx, 0xD0C/4, 0x10);
++      INSTANCE_WR(ctx, 0xD6C/4, 0x1);
++      INSTANCE_WR(ctx, 0xE0C/4, 0x4);
++      INSTANCE_WR(ctx, 0xE2C/4, 0x400);
++      INSTANCE_WR(ctx, 0xE4C/4, 0x300);
++      INSTANCE_WR(ctx, 0xE6C/4, 0x1001);
++      INSTANCE_WR(ctx, 0xE8C/4, 0x15);
++      INSTANCE_WR(ctx, 0xF4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x106C/4, 0x1);
++      INSTANCE_WR(ctx, 0x108C/4, 0x10);
++      INSTANCE_WR(ctx, 0x10CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x134C/4, 0x10);
++      INSTANCE_WR(ctx, 0x156C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x158C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x160C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x162C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x164C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x166C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x170C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x172C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x1A8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x1ACC/4, 0x3F);
++      INSTANCE_WR(ctx, 0x1BAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1BEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1C2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1DCC/4, 0x11);
++      INSTANCE_WR(ctx, 0x1ECC/4, 0xF);
++      INSTANCE_WR(ctx, 0x1FCC/4, 0x11);
++      INSTANCE_WR(ctx, 0x20AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x20CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x20EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x210C/4, 0x2);
++      INSTANCE_WR(ctx, 0x212C/4, 0x1);
++      INSTANCE_WR(ctx, 0x214C/4, 0x2);
++      INSTANCE_WR(ctx, 0x216C/4, 0x1);
++      INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x24AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x24CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x24EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x250C/4, 0x1);
++      INSTANCE_WR(ctx, 0x252C/4, 0x2);
++      INSTANCE_WR(ctx, 0x254C/4, 0x1);
++      INSTANCE_WR(ctx, 0x256C/4, 0x1);
++      INSTANCE_WR(ctx, 0x25EC/4, 0x11);
++      INSTANCE_WR(ctx, 0x260C/4, 0x1);
++      INSTANCE_WR(ctx, 0x328C/4, 0x2);
++      INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x346C/4, 0x1);
++      INSTANCE_WR(ctx, 0x348C/4, 0x10);
++      INSTANCE_WR(ctx, 0x34EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x358C/4, 0x4);
++      INSTANCE_WR(ctx, 0x35AC/4, 0x400);
++      INSTANCE_WR(ctx, 0x35CC/4, 0x300);
++      INSTANCE_WR(ctx, 0x35EC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x360C/4, 0x15);
++      INSTANCE_WR(ctx, 0x36CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x37EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x380C/4, 0x10);
++      INSTANCE_WR(ctx, 0x384C/4, 0x1);
++      INSTANCE_WR(ctx, 0x3ACC/4, 0x10);
++      INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x420C/4, 0x10);
++      INSTANCE_WR(ctx, 0x424C/4, 0x3F);
++      INSTANCE_WR(ctx, 0x432C/4, 0x1);
++      INSTANCE_WR(ctx, 0x436C/4, 0x1);
++      INSTANCE_WR(ctx, 0x43AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x454C/4, 0x11);
++      INSTANCE_WR(ctx, 0x464C/4, 0xF);
++      INSTANCE_WR(ctx, 0x474C/4, 0x11);
++      INSTANCE_WR(ctx, 0x482C/4, 0x1);
++      INSTANCE_WR(ctx, 0x484C/4, 0x1);
++      INSTANCE_WR(ctx, 0x486C/4, 0x1);
++      INSTANCE_WR(ctx, 0x488C/4, 0x2);
++      INSTANCE_WR(ctx, 0x48AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x48CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x48EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x492C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x4C2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4C4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x4C6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4C8C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4CAC/4, 0x2);
++      INSTANCE_WR(ctx, 0x4CCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x4CEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x4D6C/4, 0x11);
++      INSTANCE_WR(ctx, 0x4D8C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA30/4, 0x4);
++      INSTANCE_WR(ctx, 0xCF0/4, 0x4);
++      INSTANCE_WR(ctx, 0xD10/4, 0x4);
++      INSTANCE_WR(ctx, 0xD30/4, 0x608080);
++      INSTANCE_WR(ctx, 0xDD0/4, 0x4);
++      INSTANCE_WR(ctx, 0xE30/4, 0x4);
++      INSTANCE_WR(ctx, 0xE50/4, 0x4);
++      INSTANCE_WR(ctx, 0xE70/4, 0x80);
++      INSTANCE_WR(ctx, 0xE90/4, 0x1E00);
++      INSTANCE_WR(ctx, 0xEB0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1350/4, 0x4);
++      INSTANCE_WR(ctx, 0x1370/4, 0x80);
++      INSTANCE_WR(ctx, 0x1390/4, 0x4);
++      INSTANCE_WR(ctx, 0x13B0/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x13D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x13F0/4, 0x1E00);
++      INSTANCE_WR(ctx, 0x1410/4, 0x4);
++      INSTANCE_WR(ctx, 0x14B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x14D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1550/4, 0x4);
++      INSTANCE_WR(ctx, 0x159F0/4, 0x4);
++      INSTANCE_WR(ctx, 0x15A10/4, 0x3);
++      INSTANCE_WR(ctx, 0x15C50/4, 0xF);
++      INSTANCE_WR(ctx, 0x15DD0/4, 0x4);
++      INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15F70/4, 0x1);
++      INSTANCE_WR(ctx, 0x15FF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x160B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16250/4, 0x1);
++      INSTANCE_WR(ctx, 0x16270/4, 0x1);
++      INSTANCE_WR(ctx, 0x16290/4, 0x2);
++      INSTANCE_WR(ctx, 0x162B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x162D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x162F0/4, 0x2);
++      INSTANCE_WR(ctx, 0x16310/4, 0x1);
++      INSTANCE_WR(ctx, 0x16350/4, 0x11);
++      INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x164B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x16530/4, 0x11);
++      INSTANCE_WR(ctx, 0x16550/4, 0x1);
++      INSTANCE_WR(ctx, 0x16590/4, 0xCF);
++      INSTANCE_WR(ctx, 0x165B0/4, 0xCF);
++      INSTANCE_WR(ctx, 0x165D0/4, 0xCF);
++      INSTANCE_WR(ctx, 0x16730/4, 0x1);
++      INSTANCE_WR(ctx, 0x16750/4, 0x1);
++      INSTANCE_WR(ctx, 0x16770/4, 0x2);
++      INSTANCE_WR(ctx, 0x16790/4, 0x1);
++      INSTANCE_WR(ctx, 0x167B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x167D0/4, 0x2);
++      INSTANCE_WR(ctx, 0x167F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16830/4, 0x1);
++      INSTANCE_WR(ctx, 0x16850/4, 0x1);
++      INSTANCE_WR(ctx, 0x16870/4, 0x1);
++      INSTANCE_WR(ctx, 0x16890/4, 0x1);
++      INSTANCE_WR(ctx, 0x168B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x168D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x168F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16910/4, 0x1);
++      INSTANCE_WR(ctx, 0x16930/4, 0x11);
++      INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16A50/4, 0xF);
++      INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x16BB0/4, 0x11);
++      INSTANCE_WR(ctx, 0x16BD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16C50/4, 0x4);
++      INSTANCE_WR(ctx, 0x16D10/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB0/4, 0x11);
++      INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F30/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F50/4, 0x1);
++      INSTANCE_WR(ctx, 0x16F90/4, 0x1);
++      INSTANCE_WR(ctx, 0x16FD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17010/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17050/4, 0x1);
++      INSTANCE_WR(ctx, 0x17090/4, 0x1);
++      INSTANCE_WR(ctx, 0x175F0/4, 0x8);
++      INSTANCE_WR(ctx, 0x17610/4, 0x8);
++      INSTANCE_WR(ctx, 0x17630/4, 0x8);
++      INSTANCE_WR(ctx, 0x17650/4, 0x8);
++      INSTANCE_WR(ctx, 0x17670/4, 0x8);
++      INSTANCE_WR(ctx, 0x17690/4, 0x8);
++      INSTANCE_WR(ctx, 0x176B0/4, 0x8);
++      INSTANCE_WR(ctx, 0x176D0/4, 0x8);
++      INSTANCE_WR(ctx, 0x176F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17810/4, 0x400);
++      INSTANCE_WR(ctx, 0x17830/4, 0x400);
++      INSTANCE_WR(ctx, 0x17850/4, 0x400);
++      INSTANCE_WR(ctx, 0x17870/4, 0x400);
++      INSTANCE_WR(ctx, 0x17890/4, 0x400);
++      INSTANCE_WR(ctx, 0x178B0/4, 0x400);
++      INSTANCE_WR(ctx, 0x178D0/4, 0x400);
++      INSTANCE_WR(ctx, 0x178F0/4, 0x400);
++      INSTANCE_WR(ctx, 0x17910/4, 0x300);
++      INSTANCE_WR(ctx, 0x17930/4, 0x300);
++      INSTANCE_WR(ctx, 0x17950/4, 0x300);
++      INSTANCE_WR(ctx, 0x17970/4, 0x300);
++      INSTANCE_WR(ctx, 0x17990/4, 0x300);
++      INSTANCE_WR(ctx, 0x179B0/4, 0x300);
++      INSTANCE_WR(ctx, 0x179D0/4, 0x300);
++      INSTANCE_WR(ctx, 0x179F0/4, 0x300);
++      INSTANCE_WR(ctx, 0x17A10/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A30/4, 0xF);
++      INSTANCE_WR(ctx, 0x17B30/4, 0x20);
++      INSTANCE_WR(ctx, 0x17B50/4, 0x11);
++      INSTANCE_WR(ctx, 0x17B70/4, 0x100);
++      INSTANCE_WR(ctx, 0x17BB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17C10/4, 0x40);
++      INSTANCE_WR(ctx, 0x17C30/4, 0x100);
++      INSTANCE_WR(ctx, 0x17C70/4, 0x3);
++      INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17D90/4, 0x2);
++      INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17EF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17F90/4, 0x4);
++      INSTANCE_WR(ctx, 0x17FD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17FF0/4, 0x400);
++      INSTANCE_WR(ctx, 0x18010/4, 0x300);
++      INSTANCE_WR(ctx, 0x18030/4, 0x1001);
++      INSTANCE_WR(ctx, 0x180B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x181D0/4, 0xF);
++      INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18550/4, 0x11);
++      INSTANCE_WR(ctx, 0x185B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x185F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x18610/4, 0x1);
++      INSTANCE_WR(ctx, 0x18690/4, 0x1);
++      INSTANCE_WR(ctx, 0x18730/4, 0x1);
++      INSTANCE_WR(ctx, 0x18770/4, 0x1);
++      INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x18830/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x18850/4, 0x40);
++      INSTANCE_WR(ctx, 0x18870/4, 0x100);
++      INSTANCE_WR(ctx, 0x18890/4, 0x10100);
++      INSTANCE_WR(ctx, 0x188B0/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18B50/4, 0x1);
++      INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x18BB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x18D30/4, 0x1);
++      INSTANCE_WR(ctx, 0x18D70/4, 0x1);
++      INSTANCE_WR(ctx, 0x18D90/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x18E30/4, 0x1A);
++}
++
++static void
++nv92_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x10C/4, 0x30);
++      INSTANCE_WR(ctx, 0x1D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
++      INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
++      INSTANCE_WR(ctx, 0x22C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x258/4, 0x187);
++      INSTANCE_WR(ctx, 0x26C/4, 0x1018);
++      INSTANCE_WR(ctx, 0x270/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2AC/4, 0x4);
++      INSTANCE_WR(ctx, 0x2B0/4, 0x42500DF);
++      INSTANCE_WR(ctx, 0x2B8/4, 0x600);
++      INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
++      INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x2F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F8/4, 0x80);
++      INSTANCE_WR(ctx, 0x2FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x318/4, 0x2);
++      INSTANCE_WR(ctx, 0x31C/4, 0x1);
++      INSTANCE_WR(ctx, 0x328/4, 0x1);
++      INSTANCE_WR(ctx, 0x32C/4, 0x100);
++      INSTANCE_WR(ctx, 0x344/4, 0x2);
++      INSTANCE_WR(ctx, 0x348/4, 0x1);
++      INSTANCE_WR(ctx, 0x34C/4, 0x1);
++      INSTANCE_WR(ctx, 0x35C/4, 0x1);
++      INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x36C/4, 0x1);
++      INSTANCE_WR(ctx, 0x370/4, 0x1);
++      INSTANCE_WR(ctx, 0x378/4, 0x1);
++      INSTANCE_WR(ctx, 0x37C/4, 0x1);
++      INSTANCE_WR(ctx, 0x380/4, 0x1);
++      INSTANCE_WR(ctx, 0x384/4, 0x4);
++      INSTANCE_WR(ctx, 0x388/4, 0x1);
++      INSTANCE_WR(ctx, 0x38C/4, 0x1);
++      INSTANCE_WR(ctx, 0x390/4, 0x1);
++      INSTANCE_WR(ctx, 0x394/4, 0x7);
++      INSTANCE_WR(ctx, 0x398/4, 0x1);
++      INSTANCE_WR(ctx, 0x39C/4, 0x7);
++      INSTANCE_WR(ctx, 0x3A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3C0/4, 0x100);
++      INSTANCE_WR(ctx, 0x3C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3D4/4, 0x100);
++      INSTANCE_WR(ctx, 0x3D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3DC/4, 0x100);
++      INSTANCE_WR(ctx, 0x3E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3F0/4, 0x100);
++      INSTANCE_WR(ctx, 0x404/4, 0x4);
++      INSTANCE_WR(ctx, 0x408/4, 0x70);
++      INSTANCE_WR(ctx, 0x40C/4, 0x80);
++      INSTANCE_WR(ctx, 0x420/4, 0xC);
++      INSTANCE_WR(ctx, 0x428/4, 0x8);
++      INSTANCE_WR(ctx, 0x42C/4, 0x14);
++      INSTANCE_WR(ctx, 0x434/4, 0x29);
++      INSTANCE_WR(ctx, 0x438/4, 0x27);
++      INSTANCE_WR(ctx, 0x43C/4, 0x26);
++      INSTANCE_WR(ctx, 0x440/4, 0x8);
++      INSTANCE_WR(ctx, 0x444/4, 0x4);
++      INSTANCE_WR(ctx, 0x448/4, 0x27);
++      INSTANCE_WR(ctx, 0x454/4, 0x1);
++      INSTANCE_WR(ctx, 0x458/4, 0x2);
++      INSTANCE_WR(ctx, 0x45C/4, 0x3);
++      INSTANCE_WR(ctx, 0x460/4, 0x4);
++      INSTANCE_WR(ctx, 0x464/4, 0x5);
++      INSTANCE_WR(ctx, 0x468/4, 0x6);
++      INSTANCE_WR(ctx, 0x46C/4, 0x7);
++      INSTANCE_WR(ctx, 0x470/4, 0x1);
++      INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x4E4/4, 0x80);
++      INSTANCE_WR(ctx, 0x4E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x4EC/4, 0x4);
++      INSTANCE_WR(ctx, 0x4F0/4, 0x3);
++      INSTANCE_WR(ctx, 0x4F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x500/4, 0x12);
++      INSTANCE_WR(ctx, 0x504/4, 0x10);
++      INSTANCE_WR(ctx, 0x508/4, 0xC);
++      INSTANCE_WR(ctx, 0x50C/4, 0x1);
++      INSTANCE_WR(ctx, 0x51C/4, 0x4);
++      INSTANCE_WR(ctx, 0x520/4, 0x2);
++      INSTANCE_WR(ctx, 0x524/4, 0x4);
++      INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x55C/4, 0x4);
++      INSTANCE_WR(ctx, 0x560/4, 0x14);
++      INSTANCE_WR(ctx, 0x564/4, 0x1);
++      INSTANCE_WR(ctx, 0x570/4, 0x2);
++      INSTANCE_WR(ctx, 0x57C/4, 0x1);
++      INSTANCE_WR(ctx, 0x584/4, 0x2);
++      INSTANCE_WR(ctx, 0x588/4, 0x1000);
++      INSTANCE_WR(ctx, 0x58C/4, 0xE00);
++      INSTANCE_WR(ctx, 0x590/4, 0x1000);
++      INSTANCE_WR(ctx, 0x594/4, 0x1E00);
++      INSTANCE_WR(ctx, 0x59C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BC/4, 0x200);
++      INSTANCE_WR(ctx, 0x5C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5C8/4, 0x70);
++      INSTANCE_WR(ctx, 0x5CC/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC/4, 0x70);
++      INSTANCE_WR(ctx, 0x5E0/4, 0x80);
++      INSTANCE_WR(ctx, 0x5F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5F4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x5FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x60C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x614/4, 0x2);
++      INSTANCE_WR(ctx, 0x61C/4, 0x1);
++      INSTANCE_WR(ctx, 0x624/4, 0x1);
++      INSTANCE_WR(ctx, 0x62C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x630/4, 0xCF);
++      INSTANCE_WR(ctx, 0x634/4, 0x1);
++      INSTANCE_WR(ctx, 0x63C/4, 0x1F80);
++      INSTANCE_WR(ctx, 0x654/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x658/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x660/4, 0x1000);
++      INSTANCE_WR(ctx, 0x664/4, 0x1F);
++      INSTANCE_WR(ctx, 0x668/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x66C/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x670/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x67C/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x680/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x688/4, 0x1000);
++      INSTANCE_WR(ctx, 0x68C/4, 0x1F);
++      INSTANCE_WR(ctx, 0x690/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x694/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x698/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x6A4/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x6A8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x6B0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x6B4/4, 0x1F);
++      INSTANCE_WR(ctx, 0x6B8/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x6BC/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x6C0/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x6CC/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x6D0/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x6D8/4, 0x1000);
++      INSTANCE_WR(ctx, 0x6DC/4, 0x1F);
++      INSTANCE_WR(ctx, 0x6E0/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x6E4/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x6E8/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x6F4/4, 0x390040);
++      INSTANCE_WR(ctx, 0x6FC/4, 0x22);
++      INSTANCE_WR(ctx, 0x708/4, 0x390040);
++      INSTANCE_WR(ctx, 0x70C/4, 0x22);
++      INSTANCE_WR(ctx, 0x724/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x728/4, 0x160000);
++      INSTANCE_WR(ctx, 0x72C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x73C/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x740/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x764/4, 0x10401);
++      INSTANCE_WR(ctx, 0x76C/4, 0x78);
++      INSTANCE_WR(ctx, 0x774/4, 0xBF);
++      INSTANCE_WR(ctx, 0x77C/4, 0x1210);
++      INSTANCE_WR(ctx, 0x780/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x7A4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7A8/4, 0x160000);
++      INSTANCE_WR(ctx, 0x7AC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7BC/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x7C0/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x7E4/4, 0x10401);
++      INSTANCE_WR(ctx, 0x7EC/4, 0x78);
++      INSTANCE_WR(ctx, 0x7F4/4, 0xBF);
++      INSTANCE_WR(ctx, 0x7FC/4, 0x1210);
++      INSTANCE_WR(ctx, 0x800/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x828/4, 0x27070);
++      INSTANCE_WR(ctx, 0x834/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x84C/4, 0x120407);
++      INSTANCE_WR(ctx, 0x850/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x854/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x858/4, 0x30201);
++      INSTANCE_WR(ctx, 0x874/4, 0x40);
++      INSTANCE_WR(ctx, 0x878/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x87C/4, 0x141210);
++      INSTANCE_WR(ctx, 0x880/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x884/4, 0x1);
++      INSTANCE_WR(ctx, 0x888/4, 0x3);
++      INSTANCE_WR(ctx, 0x894/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x898/4, 0x100);
++      INSTANCE_WR(ctx, 0x89C/4, 0x3800);
++      INSTANCE_WR(ctx, 0x8A0/4, 0x404040);
++      INSTANCE_WR(ctx, 0x8A4/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x8AC/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x8B0/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x8C0/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x8C4/4, 0x160000);
++      INSTANCE_WR(ctx, 0x8C8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x8D8/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x8DC/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x900/4, 0x10401);
++      INSTANCE_WR(ctx, 0x908/4, 0x78);
++      INSTANCE_WR(ctx, 0x910/4, 0xBF);
++      INSTANCE_WR(ctx, 0x918/4, 0x1210);
++      INSTANCE_WR(ctx, 0x91C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x940/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x944/4, 0x160000);
++      INSTANCE_WR(ctx, 0x948/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x958/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x95C/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x980/4, 0x10401);
++      INSTANCE_WR(ctx, 0x988/4, 0x78);
++      INSTANCE_WR(ctx, 0x990/4, 0xBF);
++      INSTANCE_WR(ctx, 0x998/4, 0x1210);
++      INSTANCE_WR(ctx, 0x99C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x9C4/4, 0x27070);
++      INSTANCE_WR(ctx, 0x9D0/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x9E8/4, 0x120407);
++      INSTANCE_WR(ctx, 0x9EC/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x9F0/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x9F4/4, 0x30201);
++      INSTANCE_WR(ctx, 0xA10/4, 0x40);
++      INSTANCE_WR(ctx, 0xA14/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xA18/4, 0x141210);
++      INSTANCE_WR(ctx, 0xA1C/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xA20/4, 0x1);
++      INSTANCE_WR(ctx, 0xA24/4, 0x3);
++      INSTANCE_WR(ctx, 0xA30/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xA34/4, 0x100);
++      INSTANCE_WR(ctx, 0xA38/4, 0x3800);
++      INSTANCE_WR(ctx, 0xA3C/4, 0x404040);
++      INSTANCE_WR(ctx, 0xA40/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xA48/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xA4C/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xA5C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xA60/4, 0x160000);
++      INSTANCE_WR(ctx, 0xA64/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xA74/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xA78/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xA9C/4, 0x10401);
++      INSTANCE_WR(ctx, 0xAA4/4, 0x78);
++      INSTANCE_WR(ctx, 0xAAC/4, 0xBF);
++      INSTANCE_WR(ctx, 0xAB4/4, 0x1210);
++      INSTANCE_WR(ctx, 0xAB8/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xADC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xAE0/4, 0x160000);
++      INSTANCE_WR(ctx, 0xAE4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xAF4/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xAF8/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xB1C/4, 0x10401);
++      INSTANCE_WR(ctx, 0xB24/4, 0x78);
++      INSTANCE_WR(ctx, 0xB2C/4, 0xBF);
++      INSTANCE_WR(ctx, 0xB34/4, 0x1210);
++      INSTANCE_WR(ctx, 0xB38/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xB60/4, 0x27070);
++      INSTANCE_WR(ctx, 0xB6C/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0xB84/4, 0x120407);
++      INSTANCE_WR(ctx, 0xB88/4, 0x5091507);
++      INSTANCE_WR(ctx, 0xB8C/4, 0x5010202);
++      INSTANCE_WR(ctx, 0xB90/4, 0x30201);
++      INSTANCE_WR(ctx, 0xBAC/4, 0x40);
++      INSTANCE_WR(ctx, 0xBB0/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xBB4/4, 0x141210);
++      INSTANCE_WR(ctx, 0xBB8/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xBBC/4, 0x1);
++      INSTANCE_WR(ctx, 0xBC0/4, 0x3);
++      INSTANCE_WR(ctx, 0xBCC/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xBD0/4, 0x100);
++      INSTANCE_WR(ctx, 0xBD4/4, 0x3800);
++      INSTANCE_WR(ctx, 0xBD8/4, 0x404040);
++      INSTANCE_WR(ctx, 0xBDC/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xBE4/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xBE8/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xBF8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xBFC/4, 0x160000);
++      INSTANCE_WR(ctx, 0xC00/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xC10/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xC14/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xC38/4, 0x10401);
++      INSTANCE_WR(ctx, 0xC40/4, 0x78);
++      INSTANCE_WR(ctx, 0xC48/4, 0xBF);
++      INSTANCE_WR(ctx, 0xC50/4, 0x1210);
++      INSTANCE_WR(ctx, 0xC54/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xC78/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xC7C/4, 0x160000);
++      INSTANCE_WR(ctx, 0xC80/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xC90/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xC94/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xCB8/4, 0x10401);
++      INSTANCE_WR(ctx, 0xCC0/4, 0x78);
++      INSTANCE_WR(ctx, 0xCC8/4, 0xBF);
++      INSTANCE_WR(ctx, 0xCD0/4, 0x1210);
++      INSTANCE_WR(ctx, 0xCD4/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xCFC/4, 0x27070);
++      INSTANCE_WR(ctx, 0xD08/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0xD20/4, 0x120407);
++      INSTANCE_WR(ctx, 0xD24/4, 0x5091507);
++      INSTANCE_WR(ctx, 0xD28/4, 0x5010202);
++      INSTANCE_WR(ctx, 0xD2C/4, 0x30201);
++      INSTANCE_WR(ctx, 0xD48/4, 0x40);
++      INSTANCE_WR(ctx, 0xD4C/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xD50/4, 0x141210);
++      INSTANCE_WR(ctx, 0xD54/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xD58/4, 0x1);
++      INSTANCE_WR(ctx, 0xD5C/4, 0x3);
++      INSTANCE_WR(ctx, 0xD68/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xD6C/4, 0x100);
++      INSTANCE_WR(ctx, 0xD70/4, 0x3800);
++      INSTANCE_WR(ctx, 0xD74/4, 0x404040);
++      INSTANCE_WR(ctx, 0xD78/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xD80/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xD84/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xD94/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xD98/4, 0x160000);
++      INSTANCE_WR(ctx, 0xD9C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xDAC/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xDB0/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xDD4/4, 0x10401);
++      INSTANCE_WR(ctx, 0xDDC/4, 0x78);
++      INSTANCE_WR(ctx, 0xDE4/4, 0xBF);
++      INSTANCE_WR(ctx, 0xDEC/4, 0x1210);
++      INSTANCE_WR(ctx, 0xDF0/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xE14/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xE18/4, 0x160000);
++      INSTANCE_WR(ctx, 0xE1C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xE2C/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xE30/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xE54/4, 0x10401);
++      INSTANCE_WR(ctx, 0xE5C/4, 0x78);
++      INSTANCE_WR(ctx, 0xE64/4, 0xBF);
++      INSTANCE_WR(ctx, 0xE6C/4, 0x1210);
++      INSTANCE_WR(ctx, 0xE70/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xE98/4, 0x27070);
++      INSTANCE_WR(ctx, 0xEA4/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0xEBC/4, 0x120407);
++      INSTANCE_WR(ctx, 0xEC0/4, 0x5091507);
++      INSTANCE_WR(ctx, 0xEC4/4, 0x5010202);
++      INSTANCE_WR(ctx, 0xEC8/4, 0x30201);
++      INSTANCE_WR(ctx, 0xEE4/4, 0x40);
++      INSTANCE_WR(ctx, 0xEE8/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xEEC/4, 0x141210);
++      INSTANCE_WR(ctx, 0xEF0/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xEF4/4, 0x1);
++      INSTANCE_WR(ctx, 0xEF8/4, 0x3);
++      INSTANCE_WR(ctx, 0xF04/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xF08/4, 0x100);
++      INSTANCE_WR(ctx, 0xF0C/4, 0x3800);
++      INSTANCE_WR(ctx, 0xF10/4, 0x404040);
++      INSTANCE_WR(ctx, 0xF14/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xF1C/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xF20/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xF30/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xF34/4, 0x160000);
++      INSTANCE_WR(ctx, 0xF38/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xF48/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xF4C/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xF70/4, 0x10401);
++      INSTANCE_WR(ctx, 0xF78/4, 0x78);
++      INSTANCE_WR(ctx, 0xF80/4, 0xBF);
++      INSTANCE_WR(ctx, 0xF88/4, 0x1210);
++      INSTANCE_WR(ctx, 0xF8C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xFB0/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xFB4/4, 0x160000);
++      INSTANCE_WR(ctx, 0xFB8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xFC8/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xFCC/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xFF0/4, 0x10401);
++      INSTANCE_WR(ctx, 0xFF8/4, 0x78);
++      INSTANCE_WR(ctx, 0x1000/4, 0xBF);
++      INSTANCE_WR(ctx, 0x1008/4, 0x1210);
++      INSTANCE_WR(ctx, 0x100C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x1034/4, 0x27070);
++      INSTANCE_WR(ctx, 0x1040/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x1058/4, 0x120407);
++      INSTANCE_WR(ctx, 0x105C/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x1060/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x1064/4, 0x30201);
++      INSTANCE_WR(ctx, 0x1080/4, 0x40);
++      INSTANCE_WR(ctx, 0x1084/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x1088/4, 0x141210);
++      INSTANCE_WR(ctx, 0x108C/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x1090/4, 0x1);
++      INSTANCE_WR(ctx, 0x1094/4, 0x3);
++      INSTANCE_WR(ctx, 0x10A0/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x10A4/4, 0x100);
++      INSTANCE_WR(ctx, 0x10A8/4, 0x3800);
++      INSTANCE_WR(ctx, 0x10AC/4, 0x404040);
++      INSTANCE_WR(ctx, 0x10B0/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x10B8/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x10BC/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x10CC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x10D0/4, 0x160000);
++      INSTANCE_WR(ctx, 0x10D4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x10E4/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x10E8/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x110C/4, 0x10401);
++      INSTANCE_WR(ctx, 0x1114/4, 0x78);
++      INSTANCE_WR(ctx, 0x111C/4, 0xBF);
++      INSTANCE_WR(ctx, 0x1124/4, 0x1210);
++      INSTANCE_WR(ctx, 0x1128/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x114C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1150/4, 0x160000);
++      INSTANCE_WR(ctx, 0x1154/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1164/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x1168/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x118C/4, 0x10401);
++      INSTANCE_WR(ctx, 0x1194/4, 0x78);
++      INSTANCE_WR(ctx, 0x119C/4, 0xBF);
++      INSTANCE_WR(ctx, 0x11A4/4, 0x1210);
++      INSTANCE_WR(ctx, 0x11A8/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x11D0/4, 0x27070);
++      INSTANCE_WR(ctx, 0x11DC/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x11F4/4, 0x120407);
++      INSTANCE_WR(ctx, 0x11F8/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x11FC/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x1200/4, 0x30201);
++      INSTANCE_WR(ctx, 0x121C/4, 0x40);
++      INSTANCE_WR(ctx, 0x1220/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x1224/4, 0x141210);
++      INSTANCE_WR(ctx, 0x1228/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x122C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1230/4, 0x3);
++      INSTANCE_WR(ctx, 0x123C/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x1240/4, 0x100);
++      INSTANCE_WR(ctx, 0x1244/4, 0x3800);
++      INSTANCE_WR(ctx, 0x1248/4, 0x404040);
++      INSTANCE_WR(ctx, 0x124C/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x1254/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x1258/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x1268/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x126C/4, 0x160000);
++      INSTANCE_WR(ctx, 0x1270/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1280/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x1284/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x12A8/4, 0x10401);
++      INSTANCE_WR(ctx, 0x12B0/4, 0x78);
++      INSTANCE_WR(ctx, 0x12B8/4, 0xBF);
++      INSTANCE_WR(ctx, 0x12C0/4, 0x1210);
++      INSTANCE_WR(ctx, 0x12C4/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x12E8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x12EC/4, 0x160000);
++      INSTANCE_WR(ctx, 0x12F0/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1300/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x1304/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x1328/4, 0x10401);
++      INSTANCE_WR(ctx, 0x1330/4, 0x78);
++      INSTANCE_WR(ctx, 0x1338/4, 0xBF);
++      INSTANCE_WR(ctx, 0x1340/4, 0x1210);
++      INSTANCE_WR(ctx, 0x1344/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x136C/4, 0x27070);
++      INSTANCE_WR(ctx, 0x1378/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x1390/4, 0x120407);
++      INSTANCE_WR(ctx, 0x1394/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x1398/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x139C/4, 0x30201);
++      INSTANCE_WR(ctx, 0x13B8/4, 0x40);
++      INSTANCE_WR(ctx, 0x13BC/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x13C0/4, 0x141210);
++      INSTANCE_WR(ctx, 0x13C4/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x13C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x13CC/4, 0x3);
++      INSTANCE_WR(ctx, 0x13D8/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x13DC/4, 0x100);
++      INSTANCE_WR(ctx, 0x13E0/4, 0x3800);
++      INSTANCE_WR(ctx, 0x13E4/4, 0x404040);
++      INSTANCE_WR(ctx, 0x13E8/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x13F0/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x13F4/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x8620/4, 0x21);
++      INSTANCE_WR(ctx, 0x8640/4, 0x1);
++      INSTANCE_WR(ctx, 0x8660/4, 0x2);
++      INSTANCE_WR(ctx, 0x8680/4, 0x100);
++      INSTANCE_WR(ctx, 0x86A0/4, 0x100);
++      INSTANCE_WR(ctx, 0x86C0/4, 0x1);
++      INSTANCE_WR(ctx, 0x8720/4, 0x1);
++      INSTANCE_WR(ctx, 0x8740/4, 0x2);
++      INSTANCE_WR(ctx, 0x8760/4, 0x100);
++      INSTANCE_WR(ctx, 0x8780/4, 0x100);
++      INSTANCE_WR(ctx, 0x87A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x1B8C0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1B8E0/4, 0x4);
++      INSTANCE_WR(ctx, 0x54260/4, 0x4);
++      INSTANCE_WR(ctx, 0x54280/4, 0x4);
++      INSTANCE_WR(ctx, 0x542A0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x542C0/4, 0x3);
++      INSTANCE_WR(ctx, 0x54300/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x54340/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x54360/4, 0x1);
++      INSTANCE_WR(ctx, 0x54380/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x543E0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x54400/4, 0x27);
++      INSTANCE_WR(ctx, 0x54460/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BCA0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BF80/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5C120/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C140/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C180/4, 0x80);
++      INSTANCE_WR(ctx, 0x5C200/4, 0x80);
++      INSTANCE_WR(ctx, 0x5C240/4, 0x3F);
++      INSTANCE_WR(ctx, 0x5C3A0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5C3C0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C3E0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C500/4, 0x4);
++      INSTANCE_WR(ctx, 0x5C580/4, 0x4);
++      INSTANCE_WR(ctx, 0x5C7C0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5C7E0/4, 0x1001);
++      INSTANCE_WR(ctx, 0x5C800/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5C820/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5C840/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5C860/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5CC80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CCA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CCC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CCE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CDA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CDC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CDE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE80/4, 0x10);
++      INSTANCE_WR(ctx, 0x5CEE0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1584/4, 0xF);
++      INSTANCE_WR(ctx, 0x1624/4, 0x20);
++      INSTANCE_WR(ctx, 0x1804/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19C4/4, 0x4);
++      INSTANCE_WR(ctx, 0x19E4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A24/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A44/4, 0x8);
++      INSTANCE_WR(ctx, 0x1A84/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x1C24/4, 0xF);
++      INSTANCE_WR(ctx, 0x4104/4, 0xF);
++      INSTANCE_WR(ctx, 0x4144/4, 0x1);
++      INSTANCE_WR(ctx, 0x4CA4/4, 0xF);
++      INSTANCE_WR(ctx, 0x15344/4, 0xF);
++      INSTANCE_WR(ctx, 0x155E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x15604/4, 0x100);
++      INSTANCE_WR(ctx, 0x15624/4, 0x100);
++      INSTANCE_WR(ctx, 0x15644/4, 0x11);
++      INSTANCE_WR(ctx, 0x15684/4, 0x8);
++      INSTANCE_WR(ctx, 0x15744/4, 0x1);
++      INSTANCE_WR(ctx, 0x15784/4, 0x1);
++      INSTANCE_WR(ctx, 0x157A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x157C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x157E4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x15804/4, 0x2);
++      INSTANCE_WR(ctx, 0x158E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x15924/4, 0x1);
++      INSTANCE_WR(ctx, 0x15944/4, 0x1);
++      INSTANCE_WR(ctx, 0x15964/4, 0x1);
++      INSTANCE_WR(ctx, 0x15A04/4, 0x4);
++      INSTANCE_WR(ctx, 0x15A44/4, 0x1);
++      INSTANCE_WR(ctx, 0x15A64/4, 0x15);
++      INSTANCE_WR(ctx, 0x15AE4/4, 0x4444480);
++      INSTANCE_WR(ctx, 0x16264/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x16304/4, 0x100);
++      INSTANCE_WR(ctx, 0x16364/4, 0x10001);
++      INSTANCE_WR(ctx, 0x163A4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x163C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x163E4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x16404/4, 0x1);
++      INSTANCE_WR(ctx, 0x16424/4, 0x4);
++      INSTANCE_WR(ctx, 0x16444/4, 0x2);
++      INSTANCE_WR(ctx, 0x183C4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x183E4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18484/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18604/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18624/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x16508/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x16568/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x16748/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16828/4, 0x4);
++      INSTANCE_WR(ctx, 0x16848/4, 0x1A);
++      INSTANCE_WR(ctx, 0x168A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B08/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x16BE8/4, 0xF);
++      INSTANCE_WR(ctx, 0x16CE8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16D08/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F08/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FA8/4, 0x2);
++      INSTANCE_WR(ctx, 0x16FC8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x16FE8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x17068/4, 0x5);
++      INSTANCE_WR(ctx, 0x17088/4, 0x52);
++      INSTANCE_WR(ctx, 0x17128/4, 0x1);
++      INSTANCE_WR(ctx, 0x17348/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17368/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17388/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x173A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x173C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x173E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17408/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17428/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17448/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17468/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17488/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17508/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17528/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17548/4, 0x10);
++      INSTANCE_WR(ctx, 0x17A28/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x17A48/4, 0x5);
++      INSTANCE_WR(ctx, 0x17AA8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AE8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B08/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B28/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B48/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B68/4, 0x3);
++      INSTANCE_WR(ctx, 0x17F68/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x17F88/4, 0x1A);
++      INSTANCE_WR(ctx, 0x17FC8/4, 0x3);
++      INSTANCE_WR(ctx, 0x184A8/4, 0x102);
++      INSTANCE_WR(ctx, 0x184E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18508/4, 0x4);
++      INSTANCE_WR(ctx, 0x18528/4, 0x4);
++      INSTANCE_WR(ctx, 0x18548/4, 0x4);
++      INSTANCE_WR(ctx, 0x18568/4, 0x4);
++      INSTANCE_WR(ctx, 0x18588/4, 0x4);
++      INSTANCE_WR(ctx, 0x185C8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x18608/4, 0x102);
++      INSTANCE_WR(ctx, 0x18748/4, 0x4);
++      INSTANCE_WR(ctx, 0x18768/4, 0x4);
++      INSTANCE_WR(ctx, 0x18788/4, 0x4);
++      INSTANCE_WR(ctx, 0x187A8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18DE8/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18E48/4, 0x804);
++      INSTANCE_WR(ctx, 0x18E88/4, 0x4);
++      INSTANCE_WR(ctx, 0x18EA8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18EC8/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18F08/4, 0x4);
++      INSTANCE_WR(ctx, 0x18F28/4, 0x4);
++      INSTANCE_WR(ctx, 0x18F68/4, 0x10);
++      INSTANCE_WR(ctx, 0x19008/4, 0x804);
++      INSTANCE_WR(ctx, 0x19028/4, 0x1);
++      INSTANCE_WR(ctx, 0x19048/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19068/4, 0x7F);
++      INSTANCE_WR(ctx, 0x190A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x190C8/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x19108/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x19128/4, 0x4);
++      INSTANCE_WR(ctx, 0x19148/4, 0x4);
++      INSTANCE_WR(ctx, 0x19188/4, 0x10);
++      INSTANCE_WR(ctx, 0x19208/4, 0x1);
++      INSTANCE_WR(ctx, 0x19228/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x19308/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x19328/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x19A48/4, 0x1);
++      INSTANCE_WR(ctx, 0x19AA8/4, 0x10);
++      INSTANCE_WR(ctx, 0x1A1C8/4, 0x88);
++      INSTANCE_WR(ctx, 0x1A1E8/4, 0x88);
++      INSTANCE_WR(ctx, 0x1A248/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A528/4, 0x26);
++      INSTANCE_WR(ctx, 0x1A588/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x1A608/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1A628/4, 0x10);
++      INSTANCE_WR(ctx, 0x1AB48/4, 0x52);
++      INSTANCE_WR(ctx, 0x1AB88/4, 0x26);
++      INSTANCE_WR(ctx, 0x1ABC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1ABE8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1AC28/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1AC88/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x1ACC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1ACE8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1AD28/4, 0x80);
++      INSTANCE_WR(ctx, 0x1AD48/4, 0x4);
++      INSTANCE_WR(ctx, 0x1AD68/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x1ADA8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2D608/4, 0x4);
++      INSTANCE_WR(ctx, 0x2D628/4, 0x4);
++      INSTANCE_WR(ctx, 0x2D668/4, 0x80);
++      INSTANCE_WR(ctx, 0x2D688/4, 0x4);
++      INSTANCE_WR(ctx, 0x2D6A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2D6E8/4, 0x27);
++      INSTANCE_WR(ctx, 0x2D728/4, 0x26);
++      INSTANCE_WR(ctx, 0x2D7A8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D7C8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D7E8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D808/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D828/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D848/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D868/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D888/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D8A8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D8C8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D8E8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D908/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D928/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D948/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D968/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D988/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2DE28/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x2DE48/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x2DEA8/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x160C/4, 0x2);
++      INSTANCE_WR(ctx, 0x164C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x180C/4, 0x10);
++      INSTANCE_WR(ctx, 0x186C/4, 0x1);
++      INSTANCE_WR(ctx, 0x190C/4, 0x4);
++      INSTANCE_WR(ctx, 0x192C/4, 0x400);
++      INSTANCE_WR(ctx, 0x194C/4, 0x300);
++      INSTANCE_WR(ctx, 0x196C/4, 0x1001);
++      INSTANCE_WR(ctx, 0x198C/4, 0x15);
++      INSTANCE_WR(ctx, 0x1A4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x1B6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1B8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x1BCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1E4C/4, 0x10);
++      INSTANCE_WR(ctx, 0x206C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x208C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x20AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x20CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x20EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x210C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x212C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x214C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x216C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x218C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x21AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x21CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x21EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x220C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x222C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x224C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x258C/4, 0x10);
++      INSTANCE_WR(ctx, 0x25CC/4, 0x3F);
++      INSTANCE_WR(ctx, 0x26AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x26EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x272C/4, 0x1);
++      INSTANCE_WR(ctx, 0x28CC/4, 0x11);
++      INSTANCE_WR(ctx, 0x29CC/4, 0xF);
++      INSTANCE_WR(ctx, 0x2ACC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2BAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2BCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2BEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2C0C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2C2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2C4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2C6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2CAC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2CEC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FCC/4, 0x2);
++      INSTANCE_WR(ctx, 0x2FEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x300C/4, 0x1);
++      INSTANCE_WR(ctx, 0x302C/4, 0x2);
++      INSTANCE_WR(ctx, 0x304C/4, 0x1);
++      INSTANCE_WR(ctx, 0x306C/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EC/4, 0x11);
++      INSTANCE_WR(ctx, 0x310C/4, 0x1);
++      INSTANCE_WR(ctx, 0x3D8C/4, 0x2);
++      INSTANCE_WR(ctx, 0x3DCC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x3F6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x3F8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x3FEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x408C/4, 0x4);
++      INSTANCE_WR(ctx, 0x40AC/4, 0x400);
++      INSTANCE_WR(ctx, 0x40CC/4, 0x300);
++      INSTANCE_WR(ctx, 0x40EC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x410C/4, 0x15);
++      INSTANCE_WR(ctx, 0x41CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x42EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x430C/4, 0x10);
++      INSTANCE_WR(ctx, 0x434C/4, 0x1);
++      INSTANCE_WR(ctx, 0x45CC/4, 0x10);
++      INSTANCE_WR(ctx, 0x47EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x480C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x482C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x484C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x486C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x488C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x48AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x48CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x48EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x490C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x492C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x494C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x496C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x498C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x49AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x49CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x4D0C/4, 0x10);
++      INSTANCE_WR(ctx, 0x4D4C/4, 0x3F);
++      INSTANCE_WR(ctx, 0x4E2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4E6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4EAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x504C/4, 0x11);
++      INSTANCE_WR(ctx, 0x514C/4, 0xF);
++      INSTANCE_WR(ctx, 0x524C/4, 0x11);
++      INSTANCE_WR(ctx, 0x532C/4, 0x1);
++      INSTANCE_WR(ctx, 0x534C/4, 0x1);
++      INSTANCE_WR(ctx, 0x536C/4, 0x1);
++      INSTANCE_WR(ctx, 0x538C/4, 0x2);
++      INSTANCE_WR(ctx, 0x53AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x53CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x53EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x542C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x546C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x572C/4, 0x1);
++      INSTANCE_WR(ctx, 0x574C/4, 0x2);
++      INSTANCE_WR(ctx, 0x576C/4, 0x1);
++      INSTANCE_WR(ctx, 0x578C/4, 0x1);
++      INSTANCE_WR(ctx, 0x57AC/4, 0x2);
++      INSTANCE_WR(ctx, 0x57CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x57EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x586C/4, 0x11);
++      INSTANCE_WR(ctx, 0x588C/4, 0x1);
++      INSTANCE_WR(ctx, 0x650C/4, 0x2);
++      INSTANCE_WR(ctx, 0x654C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x66EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x670C/4, 0x10);
++      INSTANCE_WR(ctx, 0x676C/4, 0x1);
++      INSTANCE_WR(ctx, 0x680C/4, 0x4);
++      INSTANCE_WR(ctx, 0x682C/4, 0x400);
++      INSTANCE_WR(ctx, 0x684C/4, 0x300);
++      INSTANCE_WR(ctx, 0x686C/4, 0x1001);
++      INSTANCE_WR(ctx, 0x688C/4, 0x15);
++      INSTANCE_WR(ctx, 0x694C/4, 0x2);
++      INSTANCE_WR(ctx, 0x6A6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x6A8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x6ACC/4, 0x1);
++      INSTANCE_WR(ctx, 0x6D4C/4, 0x10);
++      INSTANCE_WR(ctx, 0x6F6C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6F8C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6FAC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6FCC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6FEC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x700C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x702C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x704C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x706C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x708C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x70AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x70CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x70EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x710C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x712C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x714C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x748C/4, 0x10);
++      INSTANCE_WR(ctx, 0x74CC/4, 0x3F);
++      INSTANCE_WR(ctx, 0x75AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x75EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x762C/4, 0x1);
++      INSTANCE_WR(ctx, 0x77CC/4, 0x11);
++      INSTANCE_WR(ctx, 0x78CC/4, 0xF);
++      INSTANCE_WR(ctx, 0x79CC/4, 0x11);
++      INSTANCE_WR(ctx, 0x7AAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7ACC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7AEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7B0C/4, 0x2);
++      INSTANCE_WR(ctx, 0x7B2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7B4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x7B6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7BAC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x7BEC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x7EAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7ECC/4, 0x2);
++      INSTANCE_WR(ctx, 0x7EEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7F0C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7F2C/4, 0x2);
++      INSTANCE_WR(ctx, 0x7F4C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7F6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7FEC/4, 0x11);
++      INSTANCE_WR(ctx, 0x800C/4, 0x1);
++      INSTANCE_WR(ctx, 0x8C8C/4, 0x2);
++      INSTANCE_WR(ctx, 0x8CCC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x8E6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x8E8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x8EEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x8F8C/4, 0x4);
++      INSTANCE_WR(ctx, 0x8FAC/4, 0x400);
++      INSTANCE_WR(ctx, 0x8FCC/4, 0x300);
++      INSTANCE_WR(ctx, 0x8FEC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x900C/4, 0x15);
++      INSTANCE_WR(ctx, 0x90CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x91EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x920C/4, 0x10);
++      INSTANCE_WR(ctx, 0x924C/4, 0x1);
++      INSTANCE_WR(ctx, 0x94CC/4, 0x10);
++      INSTANCE_WR(ctx, 0x96EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x970C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x972C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x974C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x976C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x978C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x97AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x97CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x97EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x980C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x982C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x984C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x986C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x988C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x98AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x98CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x9C0C/4, 0x10);
++      INSTANCE_WR(ctx, 0x9C4C/4, 0x3F);
++      INSTANCE_WR(ctx, 0x9D2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x9D6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x9DAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x9F4C/4, 0x11);
++      INSTANCE_WR(ctx, 0xA04C/4, 0xF);
++      INSTANCE_WR(ctx, 0xA14C/4, 0x11);
++      INSTANCE_WR(ctx, 0xA22C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA24C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA26C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA28C/4, 0x2);
++      INSTANCE_WR(ctx, 0xA2AC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA2CC/4, 0x2);
++      INSTANCE_WR(ctx, 0xA2EC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA32C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0xA36C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0xA62C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA64C/4, 0x2);
++      INSTANCE_WR(ctx, 0xA66C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA68C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA6AC/4, 0x2);
++      INSTANCE_WR(ctx, 0xA6CC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA6EC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA76C/4, 0x11);
++      INSTANCE_WR(ctx, 0xA78C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1530/4, 0x4);
++      INSTANCE_WR(ctx, 0x17F0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1810/4, 0x4);
++      INSTANCE_WR(ctx, 0x1830/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18D0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1930/4, 0x4);
++      INSTANCE_WR(ctx, 0x1950/4, 0x4);
++      INSTANCE_WR(ctx, 0x1970/4, 0x80);
++      INSTANCE_WR(ctx, 0x1990/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E30/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E50/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E70/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E90/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EB0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1ED0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F70/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F90/4, 0x3);
++      INSTANCE_WR(ctx, 0x2010/4, 0x4);
++      INSTANCE_WR(ctx, 0x164B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x164D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x16710/4, 0xF);
++      INSTANCE_WR(ctx, 0x16890/4, 0x4);
++      INSTANCE_WR(ctx, 0x168B0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168D0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168F0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16910/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A30/4, 0x1);
++      INSTANCE_WR(ctx, 0x16AB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B70/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D10/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D30/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D50/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D70/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D90/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB0/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E10/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F10/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F70/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FF0/4, 0x11);
++      INSTANCE_WR(ctx, 0x17010/4, 0x1);
++      INSTANCE_WR(ctx, 0x17050/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17070/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17090/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17210/4, 0x1);
++      INSTANCE_WR(ctx, 0x17230/4, 0x2);
++      INSTANCE_WR(ctx, 0x17250/4, 0x1);
++      INSTANCE_WR(ctx, 0x17270/4, 0x1);
++      INSTANCE_WR(ctx, 0x17290/4, 0x2);
++      INSTANCE_WR(ctx, 0x172B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x172F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17310/4, 0x1);
++      INSTANCE_WR(ctx, 0x17330/4, 0x1);
++      INSTANCE_WR(ctx, 0x17350/4, 0x1);
++      INSTANCE_WR(ctx, 0x17370/4, 0x1);
++      INSTANCE_WR(ctx, 0x17390/4, 0x1);
++      INSTANCE_WR(ctx, 0x173B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x173D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x173F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x174F0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17510/4, 0xF);
++      INSTANCE_WR(ctx, 0x17610/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17670/4, 0x11);
++      INSTANCE_WR(ctx, 0x17690/4, 0x1);
++      INSTANCE_WR(ctx, 0x17710/4, 0x4);
++      INSTANCE_WR(ctx, 0x177D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17870/4, 0x11);
++      INSTANCE_WR(ctx, 0x17970/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A10/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A50/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A90/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AD0/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B10/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B50/4, 0x1);
++      INSTANCE_WR(ctx, 0x180B0/4, 0x8);
++      INSTANCE_WR(ctx, 0x180D0/4, 0x8);
++      INSTANCE_WR(ctx, 0x180F0/4, 0x8);
++      INSTANCE_WR(ctx, 0x18110/4, 0x8);
++      INSTANCE_WR(ctx, 0x18130/4, 0x8);
++      INSTANCE_WR(ctx, 0x18150/4, 0x8);
++      INSTANCE_WR(ctx, 0x18170/4, 0x8);
++      INSTANCE_WR(ctx, 0x18190/4, 0x8);
++      INSTANCE_WR(ctx, 0x181B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x182B0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182D0/4, 0x400);
++      INSTANCE_WR(ctx, 0x182F0/4, 0x400);
++      INSTANCE_WR(ctx, 0x18310/4, 0x400);
++      INSTANCE_WR(ctx, 0x18330/4, 0x400);
++      INSTANCE_WR(ctx, 0x18350/4, 0x400);
++      INSTANCE_WR(ctx, 0x18370/4, 0x400);
++      INSTANCE_WR(ctx, 0x18390/4, 0x400);
++      INSTANCE_WR(ctx, 0x183B0/4, 0x400);
++      INSTANCE_WR(ctx, 0x183D0/4, 0x300);
++      INSTANCE_WR(ctx, 0x183F0/4, 0x300);
++      INSTANCE_WR(ctx, 0x18410/4, 0x300);
++      INSTANCE_WR(ctx, 0x18430/4, 0x300);
++      INSTANCE_WR(ctx, 0x18450/4, 0x300);
++      INSTANCE_WR(ctx, 0x18470/4, 0x300);
++      INSTANCE_WR(ctx, 0x18490/4, 0x300);
++      INSTANCE_WR(ctx, 0x184B0/4, 0x300);
++      INSTANCE_WR(ctx, 0x184D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x184F0/4, 0xF);
++      INSTANCE_WR(ctx, 0x185F0/4, 0x20);
++      INSTANCE_WR(ctx, 0x18610/4, 0x11);
++      INSTANCE_WR(ctx, 0x18630/4, 0x100);
++      INSTANCE_WR(ctx, 0x18670/4, 0x1);
++      INSTANCE_WR(ctx, 0x186D0/4, 0x40);
++      INSTANCE_WR(ctx, 0x186F0/4, 0x100);
++      INSTANCE_WR(ctx, 0x18730/4, 0x3);
++      INSTANCE_WR(ctx, 0x187D0/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18850/4, 0x2);
++      INSTANCE_WR(ctx, 0x18870/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A50/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A90/4, 0x1);
++      INSTANCE_WR(ctx, 0x18AB0/4, 0x400);
++      INSTANCE_WR(ctx, 0x18AD0/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AF0/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B70/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C90/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F90/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x19010/4, 0x11);
++      INSTANCE_WR(ctx, 0x19070/4, 0x4);
++      INSTANCE_WR(ctx, 0x190B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x190D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x19150/4, 0x1);
++      INSTANCE_WR(ctx, 0x191F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x19230/4, 0x1);
++      INSTANCE_WR(ctx, 0x192B0/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192F0/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x19310/4, 0x40);
++      INSTANCE_WR(ctx, 0x19330/4, 0x100);
++      INSTANCE_WR(ctx, 0x19350/4, 0x10100);
++      INSTANCE_WR(ctx, 0x19370/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195D0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195F0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x19610/4, 0x1);
++      INSTANCE_WR(ctx, 0x19650/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x19670/4, 0x1);
++      INSTANCE_WR(ctx, 0x196D0/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x19830/4, 0x1);
++      INSTANCE_WR(ctx, 0x19850/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19870/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19890/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198B0/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198F0/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19930/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BF0/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C10/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C30/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CD0/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D30/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D50/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D70/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D90/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A230/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A250/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A270/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A290/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2B0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2D0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A370/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A390/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A410/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB10/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC90/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECB0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECD0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECF0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED10/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE30/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF70/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F110/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F130/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F150/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F170/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F190/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1B0/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F210/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F310/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F370/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F410/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F450/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F470/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F490/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F610/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F630/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F650/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F670/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F690/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F710/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F730/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F750/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F770/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F790/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8F0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F910/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA10/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA70/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA90/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB10/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC70/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDF0/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE10/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE50/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE90/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FED0/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF10/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF50/4, 0x1);
++      INSTANCE_WR(ctx, 0x304B0/4, 0x8);
++      INSTANCE_WR(ctx, 0x304D0/4, 0x8);
++      INSTANCE_WR(ctx, 0x304F0/4, 0x8);
++      INSTANCE_WR(ctx, 0x30510/4, 0x8);
++      INSTANCE_WR(ctx, 0x30530/4, 0x8);
++      INSTANCE_WR(ctx, 0x30550/4, 0x8);
++      INSTANCE_WR(ctx, 0x30570/4, 0x8);
++      INSTANCE_WR(ctx, 0x30590/4, 0x8);
++      INSTANCE_WR(ctx, 0x305B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x306B0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306D0/4, 0x400);
++      INSTANCE_WR(ctx, 0x306F0/4, 0x400);
++      INSTANCE_WR(ctx, 0x30710/4, 0x400);
++      INSTANCE_WR(ctx, 0x30730/4, 0x400);
++      INSTANCE_WR(ctx, 0x30750/4, 0x400);
++      INSTANCE_WR(ctx, 0x30770/4, 0x400);
++      INSTANCE_WR(ctx, 0x30790/4, 0x400);
++      INSTANCE_WR(ctx, 0x307B0/4, 0x400);
++      INSTANCE_WR(ctx, 0x307D0/4, 0x300);
++      INSTANCE_WR(ctx, 0x307F0/4, 0x300);
++      INSTANCE_WR(ctx, 0x30810/4, 0x300);
++      INSTANCE_WR(ctx, 0x30830/4, 0x300);
++      INSTANCE_WR(ctx, 0x30850/4, 0x300);
++      INSTANCE_WR(ctx, 0x30870/4, 0x300);
++      INSTANCE_WR(ctx, 0x30890/4, 0x300);
++      INSTANCE_WR(ctx, 0x308B0/4, 0x300);
++      INSTANCE_WR(ctx, 0x308D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x308F0/4, 0xF);
++      INSTANCE_WR(ctx, 0x309F0/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A10/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A30/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A70/4, 0x1);
++      INSTANCE_WR(ctx, 0x30AD0/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AF0/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B30/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BD0/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C50/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E50/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E90/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EB0/4, 0x400);
++      INSTANCE_WR(ctx, 0x30ED0/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EF0/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F70/4, 0x11);
++      INSTANCE_WR(ctx, 0x31070/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x31090/4, 0xF);
++      INSTANCE_WR(ctx, 0x31390/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x31410/4, 0x11);
++      INSTANCE_WR(ctx, 0x31470/4, 0x4);
++      INSTANCE_WR(ctx, 0x314B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x314D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x31550/4, 0x1);
++      INSTANCE_WR(ctx, 0x315F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x31630/4, 0x1);
++      INSTANCE_WR(ctx, 0x316B0/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316F0/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x31710/4, 0x40);
++      INSTANCE_WR(ctx, 0x31730/4, 0x100);
++      INSTANCE_WR(ctx, 0x31750/4, 0x10100);
++      INSTANCE_WR(ctx, 0x31770/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319D0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319F0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A10/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A50/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A70/4, 0x1);
++      INSTANCE_WR(ctx, 0x31AD0/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C30/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C50/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C70/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C90/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CB0/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CF0/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1534/4, 0x4);
++      INSTANCE_WR(ctx, 0x17F4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1814/4, 0x4);
++      INSTANCE_WR(ctx, 0x1834/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18D4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1934/4, 0x4);
++      INSTANCE_WR(ctx, 0x1954/4, 0x4);
++      INSTANCE_WR(ctx, 0x1974/4, 0x80);
++      INSTANCE_WR(ctx, 0x1994/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E34/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E54/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E74/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E94/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EB4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1ED4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F74/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F94/4, 0x3);
++      INSTANCE_WR(ctx, 0x2014/4, 0x4);
++      INSTANCE_WR(ctx, 0x164B4/4, 0x4);
++      INSTANCE_WR(ctx, 0x164D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x16714/4, 0xF);
++      INSTANCE_WR(ctx, 0x16894/4, 0x4);
++      INSTANCE_WR(ctx, 0x168B4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168D4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168F4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16914/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A34/4, 0x1);
++      INSTANCE_WR(ctx, 0x16AB4/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B74/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D14/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D34/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D54/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D74/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D94/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB4/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DD4/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E14/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F14/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F74/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FF4/4, 0x11);
++      INSTANCE_WR(ctx, 0x17014/4, 0x1);
++      INSTANCE_WR(ctx, 0x17054/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17074/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17094/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x17214/4, 0x1);
++      INSTANCE_WR(ctx, 0x17234/4, 0x2);
++      INSTANCE_WR(ctx, 0x17254/4, 0x1);
++      INSTANCE_WR(ctx, 0x17274/4, 0x1);
++      INSTANCE_WR(ctx, 0x17294/4, 0x2);
++      INSTANCE_WR(ctx, 0x172B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x172F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x17314/4, 0x1);
++      INSTANCE_WR(ctx, 0x17334/4, 0x1);
++      INSTANCE_WR(ctx, 0x17354/4, 0x1);
++      INSTANCE_WR(ctx, 0x17374/4, 0x1);
++      INSTANCE_WR(ctx, 0x17394/4, 0x1);
++      INSTANCE_WR(ctx, 0x173B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x173D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x173F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x174F4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17514/4, 0xF);
++      INSTANCE_WR(ctx, 0x17614/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17674/4, 0x11);
++      INSTANCE_WR(ctx, 0x17694/4, 0x1);
++      INSTANCE_WR(ctx, 0x17714/4, 0x4);
++      INSTANCE_WR(ctx, 0x177D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x17874/4, 0x11);
++      INSTANCE_WR(ctx, 0x17974/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A14/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A54/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A94/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AD4/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B14/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B54/4, 0x1);
++      INSTANCE_WR(ctx, 0x180B4/4, 0x8);
++      INSTANCE_WR(ctx, 0x180D4/4, 0x8);
++      INSTANCE_WR(ctx, 0x180F4/4, 0x8);
++      INSTANCE_WR(ctx, 0x18114/4, 0x8);
++      INSTANCE_WR(ctx, 0x18134/4, 0x8);
++      INSTANCE_WR(ctx, 0x18154/4, 0x8);
++      INSTANCE_WR(ctx, 0x18174/4, 0x8);
++      INSTANCE_WR(ctx, 0x18194/4, 0x8);
++      INSTANCE_WR(ctx, 0x181B4/4, 0x11);
++      INSTANCE_WR(ctx, 0x182B4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182D4/4, 0x400);
++      INSTANCE_WR(ctx, 0x182F4/4, 0x400);
++      INSTANCE_WR(ctx, 0x18314/4, 0x400);
++      INSTANCE_WR(ctx, 0x18334/4, 0x400);
++      INSTANCE_WR(ctx, 0x18354/4, 0x400);
++      INSTANCE_WR(ctx, 0x18374/4, 0x400);
++      INSTANCE_WR(ctx, 0x18394/4, 0x400);
++      INSTANCE_WR(ctx, 0x183B4/4, 0x400);
++      INSTANCE_WR(ctx, 0x183D4/4, 0x300);
++      INSTANCE_WR(ctx, 0x183F4/4, 0x300);
++      INSTANCE_WR(ctx, 0x18414/4, 0x300);
++      INSTANCE_WR(ctx, 0x18434/4, 0x300);
++      INSTANCE_WR(ctx, 0x18454/4, 0x300);
++      INSTANCE_WR(ctx, 0x18474/4, 0x300);
++      INSTANCE_WR(ctx, 0x18494/4, 0x300);
++      INSTANCE_WR(ctx, 0x184B4/4, 0x300);
++      INSTANCE_WR(ctx, 0x184D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x184F4/4, 0xF);
++      INSTANCE_WR(ctx, 0x185F4/4, 0x20);
++      INSTANCE_WR(ctx, 0x18614/4, 0x11);
++      INSTANCE_WR(ctx, 0x18634/4, 0x100);
++      INSTANCE_WR(ctx, 0x18674/4, 0x1);
++      INSTANCE_WR(ctx, 0x186D4/4, 0x40);
++      INSTANCE_WR(ctx, 0x186F4/4, 0x100);
++      INSTANCE_WR(ctx, 0x18734/4, 0x3);
++      INSTANCE_WR(ctx, 0x187D4/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18854/4, 0x2);
++      INSTANCE_WR(ctx, 0x18874/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A54/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A94/4, 0x1);
++      INSTANCE_WR(ctx, 0x18AB4/4, 0x400);
++      INSTANCE_WR(ctx, 0x18AD4/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AF4/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B74/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C94/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F94/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x19014/4, 0x11);
++      INSTANCE_WR(ctx, 0x19074/4, 0x4);
++      INSTANCE_WR(ctx, 0x190B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x190D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x19154/4, 0x1);
++      INSTANCE_WR(ctx, 0x191F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x19234/4, 0x1);
++      INSTANCE_WR(ctx, 0x192B4/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192F4/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x19314/4, 0x40);
++      INSTANCE_WR(ctx, 0x19334/4, 0x100);
++      INSTANCE_WR(ctx, 0x19354/4, 0x10100);
++      INSTANCE_WR(ctx, 0x19374/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195D4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195F4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x19614/4, 0x1);
++      INSTANCE_WR(ctx, 0x19654/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x19674/4, 0x1);
++      INSTANCE_WR(ctx, 0x196D4/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x19834/4, 0x1);
++      INSTANCE_WR(ctx, 0x19854/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19874/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19894/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198B4/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198F4/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19934/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BF4/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C14/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C34/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CD4/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D34/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D54/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D74/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D94/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A234/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A254/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A274/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A294/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2B4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2D4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A374/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A394/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A414/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8B4/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB14/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC94/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECB4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECD4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECF4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED14/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE34/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEB4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF74/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F114/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F134/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F154/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F174/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F194/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1B4/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F214/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F314/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F374/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F414/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F454/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F474/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F494/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F614/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F634/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F654/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F674/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F694/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F714/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F734/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F754/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F774/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F794/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8F4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F914/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA14/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA74/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA94/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB14/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBD4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC74/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDF4/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE14/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE54/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE94/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FED4/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF14/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF54/4, 0x1);
++      INSTANCE_WR(ctx, 0x304B4/4, 0x8);
++      INSTANCE_WR(ctx, 0x304D4/4, 0x8);
++      INSTANCE_WR(ctx, 0x304F4/4, 0x8);
++      INSTANCE_WR(ctx, 0x30514/4, 0x8);
++      INSTANCE_WR(ctx, 0x30534/4, 0x8);
++      INSTANCE_WR(ctx, 0x30554/4, 0x8);
++      INSTANCE_WR(ctx, 0x30574/4, 0x8);
++      INSTANCE_WR(ctx, 0x30594/4, 0x8);
++      INSTANCE_WR(ctx, 0x305B4/4, 0x11);
++      INSTANCE_WR(ctx, 0x306B4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306D4/4, 0x400);
++      INSTANCE_WR(ctx, 0x306F4/4, 0x400);
++      INSTANCE_WR(ctx, 0x30714/4, 0x400);
++      INSTANCE_WR(ctx, 0x30734/4, 0x400);
++      INSTANCE_WR(ctx, 0x30754/4, 0x400);
++      INSTANCE_WR(ctx, 0x30774/4, 0x400);
++      INSTANCE_WR(ctx, 0x30794/4, 0x400);
++      INSTANCE_WR(ctx, 0x307B4/4, 0x400);
++      INSTANCE_WR(ctx, 0x307D4/4, 0x300);
++      INSTANCE_WR(ctx, 0x307F4/4, 0x300);
++      INSTANCE_WR(ctx, 0x30814/4, 0x300);
++      INSTANCE_WR(ctx, 0x30834/4, 0x300);
++      INSTANCE_WR(ctx, 0x30854/4, 0x300);
++      INSTANCE_WR(ctx, 0x30874/4, 0x300);
++      INSTANCE_WR(ctx, 0x30894/4, 0x300);
++      INSTANCE_WR(ctx, 0x308B4/4, 0x300);
++      INSTANCE_WR(ctx, 0x308D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x308F4/4, 0xF);
++      INSTANCE_WR(ctx, 0x309F4/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A14/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A34/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A74/4, 0x1);
++      INSTANCE_WR(ctx, 0x30AD4/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AF4/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B34/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BD4/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C54/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DB4/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E54/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E94/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EB4/4, 0x400);
++      INSTANCE_WR(ctx, 0x30ED4/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EF4/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F74/4, 0x11);
++      INSTANCE_WR(ctx, 0x31074/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x31094/4, 0xF);
++      INSTANCE_WR(ctx, 0x31394/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x31414/4, 0x11);
++      INSTANCE_WR(ctx, 0x31474/4, 0x4);
++      INSTANCE_WR(ctx, 0x314B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x314D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x31554/4, 0x1);
++      INSTANCE_WR(ctx, 0x315F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x31634/4, 0x1);
++      INSTANCE_WR(ctx, 0x316B4/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316F4/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x31714/4, 0x40);
++      INSTANCE_WR(ctx, 0x31734/4, 0x100);
++      INSTANCE_WR(ctx, 0x31754/4, 0x10100);
++      INSTANCE_WR(ctx, 0x31774/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319D4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319F4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A14/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A54/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A74/4, 0x1);
++      INSTANCE_WR(ctx, 0x31AD4/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BF4/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C34/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C54/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C74/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C94/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CB4/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CF4/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1538/4, 0x4);
++      INSTANCE_WR(ctx, 0x17F8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1818/4, 0x4);
++      INSTANCE_WR(ctx, 0x1838/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18D8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1938/4, 0x4);
++      INSTANCE_WR(ctx, 0x1958/4, 0x4);
++      INSTANCE_WR(ctx, 0x1978/4, 0x80);
++      INSTANCE_WR(ctx, 0x1998/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E38/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E58/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E78/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E98/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EB8/4, 0x3);
++      INSTANCE_WR(ctx, 0x1ED8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F78/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F98/4, 0x3);
++      INSTANCE_WR(ctx, 0x2018/4, 0x4);
++      INSTANCE_WR(ctx, 0x164B8/4, 0x4);
++      INSTANCE_WR(ctx, 0x164D8/4, 0x3);
++      INSTANCE_WR(ctx, 0x16718/4, 0xF);
++      INSTANCE_WR(ctx, 0x16898/4, 0x4);
++      INSTANCE_WR(ctx, 0x168B8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168D8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168F8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16918/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A38/4, 0x1);
++      INSTANCE_WR(ctx, 0x16AB8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B78/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D18/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D38/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D58/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D78/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D98/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB8/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DD8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E18/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F18/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F78/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FF8/4, 0x11);
++      INSTANCE_WR(ctx, 0x17018/4, 0x1);
++      INSTANCE_WR(ctx, 0x17058/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17078/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17098/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17218/4, 0x1);
++      INSTANCE_WR(ctx, 0x17238/4, 0x2);
++      INSTANCE_WR(ctx, 0x17258/4, 0x1);
++      INSTANCE_WR(ctx, 0x17278/4, 0x1);
++      INSTANCE_WR(ctx, 0x17298/4, 0x2);
++      INSTANCE_WR(ctx, 0x172B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x172F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17318/4, 0x1);
++      INSTANCE_WR(ctx, 0x17338/4, 0x1);
++      INSTANCE_WR(ctx, 0x17358/4, 0x1);
++      INSTANCE_WR(ctx, 0x17378/4, 0x1);
++      INSTANCE_WR(ctx, 0x17398/4, 0x1);
++      INSTANCE_WR(ctx, 0x173B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x173D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x173F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x174F8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17518/4, 0xF);
++      INSTANCE_WR(ctx, 0x17618/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17678/4, 0x11);
++      INSTANCE_WR(ctx, 0x17698/4, 0x1);
++      INSTANCE_WR(ctx, 0x17718/4, 0x4);
++      INSTANCE_WR(ctx, 0x177D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17878/4, 0x11);
++      INSTANCE_WR(ctx, 0x17978/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A18/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A58/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A98/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AD8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B18/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B58/4, 0x1);
++      INSTANCE_WR(ctx, 0x180B8/4, 0x8);
++      INSTANCE_WR(ctx, 0x180D8/4, 0x8);
++      INSTANCE_WR(ctx, 0x180F8/4, 0x8);
++      INSTANCE_WR(ctx, 0x18118/4, 0x8);
++      INSTANCE_WR(ctx, 0x18138/4, 0x8);
++      INSTANCE_WR(ctx, 0x18158/4, 0x8);
++      INSTANCE_WR(ctx, 0x18178/4, 0x8);
++      INSTANCE_WR(ctx, 0x18198/4, 0x8);
++      INSTANCE_WR(ctx, 0x181B8/4, 0x11);
++      INSTANCE_WR(ctx, 0x182B8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182D8/4, 0x400);
++      INSTANCE_WR(ctx, 0x182F8/4, 0x400);
++      INSTANCE_WR(ctx, 0x18318/4, 0x400);
++      INSTANCE_WR(ctx, 0x18338/4, 0x400);
++      INSTANCE_WR(ctx, 0x18358/4, 0x400);
++      INSTANCE_WR(ctx, 0x18378/4, 0x400);
++      INSTANCE_WR(ctx, 0x18398/4, 0x400);
++      INSTANCE_WR(ctx, 0x183B8/4, 0x400);
++      INSTANCE_WR(ctx, 0x183D8/4, 0x300);
++      INSTANCE_WR(ctx, 0x183F8/4, 0x300);
++      INSTANCE_WR(ctx, 0x18418/4, 0x300);
++      INSTANCE_WR(ctx, 0x18438/4, 0x300);
++      INSTANCE_WR(ctx, 0x18458/4, 0x300);
++      INSTANCE_WR(ctx, 0x18478/4, 0x300);
++      INSTANCE_WR(ctx, 0x18498/4, 0x300);
++      INSTANCE_WR(ctx, 0x184B8/4, 0x300);
++      INSTANCE_WR(ctx, 0x184D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x184F8/4, 0xF);
++      INSTANCE_WR(ctx, 0x185F8/4, 0x20);
++      INSTANCE_WR(ctx, 0x18618/4, 0x11);
++      INSTANCE_WR(ctx, 0x18638/4, 0x100);
++      INSTANCE_WR(ctx, 0x18678/4, 0x1);
++      INSTANCE_WR(ctx, 0x186D8/4, 0x40);
++      INSTANCE_WR(ctx, 0x186F8/4, 0x100);
++      INSTANCE_WR(ctx, 0x18738/4, 0x3);
++      INSTANCE_WR(ctx, 0x187D8/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18858/4, 0x2);
++      INSTANCE_WR(ctx, 0x18878/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A58/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A98/4, 0x1);
++      INSTANCE_WR(ctx, 0x18AB8/4, 0x400);
++      INSTANCE_WR(ctx, 0x18AD8/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AF8/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B78/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C98/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F98/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x19018/4, 0x11);
++      INSTANCE_WR(ctx, 0x19078/4, 0x4);
++      INSTANCE_WR(ctx, 0x190B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x190D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x19158/4, 0x1);
++      INSTANCE_WR(ctx, 0x191F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x19238/4, 0x1);
++      INSTANCE_WR(ctx, 0x192B8/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192F8/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x19318/4, 0x40);
++      INSTANCE_WR(ctx, 0x19338/4, 0x100);
++      INSTANCE_WR(ctx, 0x19358/4, 0x10100);
++      INSTANCE_WR(ctx, 0x19378/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195D8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195F8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x19618/4, 0x1);
++      INSTANCE_WR(ctx, 0x19658/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x19678/4, 0x1);
++      INSTANCE_WR(ctx, 0x196D8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x19838/4, 0x1);
++      INSTANCE_WR(ctx, 0x19858/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19878/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19898/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198B8/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198F8/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19938/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BF8/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C18/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C38/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CD8/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D38/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D58/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D78/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D98/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A238/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A258/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A278/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A298/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2B8/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2D8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A378/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A398/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A418/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8B8/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8D8/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB18/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC98/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECB8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECD8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECF8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED18/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE38/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEB8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF78/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F118/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F138/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F158/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F178/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F198/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1B8/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F218/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F318/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F378/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F418/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F458/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F478/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F498/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F618/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F638/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F658/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F678/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F698/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F718/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F738/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F758/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F778/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F798/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8F8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F918/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA18/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA78/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA98/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB18/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBD8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC78/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDF8/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE18/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE58/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE98/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FED8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF18/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF58/4, 0x1);
++      INSTANCE_WR(ctx, 0x304B8/4, 0x8);
++      INSTANCE_WR(ctx, 0x304D8/4, 0x8);
++      INSTANCE_WR(ctx, 0x304F8/4, 0x8);
++      INSTANCE_WR(ctx, 0x30518/4, 0x8);
++      INSTANCE_WR(ctx, 0x30538/4, 0x8);
++      INSTANCE_WR(ctx, 0x30558/4, 0x8);
++      INSTANCE_WR(ctx, 0x30578/4, 0x8);
++      INSTANCE_WR(ctx, 0x30598/4, 0x8);
++      INSTANCE_WR(ctx, 0x305B8/4, 0x11);
++      INSTANCE_WR(ctx, 0x306B8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306D8/4, 0x400);
++      INSTANCE_WR(ctx, 0x306F8/4, 0x400);
++      INSTANCE_WR(ctx, 0x30718/4, 0x400);
++      INSTANCE_WR(ctx, 0x30738/4, 0x400);
++      INSTANCE_WR(ctx, 0x30758/4, 0x400);
++      INSTANCE_WR(ctx, 0x30778/4, 0x400);
++      INSTANCE_WR(ctx, 0x30798/4, 0x400);
++      INSTANCE_WR(ctx, 0x307B8/4, 0x400);
++      INSTANCE_WR(ctx, 0x307D8/4, 0x300);
++      INSTANCE_WR(ctx, 0x307F8/4, 0x300);
++      INSTANCE_WR(ctx, 0x30818/4, 0x300);
++      INSTANCE_WR(ctx, 0x30838/4, 0x300);
++      INSTANCE_WR(ctx, 0x30858/4, 0x300);
++      INSTANCE_WR(ctx, 0x30878/4, 0x300);
++      INSTANCE_WR(ctx, 0x30898/4, 0x300);
++      INSTANCE_WR(ctx, 0x308B8/4, 0x300);
++      INSTANCE_WR(ctx, 0x308D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x308F8/4, 0xF);
++      INSTANCE_WR(ctx, 0x309F8/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A18/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A38/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A78/4, 0x1);
++      INSTANCE_WR(ctx, 0x30AD8/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AF8/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B38/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BD8/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C58/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DB8/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E58/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E98/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EB8/4, 0x400);
++      INSTANCE_WR(ctx, 0x30ED8/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EF8/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F78/4, 0x11);
++      INSTANCE_WR(ctx, 0x31078/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x31098/4, 0xF);
++      INSTANCE_WR(ctx, 0x31398/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x31418/4, 0x11);
++      INSTANCE_WR(ctx, 0x31478/4, 0x4);
++      INSTANCE_WR(ctx, 0x314B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x314D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x31558/4, 0x1);
++      INSTANCE_WR(ctx, 0x315F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x31638/4, 0x1);
++      INSTANCE_WR(ctx, 0x316B8/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316F8/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x31718/4, 0x40);
++      INSTANCE_WR(ctx, 0x31738/4, 0x100);
++      INSTANCE_WR(ctx, 0x31758/4, 0x10100);
++      INSTANCE_WR(ctx, 0x31778/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319D8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319F8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A18/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A58/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A78/4, 0x1);
++      INSTANCE_WR(ctx, 0x31AD8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BF8/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C38/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C58/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C78/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C98/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CB8/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CF8/4, 0x1A);
++      INSTANCE_WR(ctx, 0x153C/4, 0x4);
++      INSTANCE_WR(ctx, 0x17FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x181C/4, 0x4);
++      INSTANCE_WR(ctx, 0x183C/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18DC/4, 0x4);
++      INSTANCE_WR(ctx, 0x193C/4, 0x4);
++      INSTANCE_WR(ctx, 0x195C/4, 0x4);
++      INSTANCE_WR(ctx, 0x197C/4, 0x80);
++      INSTANCE_WR(ctx, 0x199C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E3C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E5C/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E7C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E9C/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EBC/4, 0x3);
++      INSTANCE_WR(ctx, 0x1EDC/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F7C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F9C/4, 0x3);
++      INSTANCE_WR(ctx, 0x201C/4, 0x4);
++      INSTANCE_WR(ctx, 0x164BC/4, 0x4);
++      INSTANCE_WR(ctx, 0x164DC/4, 0x3);
++      INSTANCE_WR(ctx, 0x1671C/4, 0xF);
++      INSTANCE_WR(ctx, 0x1689C/4, 0x4);
++      INSTANCE_WR(ctx, 0x168BC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168DC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168FC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x1691C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16ABC/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D5C/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DBC/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DDC/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E1C/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F1C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F7C/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FFC/4, 0x11);
++      INSTANCE_WR(ctx, 0x1701C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1705C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x1707C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x1709C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1721C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1723C/4, 0x2);
++      INSTANCE_WR(ctx, 0x1725C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1727C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1729C/4, 0x2);
++      INSTANCE_WR(ctx, 0x172BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x172FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1731C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1733C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1735C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1737C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1739C/4, 0x1);
++      INSTANCE_WR(ctx, 0x173BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x173DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x173FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x174FC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x1751C/4, 0xF);
++      INSTANCE_WR(ctx, 0x1761C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x1767C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1769C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1771C/4, 0x4);
++      INSTANCE_WR(ctx, 0x177DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1787C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1797C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17ADC/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x180BC/4, 0x8);
++      INSTANCE_WR(ctx, 0x180DC/4, 0x8);
++      INSTANCE_WR(ctx, 0x180FC/4, 0x8);
++      INSTANCE_WR(ctx, 0x1811C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1813C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1815C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1817C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1819C/4, 0x8);
++      INSTANCE_WR(ctx, 0x181BC/4, 0x11);
++      INSTANCE_WR(ctx, 0x182BC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x182FC/4, 0x400);
++      INSTANCE_WR(ctx, 0x1831C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1833C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1835C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1837C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1839C/4, 0x400);
++      INSTANCE_WR(ctx, 0x183BC/4, 0x400);
++      INSTANCE_WR(ctx, 0x183DC/4, 0x300);
++      INSTANCE_WR(ctx, 0x183FC/4, 0x300);
++      INSTANCE_WR(ctx, 0x1841C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1843C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1845C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1847C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1849C/4, 0x300);
++      INSTANCE_WR(ctx, 0x184BC/4, 0x300);
++      INSTANCE_WR(ctx, 0x184DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x184FC/4, 0xF);
++      INSTANCE_WR(ctx, 0x185FC/4, 0x20);
++      INSTANCE_WR(ctx, 0x1861C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1863C/4, 0x100);
++      INSTANCE_WR(ctx, 0x1867C/4, 0x1);
++      INSTANCE_WR(ctx, 0x186DC/4, 0x40);
++      INSTANCE_WR(ctx, 0x186FC/4, 0x100);
++      INSTANCE_WR(ctx, 0x1873C/4, 0x3);
++      INSTANCE_WR(ctx, 0x187DC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x1885C/4, 0x2);
++      INSTANCE_WR(ctx, 0x1887C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x18ABC/4, 0x400);
++      INSTANCE_WR(ctx, 0x18ADC/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AFC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C9C/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F9C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x1901C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1907C/4, 0x4);
++      INSTANCE_WR(ctx, 0x190BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x190DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1915C/4, 0x1);
++      INSTANCE_WR(ctx, 0x191FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1923C/4, 0x1);
++      INSTANCE_WR(ctx, 0x192BC/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192FC/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x1931C/4, 0x40);
++      INSTANCE_WR(ctx, 0x1933C/4, 0x100);
++      INSTANCE_WR(ctx, 0x1935C/4, 0x10100);
++      INSTANCE_WR(ctx, 0x1937C/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195DC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195FC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x1961C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1965C/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x1967C/4, 0x1);
++      INSTANCE_WR(ctx, 0x196DC/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1983C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1985C/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x1987C/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x1989C/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198BC/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198FC/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1993C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BFC/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C1C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C3C/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CDC/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D3C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D7C/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D9C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A23C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A25C/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A27C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A29C/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2BC/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2DC/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A37C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A39C/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A41C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8BC/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8DC/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB1C/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC9C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECBC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECDC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECFC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED1C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEBC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F11C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F13C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F15C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F17C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F19C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1BC/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F21C/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F31C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F37C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F41C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F45C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F47C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F49C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F61C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F63C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F65C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F67C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F69C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F71C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F73C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F75C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F77C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F79C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8FC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F91C/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA1C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB1C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBDC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDFC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FEDC/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x304BC/4, 0x8);
++      INSTANCE_WR(ctx, 0x304DC/4, 0x8);
++      INSTANCE_WR(ctx, 0x304FC/4, 0x8);
++      INSTANCE_WR(ctx, 0x3051C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3053C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3055C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3057C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3059C/4, 0x8);
++      INSTANCE_WR(ctx, 0x305BC/4, 0x11);
++      INSTANCE_WR(ctx, 0x306BC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x306FC/4, 0x400);
++      INSTANCE_WR(ctx, 0x3071C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3073C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3075C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3077C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3079C/4, 0x400);
++      INSTANCE_WR(ctx, 0x307BC/4, 0x400);
++      INSTANCE_WR(ctx, 0x307DC/4, 0x300);
++      INSTANCE_WR(ctx, 0x307FC/4, 0x300);
++      INSTANCE_WR(ctx, 0x3081C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3083C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3085C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3087C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3089C/4, 0x300);
++      INSTANCE_WR(ctx, 0x308BC/4, 0x300);
++      INSTANCE_WR(ctx, 0x308DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x308FC/4, 0xF);
++      INSTANCE_WR(ctx, 0x309FC/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A1C/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A3C/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x30ADC/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AFC/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B3C/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BDC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C5C/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DBC/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EBC/4, 0x400);
++      INSTANCE_WR(ctx, 0x30EDC/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EFC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x3107C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x3109C/4, 0xF);
++      INSTANCE_WR(ctx, 0x3139C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x3141C/4, 0x11);
++      INSTANCE_WR(ctx, 0x3147C/4, 0x4);
++      INSTANCE_WR(ctx, 0x314BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x314DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3155C/4, 0x1);
++      INSTANCE_WR(ctx, 0x315FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3163C/4, 0x1);
++      INSTANCE_WR(ctx, 0x316BC/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316FC/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x3171C/4, 0x40);
++      INSTANCE_WR(ctx, 0x3173C/4, 0x100);
++      INSTANCE_WR(ctx, 0x3175C/4, 0x10100);
++      INSTANCE_WR(ctx, 0x3177C/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319DC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319FC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A5C/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x31ADC/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BFC/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C5C/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C7C/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C9C/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CBC/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CFC/4, 0x1A);
++      INSTANCE_WR(ctx, 0x5D000/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D040/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D060/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D080/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D0A0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D100/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D160/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D1A0/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1C0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D340/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D360/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D380/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D3A0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D400/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D460/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D4A0/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4C0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D620/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D700/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D720/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D740/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D760/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D780/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D7A0/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7C0/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7E0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D820/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8E0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D900/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D940/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D960/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA80/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB20/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC60/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC80/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCA0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCC0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCE0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD00/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD20/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD60/4, 0x4);
++      INSTANCE_WR(ctx, 0x651C0/4, 0x11);
++      INSTANCE_WR(ctx, 0x65200/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D024/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D044/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D064/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D084/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D144/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D184/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1A4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D324/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D344/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D364/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D384/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D444/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D484/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4A4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D604/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6E4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D704/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D724/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D744/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D764/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D784/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7A4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D804/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8C4/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8E4/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D924/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D944/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA64/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB04/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC44/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC64/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC84/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCA4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCC4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCE4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD04/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD44/4, 0x4);
++      INSTANCE_WR(ctx, 0x651A4/4, 0x11);
++      INSTANCE_WR(ctx, 0x651E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D028/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D048/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D068/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D088/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D148/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D188/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1A8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D328/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D348/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D368/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D388/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D448/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D488/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4A8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D608/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6E8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D708/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D728/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D748/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D768/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D788/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7A8/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D808/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8C8/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D928/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D948/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA68/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB08/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC48/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC68/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC88/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCA8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCC8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD08/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD48/4, 0x4);
++      INSTANCE_WR(ctx, 0x651A8/4, 0x11);
++      INSTANCE_WR(ctx, 0x651E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D02C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D04C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D06C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D08C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D14C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D18C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1AC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D32C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D34C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D36C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D38C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D44C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D48C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4AC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D60C/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6EC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D70C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D72C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D74C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D76C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D78C/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7AC/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D80C/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8CC/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8EC/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D92C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D94C/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA6C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB0C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC8C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCAC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD0C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD4C/4, 0x4);
++      INSTANCE_WR(ctx, 0x651AC/4, 0x11);
++      INSTANCE_WR(ctx, 0x651EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D030/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D050/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D070/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D090/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D150/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D190/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1B0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D330/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D350/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D370/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D390/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D450/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D490/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4B0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D610/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6F0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D710/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D730/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D750/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D770/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D790/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7B0/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D810/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8D0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8F0/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D930/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D950/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB10/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC50/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC70/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC90/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCB0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD10/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD50/4, 0x4);
++      INSTANCE_WR(ctx, 0x651B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x651F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D034/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D054/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D074/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D094/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D154/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D194/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1B4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D334/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D354/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D374/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D394/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D454/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D494/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4B4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D614/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6F4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D714/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D734/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D754/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D774/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D794/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7B4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D814/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8D4/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8F4/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D934/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D954/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB14/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC54/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC74/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC94/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCB4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCD4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCF4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD14/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD54/4, 0x4);
++      INSTANCE_WR(ctx, 0x651B4/4, 0x11);
++      INSTANCE_WR(ctx, 0x651F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D038/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D058/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D078/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D098/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D158/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D198/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1B8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D338/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D358/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D378/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D398/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D458/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D498/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4B8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D618/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6F8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D718/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D738/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D758/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D778/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D798/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7B8/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D818/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8D8/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8F8/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D938/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D958/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB18/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC58/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC78/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC98/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCB8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCD8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCF8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD18/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD58/4, 0x4);
++      INSTANCE_WR(ctx, 0x651B8/4, 0x11);
++      INSTANCE_WR(ctx, 0x651F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D03C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D05C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D07C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D09C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D15C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D19C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1BC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D33C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D35C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D37C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D39C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D45C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D49C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4BC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D61C/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6FC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D71C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D73C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D75C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D77C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D79C/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7BC/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D81C/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8DC/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D93C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D95C/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB1C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC5C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCBC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCDC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCFC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x651BC/4, 0x11);
++      INSTANCE_WR(ctx, 0x651FC/4, 0x1);
++}
++
++static void
++nvaa_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x001d4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00220/4, 0x0000fe0c);
++      INSTANCE_WR(ctx, 0x00238/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00264/4, 0x00000187);
++      INSTANCE_WR(ctx, 0x00278/4, 0x00001018);
++      INSTANCE_WR(ctx, 0x0027c/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x002cc/4, 0x042500df);
++      INSTANCE_WR(ctx, 0x002d4/4, 0x00000600);
++      INSTANCE_WR(ctx, 0x002ec/4, 0x01000000);
++      INSTANCE_WR(ctx, 0x002f0/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002f8/4, 0x00000800);
++      INSTANCE_WR(ctx, 0x00310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00310/4, 0x000e0080);
++      INSTANCE_WR(ctx, 0x00310/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00338/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0033c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00350/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00384/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00394/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003e4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00400/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00408/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00414/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0042c/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00444/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x0044c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00450/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000029);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004d8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000012);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00524/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00540/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00544/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00548/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00558/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00588/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0058c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x0059c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00000e00);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005fc/4, 0x00000200);
++      INSTANCE_WR(ctx, 0x00604/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x000000f0);
++      INSTANCE_WR(ctx, 0x0060c/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x00618/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0061c/4, 0x000000f0);
++      INSTANCE_WR(ctx, 0x00620/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x00628/4, 0x00000009);
++      INSTANCE_WR(ctx, 0x00634/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00638/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00640/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00650/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00658/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00660/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00668/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00670/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00674/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00678/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00680/4, 0x00001f80);
++      INSTANCE_WR(ctx, 0x00698/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x006a4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x006a8/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x006b0/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x006b4/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x006b8/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x006cc/4, 0x003d0040);
++      INSTANCE_WR(ctx, 0x006d4/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x006f4/4, 0x003d0040);
++      INSTANCE_WR(ctx, 0x006f8/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x00740/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00748/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0074c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00750/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00760/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00764/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00788/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00790/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00798/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x007a0/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x007a4/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x007b0/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x007c8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x007d0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007e0/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x007e4/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00808/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00810/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00818/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00820/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00824/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00830/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x00848/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0084c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00850/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00860/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00864/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00888/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00890/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00898/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x008a0/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x008a4/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x008b0/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x008c8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008cc/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00908/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00910/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00918/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00920/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00924/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00930/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x0094c/4, 0x01127070);
++      INSTANCE_WR(ctx, 0x0095c/4, 0x07ffffff);
++      INSTANCE_WR(ctx, 0x00978/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00978/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00978/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00978/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x003fe006);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x003fe000);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x0cf7f007);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x02bf7fff);
++      INSTANCE_WR(ctx, 0x07ba0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x07bc0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07be0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07c00/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07c20/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07c40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07ca0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07cc0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07ce0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07d00/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07d20/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1a7c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a7e0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a800/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a820/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a840/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a860/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a880/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a8a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a8c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a8e0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a900/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a920/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a940/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a960/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a980/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a9a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1ae40/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1ae60/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1aec0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x1aee0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x1af80/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x1b020/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1b080/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x1b0c0/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x1b0e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1b100/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x1b120/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1b140/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1b160/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1be20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1bf00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1bf20/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1bf80/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1c1e0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1c2c0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1c3c0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x1c3e0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1c5e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1c640/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1c6a0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1c6c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1c6e0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1c760/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x1c780/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x1c820/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1ca40/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1ca60/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1ca80/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1caa0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cac0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cae0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb40/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb60/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb80/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cba0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cbc0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cbe0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cc00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cc20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cc40/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1d120/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x1d140/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x1d1a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1d1e0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d200/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d220/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d240/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d260/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1d2e0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1d300/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1d340/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1dae0/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x1db20/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1db40/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1db60/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1db80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dca0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dcc0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dd00/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x1dd40/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x1de80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dea0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dec0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dee0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00a24/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00a64/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00a84/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00aa4/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x00ae4/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x0b344/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0b364/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0b3a4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0b3c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0b3e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b424/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x0b464/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x010c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x010e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39a68/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39a88/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39aa8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x39ac8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x39b08/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x39b48/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x39b68/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x39b88/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39ba8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39bc8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x39c28/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x39c48/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x39ca8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x414e8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x417c8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00a2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00acc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00b6c/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x00d6c/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x00f2c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00f4c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00f8c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00fac/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00fec/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x0118c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0362c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0366c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x041cc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1484c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x15950/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x159b0/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x00a34/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00c74/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00e14/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00e54/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x00ff4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01014/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01074/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01114/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01134/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x01154/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x01174/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x01194/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x01254/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01374/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01394/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01654/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01874/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01894/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x018b4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x018d4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x018f4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01914/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01934/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01954/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01974/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01994/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x019b4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x019d4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x019f4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01a14/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01a34/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01a54/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d94/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01dd4/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x01eb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01ef4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01f34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01f94/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x02114/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02214/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02314/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x023f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02414/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02434/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02454/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02474/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02494/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x024b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x024f4/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x02534/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x028b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x028d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x028f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02934/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02974/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02a14/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02a34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x00b78/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00b98/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x00bb8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00d58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00f98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00fb8/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00fd8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00ff8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x01018/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x01038/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x01458/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01478/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01498/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x014b8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x014d8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x014f8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01518/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01538/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01558/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01578/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01598/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x015b8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x015d8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x015f8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01618/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01638/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01658/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x016b8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01878/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x01898/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x018d8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01958/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01a38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01a58/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x01a78/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x01a98/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x01ad8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x01b98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01bd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01bf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01c18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01c38/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x01c58/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01d38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01d78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01d98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01db8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01e58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01e98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01eb8/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x01f38/4, 0x04444480);
++      INSTANCE_WR(ctx, 0x02698/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x026d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02758/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x02798/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x027b8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x027d8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x027f8/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x02818/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x02b58/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02cd8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02cf8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02d18/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02d38/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02d58/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02e78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02ef8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02fb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03018/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03178/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03198/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x031b8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x031d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x031f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03218/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03238/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03278/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03378/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x033d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x03458/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x034b8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x034d8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x034f8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x03658/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03678/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03698/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x036b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x036d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x036f8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03718/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03758/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03798/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x037b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x037d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x037f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03818/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03838/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03858/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03958/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x03978/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x03a78/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x03ad8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03af8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03b78/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x03c38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03cd8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03dd8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x03e58/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03e78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03eb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03ef8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03f38/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x03f78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03fb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04518/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04538/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04558/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04578/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04598/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x045b8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x045d8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x045f8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04618/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04718/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x04738/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04758/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04778/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04798/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x047b8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x047d8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x047f8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04818/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04838/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04858/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04878/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04898/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x048b8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x048d8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x048f8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04918/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04938/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04958/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x04a58/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x04a78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04a98/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x04ad8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04b38/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x04b58/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x04b98/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x04c38/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x04cb8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x04cd8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x04e18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04eb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x04ef8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04f18/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04f38/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04f58/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x04fd8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x050d8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x050f8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x053f8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x05418/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x05498/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x054f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x05538/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05558/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x055d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05678/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x05718/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x05758/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x057d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05938/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05978/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05998/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x059b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x059d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x059f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05a18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05a38/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x05b38/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x05b58/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x05c58/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x05c78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05df8/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x05e18/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x05e38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05e78/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x05e98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05ef8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x06018/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x06058/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x06078/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x06098/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x060b8/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x060d8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x06118/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x06158/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x063f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06418/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06438/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x064d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06538/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06558/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06578/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x06598/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x065b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06a58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06a78/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x06a98/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06ab8/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x06ad8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x06af8/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x06b18/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06bb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06bd8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x06c58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0aef8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0af18/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00abc/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x00b1c/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x00b5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00b7c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00b9c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00bdc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00c3c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x00cfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x0000007f);
++      INSTANCE_WR(ctx, 0x00d7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00d9c/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x00ddc/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00dfc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00e1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00e5c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00edc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00efc/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00fdc/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x00ffc/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x0171c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0177c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01e9c/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x01ebc/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x01f1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021fc/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x0225c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022dc/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x022fc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0281c/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x0285c/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x0289c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x028bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x028fc/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x0295c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x41800/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x41840/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x41860/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x41880/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x418a0/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x418c0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x41920/4, 0x00000e00);
++      INSTANCE_WR(ctx, 0x41940/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x41960/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x419c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x41a00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x41a20/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x41ba0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x41be0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x41ca0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41cc0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41ce0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41d00/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41d20/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x41d40/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x41d60/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x41d80/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x41dc0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x41e80/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x41ea0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x41ee0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x41f00/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x42020/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x420c0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x42200/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x42220/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x42240/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x42260/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x42280/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x422a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x422c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x42300/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x49700/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x49740/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0012c/4, 0x00000002);
++}
++
++int
++nv50_graph_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int grctx_size = 0x70000, hdr;
++      int ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
++                                   NVOBJ_FLAG_ZERO_ALLOC |
++                                   NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
++      if (ret)
++              return ret;
++
++      hdr = IS_G80 ? 0x200 : 0x20;
++      INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002);
++      INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
++                                         grctx_size - 1);
++      INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
++      INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0);
++      INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
++      INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
++
++      INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4,
++                  chan->ramin->instance >> 12);
++      if (dev_priv->chipset == 0xaa)
++              INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00004/4, 0x00000002);
++      else
++              INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002);
++
++      switch (dev_priv->chipset) {
++      case 0x50:
++              nv50_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0x84:
++              nv84_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0x86:
++              nv86_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0x92:
++              nv92_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0xaa:
++              nvaa_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      default:
++              /* This is complete crack, it accidently used to make at
++               * least some G8x cards work partially somehow, though there's
++               * no good reason why - and it stopped working as the rest
++               * of the code got off the drugs..
++               */
++              ret = engine->graph.load_context(chan);
++              if (ret) {
++                      DRM_ERROR("Error hacking up context: %d\n", ret);
++                      return ret;
++              }
++              break;
++      }
++
++      return 0;
++}
++
++void
++nv50_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i, hdr;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      hdr = IS_G80 ? 0x200 : 0x20;
++      for (i=hdr; i<hdr+24; i+=4)
++              INSTANCE_WR(chan->ramin->gpuobj, i/4, 0);
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
++}
++
++static int
++nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t old_cp, tv = 20000;
++      int i;
++
++      DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save);
++
++      old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(0x400824, NV_READ(0x400824) |
++               (save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
++                       NV40_PGRAPH_CTXCTL_0310_XFER_LOAD));
++      NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
++
++      for (i = 0; i < tv; i++) {
++              if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
++                      break;
++      }
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
++
++      if (i == tv) {
++              DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
++              DRM_ERROR("0x40030C = 0x%08x\n",
++                        NV_READ(NV40_PGRAPH_CTXCTL_030C));
++              return -EBUSY;
++      }
++
++      return 0;
++}
++
++int
++nv50_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst = chan->ramin->instance >> 12;
++      int ret; (void)ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++#if 0
++      if ((ret = nv50_graph_transfer_context(dev, inst, 0)))
++              return ret;
++#endif
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(0x400320, 4);
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31));
++
++      return 0;
++}
++
++int
++nv50_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      uint32_t inst = chan->ramin->instance >> 12;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      return nv50_graph_transfer_context(dev, inst, 1);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_instmem.c git-nokia/drivers/gpu/drm-tungsten/nv50_instmem.c
+--- git/drivers/gpu/drm-tungsten/nv50_instmem.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_instmem.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,324 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++typedef struct {
++      uint32_t save1700[5]; /* 0x1700->0x1710 */
++
++      struct nouveau_gpuobj_ref *pramin_pt;
++      struct nouveau_gpuobj_ref *pramin_bar;
++} nv50_instmem_priv;
++
++#define NV50_INSTMEM_PAGE_SHIFT 12
++#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
++#define NV50_INSTMEM_PT_SIZE(a)       (((a) >> 12) << 3)
++
++/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
++ */
++#define BAR0_WI32(g,o,v) do {                                     \
++      uint32_t offset;                                          \
++      if ((g)->im_backing) {                                    \
++              offset = (g)->im_backing->start;                  \
++      } else {                                                  \
++              offset  = chan->ramin->gpuobj->im_backing->start; \
++              offset += (g)->im_pramin->start;                  \
++      }                                                         \
++      offset += (o);                                            \
++      NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v));             \
++} while(0)
++
++int
++nv50_instmem_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan;
++      uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
++      nv50_instmem_priv *priv;
++      int ret, i;
++      uint32_t v;
++
++      priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
++      if (!priv)
++              return -ENOMEM;
++      dev_priv->Engine.instmem.priv = priv;
++
++      /* Save state, will restore at takedown. */
++      for (i = 0x1700; i <= 0x1710; i+=4)
++              priv->save1700[(i-0x1700)/4] = NV_READ(i);
++
++      /* Reserve the last MiB of VRAM, we should probably try to avoid
++       * setting up the below tables over the top of the VBIOS image at
++       * some point.
++       */
++      dev_priv->ramin_rsvd_vram = 1 << 20;
++      c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
++      c_size   = 128 << 10;
++      c_vmpd   = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
++      c_ramfc  = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
++      c_base   = c_vmpd + 0x4000;
++      pt_size  = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size);
++
++      DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset);
++      DRM_DEBUG("    VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
++      DRM_DEBUG("  Aperture size: %d MiB\n",
++                (uint32_t)dev_priv->ramin->size >> 20);
++      DRM_DEBUG("        PT size: %d KiB\n", pt_size >> 10);
++
++      NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
++
++      /* Create a fake channel, and use it as our "dummy" channels 0/127.
++       * The main reason for creating a channel is so we can use the gpuobj
++       * code.  However, it's probably worth noting that NVIDIA also setup
++       * their channels 0/127 with the same values they configure here.
++       * So, there may be some other reason for doing this.
++       *
++       * Have to create the entire channel manually, as the real channel
++       * creation code assumes we have PRAMIN access, and we don't until
++       * we're done here.
++       */
++      chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER);
++      if (!chan)
++              return -ENOMEM;
++      chan->id = 0;
++      chan->dev = dev;
++      chan->file_priv = (struct drm_file *)-2;
++      dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
++
++      /* Channel's PRAMIN object + heap */
++      if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0,
++                                         NULL, &chan->ramin)))
++              return ret;
++
++      if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
++              return -ENOMEM;
++
++      /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
++      if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
++                                         0x4000, 0, NULL, &chan->ramfc)))
++              return ret;
++
++      for (i = 0; i < c_vmpd; i += 4)
++              BAR0_WI32(chan->ramin->gpuobj, i, 0);
++
++      /* VM page directory */
++      if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
++                                         0x4000, 0, &chan->vm_pd, NULL)))
++              return ret;
++      for (i = 0; i < 0x4000; i += 8) {
++              BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
++              BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
++      }
++
++      /* PRAMIN page table, cheat and map into VM at 0x0000000000.
++       * We map the entire fake channel into the start of the PRAMIN BAR
++       */
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
++                                        0, &priv->pramin_pt)))
++              return ret;
++
++      for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) {
++              if (v < (c_offset + c_size))
++                      BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
++              else
++                      BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
++              BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
++      }
++
++      BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
++      BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
++
++      /* DMA object for PRAMIN BAR */
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
++                                        &priv->pramin_bar)))
++              return ret;
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
++
++      /* Poke the relevant regs, and pray it works :) */
++      NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
++      NV_WRITE(NV50_PUNK_UNK1710, 0);
++      NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
++                                       NV50_PUNK_BAR_CFG_BASE_VALID);
++      NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0);
++      NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
++                                      NV50_PUNK_BAR3_CTXDMA_VALID);
++
++      /* Assume that praying isn't enough, check that we can re-read the
++       * entire fake channel back from the PRAMIN BAR */
++      for (i = 0; i < c_size; i+=4) {
++              if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) {
++                      DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i);
++                      return -EINVAL;
++              }
++      }
++
++      /* Global PRAMIN heap */
++      if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
++                                c_size, dev_priv->ramin->size - c_size)) {
++              dev_priv->ramin_heap = NULL;
++              DRM_ERROR("Failed to init RAMIN heap\n");
++      }
++
++      /*XXX: incorrect, but needed to make hash func "work" */
++      dev_priv->ramht_offset = 0x10000;
++      dev_priv->ramht_bits   = 9;
++      dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
++      return 0;
++}
++
++void
++nv50_instmem_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
++      struct nouveau_channel *chan = dev_priv->fifos[0];
++      int i;
++
++      DRM_DEBUG("\n");
++
++      if (!priv)
++              return;
++
++      /* Restore state from before init */
++      for (i = 0x1700; i <= 0x1710; i+=4)
++              NV_WRITE(i, priv->save1700[(i-0x1700)/4]);
++
++      nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
++      nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
++
++      /* Destroy dummy channel */
++      if (chan) {
++              nouveau_gpuobj_del(dev, &chan->vm_pd);
++              nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++              nouveau_gpuobj_ref_del(dev, &chan->ramin);
++              nouveau_mem_takedown(&chan->ramin_heap);
++
++              dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
++              drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
++      }
++
++      dev_priv->Engine.instmem.priv = NULL;
++      drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
++}
++
++int
++nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
++{
++      if (gpuobj->im_backing)
++              return -EINVAL;
++
++      *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
++      if (*sz == 0)
++              return -EINVAL;
++
++      gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE,
++                                             *sz, NOUVEAU_MEM_FB |
++                                             NOUVEAU_MEM_NOVM,
++                                             (struct drm_file *)-2);
++      if (!gpuobj->im_backing) {
++              DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n");
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++void
++nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (gpuobj && gpuobj->im_backing) {
++              if (gpuobj->im_bound)
++                      dev_priv->Engine.instmem.unbind(dev, gpuobj);
++              nouveau_mem_free(dev, gpuobj->im_backing);
++              gpuobj->im_backing = NULL;
++      }
++}
++
++int
++nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
++      uint32_t pte, pte_end, vram;
++
++      if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
++              return -EINVAL;
++
++      DRM_DEBUG("st=0x%0llx sz=0x%0llx\n",
++                gpuobj->im_pramin->start, gpuobj->im_pramin->size);
++
++      pte     = (gpuobj->im_pramin->start >> 12) << 3;
++      pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
++      vram    = gpuobj->im_backing->start;
++
++      DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n",
++                gpuobj->im_pramin->start, pte, pte_end);
++      DRM_DEBUG("first vram page: 0x%llx\n",
++                gpuobj->im_backing->start);
++
++      while (pte < pte_end) {
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
++
++              pte += 8;
++              vram += NV50_INSTMEM_PAGE_SIZE;
++      }
++
++      gpuobj->im_bound = 1;
++      return 0;
++}
++
++int
++nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
++      uint32_t pte, pte_end;
++
++      if (gpuobj->im_bound == 0)
++              return -EINVAL;
++
++      pte     = (gpuobj->im_pramin->start >> 12) << 3;
++      pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
++      while (pte < pte_end) {
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
++              pte += 8;
++      }
++
++      gpuobj->im_bound = 0;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_mc.c git-nokia/drivers/gpu/drm-tungsten/nv50_mc.c
+--- git/drivers/gpu/drm-tungsten/nv50_mc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_mc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,43 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++int
++nv50_mc_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++      return 0;
++}
++
++void nv50_mc_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv_drv.c git-nokia/drivers/gpu/drm-tungsten/nv_drv.c
+--- git/drivers/gpu/drm-tungsten/nv_drv.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv_drv.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,94 @@
++/* nv_drv.c -- nv driver -*- linux-c -*-
++ * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright 2005 Lars Knoll <lars@trolltech.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Daryll Strauss <daryll@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Lars Knoll <lars@trolltech.com>
++ */
++
++#include "drmP.h"
++#include "nv_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      nv_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_MTRR | DRIVER_USE_AGP,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init nv_init(void)
++{
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit nv_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(nv_init);
++module_exit(nv_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/nv_drv.h git-nokia/drivers/gpu/drm-tungsten/nv_drv.h
+--- git/drivers/gpu/drm-tungsten/nv_drv.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv_drv.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,52 @@
++/* nv_drv.h -- NV DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
++ *
++ * Copyright 2005 Lars Knoll <lars@trolltech.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Lars Knoll <lars@trolltech.com>
++ */
++
++#ifndef __NV_H__
++#define __NV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Lars Knoll"
++
++#define DRIVER_NAME           "nv"
++#define DRIVER_DESC           "NV"
++#define DRIVER_DATE           "20051006"
++
++#define DRIVER_MAJOR          0
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     1
++
++#define NV04 04
++#define NV10 10
++#define NV20 20
++#define NV30 30
++#define NV40 40
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drm.h git-nokia/drivers/gpu/drm-tungsten/pvr2d_drm.h
+--- git/drivers/gpu/drm-tungsten/pvr2d_drm.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drm.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,42 @@
++/* pvr2d_drm.h -- Public header for the PVR2D helper module -*- linux-c -*- */
++
++#ifndef __PVR2D_DRM_H__
++#define __PVR2D_DRM_H__
++
++
++/* This wouldn't work with 64 bit userland */
++struct drm_pvr2d_virt2phys {
++      uint32_t virt;
++      uint32_t length;
++      uint32_t phys_array;
++      uint32_t handle;
++};
++
++struct drm_pvr2d_buf_release {
++      uint32_t handle;
++};
++
++enum drm_pvr2d_cflush_type {
++      DRM_PVR2D_CFLUSH_FROM_GPU = 1,
++      DRM_PVR2D_CFLUSH_TO_GPU = 2
++};
++
++struct drm_pvr2d_cflush {
++      enum drm_pvr2d_cflush_type type;
++      uint32_t virt;
++      uint32_t length;
++};
++
++#define DRM_PVR2D_VIRT2PHYS   0x0
++#define DRM_PVR2D_BUF_RELEASE 0x1
++#define DRM_PVR2D_CFLUSH      0x2
++
++#define DRM_IOCTL_PVR2D_VIRT2PHYS DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR2D_VIRT2PHYS, \
++                                        struct drm_pvr2d_virt2phys)
++#define DRM_IOCTL_PVR2D_BUF_RELEASE DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_BUF_RELEASE, \
++                                        struct drm_pvr2d_buf_release)
++#define DRM_IOCTL_PVR2D_CFLUSH DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_CFLUSH, \
++                                     struct drm_pvr2d_cflush)
++
++
++#endif /* __PVR2D_DRM_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drv.c git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.c
+--- git/drivers/gpu/drm-tungsten/pvr2d_drv.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,537 @@
++
++#include "drmP.h"
++#include "drm_pciids.h"
++
++#include "pvr2d_drm.h"
++#include "pvr2d_drv.h"
++
++#define PVR2D_SHMEM_HASH_ORDER 12
++
++struct pvr2d_dev {
++      rwlock_t hash_lock;
++      struct drm_open_hash shmem_hash;
++};
++
++struct pvr2d_buf {
++      struct pvr2d_dev *dev_priv;
++      struct drm_hash_item hash;
++      struct page **pages;
++      struct kref kref;
++      uint32_t num_pages;
++};
++
++/*
++ * This pvr2d_ref object is needed strictly because
++ * idr_for_each doesn't exist in 2.6.22. With kernels
++ * supporting this function, we can use it to traverse
++ * the file list of buffers at file release.
++ */
++
++struct pvr2d_ref{
++      struct list_head head;
++      struct pvr2d_buf *buf;
++};
++
++struct pvr2d_file {
++      spinlock_t lock;
++      struct list_head ref_list;
++      struct idr buf_idr;
++};
++
++static inline struct pvr2d_dev *pvr2d_dp(struct drm_device *dev)
++{
++      return (struct pvr2d_dev *) dev->dev_private;
++}
++
++static inline struct pvr2d_file *pvr2d_fp(struct drm_file *file_priv)
++{
++      return (struct pvr2d_file *) file_priv->driver_priv;
++}
++
++
++static void
++pvr2d_free_buf(struct pvr2d_buf *buf)
++{
++      uint32_t i;
++
++      for (i=0; i<buf->num_pages; ++i) {
++              struct page *page = buf->pages[i];
++
++              if (!PageReserved(page))
++                      set_page_dirty_lock(page);
++
++              put_page(page);
++      }
++
++      kfree(buf->pages);
++      kfree(buf);
++}
++
++static void
++pvr2d_release_buf(struct kref *kref)
++{
++      struct pvr2d_buf *buf =
++              container_of(kref, struct pvr2d_buf, kref);
++
++      struct pvr2d_dev *dev_priv = buf->dev_priv;
++
++      drm_ht_remove_item(&dev_priv->shmem_hash, &buf->hash);
++      write_unlock(&dev_priv->hash_lock);
++      pvr2d_free_buf(buf);
++      write_lock(&dev_priv->hash_lock);
++}
++
++static struct pvr2d_buf *
++pvr2d_alloc_buf(struct pvr2d_dev *dev_priv, uint32_t num_pages)
++{
++      struct pvr2d_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL);
++
++      if (unlikely(!buf))
++              return NULL;
++
++      buf->pages = kmalloc(num_pages * sizeof(*buf->pages), GFP_KERNEL);
++      if (unlikely(!buf->pages))
++              goto out_err0;
++
++      buf->dev_priv = dev_priv;
++      buf->num_pages = num_pages;
++
++
++      DRM_DEBUG("pvr2d_alloc_buf successfully completed.\n");
++      return buf;
++
++out_err0:
++      kfree(buf);
++
++      return NULL;
++}
++
++
++static struct pvr2d_buf*
++pvr2d_lookup_buf(struct pvr2d_dev *dev_priv, struct page *first_phys)
++{
++      struct drm_hash_item *hash;
++      struct pvr2d_buf *buf = NULL;
++      int ret;
++
++      read_lock(&dev_priv->hash_lock);
++      ret = drm_ht_find_item(&dev_priv->shmem_hash,
++                             (unsigned long)first_phys,
++                             &hash);
++
++      if (likely(ret == 0)) {
++              buf = drm_hash_entry(hash, struct pvr2d_buf, hash);
++              kref_get(&buf->kref);
++      }
++      read_unlock(&dev_priv->hash_lock);
++
++      if (buf != NULL) {
++              DRM_INFO("pvr2d_lookup_buf found already used buffer.\n");
++      }
++
++      return buf;
++}
++
++
++static int
++pvr2d_virt2phys(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_pvr2d_virt2phys *v2p = data;
++      uint32_t i;
++      unsigned nr_pages = ((v2p->virt & ~PAGE_MASK) + v2p->length + PAGE_SIZE -
++                           1) / PAGE_SIZE;
++      struct page *first_page;
++      struct pvr2d_buf *buf = NULL;
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++      struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv);
++      struct pvr2d_ref *ref;
++      int ret;
++
++
++      /*
++       * Obtain a global hash key for the pvr2d buffer structure.
++       * We use the address of the struct page of the first
++       * page.
++       */
++
++      down_read(&current->mm->mmap_sem);
++        ret = get_user_pages(current, current->mm, v2p->virt & PAGE_MASK,
++                             1, WRITE, 0, &first_page, NULL);
++        up_read(&current->mm->mmap_sem);
++
++      if (unlikely(ret < 1)) {
++              DRM_ERROR("Failed getting first page: %d\n", ret);
++              return -ENOMEM;
++      }
++
++      /*
++       * Look up buffer already in the hash table, or create
++       * and insert a new one.
++       */
++
++      while(buf == NULL) {
++              buf = pvr2d_lookup_buf(dev_priv, first_page);
++
++              if (likely(buf != NULL))
++                      break;
++
++              buf = pvr2d_alloc_buf(dev_priv, nr_pages);
++              if (unlikely(buf == NULL)) {
++                      DRM_ERROR("Failed allocating pvr2d buffer.\n");
++                      ret = -ENOMEM;
++                      goto out_put;
++              }
++
++              down_read(&current->mm->mmap_sem);
++              ret = get_user_pages(current, current->mm, v2p->virt & PAGE_MASK,
++                                   nr_pages, WRITE, 0, buf->pages, NULL);
++              up_read(&current->mm->mmap_sem);
++
++              if (unlikely(ret < nr_pages)) {
++                      DRM_ERROR("Failed getting user pages.\n");
++                      buf->num_pages = ret;
++                      ret = -ENOMEM;
++                      pvr2d_free_buf(buf);
++                      goto out_put;
++              }
++
++              kref_init(&buf->kref);
++              buf->hash.key = (unsigned long) first_page;
++
++              write_lock(&dev_priv->hash_lock);
++              ret = drm_ht_insert_item(&dev_priv->shmem_hash, &buf->hash);
++              write_unlock(&dev_priv->hash_lock);
++
++              if (unlikely(ret == -EINVAL)) {
++
++                      /*
++                       * Somebody raced us and already
++                       * inserted this buffer.
++                       * Very unlikely, but retry anyway.
++                       */
++
++                      pvr2d_free_buf(buf);
++                      buf = NULL;
++              }
++      }
++
++      /*
++       * Create a reference object that is used for unreferencing
++       * either by user action or when the drm file is closed.
++       */
++
++      ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++      if (unlikely(ref == NULL))
++              goto out_err0;
++
++      ref->buf = buf;
++      do {
++              if (idr_pre_get(&pvr2d_fpriv->buf_idr, GFP_KERNEL) == 0) {
++                      ret = -ENOMEM;
++                      DRM_ERROR("Failed idr_pre_get\n");
++                      goto out_err1;
++              }
++
++              spin_lock( &pvr2d_fpriv->lock );
++              ret = idr_get_new( &pvr2d_fpriv->buf_idr, ref, &v2p->handle);
++
++              if (likely(ret == 0))
++                      list_add_tail(&ref->head, &pvr2d_fpriv->ref_list);
++
++              spin_unlock( &pvr2d_fpriv->lock );
++
++      } while (unlikely(ret == -EAGAIN));
++
++      if (unlikely(ret != 0))
++              goto out_err1;
++
++
++      /*
++       * Copy info to user-space.
++       */
++
++      DRM_DEBUG("Converting range of %u bytes at virtual 0x%08x, physical array at 0x%08x\n",
++               v2p->length, v2p->virt, v2p->phys_array);
++
++      for (i = 0; i < nr_pages; i++) {
++              uint32_t physical = (uint32_t)page_to_pfn(buf->pages[i]) << PAGE_SHIFT;
++              DRM_DEBUG("Virtual 0x%08lx => Physical 0x%08x\n",
++                       v2p->virt + i * PAGE_SIZE, physical);
++
++              if (DRM_COPY_TO_USER((void*)(v2p->phys_array +
++                                           i * sizeof(uint32_t)),
++                                   &physical, sizeof(uint32_t))) {
++                      ret = -EFAULT;
++                      goto out_err2;
++              }
++
++      }
++
++#ifdef CONFIG_X86
++      /* XXX: Quick'n'dirty hack to avoid corruption on Poulsbo, remove when
++       * there's a better solution
++       */
++      wbinvd();
++#endif
++
++      DRM_DEBUG("pvr2d_virt2phys returning handle 0x%08x\n",
++               v2p->handle);
++
++out_put:
++      put_page(first_page);
++      return ret;
++
++out_err2:
++      spin_lock( &pvr2d_fpriv->lock );
++      list_del(&ref->head);
++      idr_remove( &pvr2d_fpriv->buf_idr, v2p->handle);
++      spin_unlock( &pvr2d_fpriv->lock );
++out_err1:
++      kfree(ref);
++out_err0:
++      write_lock(&dev_priv->hash_lock);
++      kref_put(&buf->kref, &pvr2d_release_buf);
++      write_unlock(&dev_priv->hash_lock);
++      put_page(first_page);
++      return ret;
++}
++
++
++static int
++pvr2d_buf_release(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++      struct drm_pvr2d_buf_release *br = data;
++      struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv);
++      struct pvr2d_buf *buf;
++      struct pvr2d_ref *ref;
++
++      DRM_DEBUG("pvr2d_buf_release releasing 0x%08x\n",
++                br->handle);
++
++      spin_lock( &pvr2d_fpriv->lock );
++      ref = idr_find( &pvr2d_fpriv->buf_idr, br->handle);
++
++      if (unlikely(ref == NULL)) {
++              spin_unlock( &pvr2d_fpriv->lock );
++              DRM_ERROR("Could not find pvr2d buf to unref.\n");
++              return -EINVAL;
++      }
++      (void) idr_remove( &pvr2d_fpriv->buf_idr, br->handle);
++      list_del(&ref->head);
++      spin_unlock( &pvr2d_fpriv->lock );
++
++      buf = ref->buf;
++      kfree(ref);
++
++      write_lock(&dev_priv->hash_lock);
++      kref_put(&buf->kref, &pvr2d_release_buf);
++      write_unlock(&dev_priv->hash_lock);
++
++      return 0;
++}
++
++static int
++pvr2d_cflush(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_pvr2d_cflush *cf = data;
++
++      switch (cf->type) {
++      case DRM_PVR2D_CFLUSH_FROM_GPU:
++              DRM_DEBUG("DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n",
++                        cf->virt, cf->length);
++#ifdef CONFIG_ARM
++              dmac_inv_range((const void*)cf->virt,
++                             (const void*)(cf->virt + cf->length));
++#endif
++              return 0;
++      case DRM_PVR2D_CFLUSH_TO_GPU:
++              DRM_DEBUG("DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n",
++                        cf->virt, cf->length);
++#ifdef CONFIG_ARM
++              dmac_clean_range((const void*)cf->virt,
++                               (const void*)(cf->virt + cf->length));
++#endif
++              return 0;
++      default:
++              DRM_ERROR("Invalid cflush type 0x%x\n", cf->type);
++              return -EINVAL;
++      }
++}
++
++static int
++pvr2d_open(struct inode *inode, struct file *filp)
++{
++      int ret;
++      struct pvr2d_file *pvr2d_fpriv;
++      struct drm_file *file_priv;
++
++      pvr2d_fpriv = kmalloc(sizeof(*pvr2d_fpriv), GFP_KERNEL);
++      if (unlikely(pvr2d_fpriv == NULL))
++              return -ENOMEM;
++
++      pvr2d_fpriv->lock = SPIN_LOCK_UNLOCKED;
++      INIT_LIST_HEAD(&pvr2d_fpriv->ref_list);
++      idr_init(&pvr2d_fpriv->buf_idr);
++
++      ret = drm_open(inode, filp);
++
++      if (unlikely(ret != 0)) {
++              idr_destroy(&pvr2d_fpriv->buf_idr);
++              kfree(pvr2d_fpriv);
++              return ret;
++      }
++
++      file_priv = filp->private_data;
++      file_priv->driver_priv = pvr2d_fpriv;
++
++      DRM_DEBUG("pvr2d_open completed successfully.\n");
++      return 0;
++};
++
++
++static int
++pvr2d_release(struct inode *inode, struct file *filp)
++{
++      struct drm_file *file_priv = filp->private_data;
++      struct drm_device *dev = file_priv->minor->dev;
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++      struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv);
++      struct pvr2d_buf *buf;
++      struct pvr2d_ref *ref, *next;
++
++      /*
++       * At this point we're the only user of the list, so
++       * it should be safe to release the file lock whenever we want to.
++       */
++
++      spin_lock(&pvr2d_fpriv->lock);
++
++      list_for_each_entry_safe(ref, next, &pvr2d_fpriv->ref_list,
++                               head) {
++              list_del(&ref->head);
++              buf = ref->buf;
++              kfree(ref);
++              spin_unlock(&pvr2d_fpriv->lock);
++              write_lock(&dev_priv->hash_lock);
++              kref_put(&buf->kref, &pvr2d_release_buf);
++              write_unlock(&dev_priv->hash_lock);
++              spin_lock(&pvr2d_fpriv->lock);
++      }
++
++      idr_remove_all(&pvr2d_fpriv->buf_idr);
++      idr_destroy(&pvr2d_fpriv->buf_idr);
++      spin_unlock(&pvr2d_fpriv->lock);
++
++      kfree(pvr2d_fpriv);
++
++      DRM_DEBUG("pvr2d_release calling drm_release.\n");
++      return drm_release(inode, filp);
++}
++
++static int pvr2d_load(struct drm_device *dev, unsigned long chipset)
++{
++      struct pvr2d_dev *dev_priv;
++      int ret;
++
++      dev_priv = kmalloc(sizeof(*dev_priv), GFP_KERNEL);
++      if (unlikely(dev_priv == NULL))
++              return -ENOMEM;
++
++      rwlock_init(&dev_priv->hash_lock);
++      ret = drm_ht_create(&dev_priv->shmem_hash,
++                         PVR2D_SHMEM_HASH_ORDER);
++
++      if (unlikely(ret != 0))
++              goto out_err0;
++
++      dev->dev_private = dev_priv;
++
++      DRM_DEBUG("pvr2d_load completed successfully.\n");
++      return 0;
++out_err0:
++      kfree(dev_priv);
++      return ret;
++}
++
++
++static int pvr2d_unload(struct drm_device *dev)
++{
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++
++      drm_ht_remove(&dev_priv->shmem_hash);
++      kfree(dev_priv);
++      DRM_DEBUG("pvr2d_unload completed successfully.\n");
++      return 0;
++}
++
++static struct pci_device_id pciidlist[] = {
++      pvr2d_PCI_IDS
++};
++
++struct drm_ioctl_desc pvr2d_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_PVR2D_VIRT2PHYS, pvr2d_virt2phys, 0),
++      DRM_IOCTL_DEF(DRM_PVR2D_BUF_RELEASE, pvr2d_buf_release, 0),
++      DRM_IOCTL_DEF(DRM_PVR2D_CFLUSH, pvr2d_cflush, 0)
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_MTRR,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = pvr2d_ioctls,
++      .num_ioctls = DRM_ARRAY_SIZE(pvr2d_ioctls),
++      .load = pvr2d_load,
++      .unload = pvr2d_unload,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = pvr2d_open,
++              .release = pvr2d_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init pvr2d_init(void)
++{
++#ifdef CONFIG_PCI
++      return drm_init(&driver, pciidlist);
++#else
++      return drm_get_dev(NULL, NULL, &driver);
++#endif
++}
++
++static void __exit pvr2d_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(pvr2d_init);
++module_exit(pvr2d_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drv.h git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.h
+--- git/drivers/gpu/drm-tungsten/pvr2d_drv.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,19 @@
++/* -*- linux-c -*- */
++
++#ifndef __PVR2D_H__
++#define __PVR2D_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Tungsten Graphics Inc."
++
++#define DRIVER_NAME           "pvr2d"
++#define DRIVER_DESC           "PVR2D kernel helper"
++#define DRIVER_DATE           "20080811"
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     0
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_cce.c git-nokia/drivers/gpu/drm-tungsten/r128_cce.c
+--- git/drivers/gpu/drm-tungsten/r128_cce.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_cce.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,933 @@
++/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
++ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
++ */
++/*
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++#define R128_FIFO_DEBUG               0
++
++/* CCE microcode (from ATI) */
++static u32 r128_cce_microcode[] = {
++      0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
++      1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
++      599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
++      11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
++      262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
++      1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
++      30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
++      1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
++      15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
++      12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
++      46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
++      459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
++      18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
++      15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
++      268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
++      15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
++      1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
++      3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
++      1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
++      15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
++      180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
++      114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
++      33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
++      1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
++      14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
++      1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
++      198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
++      114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
++      1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
++      1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
++      16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
++      174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
++      33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
++      33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
++      409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++};
++
++static int R128_READ_PLL(struct drm_device * dev, int addr)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++
++      R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
++      return R128_READ(R128_CLOCK_CNTL_DATA);
++}
++
++#if R128_FIFO_DEBUG
++static void r128_status(drm_r128_private_t * dev_priv)
++{
++      printk("GUI_STAT           = 0x%08x\n",
++             (unsigned int)R128_READ(R128_GUI_STAT));
++      printk("PM4_STAT           = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_STAT));
++      printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
++      printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
++      printk("PM4_MICRO_CNTL     = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
++      printk("PM4_BUFFER_CNTL    = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
++}
++#endif
++
++/* ================================================================
++ * Engine, FIFO control
++ */
++
++static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
++{
++      u32 tmp;
++      int i;
++
++      tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
++      R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
++{
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
++              if (slots >= entries)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
++{
++      int i, ret;
++
++      ret = r128_do_wait_for_fifo(dev_priv, 64);
++      if (ret)
++              return ret;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
++                      r128_do_pixcache_flush(dev_priv);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++/* ================================================================
++ * CCE control, initialization
++ */
++
++/* Load the microcode for the CCE */
++static void r128_cce_load_microcode(drm_r128_private_t * dev_priv)
++{
++      int i;
++
++      DRM_DEBUG("\n");
++
++      r128_do_wait_for_idle(dev_priv);
++
++      R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
++      for (i = 0; i < 256; i++) {
++              R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
++              R128_WRITE(R128_PM4_MICROCODE_DATAL,
++                         r128_cce_microcode[i * 2 + 1]);
++      }
++}
++
++/* Flush any pending commands to the CCE.  This should only be used just
++ * prior to a wait for idle, as it informs the engine that the command
++ * stream is ending.
++ */
++static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
++{
++      u32 tmp;
++
++      tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
++      R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
++}
++
++/* Wait for the CCE to go idle.
++ */
++int r128_do_cce_idle(drm_r128_private_t * dev_priv)
++{
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
++                      int pm4stat = R128_READ(R128_PM4_STAT);
++                      if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
++                           dev_priv->cce_fifo_size) &&
++                          !(pm4stat & (R128_PM4_BUSY |
++                                       R128_PM4_GUI_ACTIVE))) {
++                              return r128_do_pixcache_flush(dev_priv);
++                      }
++              }
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      r128_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++/* Start the Concurrent Command Engine.
++ */
++static void r128_do_cce_start(drm_r128_private_t * dev_priv)
++{
++      r128_do_wait_for_idle(dev_priv);
++
++      R128_WRITE(R128_PM4_BUFFER_CNTL,
++                 dev_priv->cce_mode | dev_priv->ring.size_l2qw
++                 | R128_PM4_BUFFER_CNTL_NOUPDATE);
++      R128_READ(R128_PM4_BUFFER_ADDR);        /* as per the sample code */
++      R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
++
++      dev_priv->cce_running = 1;
++}
++
++/* Reset the Concurrent Command Engine.  This will not flush any pending
++ * commands, so you must wait for the CCE command stream to complete
++ * before calling this routine.
++ */
++static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
++{
++      R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
++      R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
++      dev_priv->ring.tail = 0;
++}
++
++/* Stop the Concurrent Command Engine.  This will not flush any pending
++ * commands, so you must flush the command stream and wait for the CCE
++ * to go idle before calling this routine.
++ */
++static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
++{
++      R128_WRITE(R128_PM4_MICRO_CNTL, 0);
++      R128_WRITE(R128_PM4_BUFFER_CNTL,
++                 R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
++
++      dev_priv->cce_running = 0;
++}
++
++/* Reset the engine.  This will stop the CCE if it is running.
++ */
++static int r128_do_engine_reset(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
++
++      r128_do_pixcache_flush(dev_priv);
++
++      clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
++      mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
++
++      R128_WRITE_PLL(R128_MCLK_CNTL,
++                     mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
++
++      gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
++
++      /* Taken from the sample code - do not change */
++      R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
++      R128_READ(R128_GEN_RESET_CNTL);
++      R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
++      R128_READ(R128_GEN_RESET_CNTL);
++
++      R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
++      R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
++      R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
++
++      /* Reset the CCE ring */
++      r128_do_cce_reset(dev_priv);
++
++      /* The CCE is no longer running after an engine reset */
++      dev_priv->cce_running = 0;
++
++      /* Reset any pending vertex, indirect buffers */
++      r128_freelist_reset(dev);
++
++      return 0;
++}
++
++static void r128_cce_init_ring_buffer(struct drm_device * dev,
++                                    drm_r128_private_t * dev_priv)
++{
++      u32 ring_start;
++      u32 tmp;
++
++      DRM_DEBUG("\n");
++
++      /* The manual (p. 2) says this address is in "VM space".  This
++       * means it's an offset from the start of AGP space.
++       */
++#if __OS_HAS_AGP
++      if (!dev_priv->is_pci)
++              ring_start = dev_priv->cce_ring->offset - dev->agp->base;
++      else
++#endif
++              ring_start = dev_priv->cce_ring->offset -
++                              (unsigned long)dev->sg->virtual;
++
++      R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
++
++      R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
++      R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
++
++      /* Set watermark control */
++      R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
++                 ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
++                 | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
++                 | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
++                 | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
++
++      /* Force read.  Why?  Because it's in the examples... */
++      R128_READ(R128_PM4_BUFFER_ADDR);
++
++      /* Turn on bus mastering */
++      tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
++      R128_WRITE(R128_BUS_CNTL, tmp);
++}
++
++static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
++{
++      drm_r128_private_t *dev_priv;
++
++      DRM_DEBUG("\n");
++
++      dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_r128_private_t));
++
++      dev_priv->is_pci = init->is_pci;
++
++      if (dev_priv->is_pci && !dev->sg) {
++              DRM_ERROR("PCI GART memory not allocated!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->usec_timeout = init->usec_timeout;
++      if (dev_priv->usec_timeout < 1 ||
++          dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
++              DRM_DEBUG("TIMEOUT problem!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->cce_mode = init->cce_mode;
++
++      /* GH: Simple idle check.
++       */
++      atomic_set(&dev_priv->idle_count, 0);
++
++      /* We don't support anything other than bus-mastering ring mode,
++       * but the ring can be in either AGP or PCI space for the ring
++       * read pointer.
++       */
++      if ((init->cce_mode != R128_PM4_192BM) &&
++          (init->cce_mode != R128_PM4_128BM_64INDBM) &&
++          (init->cce_mode != R128_PM4_64BM_128INDBM) &&
++          (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
++              DRM_DEBUG("Bad cce_mode!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      switch (init->cce_mode) {
++      case R128_PM4_NONPM4:
++              dev_priv->cce_fifo_size = 0;
++              break;
++      case R128_PM4_192PIO:
++      case R128_PM4_192BM:
++              dev_priv->cce_fifo_size = 192;
++              break;
++      case R128_PM4_128PIO_64INDBM:
++      case R128_PM4_128BM_64INDBM:
++              dev_priv->cce_fifo_size = 128;
++              break;
++      case R128_PM4_64PIO_128INDBM:
++      case R128_PM4_64BM_128INDBM:
++      case R128_PM4_64PIO_64VCBM_64INDBM:
++      case R128_PM4_64BM_64VCBM_64INDBM:
++      case R128_PM4_64PIO_64VCPIO_64INDPIO:
++              dev_priv->cce_fifo_size = 64;
++              break;
++      }
++
++      switch (init->fb_bpp) {
++      case 16:
++              dev_priv->color_fmt = R128_DATATYPE_RGB565;
++              break;
++      case 32:
++      default:
++              dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
++              break;
++      }
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      switch (init->depth_bpp) {
++      case 16:
++              dev_priv->depth_fmt = R128_DATATYPE_RGB565;
++              break;
++      case 24:
++      case 32:
++      default:
++              dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
++              break;
++      }
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++      dev_priv->span_offset = init->span_offset;
++
++      dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
++                                        (dev_priv->front_offset >> 5));
++      dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
++                                       (dev_priv->back_offset >> 5));
++      dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
++                                        (dev_priv->depth_offset >> 5) |
++                                        R128_DST_TILE);
++      dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
++                                       (dev_priv->span_offset >> 5));
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio) {
++              DRM_ERROR("could not find mmio region!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++      dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
++      if (!dev_priv->cce_ring) {
++              DRM_ERROR("could not find cce ring region!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++      dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
++      if (!dev_priv->ring_rptr) {
++              DRM_ERROR("could not find ring read pointer!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++      dev->agp_buffer_token = init->buffers_offset;
++      dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
++      if (!dev->agp_buffer_map) {
++              DRM_ERROR("could not find dma buffer region!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      if (!dev_priv->is_pci) {
++              dev_priv->agp_textures =
++                  drm_core_findmap(dev, init->agp_textures_offset);
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("could not find agp texture region!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      r128_do_cleanup_cce(dev);
++                      return -EINVAL;
++              }
++      }
++
++      dev_priv->sarea_priv =
++          (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                                init->sarea_priv_offset);
++
++#if __OS_HAS_AGP
++      if (!dev_priv->is_pci) {
++              drm_core_ioremap(dev_priv->cce_ring, dev);
++              drm_core_ioremap(dev_priv->ring_rptr, dev);
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev_priv->cce_ring->handle ||
++                  !dev_priv->ring_rptr->handle ||
++                  !dev->agp_buffer_map->handle) {
++                      DRM_ERROR("Could not ioremap agp regions!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      r128_do_cleanup_cce(dev);
++                      return -ENOMEM;
++              }
++      } else
++#endif
++      {
++              dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
++              dev_priv->ring_rptr->handle =
++                  (void *)dev_priv->ring_rptr->offset;
++              dev->agp_buffer_map->handle =
++                  (void *)dev->agp_buffer_map->offset;
++      }
++
++#if __OS_HAS_AGP
++      if (!dev_priv->is_pci)
++              dev_priv->cce_buffers_offset = dev->agp->base;
++      else
++#endif
++              dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
++
++      dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
++      dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
++                            + init->ring_size / sizeof(u32));
++      dev_priv->ring.size = init->ring_size;
++      dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
++
++      dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
++
++      dev_priv->ring.high_mark = 128;
++
++      dev_priv->sarea_priv->last_frame = 0;
++      R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
++
++      dev_priv->sarea_priv->last_dispatch = 0;
++      R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
++
++#if __OS_HAS_AGP
++      if (dev_priv->is_pci) {
++#endif
++              dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
++              dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
++              dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
++              dev_priv->gart_info.addr = NULL;
++              dev_priv->gart_info.bus_addr = 0;
++              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++              if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
++                      DRM_ERROR("failed to init PCI GART!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      r128_do_cleanup_cce(dev);
++                      return -ENOMEM;
++              }
++              R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
++#if __OS_HAS_AGP
++      }
++#endif
++
++      r128_cce_init_ring_buffer(dev, dev_priv);
++      r128_cce_load_microcode(dev_priv);
++
++      dev->dev_private = (void *)dev_priv;
++
++      r128_do_engine_reset(dev);
++
++      return 0;
++}
++
++int r128_do_cleanup_cce(struct drm_device * dev)
++{
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              drm_r128_private_t *dev_priv = dev->dev_private;
++
++#if __OS_HAS_AGP
++              if (!dev_priv->is_pci) {
++                      if (dev_priv->cce_ring != NULL)
++                              drm_core_ioremapfree(dev_priv->cce_ring, dev);
++                      if (dev_priv->ring_rptr != NULL)
++                              drm_core_ioremapfree(dev_priv->ring_rptr, dev);
++                      if (dev->agp_buffer_map != NULL) {
++                              drm_core_ioremapfree(dev->agp_buffer_map, dev);
++                              dev->agp_buffer_map = NULL;
++                      }
++              } else
++#endif
++              {
++                      if (dev_priv->gart_info.bus_addr)
++                              if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
++                                      DRM_ERROR("failed to cleanup PCI GART!\n");
++              }
++
++              drm_free(dev->dev_private, sizeof(drm_r128_private_t),
++                       DRM_MEM_DRIVER);
++              dev->dev_private = NULL;
++      }
++
++      return 0;
++}
++
++int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_init_t *init = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case R128_INIT_CCE:
++              return r128_do_init_cce(dev, init);
++      case R128_CLEANUP_CCE:
++              return r128_do_cleanup_cce(dev);
++      }
++
++      return -EINVAL;
++}
++
++int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
++              DRM_DEBUG("while CCE running\n");
++              return 0;
++      }
++
++      r128_do_cce_start(dev_priv);
++
++      return 0;
++}
++
++/* Stop the CCE.  The engine must have been idled before calling this
++ * routine.
++ */
++int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_cce_stop_t *stop = data;
++      int ret;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Flush any pending CCE commands.  This ensures any outstanding
++ * commands are executed by the engine before we turn it off.
++       */
++      if (stop->flush) {
++              r128_do_cce_flush(dev_priv);
++      }
++
++      /* If we fail to make the engine go idle, we return an error
++       * code so that the DRM ioctl wrapper can try again.
++       */
++      if (stop->idle) {
++              ret = r128_do_cce_idle(dev_priv);
++              if (ret)
++                      return ret;
++      }
++
++      /* Finally, we can turn off the CCE.  If the engine isn't idle,
++       * we will get some dropped triangles as they won't be fully
++       * rendered before the CCE is shut down.
++       */
++      r128_do_cce_stop(dev_priv);
++
++      /* Reset the engine */
++      r128_do_engine_reset(dev);
++
++      return 0;
++}
++
++/* Just reset the CCE ring.  Called as part of an X Server engine reset.
++ */
++int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_DEBUG("called before init done\n");
++              return -EINVAL;
++      }
++
++      r128_do_cce_reset(dev_priv);
++
++      /* The CCE is no longer running after an engine reset */
++      dev_priv->cce_running = 0;
++
++      return 0;
++}
++
++int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dev_priv->cce_running) {
++              r128_do_cce_flush(dev_priv);
++      }
++
++      return r128_do_cce_idle(dev_priv);
++}
++
++int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return r128_do_engine_reset(dev);
++}
++
++int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      return -EINVAL;
++}
++
++/* ================================================================
++ * Freelist management
++ */
++#define R128_BUFFER_USED      0xffffffff
++#define R128_BUFFER_FREE      0
++
++#if 0
++static int r128_freelist_init(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_freelist_t *entry;
++      int i;
++
++      dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
++      if (dev_priv->head == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
++      dev_priv->head->age = R128_BUFFER_USED;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++
++              entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
++              if (!entry)
++                      return -ENOMEM;
++
++              entry->age = R128_BUFFER_FREE;
++              entry->buf = buf;
++              entry->prev = dev_priv->head;
++              entry->next = dev_priv->head->next;
++              if (!entry->next)
++                      dev_priv->tail = entry;
++
++              buf_priv->discard = 0;
++              buf_priv->dispatched = 0;
++              buf_priv->list_entry = entry;
++
++              dev_priv->head->next = entry;
++
++              if (dev_priv->head->next)
++                      dev_priv->head->next->prev = entry;
++      }
++
++      return 0;
++
++}
++#endif
++
++static struct drm_buf *r128_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv;
++      struct drm_buf *buf;
++      int i, t;
++
++      /* FIXME: Optimize -- use freelist code */
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++              if (buf->file_priv == 0)
++                      return buf;
++      }
++
++      for (t = 0; t < dev_priv->usec_timeout; t++) {
++              u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
++
++              for (i = 0; i < dma->buf_count; i++) {
++                      buf = dma->buflist[i];
++                      buf_priv = buf->dev_private;
++                      if (buf->pending && buf_priv->age <= done_age) {
++                              /* The buffer has been processed, so it
++                               * can now be used.
++                               */
++                              buf->pending = 0;
++                              return buf;
++                      }
++              }
++              DRM_UDELAY(1);
++      }
++
++      DRM_DEBUG("returning NULL!\n");
++      return NULL;
++}
++
++void r128_freelist_reset(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++              buf_priv->age = 0;
++      }
++}
++
++/* ================================================================
++ * CCE command submission
++ */
++
++int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
++{
++      drm_r128_ring_buffer_t *ring = &dev_priv->ring;
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              r128_update_ring_snapshot(dev_priv);
++              if (ring->space >= n)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++      /* FIXME: This is being ignored... */
++      DRM_ERROR("failed!\n");
++      return -EBUSY;
++}
++
++static int r128_cce_get_buffers(struct drm_device * dev,
++                              struct drm_file *file_priv,
++                              struct drm_dma * d)
++{
++      int i;
++      struct drm_buf *buf;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = r128_freelist_get(dev);
++              if (!buf)
++                      return -EAGAIN;
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
++                                   sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
++                                   sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int ret = 0;
++      struct drm_dma *d = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = r128_cce_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_drm.h git-nokia/drivers/gpu/drm-tungsten/r128_drm.h
+--- git/drivers/gpu/drm-tungsten/r128_drm.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_drm.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,326 @@
++/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
++ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
++ */
++/*
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Kevin E. Martin <martin@valinux.com>
++ */
++
++#ifndef __R128_DRM_H__
++#define __R128_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the X server file (r128_sarea.h)
++ */
++#ifndef __R128_SAREA_DEFINES__
++#define __R128_SAREA_DEFINES__
++
++/* What needs to be changed for the current vertex buffer?
++ */
++#define R128_UPLOAD_CONTEXT           0x001
++#define R128_UPLOAD_SETUP             0x002
++#define R128_UPLOAD_TEX0              0x004
++#define R128_UPLOAD_TEX1              0x008
++#define R128_UPLOAD_TEX0IMAGES                0x010
++#define R128_UPLOAD_TEX1IMAGES                0x020
++#define R128_UPLOAD_CORE              0x040
++#define R128_UPLOAD_MASKS             0x080
++#define R128_UPLOAD_WINDOW            0x100
++#define R128_UPLOAD_CLIPRECTS         0x200   /* handled client-side */
++#define R128_REQUIRE_QUIESCENCE               0x400
++#define R128_UPLOAD_ALL                       0x7ff
++
++#define R128_FRONT                    0x1
++#define R128_BACK                     0x2
++#define R128_DEPTH                    0x4
++
++/* Primitive types
++ */
++#define R128_POINTS                   0x1
++#define R128_LINES                    0x2
++#define R128_LINE_STRIP                       0x3
++#define R128_TRIANGLES                        0x4
++#define R128_TRIANGLE_FAN             0x5
++#define R128_TRIANGLE_STRIP           0x6
++
++/* Vertex/indirect buffer size
++ */
++#define R128_BUFFER_SIZE              16384
++
++/* Byte offsets for indirect buffer data
++ */
++#define R128_INDEX_PRIM_OFFSET                20
++#define R128_HOSTDATA_BLIT_OFFSET     32
++
++/* Keep these small for testing.
++ */
++#define R128_NR_SAREA_CLIPRECTS               12
++
++/* There are 2 heaps (local/AGP).  Each region within a heap is a
++ *  minimum of 64k, and there are at most 64 of them per heap.
++ */
++#define R128_LOCAL_TEX_HEAP           0
++#define R128_AGP_TEX_HEAP             1
++#define R128_NR_TEX_HEAPS             2
++#define R128_NR_TEX_REGIONS           64
++#define R128_LOG_TEX_GRANULARITY      16
++
++#define R128_NR_CONTEXT_REGS          12
++
++#define R128_MAX_TEXTURE_LEVELS               11
++#define R128_MAX_TEXTURE_UNITS                2
++
++#endif                                /* __R128_SAREA_DEFINES__ */
++
++typedef struct {
++      /* Context state - can be written in one large chunk */
++      unsigned int dst_pitch_offset_c;
++      unsigned int dp_gui_master_cntl_c;
++      unsigned int sc_top_left_c;
++      unsigned int sc_bottom_right_c;
++      unsigned int z_offset_c;
++      unsigned int z_pitch_c;
++      unsigned int z_sten_cntl_c;
++      unsigned int tex_cntl_c;
++      unsigned int misc_3d_state_cntl_reg;
++      unsigned int texture_clr_cmp_clr_c;
++      unsigned int texture_clr_cmp_msk_c;
++      unsigned int fog_color_c;
++
++      /* Texture state */
++      unsigned int tex_size_pitch_c;
++      unsigned int constant_color_c;
++
++      /* Setup state */
++      unsigned int pm4_vc_fpu_setup;
++      unsigned int setup_cntl;
++
++      /* Mask state */
++      unsigned int dp_write_mask;
++      unsigned int sten_ref_mask_c;
++      unsigned int plane_3d_mask_c;
++
++      /* Window state */
++      unsigned int window_xy_offset;
++
++      /* Core state */
++      unsigned int scale_3d_cntl;
++} drm_r128_context_regs_t;
++
++/* Setup registers for each texture unit
++ */
++typedef struct {
++      unsigned int tex_cntl;
++      unsigned int tex_combine_cntl;
++      unsigned int tex_size_pitch;
++      unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
++      unsigned int tex_border_color;
++} drm_r128_texture_regs_t;
++
++typedef struct drm_r128_sarea {
++      /* The channel for communication of state information to the kernel
++       * on firing a vertex buffer.
++       */
++      drm_r128_context_regs_t context_state;
++      drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
++      unsigned int dirty;
++      unsigned int vertsize;
++      unsigned int vc_format;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Counters for client-side throttling of rendering clients.
++       */
++      unsigned int last_frame;
++      unsigned int last_dispatch;
++
++      struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
++      unsigned int tex_age[R128_NR_TEX_HEAPS];
++      int ctx_owner;
++      int pfAllowPageFlip;    /* number of 3d windows (0,1,2 or more) */
++      int pfCurrentPage;      /* which buffer is being displayed? */
++} drm_r128_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (xf86drmR128.h)
++ */
++
++/* Rage 128 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_R128_INIT       0x00
++#define DRM_R128_CCE_START  0x01
++#define DRM_R128_CCE_STOP   0x02
++#define DRM_R128_CCE_RESET  0x03
++#define DRM_R128_CCE_IDLE   0x04
++/* 0x05 not used */
++#define DRM_R128_RESET      0x06
++#define DRM_R128_SWAP       0x07
++#define DRM_R128_CLEAR      0x08
++#define DRM_R128_VERTEX     0x09
++#define DRM_R128_INDICES    0x0a
++#define DRM_R128_BLIT       0x0b
++#define DRM_R128_DEPTH      0x0c
++#define DRM_R128_STIPPLE    0x0d
++/* 0x0e not used */
++#define DRM_R128_INDIRECT   0x0f
++#define DRM_R128_FULLSCREEN 0x10
++#define DRM_R128_CLEAR2     0x11
++#define DRM_R128_GETPARAM   0x12
++#define DRM_R128_FLIP       0x13
++
++#define DRM_IOCTL_R128_INIT       DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
++#define DRM_IOCTL_R128_CCE_START  DRM_IO(  DRM_COMMAND_BASE + DRM_R128_CCE_START)
++#define DRM_IOCTL_R128_CCE_STOP   DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
++#define DRM_IOCTL_R128_CCE_RESET  DRM_IO(  DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
++#define DRM_IOCTL_R128_CCE_IDLE   DRM_IO(  DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
++/* 0x05 not used */
++#define DRM_IOCTL_R128_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_R128_RESET)
++#define DRM_IOCTL_R128_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_R128_SWAP)
++#define DRM_IOCTL_R128_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
++#define DRM_IOCTL_R128_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
++#define DRM_IOCTL_R128_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
++#define DRM_IOCTL_R128_BLIT       DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
++#define DRM_IOCTL_R128_DEPTH      DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
++#define DRM_IOCTL_R128_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
++/* 0x0e not used */
++#define DRM_IOCTL_R128_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
++#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
++#define DRM_IOCTL_R128_CLEAR2     DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
++#define DRM_IOCTL_R128_GETPARAM   DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
++#define DRM_IOCTL_R128_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_R128_FLIP)
++
++typedef struct drm_r128_init {
++      enum {
++              R128_INIT_CCE = 0x01,
++              R128_CLEANUP_CCE = 0x02
++      } func;
++      unsigned long sarea_priv_offset;
++      int is_pci;
++      int cce_mode;
++      int cce_secure;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++      unsigned int span_offset;
++
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long ring_offset;
++      unsigned long ring_rptr_offset;
++      unsigned long buffers_offset;
++      unsigned long agp_textures_offset;
++} drm_r128_init_t;
++
++typedef struct drm_r128_cce_stop {
++      int flush;
++      int idle;
++} drm_r128_cce_stop_t;
++
++typedef struct drm_r128_clear {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;
++} drm_r128_clear_t;
++
++typedef struct drm_r128_vertex {
++      int prim;
++      int idx;                /* Index of vertex buffer */
++      int count;              /* Number of vertices in buffer */
++      int discard;            /* Client finished with buffer? */
++} drm_r128_vertex_t;
++
++typedef struct drm_r128_indices {
++      int prim;
++      int idx;
++      int start;
++      int end;
++      int discard;            /* Client finished with buffer? */
++} drm_r128_indices_t;
++
++typedef struct drm_r128_blit {
++      int idx;
++      int pitch;
++      int offset;
++      int format;
++      unsigned short x, y;
++      unsigned short width, height;
++} drm_r128_blit_t;
++
++typedef struct drm_r128_depth {
++      enum {
++              R128_WRITE_SPAN = 0x01,
++              R128_WRITE_PIXELS = 0x02,
++              R128_READ_SPAN = 0x03,
++              R128_READ_PIXELS = 0x04
++      } func;
++      int n;
++      int __user *x;
++      int __user *y;
++      unsigned int __user *buffer;
++      unsigned char __user *mask;
++} drm_r128_depth_t;
++
++typedef struct drm_r128_stipple {
++      unsigned int __user *mask;
++} drm_r128_stipple_t;
++
++typedef struct drm_r128_indirect {
++      int idx;
++      int start;
++      int end;
++      int discard;
++} drm_r128_indirect_t;
++
++typedef struct drm_r128_fullscreen {
++      enum {
++              R128_INIT_FULLSCREEN = 0x01,
++              R128_CLEANUP_FULLSCREEN = 0x02
++      } func;
++} drm_r128_fullscreen_t;
++
++/* 2.3: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define R128_PARAM_IRQ_NR            1
++
++typedef struct drm_r128_getparam {
++      int param;
++      void __user *value;
++} drm_r128_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_drv.c git-nokia/drivers/gpu/drm-tungsten/r128_drv.c
+--- git/drivers/gpu/drm-tungsten/r128_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,113 @@
++/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
++ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      r128_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
++          DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .dev_priv_size = sizeof(drm_r128_buf_priv_t),
++      .preclose = r128_driver_preclose,
++      .lastclose = r128_driver_lastclose,
++      .get_vblank_counter = r128_get_vblank_counter,
++      .enable_vblank = r128_enable_vblank,
++      .disable_vblank = r128_disable_vblank,
++      .irq_preinstall = r128_driver_irq_preinstall,
++      .irq_postinstall = r128_driver_irq_postinstall,
++      .irq_uninstall = r128_driver_irq_uninstall,
++      .irq_handler = r128_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = r128_ioctls,
++      .dma_ioctl = r128_cce_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = r128_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init r128_init(void)
++{
++      driver.num_ioctls = r128_max_ioctl;
++
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit r128_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(r128_init);
++module_exit(r128_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_drv.h git-nokia/drivers/gpu/drm-tungsten/r128_drv.h
+--- git/drivers/gpu/drm-tungsten/r128_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,525 @@
++/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
++ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
++ */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Michel Dänzer <daenzerm@student.ethz.ch>
++ */
++
++#ifndef __R128_DRV_H__
++#define __R128_DRV_H__
++
++/* General customization:
++ */
++#define DRIVER_AUTHOR         "Gareth Hughes, VA Linux Systems Inc."
++
++#define DRIVER_NAME           "r128"
++#define DRIVER_DESC           "ATI Rage 128"
++#define DRIVER_DATE           "20030725"
++
++/* Interface history:
++ *
++ * ??  - ??
++ * 2.4 - Add support for ycbcr textures (no new ioctls)
++ * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
++ */
++#define DRIVER_MAJOR          2
++#define DRIVER_MINOR          5
++#define DRIVER_PATCHLEVEL     0
++
++#define GET_RING_HEAD(dev_priv)               R128_READ( R128_PM4_BUFFER_DL_RPTR )
++
++typedef struct drm_r128_freelist {
++      unsigned int age;
++      struct drm_buf *buf;
++      struct drm_r128_freelist *next;
++      struct drm_r128_freelist *prev;
++} drm_r128_freelist_t;
++
++typedef struct drm_r128_ring_buffer {
++      u32 *start;
++      u32 *end;
++      int size;
++      int size_l2qw;
++
++      u32 tail;
++      u32 tail_mask;
++      int space;
++
++      int high_mark;
++} drm_r128_ring_buffer_t;
++
++typedef struct drm_r128_private {
++      drm_r128_ring_buffer_t ring;
++      drm_r128_sarea_t *sarea_priv;
++
++      int cce_mode;
++      int cce_fifo_size;
++      int cce_running;
++
++      drm_r128_freelist_t *head;
++      drm_r128_freelist_t *tail;
++
++      int usec_timeout;
++      int is_pci;
++      unsigned long cce_buffers_offset;
++
++      atomic_t idle_count;
++
++      int page_flipping;
++      int current_page;
++      u32 crtc_offset;
++      u32 crtc_offset_cntl;
++
++      atomic_t vbl_received;
++
++      u32 color_fmt;
++      unsigned int front_offset;
++      unsigned int front_pitch;
++      unsigned int back_offset;
++      unsigned int back_pitch;
++
++      u32 depth_fmt;
++      unsigned int depth_offset;
++      unsigned int depth_pitch;
++      unsigned int span_offset;
++
++      u32 front_pitch_offset_c;
++      u32 back_pitch_offset_c;
++      u32 depth_pitch_offset_c;
++      u32 span_pitch_offset_c;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *cce_ring;
++      drm_local_map_t *ring_rptr;
++      drm_local_map_t *agp_textures;
++      struct drm_ati_pcigart_info gart_info;
++} drm_r128_private_t;
++
++typedef struct drm_r128_buf_priv {
++      u32 age;
++      int prim;
++      int discard;
++      int dispatched;
++      drm_r128_freelist_t *list_entry;
++} drm_r128_buf_priv_t;
++
++extern struct drm_ioctl_desc r128_ioctls[];
++extern int r128_max_ioctl;
++
++                              /* r128_cce.c */
++extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
++
++extern void r128_freelist_reset(struct drm_device * dev);
++
++extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
++
++extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
++extern int r128_do_cleanup_cce(struct drm_device * dev);
++
++extern int r128_enable_vblank(struct drm_device *dev, int crtc);
++extern void r128_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
++extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
++extern void r128_driver_irq_preinstall(struct drm_device * dev);
++extern int r128_driver_irq_postinstall(struct drm_device * dev);
++extern void r128_driver_irq_uninstall(struct drm_device * dev);
++extern void r128_driver_lastclose(struct drm_device * dev);
++extern void r128_driver_preclose(struct drm_device * dev,
++                               struct drm_file *file_priv);
++
++extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
++                            unsigned long arg);
++
++/* Register definitions, register access macros and drmAddMap constants
++ * for Rage 128 kernel driver.
++ */
++
++#define R128_AUX_SC_CNTL              0x1660
++#     define R128_AUX1_SC_EN                  (1 << 0)
++#     define R128_AUX1_SC_MODE_OR             (0 << 1)
++#     define R128_AUX1_SC_MODE_NAND           (1 << 1)
++#     define R128_AUX2_SC_EN                  (1 << 2)
++#     define R128_AUX2_SC_MODE_OR             (0 << 3)
++#     define R128_AUX2_SC_MODE_NAND           (1 << 3)
++#     define R128_AUX3_SC_EN                  (1 << 4)
++#     define R128_AUX3_SC_MODE_OR             (0 << 5)
++#     define R128_AUX3_SC_MODE_NAND           (1 << 5)
++#define R128_AUX1_SC_LEFT             0x1664
++#define R128_AUX1_SC_RIGHT            0x1668
++#define R128_AUX1_SC_TOP              0x166c
++#define R128_AUX1_SC_BOTTOM           0x1670
++#define R128_AUX2_SC_LEFT             0x1674
++#define R128_AUX2_SC_RIGHT            0x1678
++#define R128_AUX2_SC_TOP              0x167c
++#define R128_AUX2_SC_BOTTOM           0x1680
++#define R128_AUX3_SC_LEFT             0x1684
++#define R128_AUX3_SC_RIGHT            0x1688
++#define R128_AUX3_SC_TOP              0x168c
++#define R128_AUX3_SC_BOTTOM           0x1690
++
++#define R128_BRUSH_DATA0              0x1480
++#define R128_BUS_CNTL                 0x0030
++#     define R128_BUS_MASTER_DIS              (1 << 6)
++
++#define R128_CLOCK_CNTL_INDEX         0x0008
++#define R128_CLOCK_CNTL_DATA          0x000c
++#     define R128_PLL_WR_EN                   (1 << 7)
++#define R128_CONSTANT_COLOR_C         0x1d34
++#define R128_CRTC_OFFSET              0x0224
++#define R128_CRTC_OFFSET_CNTL         0x0228
++#     define R128_CRTC_OFFSET_FLIP_CNTL       (1 << 16)
++
++#define R128_DP_GUI_MASTER_CNTL               0x146c
++#       define R128_GMC_SRC_PITCH_OFFSET_CNTL (1    <<  0)
++#       define R128_GMC_DST_PITCH_OFFSET_CNTL (1    <<  1)
++#     define R128_GMC_BRUSH_SOLID_COLOR       (13   <<  4)
++#     define R128_GMC_BRUSH_NONE              (15   <<  4)
++#     define R128_GMC_DST_16BPP               (4    <<  8)
++#     define R128_GMC_DST_24BPP               (5    <<  8)
++#     define R128_GMC_DST_32BPP               (6    <<  8)
++#       define R128_GMC_DST_DATATYPE_SHIFT    8
++#     define R128_GMC_SRC_DATATYPE_COLOR      (3    << 12)
++#     define R128_DP_SRC_SOURCE_MEMORY        (2    << 24)
++#     define R128_DP_SRC_SOURCE_HOST_DATA     (3    << 24)
++#     define R128_GMC_CLR_CMP_CNTL_DIS        (1    << 28)
++#     define R128_GMC_AUX_CLIP_DIS            (1    << 29)
++#     define R128_GMC_WR_MSK_DIS              (1    << 30)
++#     define R128_ROP3_S                      0x00cc0000
++#     define R128_ROP3_P                      0x00f00000
++#define R128_DP_WRITE_MASK            0x16cc
++#define R128_DST_PITCH_OFFSET_C               0x1c80
++#     define R128_DST_TILE                    (1 << 31)
++
++#define R128_GEN_INT_CNTL             0x0040
++#     define R128_CRTC_VBLANK_INT_EN          (1 <<  0)
++#define R128_GEN_INT_STATUS           0x0044
++#     define R128_CRTC_VBLANK_INT             (1 <<  0)
++#     define R128_CRTC_VBLANK_INT_AK          (1 <<  0)
++#define R128_GEN_RESET_CNTL           0x00f0
++#     define R128_SOFT_RESET_GUI              (1 <<  0)
++
++#define R128_GUI_SCRATCH_REG0         0x15e0
++#define R128_GUI_SCRATCH_REG1         0x15e4
++#define R128_GUI_SCRATCH_REG2         0x15e8
++#define R128_GUI_SCRATCH_REG3         0x15ec
++#define R128_GUI_SCRATCH_REG4         0x15f0
++#define R128_GUI_SCRATCH_REG5         0x15f4
++
++#define R128_GUI_STAT                 0x1740
++#     define R128_GUI_FIFOCNT_MASK            0x0fff
++#     define R128_GUI_ACTIVE                  (1 << 31)
++
++#define R128_MCLK_CNTL                        0x000f
++#     define R128_FORCE_GCP                   (1 << 16)
++#     define R128_FORCE_PIPE3D_CP             (1 << 17)
++#     define R128_FORCE_RCP                   (1 << 18)
++
++#define R128_PC_GUI_CTLSTAT           0x1748
++#define R128_PC_NGUI_CTLSTAT          0x0184
++#     define R128_PC_FLUSH_GUI                (3 << 0)
++#     define R128_PC_RI_GUI                   (1 << 2)
++#     define R128_PC_FLUSH_ALL                0x00ff
++#     define R128_PC_BUSY                     (1 << 31)
++
++#define R128_PCI_GART_PAGE            0x017c
++#define R128_PRIM_TEX_CNTL_C          0x1cb0
++
++#define R128_SCALE_3D_CNTL            0x1a00
++#define R128_SEC_TEX_CNTL_C           0x1d00
++#define R128_SEC_TEXTURE_BORDER_COLOR_C       0x1d3c
++#define R128_SETUP_CNTL                       0x1bc4
++#define R128_STEN_REF_MASK_C          0x1d40
++
++#define R128_TEX_CNTL_C                       0x1c9c
++#     define R128_TEX_CACHE_FLUSH             (1 << 23)
++
++#define R128_WAIT_UNTIL                       0x1720
++#     define R128_EVENT_CRTC_OFFSET           (1 << 0)
++#define R128_WINDOW_XY_OFFSET         0x1bcc
++
++/* CCE registers
++ */
++#define R128_PM4_BUFFER_OFFSET                0x0700
++#define R128_PM4_BUFFER_CNTL          0x0704
++#     define R128_PM4_MASK                    (15 << 28)
++#     define R128_PM4_NONPM4                  (0  << 28)
++#     define R128_PM4_192PIO                  (1  << 28)
++#     define R128_PM4_192BM                   (2  << 28)
++#     define R128_PM4_128PIO_64INDBM          (3  << 28)
++#     define R128_PM4_128BM_64INDBM           (4  << 28)
++#     define R128_PM4_64PIO_128INDBM          (5  << 28)
++#     define R128_PM4_64BM_128INDBM           (6  << 28)
++#     define R128_PM4_64PIO_64VCBM_64INDBM    (7  << 28)
++#     define R128_PM4_64BM_64VCBM_64INDBM     (8  << 28)
++#     define R128_PM4_64PIO_64VCPIO_64INDPIO  (15 << 28)
++#     define R128_PM4_BUFFER_CNTL_NOUPDATE    (1  << 27)
++
++#define R128_PM4_BUFFER_WM_CNTL               0x0708
++#     define R128_WMA_SHIFT                   0
++#     define R128_WMB_SHIFT                   8
++#     define R128_WMC_SHIFT                   16
++#     define R128_WB_WM_SHIFT                 24
++
++#define R128_PM4_BUFFER_DL_RPTR_ADDR  0x070c
++#define R128_PM4_BUFFER_DL_RPTR               0x0710
++#define R128_PM4_BUFFER_DL_WPTR               0x0714
++#     define R128_PM4_BUFFER_DL_DONE          (1 << 31)
++
++#define R128_PM4_VC_FPU_SETUP         0x071c
++
++#define R128_PM4_IW_INDOFF            0x0738
++#define R128_PM4_IW_INDSIZE           0x073c
++
++#define R128_PM4_STAT                 0x07b8
++#     define R128_PM4_FIFOCNT_MASK            0x0fff
++#     define R128_PM4_BUSY                    (1 << 16)
++#     define R128_PM4_GUI_ACTIVE              (1 << 31)
++
++#define R128_PM4_MICROCODE_ADDR               0x07d4
++#define R128_PM4_MICROCODE_RADDR      0x07d8
++#define R128_PM4_MICROCODE_DATAH      0x07dc
++#define R128_PM4_MICROCODE_DATAL      0x07e0
++
++#define R128_PM4_BUFFER_ADDR          0x07f0
++#define R128_PM4_MICRO_CNTL           0x07fc
++#     define R128_PM4_MICRO_FREERUN           (1 << 30)
++
++#define R128_PM4_FIFO_DATA_EVEN               0x1000
++#define R128_PM4_FIFO_DATA_ODD                0x1004
++
++/* CCE command packets
++ */
++#define R128_CCE_PACKET0              0x00000000
++#define R128_CCE_PACKET1              0x40000000
++#define R128_CCE_PACKET2              0x80000000
++#define R128_CCE_PACKET3              0xC0000000
++#     define R128_CNTL_HOSTDATA_BLT           0x00009400
++#     define R128_CNTL_PAINT_MULTI            0x00009A00
++#     define R128_CNTL_BITBLT_MULTI           0x00009B00
++#     define R128_3D_RNDR_GEN_INDX_PRIM       0x00002300
++
++#define R128_CCE_PACKET_MASK          0xC0000000
++#define R128_CCE_PACKET_COUNT_MASK    0x3fff0000
++#define R128_CCE_PACKET0_REG_MASK     0x000007ff
++#define R128_CCE_PACKET1_REG0_MASK    0x000007ff
++#define R128_CCE_PACKET1_REG1_MASK    0x003ff800
++
++#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE               0x00000000
++#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT      0x00000001
++#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE               0x00000002
++#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE  0x00000003
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST   0x00000004
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN    0x00000005
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP  0x00000006
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2  0x00000007
++#define R128_CCE_VC_CNTL_PRIM_WALK_IND                0x00000010
++#define R128_CCE_VC_CNTL_PRIM_WALK_LIST               0x00000020
++#define R128_CCE_VC_CNTL_PRIM_WALK_RING               0x00000030
++#define R128_CCE_VC_CNTL_NUM_SHIFT            16
++
++#define R128_DATATYPE_VQ              0
++#define R128_DATATYPE_CI4             1
++#define R128_DATATYPE_CI8             2
++#define R128_DATATYPE_ARGB1555                3
++#define R128_DATATYPE_RGB565          4
++#define R128_DATATYPE_RGB888          5
++#define R128_DATATYPE_ARGB8888                6
++#define R128_DATATYPE_RGB332          7
++#define R128_DATATYPE_Y8              8
++#define R128_DATATYPE_RGB8            9
++#define R128_DATATYPE_CI16            10
++#define R128_DATATYPE_YVYU422         11
++#define R128_DATATYPE_VYUY422         12
++#define R128_DATATYPE_AYUV444         14
++#define R128_DATATYPE_ARGB4444                15
++
++/* Constants */
++#define R128_AGP_OFFSET                       0x02000000
++
++#define R128_WATERMARK_L              16
++#define R128_WATERMARK_M              8
++#define R128_WATERMARK_N              8
++#define R128_WATERMARK_K              128
++
++#define R128_MAX_USEC_TIMEOUT         100000  /* 100 ms */
++
++#define R128_LAST_FRAME_REG           R128_GUI_SCRATCH_REG0
++#define R128_LAST_DISPATCH_REG                R128_GUI_SCRATCH_REG1
++#define R128_MAX_VB_AGE                       0x7fffffff
++#define R128_MAX_VB_VERTS             (0xffff)
++
++#define R128_RING_HIGH_MARK           128
++
++#define R128_PERFORMANCE_BOXES                0
++
++#define R128_PCIGART_TABLE_SIZE         32768
++
++#define R128_READ(reg)                DRM_READ32(  dev_priv->mmio, (reg) )
++#define R128_WRITE(reg,val)   DRM_WRITE32( dev_priv->mmio, (reg), (val) )
++#define R128_READ8(reg)               DRM_READ8(   dev_priv->mmio, (reg) )
++#define R128_WRITE8(reg,val)  DRM_WRITE8(  dev_priv->mmio, (reg), (val) )
++
++#define R128_WRITE_PLL(addr,val)                                      \
++do {                                                                  \
++      R128_WRITE8(R128_CLOCK_CNTL_INDEX,                              \
++                  ((addr) & 0x1f) | R128_PLL_WR_EN);                  \
++      R128_WRITE(R128_CLOCK_CNTL_DATA, (val));                        \
++} while (0)
++
++#define CCE_PACKET0( reg, n )         (R128_CCE_PACKET0 |             \
++                                       ((n) << 16) | ((reg) >> 2))
++#define CCE_PACKET1( reg0, reg1 )     (R128_CCE_PACKET1 |             \
++                                       (((reg1) >> 2) << 11) | ((reg0) >> 2))
++#define CCE_PACKET2()                 (R128_CCE_PACKET2)
++#define CCE_PACKET3( pkt, n )         (R128_CCE_PACKET3 |             \
++                                       (pkt) | ((n) << 16))
++
++static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
++{
++      drm_r128_ring_buffer_t *ring = &dev_priv->ring;
++      ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
++      if (ring->space <= 0)
++              ring->space += ring->size;
++}
++
++/* ================================================================
++ * Misc helper macros
++ */
++
++#define RING_SPACE_TEST_WITH_RETURN( dev_priv )                               \
++do {                                                                  \
++      drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i;          \
++      if ( ring->space < ring->high_mark ) {                          \
++              for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {      \
++                      r128_update_ring_snapshot( dev_priv );          \
++                      if ( ring->space >= ring->high_mark )           \
++                              goto __ring_space_done;                 \
++                      DRM_UDELAY(1);                          \
++              }                                                       \
++              DRM_ERROR( "ring space check failed!\n" );              \
++              return -EBUSY;                          \
++      }                                                               \
++ __ring_space_done:                                                   \
++      ;                                                               \
++} while (0)
++
++#define VB_AGE_TEST_WITH_RETURN( dev_priv )                           \
++do {                                                                  \
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;            \
++      if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) {           \
++              int __ret = r128_do_cce_idle( dev_priv );               \
++              if ( __ret ) return __ret;                              \
++              sarea_priv->last_dispatch = 0;                          \
++              r128_freelist_reset( dev );                             \
++      }                                                               \
++} while (0)
++
++#define R128_WAIT_UNTIL_PAGE_FLIPPED() do {                           \
++      OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) );                  \
++      OUT_RING( R128_EVENT_CRTC_OFFSET );                             \
++} while (0)
++
++/* ================================================================
++ * Ring control
++ */
++
++#define R128_VERBOSE  0
++
++#define RING_LOCALS                                                   \
++      int write, _nr; unsigned int tail_mask; volatile u32 *ring;
++
++#define BEGIN_RING( n ) do {                                          \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "BEGIN_RING( %d )\n", (n));                   \
++      }                                                               \
++      if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {              \
++              COMMIT_RING();                                          \
++              r128_wait_ring( dev_priv, (n) * sizeof(u32) );          \
++      }                                                               \
++      _nr = n; dev_priv->ring.space -= (n) * sizeof(u32);             \
++      ring = dev_priv->ring.start;                                    \
++      write = dev_priv->ring.tail;                                    \
++      tail_mask = dev_priv->ring.tail_mask;                           \
++} while (0)
++
++/* You can set this to zero if you want.  If the card locks up, you'll
++ * need to keep this set.  It works around a bug in early revs of the
++ * Rage 128 chipset, where the CCE would read 32 dwords past the end of
++ * the ring buffer before wrapping around.
++ */
++#define R128_BROKEN_CCE       1
++
++#define ADVANCE_RING() do {                                           \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
++                        write, dev_priv->ring.tail );                 \
++      }                                                               \
++      if ( R128_BROKEN_CCE && write < 32 ) {                          \
++              memcpy( dev_priv->ring.end,                             \
++                      dev_priv->ring.start,                           \
++                      write * sizeof(u32) );                          \
++      }                                                               \
++      if (((dev_priv->ring.tail + _nr) & tail_mask) != write) {       \
++              DRM_ERROR(                                              \
++                      "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",        \
++                      ((dev_priv->ring.tail + _nr) & tail_mask),      \
++                      write, __LINE__);                               \
++      } else                                                          \
++              dev_priv->ring.tail = write;                            \
++} while (0)
++
++#define COMMIT_RING() do {                                            \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "COMMIT_RING() tail=0x%06x\n",                \
++                      dev_priv->ring.tail );                          \
++      }                                                               \
++      DRM_MEMORYBARRIER();                                            \
++      R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail );     \
++      R128_READ( R128_PM4_BUFFER_DL_WPTR );                           \
++} while (0)
++
++#define OUT_RING( x ) do {                                            \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",            \
++                         (unsigned int)(x), write );                  \
++      }                                                               \
++      ring[write++] = cpu_to_le32( x );                               \
++      write &= tail_mask;                                             \
++} while (0)
++
++#endif                                /* __R128_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_ioc32.c git-nokia/drivers/gpu/drm-tungsten/r128_ioc32.c
+--- git/drivers/gpu/drm-tungsten/r128_ioc32.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_ioc32.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,222 @@
++/**
++ * \file r128_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the R128 DRM.
++ *
++ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++
++typedef struct drm_r128_init32 {
++      int func;
++      unsigned int sarea_priv_offset;
++      int is_pci;
++      int cce_mode;
++      int cce_secure;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++      unsigned int span_offset;
++
++      unsigned int fb_offset;
++      unsigned int mmio_offset;
++      unsigned int ring_offset;
++      unsigned int ring_rptr_offset;
++      unsigned int buffers_offset;
++      unsigned int agp_textures_offset;
++} drm_r128_init32_t;
++
++static int compat_r128_init(struct file *file, unsigned int cmd,
++                          unsigned long arg)
++{
++      drm_r128_init32_t init32;
++      drm_r128_init_t __user *init;
++
++      if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++              return -EFAULT;
++
++      init = compat_alloc_user_space(sizeof(*init));
++      if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++          || __put_user(init32.func, &init->func)
++          || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++          || __put_user(init32.is_pci, &init->is_pci)
++          || __put_user(init32.cce_mode, &init->cce_mode)
++          || __put_user(init32.cce_secure, &init->cce_secure)
++          || __put_user(init32.ring_size, &init->ring_size)
++          || __put_user(init32.usec_timeout, &init->usec_timeout)
++          || __put_user(init32.fb_bpp, &init->fb_bpp)
++          || __put_user(init32.front_offset, &init->front_offset)
++          || __put_user(init32.front_pitch, &init->front_pitch)
++          || __put_user(init32.back_offset, &init->back_offset)
++          || __put_user(init32.back_pitch, &init->back_pitch)
++          || __put_user(init32.depth_bpp, &init->depth_bpp)
++          || __put_user(init32.depth_offset, &init->depth_offset)
++          || __put_user(init32.depth_pitch, &init->depth_pitch)
++          || __put_user(init32.span_offset, &init->span_offset)
++          || __put_user(init32.fb_offset, &init->fb_offset)
++          || __put_user(init32.mmio_offset, &init->mmio_offset)
++          || __put_user(init32.ring_offset, &init->ring_offset)
++          || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
++          || __put_user(init32.buffers_offset, &init->buffers_offset)
++          || __put_user(init32.agp_textures_offset,
++                        &init->agp_textures_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_INIT, (unsigned long)init);
++}
++
++
++typedef struct drm_r128_depth32 {
++      int func;
++      int n;
++      u32 x;
++      u32 y;
++      u32 buffer;
++      u32 mask;
++} drm_r128_depth32_t;
++
++static int compat_r128_depth(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_r128_depth32_t depth32;
++      drm_r128_depth_t __user *depth;
++
++      if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
++              return -EFAULT;
++
++      depth = compat_alloc_user_space(sizeof(*depth));
++      if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
++          || __put_user(depth32.func, &depth->func)
++          || __put_user(depth32.n, &depth->n)
++          || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
++          || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
++          || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
++                        &depth->buffer)
++          || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
++                        &depth->mask))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
++
++}
++
++typedef struct drm_r128_stipple32 {
++      u32 mask;
++} drm_r128_stipple32_t;
++
++static int compat_r128_stipple(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_r128_stipple32_t stipple32;
++      drm_r128_stipple_t __user *stipple;
++
++      if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
++              return -EFAULT;
++
++      stipple = compat_alloc_user_space(sizeof(*stipple));
++      if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
++          || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
++                        &stipple->mask))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
++}
++
++typedef struct drm_r128_getparam32 {
++      int param;
++      u32 value;
++} drm_r128_getparam32_t;
++
++static int compat_r128_getparam(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_r128_getparam32_t getparam32;
++      drm_r128_getparam_t __user *getparam;
++
++      if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
++              return -EFAULT;
++
++      getparam = compat_alloc_user_space(sizeof(*getparam));
++      if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
++          || __put_user(getparam32.param, &getparam->param)
++          || __put_user((void __user *)(unsigned long)getparam32.value,
++                        &getparam->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
++}
++
++drm_ioctl_compat_t *r128_compat_ioctls[] = {
++      [DRM_R128_INIT] = compat_r128_init,
++      [DRM_R128_DEPTH] = compat_r128_depth,
++      [DRM_R128_STIPPLE] = compat_r128_stipple,
++      [DRM_R128_GETPARAM] = compat_r128_getparam,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
++              fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_irq.c git-nokia/drivers/gpu/drm-tungsten/r128_irq.c
+--- git/drivers/gpu/drm-tungsten/r128_irq.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_irq.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,116 @@
++/* r128_irq.c -- IRQ handling for the ATI Rage 128 -*- linux-c -*- */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Eric Anholt <anholt@FreeBSD.org>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      const drm_r128_private_t *dev_priv = dev->dev_private;
++
++      if (crtc != 0)
++              return 0;
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
++      int status;
++
++      status = R128_READ(R128_GEN_INT_STATUS);
++
++      /* VBLANK interrupt */
++      if (status & R128_CRTC_VBLANK_INT) {
++              R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
++              atomic_inc(&dev_priv->vbl_received);
++              drm_handle_vblank(dev, 0);
++              return IRQ_HANDLED;
++      }
++      return IRQ_NONE;
++}
++
++int r128_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++
++      if (crtc != 0) {
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++              return -EINVAL;
++      }
++
++      R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
++      return 0;
++}
++
++void r128_disable_vblank(struct drm_device *dev, int crtc)
++{
++      if (crtc != 0)
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++
++      /*
++       * FIXME: implement proper interrupt disable by using the vblank
++       * counter register (if available)
++       *
++       * R128_WRITE(R128_GEN_INT_CNTL,
++       *            R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
++       */
++}
++
++void r128_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
++
++      /* Disable *all* interrupts */
++      R128_WRITE(R128_GEN_INT_CNTL, 0);
++      /* Clear vblank bit if it's already high */
++      R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
++}
++
++int r128_driver_irq_postinstall(struct drm_device * dev)
++{
++      return drm_vblank_init(dev, 1);
++}
++
++void r128_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      /* Disable *all* interrupts */
++      R128_WRITE(R128_GEN_INT_CNTL, 0);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_state.c git-nokia/drivers/gpu/drm-tungsten/r128_state.c
+--- git/drivers/gpu/drm-tungsten/r128_state.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_state.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1681 @@
++/* r128_state.c -- State support for r128 -*- linux-c -*-
++ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++/* ================================================================
++ * CCE hardware state programming functions
++ */
++
++static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
++                               struct drm_clip_rect * boxes, int count)
++{
++      u32 aux_sc_cntl = 0x00000000;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
++
++      if (count >= 1) {
++              OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
++              OUT_RING(boxes[0].x1);
++              OUT_RING(boxes[0].x2 - 1);
++              OUT_RING(boxes[0].y1);
++              OUT_RING(boxes[0].y2 - 1);
++
++              aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
++      }
++      if (count >= 2) {
++              OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
++              OUT_RING(boxes[1].x1);
++              OUT_RING(boxes[1].x2 - 1);
++              OUT_RING(boxes[1].y1);
++              OUT_RING(boxes[1].y2 - 1);
++
++              aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
++      }
++      if (count >= 3) {
++              OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
++              OUT_RING(boxes[2].x1);
++              OUT_RING(boxes[2].x2 - 1);
++              OUT_RING(boxes[2].y1);
++              OUT_RING(boxes[2].y2 - 1);
++
++              aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
++      }
++
++      OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
++      OUT_RING(aux_sc_cntl);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
++      OUT_RING(ctx->scale_3d_cntl);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(13);
++
++      OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
++      OUT_RING(ctx->dst_pitch_offset_c);
++      OUT_RING(ctx->dp_gui_master_cntl_c);
++      OUT_RING(ctx->sc_top_left_c);
++      OUT_RING(ctx->sc_bottom_right_c);
++      OUT_RING(ctx->z_offset_c);
++      OUT_RING(ctx->z_pitch_c);
++      OUT_RING(ctx->z_sten_cntl_c);
++      OUT_RING(ctx->tex_cntl_c);
++      OUT_RING(ctx->misc_3d_state_cntl_reg);
++      OUT_RING(ctx->texture_clr_cmp_clr_c);
++      OUT_RING(ctx->texture_clr_cmp_msk_c);
++      OUT_RING(ctx->fog_color_c);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(3);
++
++      OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
++      OUT_RING(ctx->setup_cntl);
++      OUT_RING(ctx->pm4_vc_fpu_setup);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(5);
++
++      OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
++      OUT_RING(ctx->dp_write_mask);
++
++      OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
++      OUT_RING(ctx->sten_ref_mask_c);
++      OUT_RING(ctx->plane_3d_mask_c);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
++      OUT_RING(ctx->window_xy_offset);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
++
++      OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
++                           2 + R128_MAX_TEXTURE_LEVELS));
++      OUT_RING(tex->tex_cntl);
++      OUT_RING(tex->tex_combine_cntl);
++      OUT_RING(ctx->tex_size_pitch_c);
++      for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
++              OUT_RING(tex->tex_offset[i]);
++      }
++
++      OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
++      OUT_RING(ctx->constant_color_c);
++      OUT_RING(tex->tex_border_color);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
++
++      OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
++      OUT_RING(tex->tex_cntl);
++      OUT_RING(tex->tex_combine_cntl);
++      for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
++              OUT_RING(tex->tex_offset[i]);
++      }
++
++      OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
++      OUT_RING(tex->tex_border_color);
++
++      ADVANCE_RING();
++}
++
++static void r128_emit_state(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++
++      DRM_DEBUG("dirty=0x%08x\n", dirty);
++
++      if (dirty & R128_UPLOAD_CORE) {
++              r128_emit_core(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_CORE;
++      }
++
++      if (dirty & R128_UPLOAD_CONTEXT) {
++              r128_emit_context(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
++      }
++
++      if (dirty & R128_UPLOAD_SETUP) {
++              r128_emit_setup(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
++      }
++
++      if (dirty & R128_UPLOAD_MASKS) {
++              r128_emit_masks(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
++      }
++
++      if (dirty & R128_UPLOAD_WINDOW) {
++              r128_emit_window(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
++      }
++
++      if (dirty & R128_UPLOAD_TEX0) {
++              r128_emit_tex0(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
++      }
++
++      if (dirty & R128_UPLOAD_TEX1) {
++              r128_emit_tex1(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
++      }
++
++      /* Turn off the texture cache flushing */
++      sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
++
++      sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
++}
++
++#if R128_PERFORMANCE_BOXES
++/* ================================================================
++ * Performance monitoring functions
++ */
++
++static void r128_clear_box(drm_r128_private_t * dev_priv,
++                         int x, int y, int w, int h, int r, int g, int b)
++{
++      u32 pitch, offset;
++      u32 fb_bpp, color;
++      RING_LOCALS;
++
++      switch (dev_priv->fb_bpp) {
++      case 16:
++              fb_bpp = R128_GMC_DST_16BPP;
++              color = (((r & 0xf8) << 8) |
++                       ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
++              break;
++      case 24:
++              fb_bpp = R128_GMC_DST_24BPP;
++              color = ((r << 16) | (g << 8) | b);
++              break;
++      case 32:
++              fb_bpp = R128_GMC_DST_32BPP;
++              color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
++              break;
++      default:
++              return;
++      }
++
++      offset = dev_priv->back_offset;
++      pitch = dev_priv->back_pitch >> 3;
++
++      BEGIN_RING(6);
++
++      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++               R128_GMC_BRUSH_SOLID_COLOR |
++               fb_bpp |
++               R128_GMC_SRC_DATATYPE_COLOR |
++               R128_ROP3_P |
++               R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
++
++      OUT_RING((pitch << 21) | (offset >> 5));
++      OUT_RING(color);
++
++      OUT_RING((x << 16) | y);
++      OUT_RING((w << 16) | h);
++
++      ADVANCE_RING();
++}
++
++static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
++{
++      if (atomic_read(&dev_priv->idle_count) == 0) {
++              r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
++      } else {
++              atomic_set(&dev_priv->idle_count, 0);
++      }
++}
++
++#endif
++
++/* ================================================================
++ * CCE command dispatch functions
++ */
++
++static void r128_print_dirty(const char *msg, unsigned int flags)
++{
++      DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
++               msg,
++               flags,
++               (flags & R128_UPLOAD_CORE) ? "core, " : "",
++               (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
++               (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
++               (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
++               (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
++               (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
++               (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
++               (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
++               (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
++}
++
++static void r128_cce_dispatch_clear(struct drm_device * dev,
++                                  drm_r128_clear_t * clear)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      unsigned int flags = clear->flags;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      if (dev_priv->page_flipping && dev_priv->current_page == 1) {
++              unsigned int tmp = flags;
++
++              flags &= ~(R128_FRONT | R128_BACK);
++              if (tmp & R128_FRONT)
++                      flags |= R128_BACK;
++              if (tmp & R128_BACK)
++                      flags |= R128_FRONT;
++      }
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
++                        pbox[i].x1, pbox[i].y1, pbox[i].x2,
++                        pbox[i].y2, flags);
++
++              if (flags & (R128_FRONT | R128_BACK)) {
++                      BEGIN_RING(2);
++
++                      OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
++                      OUT_RING(clear->color_mask);
++
++                      ADVANCE_RING();
++              }
++
++              if (flags & R128_FRONT) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->color_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_AUX_CLIP_DIS);
++
++                      OUT_RING(dev_priv->front_pitch_offset_c);
++                      OUT_RING(clear->clear_color);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((w << 16) | h);
++
++                      ADVANCE_RING();
++              }
++
++              if (flags & R128_BACK) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->color_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_AUX_CLIP_DIS);
++
++                      OUT_RING(dev_priv->back_pitch_offset_c);
++                      OUT_RING(clear->clear_color);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((w << 16) | h);
++
++                      ADVANCE_RING();
++              }
++
++              if (flags & R128_DEPTH) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->depth_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
++
++                      OUT_RING(dev_priv->depth_pitch_offset_c);
++                      OUT_RING(clear->clear_depth);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((w << 16) | h);
++
++                      ADVANCE_RING();
++              }
++      }
++}
++
++static void r128_cce_dispatch_swap(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++#if R128_PERFORMANCE_BOXES
++      /* Do some trivial performance monitoring...
++       */
++      r128_cce_performance_boxes(dev_priv);
++#endif
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              BEGIN_RING(7);
++
++              OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
++              OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
++                       R128_GMC_DST_PITCH_OFFSET_CNTL |
++                       R128_GMC_BRUSH_NONE |
++                       (dev_priv->color_fmt << 8) |
++                       R128_GMC_SRC_DATATYPE_COLOR |
++                       R128_ROP3_S |
++                       R128_DP_SRC_SOURCE_MEMORY |
++                       R128_GMC_CLR_CMP_CNTL_DIS |
++                       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
++
++              /* Make this work even if front & back are flipped:
++               */
++              if (dev_priv->current_page == 0) {
++                      OUT_RING(dev_priv->back_pitch_offset_c);
++                      OUT_RING(dev_priv->front_pitch_offset_c);
++              } else {
++                      OUT_RING(dev_priv->front_pitch_offset_c);
++                      OUT_RING(dev_priv->back_pitch_offset_c);
++              }
++
++              OUT_RING((x << 16) | y);
++              OUT_RING((x << 16) | y);
++              OUT_RING((w << 16) | h);
++
++              ADVANCE_RING();
++      }
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
++      OUT_RING(dev_priv->sarea_priv->last_frame);
++
++      ADVANCE_RING();
++}
++
++static void r128_cce_dispatch_flip(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++      DRM_DEBUG("page=%d pfCurrentPage=%d\n",
++                dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
++
++#if R128_PERFORMANCE_BOXES
++      /* Do some trivial performance monitoring...
++       */
++      r128_cce_performance_boxes(dev_priv);
++#endif
++
++      BEGIN_RING(4);
++
++      R128_WAIT_UNTIL_PAGE_FLIPPED();
++      OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
++
++      if (dev_priv->current_page == 0) {
++              OUT_RING(dev_priv->back_offset);
++      } else {
++              OUT_RING(dev_priv->front_offset);
++      }
++
++      ADVANCE_RING();
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++      dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
++          1 - dev_priv->current_page;
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
++      OUT_RING(dev_priv->sarea_priv->last_frame);
++
++      ADVANCE_RING();
++}
++
++static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int format = sarea_priv->vc_format;
++      int offset = buf->bus_address;
++      int size = buf->used;
++      int prim = buf_priv->prim;
++      int i = 0;
++      RING_LOCALS;
++      DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
++
++      if (0)
++              r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
++
++      if (buf->used) {
++              buf_priv->dispatched = 1;
++
++              if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
++                      r128_emit_state(dev_priv);
++              }
++
++              do {
++                      /* Emit the next set of up to three cliprects */
++                      if (i < sarea_priv->nbox) {
++                              r128_emit_clip_rects(dev_priv,
++                                                   &sarea_priv->boxes[i],
++                                                   sarea_priv->nbox - i);
++                      }
++
++                      /* Emit the vertex buffer rendering commands */
++                      BEGIN_RING(5);
++
++                      OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
++                      OUT_RING(offset);
++                      OUT_RING(size);
++                      OUT_RING(format);
++                      OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
++                               (size << R128_CCE_VC_CNTL_NUM_SHIFT));
++
++                      ADVANCE_RING();
++
++                      i += 3;
++              } while (i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              buf_priv->age = dev_priv->sarea_priv->last_dispatch;
++
++              /* Emit the vertex buffer age */
++              BEGIN_RING(2);
++
++              OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
++              OUT_RING(buf_priv->age);
++
++              ADVANCE_RING();
++
++              buf->pending = 1;
++              buf->used = 0;
++              /* FIXME: Check dispatched field */
++              buf_priv->dispatched = 0;
++      }
++
++      dev_priv->sarea_priv->last_dispatch++;
++
++      sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
++      sarea_priv->nbox = 0;
++}
++
++static void r128_cce_dispatch_indirect(struct drm_device * dev,
++                                     struct drm_buf * buf, int start, int end)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++      RING_LOCALS;
++      DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
++
++      if (start != end) {
++              int offset = buf->bus_address + start;
++              int dwords = (end - start + 3) / sizeof(u32);
++
++              /* Indirect buffer data must be an even number of
++               * dwords, so if we've been given an odd number we must
++               * pad the data with a Type-2 CCE packet.
++               */
++              if (dwords & 1) {
++                      u32 *data = (u32 *)
++                          ((char *)dev->agp_buffer_map->handle
++                           + buf->offset + start);
++                      data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
++              }
++
++              buf_priv->dispatched = 1;
++
++              /* Fire off the indirect buffer */
++              BEGIN_RING(3);
++
++              OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
++              OUT_RING(offset);
++              OUT_RING(dwords);
++
++              ADVANCE_RING();
++      }
++
++      if (buf_priv->discard) {
++              buf_priv->age = dev_priv->sarea_priv->last_dispatch;
++
++              /* Emit the indirect buffer age */
++              BEGIN_RING(2);
++
++              OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
++              OUT_RING(buf_priv->age);
++
++              ADVANCE_RING();
++
++              buf->pending = 1;
++              buf->used = 0;
++              /* FIXME: Check dispatched field */
++              buf_priv->dispatched = 0;
++      }
++
++      dev_priv->sarea_priv->last_dispatch++;
++}
++
++static void r128_cce_dispatch_indices(struct drm_device * dev,
++                                    struct drm_buf * buf,
++                                    int start, int end, int count)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int format = sarea_priv->vc_format;
++      int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
++      int prim = buf_priv->prim;
++      u32 *data;
++      int dwords;
++      int i = 0;
++      RING_LOCALS;
++      DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
++
++      if (0)
++              r128_print_dirty("dispatch_indices", sarea_priv->dirty);
++
++      if (start != end) {
++              buf_priv->dispatched = 1;
++
++              if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
++                      r128_emit_state(dev_priv);
++              }
++
++              dwords = (end - start + 3) / sizeof(u32);
++
++              data = (u32 *) ((char *)dev->agp_buffer_map->handle
++                              + buf->offset + start);
++
++              data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
++                                                dwords - 2));
++
++              data[1] = cpu_to_le32(offset);
++              data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
++              data[3] = cpu_to_le32(format);
++              data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
++                                     (count << 16)));
++
++              if (count & 0x1) {
++#ifdef __LITTLE_ENDIAN
++                      data[dwords - 1] &= 0x0000ffff;
++#else
++                      data[dwords - 1] &= 0xffff0000;
++#endif
++              }
++
++              do {
++                      /* Emit the next set of up to three cliprects */
++                      if (i < sarea_priv->nbox) {
++                              r128_emit_clip_rects(dev_priv,
++                                                   &sarea_priv->boxes[i],
++                                                   sarea_priv->nbox - i);
++                      }
++
++                      r128_cce_dispatch_indirect(dev, buf, start, end);
++
++                      i += 3;
++              } while (i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              buf_priv->age = dev_priv->sarea_priv->last_dispatch;
++
++              /* Emit the vertex buffer age */
++              BEGIN_RING(2);
++
++              OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
++              OUT_RING(buf_priv->age);
++
++              ADVANCE_RING();
++
++              buf->pending = 1;
++              /* FIXME: Check dispatched field */
++              buf_priv->dispatched = 0;
++      }
++
++      dev_priv->sarea_priv->last_dispatch++;
++
++      sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
++      sarea_priv->nbox = 0;
++}
++
++static int r128_cce_dispatch_blit(struct drm_device * dev,
++                                struct drm_file *file_priv,
++                                drm_r128_blit_t * blit)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      u32 *data;
++      int dword_shift, dwords;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* The compiler won't optimize away a division by a variable,
++       * even if the only legal values are powers of two.  Thus, we'll
++       * use a shift instead.
++       */
++      switch (blit->format) {
++      case R128_DATATYPE_ARGB8888:
++              dword_shift = 0;
++              break;
++      case R128_DATATYPE_ARGB1555:
++      case R128_DATATYPE_RGB565:
++      case R128_DATATYPE_ARGB4444:
++      case R128_DATATYPE_YVYU422:
++      case R128_DATATYPE_VYUY422:
++              dword_shift = 1;
++              break;
++      case R128_DATATYPE_CI8:
++      case R128_DATATYPE_RGB8:
++              dword_shift = 2;
++              break;
++      default:
++              DRM_ERROR("invalid blit format %d\n", blit->format);
++              return -EINVAL;
++      }
++
++      /* Flush the pixel cache, and mark the contents as Read Invalid.
++       * This ensures no pixel data gets mixed up with the texture
++       * data from the host data blit, otherwise part of the texture
++       * image may be corrupted.
++       */
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
++      OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
++
++      ADVANCE_RING();
++
++      /* Dispatch the indirect buffer.
++       */
++      buf = dma->buflist[blit->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", blit->idx);
++              return -EINVAL;
++      }
++
++      buf_priv->discard = 1;
++
++      dwords = (blit->width * blit->height) >> dword_shift;
++
++      data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
++
++      data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
++      data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
++                             R128_GMC_BRUSH_NONE |
++                             (blit->format << 8) |
++                             R128_GMC_SRC_DATATYPE_COLOR |
++                             R128_ROP3_S |
++                             R128_DP_SRC_SOURCE_HOST_DATA |
++                             R128_GMC_CLR_CMP_CNTL_DIS |
++                             R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
++
++      data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
++      data[3] = cpu_to_le32(0xffffffff);
++      data[4] = cpu_to_le32(0xffffffff);
++      data[5] = cpu_to_le32((blit->y << 16) | blit->x);
++      data[6] = cpu_to_le32((blit->height << 16) | blit->width);
++      data[7] = cpu_to_le32(dwords);
++
++      buf->used = (dwords + 8) * sizeof(u32);
++
++      r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
++
++      /* Flush the pixel cache after the blit completes.  This ensures
++       * the texture data is written out to memory before rendering
++       * continues.
++       */
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
++      OUT_RING(R128_PC_FLUSH_GUI);
++
++      ADVANCE_RING();
++
++      return 0;
++}
++
++/* ================================================================
++ * Tiled depth buffer management
++ *
++ * FIXME: These should all set the destination write mask for when we
++ * have hardware stencil support.
++ */
++
++static int r128_cce_dispatch_write_span(struct drm_device * dev,
++                                      drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, x, y;
++      u32 *buffer;
++      u8 *mask;
++      int i, buffer_size, mask_size;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
++              return -EFAULT;
++      }
++
++      buffer_size = depth->n * sizeof(u32);
++      buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
++      if (buffer == NULL)
++              return -ENOMEM;
++      if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
++              drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      mask_size = depth->n * sizeof(u8);
++      if (depth->mask) {
++              mask = drm_alloc(mask_size, DRM_MEM_BUFS);
++              if (mask == NULL) {
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      return -ENOMEM;
++              }
++              if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      drm_free(mask, mask_size, DRM_MEM_BUFS);
++                      return -EFAULT;
++              }
++
++              for (i = 0; i < count; i++, x++) {
++                      if (mask[i]) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                                       R128_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->depth_fmt << 8) |
++                                       R128_GMC_SRC_DATATYPE_COLOR |
++                                       R128_ROP3_P |
++                                       R128_GMC_CLR_CMP_CNTL_DIS |
++                                       R128_GMC_WR_MSK_DIS);
++
++                              OUT_RING(dev_priv->depth_pitch_offset_c);
++                              OUT_RING(buffer[i]);
++
++                              OUT_RING((x << 16) | y);
++                              OUT_RING((1 << 16) | 1);
++
++                              ADVANCE_RING();
++                      }
++              }
++
++              drm_free(mask, mask_size, DRM_MEM_BUFS);
++      } else {
++              for (i = 0; i < count; i++, x++) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->depth_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_WR_MSK_DIS);
++
++                      OUT_RING(dev_priv->depth_pitch_offset_c);
++                      OUT_RING(buffer[i]);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((1 << 16) | 1);
++
++                      ADVANCE_RING();
++              }
++      }
++
++      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++
++      return 0;
++}
++
++static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
++                                        drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, *x, *y;
++      u32 *buffer;
++      u8 *mask;
++      int i, xbuf_size, ybuf_size, buffer_size, mask_size;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      xbuf_size = count * sizeof(*x);
++      ybuf_size = count * sizeof(*y);
++      x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
++      if (x == NULL) {
++              return -ENOMEM;
++      }
++      y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
++      if (y == NULL) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      buffer_size = depth->n * sizeof(u32);
++      buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
++      if (buffer == NULL) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      if (depth->mask) {
++              mask_size = depth->n * sizeof(u8);
++              mask = drm_alloc(mask_size, DRM_MEM_BUFS);
++              if (mask == NULL) {
++                      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++                      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      return -ENOMEM;
++              }
++              if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
++                      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++                      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      drm_free(mask, mask_size, DRM_MEM_BUFS);
++                      return -EFAULT;
++              }
++
++              for (i = 0; i < count; i++) {
++                      if (mask[i]) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                                       R128_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->depth_fmt << 8) |
++                                       R128_GMC_SRC_DATATYPE_COLOR |
++                                       R128_ROP3_P |
++                                       R128_GMC_CLR_CMP_CNTL_DIS |
++                                       R128_GMC_WR_MSK_DIS);
++
++                              OUT_RING(dev_priv->depth_pitch_offset_c);
++                              OUT_RING(buffer[i]);
++
++                              OUT_RING((x[i] << 16) | y[i]);
++                              OUT_RING((1 << 16) | 1);
++
++                              ADVANCE_RING();
++                      }
++              }
++
++              drm_free(mask, mask_size, DRM_MEM_BUFS);
++      } else {
++              for (i = 0; i < count; i++) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->depth_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_WR_MSK_DIS);
++
++                      OUT_RING(dev_priv->depth_pitch_offset_c);
++                      OUT_RING(buffer[i]);
++
++                      OUT_RING((x[i] << 16) | y[i]);
++                      OUT_RING((1 << 16) | 1);
++
++                      ADVANCE_RING();
++              }
++      }
++
++      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++
++      return 0;
++}
++
++static int r128_cce_dispatch_read_span(struct drm_device * dev,
++                                     drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, x, y;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
++              return -EFAULT;
++      }
++
++      BEGIN_RING(7);
++
++      OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
++      OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
++               R128_GMC_DST_PITCH_OFFSET_CNTL |
++               R128_GMC_BRUSH_NONE |
++               (dev_priv->depth_fmt << 8) |
++               R128_GMC_SRC_DATATYPE_COLOR |
++               R128_ROP3_S |
++               R128_DP_SRC_SOURCE_MEMORY |
++               R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
++
++      OUT_RING(dev_priv->depth_pitch_offset_c);
++      OUT_RING(dev_priv->span_pitch_offset_c);
++
++      OUT_RING((x << 16) | y);
++      OUT_RING((0 << 16) | 0);
++      OUT_RING((count << 16) | 1);
++
++      ADVANCE_RING();
++
++      return 0;
++}
++
++static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
++                                       drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, *x, *y;
++      int i, xbuf_size, ybuf_size;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      if (count > dev_priv->depth_pitch) {
++              count = dev_priv->depth_pitch;
++      }
++
++      xbuf_size = count * sizeof(*x);
++      ybuf_size = count * sizeof(*y);
++      x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
++      if (x == NULL) {
++              return -ENOMEM;
++      }
++      y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
++      if (y == NULL) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      for (i = 0; i < count; i++) {
++              BEGIN_RING(7);
++
++              OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
++              OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
++                       R128_GMC_DST_PITCH_OFFSET_CNTL |
++                       R128_GMC_BRUSH_NONE |
++                       (dev_priv->depth_fmt << 8) |
++                       R128_GMC_SRC_DATATYPE_COLOR |
++                       R128_ROP3_S |
++                       R128_DP_SRC_SOURCE_MEMORY |
++                       R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
++
++              OUT_RING(dev_priv->depth_pitch_offset_c);
++              OUT_RING(dev_priv->span_pitch_offset_c);
++
++              OUT_RING((x[i] << 16) | y[i]);
++              OUT_RING((i << 16) | 0);
++              OUT_RING((1 << 16) | 1);
++
++              ADVANCE_RING();
++      }
++
++      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++
++      return 0;
++}
++
++/* ================================================================
++ * Polygon stipple
++ */
++
++static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(33);
++
++      OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
++      for (i = 0; i < 32; i++) {
++              OUT_RING(stipple[i]);
++      }
++
++      ADVANCE_RING();
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++
++static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_clear_t *clear = data;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
++
++      r128_cce_dispatch_clear(dev, clear);
++      COMMIT_RING();
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
++
++      return 0;
++}
++
++static int r128_do_init_pageflip(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
++      dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
++
++      R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
++      R128_WRITE(R128_CRTC_OFFSET_CNTL,
++                 dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
++
++      dev_priv->page_flipping = 1;
++      dev_priv->current_page = 0;
++      dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
++
++      return 0;
++}
++
++static int r128_do_cleanup_pageflip(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
++      R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
++
++      if (dev_priv->current_page != 0) {
++              r128_cce_dispatch_flip(dev);
++              COMMIT_RING();
++      }
++
++      dev_priv->page_flipping = 0;
++      return 0;
++}
++
++/* Swapping and flipping are different operations, need different ioctls.
++ * They can & should be intermixed to support multiple 3d windows.
++ */
++
++static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (!dev_priv->page_flipping)
++              r128_do_init_pageflip(dev);
++
++      r128_cce_dispatch_flip(dev);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
++
++      r128_cce_dispatch_swap(dev);
++      dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
++                                      R128_UPLOAD_MASKS);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
++                DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        vertex->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (vertex->prim < 0 ||
++          vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
++              DRM_ERROR("buffer prim %d\n", vertex->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[vertex->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", vertex->idx);
++              return -EINVAL;
++      }
++
++      buf->used = vertex->count;
++      buf_priv->prim = vertex->prim;
++      buf_priv->discard = vertex->discard;
++
++      r128_cce_dispatch_vertex(dev, buf);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_indices_t *elts = data;
++      int count;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
++                elts->idx, elts->start, elts->end, elts->discard);
++
++      if (elts->idx < 0 || elts->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        elts->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (elts->prim < 0 ||
++          elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
++              DRM_ERROR("buffer prim %d\n", elts->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[elts->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", elts->idx);
++              return -EINVAL;
++      }
++
++      count = (elts->end - elts->start) / sizeof(u16);
++      elts->start -= R128_INDEX_PRIM_OFFSET;
++
++      if (elts->start & 0x7) {
++              DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
++              return -EINVAL;
++      }
++      if (elts->start < buf->used) {
++              DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
++              return -EINVAL;
++      }
++
++      buf->used = elts->end;
++      buf_priv->prim = elts->prim;
++      buf_priv->discard = elts->discard;
++
++      r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_blit_t *blit = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
++
++      if (blit->idx < 0 || blit->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        blit->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      ret = r128_cce_dispatch_blit(dev, file_priv, blit);
++
++      COMMIT_RING();
++      return ret;
++}
++
++static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_depth_t *depth = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      ret = -EINVAL;
++      switch (depth->func) {
++      case R128_WRITE_SPAN:
++              ret = r128_cce_dispatch_write_span(dev, depth);
++              break;
++      case R128_WRITE_PIXELS:
++              ret = r128_cce_dispatch_write_pixels(dev, depth);
++              break;
++      case R128_READ_SPAN:
++              ret = r128_cce_dispatch_read_span(dev, depth);
++              break;
++      case R128_READ_PIXELS:
++              ret = r128_cce_dispatch_read_pixels(dev, depth);
++              break;
++      }
++
++      COMMIT_RING();
++      return ret;
++}
++
++static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_stipple_t *stipple = data;
++      u32 mask[32];
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
++              return -EFAULT;
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      r128_cce_dispatch_stipple(dev, mask);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_indirect_t *indirect = data;
++#if 0
++      RING_LOCALS;
++#endif
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
++                indirect->idx, indirect->start, indirect->end,
++                indirect->discard);
++
++      if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        indirect->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      buf = dma->buflist[indirect->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", indirect->idx);
++              return -EINVAL;
++      }
++
++      if (indirect->start < buf->used) {
++              DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
++                        indirect->start, buf->used);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf->used = indirect->end;
++      buf_priv->discard = indirect->discard;
++
++#if 0
++      /* Wait for the 3D stream to idle before the indirect buffer
++       * containing 2D acceleration commands is processed.
++       */
++      BEGIN_RING(2);
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      ADVANCE_RING();
++#endif
++
++      /* Dispatch the indirect buffer full of commands from the
++       * X server.  This is insecure and is thus only available to
++       * privileged clients.
++       */
++      r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      switch (param->param) {
++      case R128_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
++{
++      if (dev->dev_private) {
++              drm_r128_private_t *dev_priv = dev->dev_private;
++              if (dev_priv->page_flipping) {
++                      r128_do_cleanup_pageflip(dev);
++              }
++      }
++}
++
++void r128_driver_lastclose(struct drm_device * dev)
++{
++      r128_do_cleanup_cce(dev);
++}
++
++struct drm_ioctl_desc r128_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
++};
++
++int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/r300_cmdbuf.c git-nokia/drivers/gpu/drm-tungsten/r300_cmdbuf.c
+--- git/drivers/gpu/drm-tungsten/r300_cmdbuf.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r300_cmdbuf.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1198 @@
++/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
++ *
++ * Copyright (C) The Weather Channel, Inc.  2002.
++ * Copyright (C) 2004 Nicolai Haehnle.
++ * All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Nicolai Haehnle <prefect_@gmx.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++#include "r300_reg.h"
++
++#define R300_SIMULTANEOUS_CLIPRECTS           4
++
++/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
++ */
++static const int r300_cliprect_cntl[4] = {
++      0xAAAA,
++      0xEEEE,
++      0xFEFE,
++      0xFFFE
++};
++
++/**
++ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
++ * buffer, starting with index n.
++ */
++static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
++                             drm_radeon_kcmd_buffer_t *cmdbuf, int n)
++{
++      struct drm_clip_rect box;
++      int nr;
++      int i;
++      RING_LOCALS;
++
++      nr = cmdbuf->nbox - n;
++      if (nr > R300_SIMULTANEOUS_CLIPRECTS)
++              nr = R300_SIMULTANEOUS_CLIPRECTS;
++
++      DRM_DEBUG("%i cliprects\n", nr);
++
++      if (nr) {
++              BEGIN_RING(6 + nr * 2);
++              OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
++
++              for (i = 0; i < nr; ++i) {
++                      if (DRM_COPY_FROM_USER_UNCHECKED
++                          (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
++                              DRM_ERROR("copy cliprect faulted\n");
++                              return -EFAULT;
++                      }
++
++                      box.x2--; /* Hardware expects inclusive bottom-right corner */
++                      box.y2--;
++
++                      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
++                              box.x1 = (box.x1) &
++                                      R300_CLIPRECT_MASK;
++                              box.y1 = (box.y1) &
++                                      R300_CLIPRECT_MASK;
++                              box.x2 = (box.x2) &
++                                      R300_CLIPRECT_MASK;
++                              box.y2 = (box.y2) &
++                                      R300_CLIPRECT_MASK;
++                      } else {
++                              box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                              box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                              box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                              box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                      }
++
++                      OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
++                               (box.y1 << R300_CLIPRECT_Y_SHIFT));
++                      OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
++                               (box.y2 << R300_CLIPRECT_Y_SHIFT));
++
++              }
++
++              OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
++
++              /* TODO/SECURITY: Force scissors to a safe value, otherwise the
++               * client might be able to trample over memory.
++               * The impact should be very limited, but I'd rather be safe than
++               * sorry.
++               */
++              OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
++              OUT_RING(0);
++              OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
++              ADVANCE_RING();
++      } else {
++              /* Why we allow zero cliprect rendering:
++               * There are some commands in a command buffer that must be submitted
++               * even when there are no cliprects, e.g. DMA buffer discard
++               * or state setting (though state setting could be avoided by
++               * simulating a loss of context).
++               *
++               * Now since the cmdbuf interface is so chaotic right now (and is
++               * bound to remain that way for a bit until things settle down),
++               * it is basically impossible to filter out the commands that are
++               * necessary and those that aren't.
++               *
++               * So I choose the safe way and don't do any filtering at all;
++               * instead, I simply set up the engine so that all rendering
++               * can't produce any fragments.
++               */
++              BEGIN_RING(2);
++              OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
++              ADVANCE_RING();
++      }
++
++      /* flush cache and wait idle clean after cliprect change */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(R300_RB3D_DC_FLUSH);
++      ADVANCE_RING();
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      ADVANCE_RING();
++      /* set flush flag */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
++
++      return 0;
++}
++
++static u8 r300_reg_flags[0x10000 >> 2];
++
++void r300_init_reg_flags(struct drm_device *dev)
++{
++      int i;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      memset(r300_reg_flags, 0, 0x10000 >> 2);
++#define ADD_RANGE_MARK(reg, count,mark) \
++              for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
++                      r300_reg_flags[i]|=(mark);
++
++#define MARK_SAFE             1
++#define MARK_CHECK_OFFSET     2
++
++#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
++
++      /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
++      ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
++      ADD_RANGE(R300_VAP_CNTL, 1);
++      ADD_RANGE(R300_SE_VTE_CNTL, 2);
++      ADD_RANGE(0x2134, 2);
++      ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
++      ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
++      ADD_RANGE(0x21DC, 1);
++      ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
++      ADD_RANGE(R300_VAP_CLIP_X_0, 4);
++      ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
++      ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
++      ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
++      ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
++      ADD_RANGE(R300_GB_ENABLE, 1);
++      ADD_RANGE(R300_GB_MSPOS0, 5);
++      ADD_RANGE(R300_TX_INVALTAGS, 1);
++      ADD_RANGE(R300_TX_ENABLE, 1);
++      ADD_RANGE(0x4200, 4);
++      ADD_RANGE(0x4214, 1);
++      ADD_RANGE(R300_RE_POINTSIZE, 1);
++      ADD_RANGE(0x4230, 3);
++      ADD_RANGE(R300_RE_LINE_CNT, 1);
++      ADD_RANGE(R300_RE_UNK4238, 1);
++      ADD_RANGE(0x4260, 3);
++      ADD_RANGE(R300_RE_SHADE, 4);
++      ADD_RANGE(R300_RE_POLYGON_MODE, 5);
++      ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
++      ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
++      ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
++      ADD_RANGE(R300_RE_CULL_CNTL, 1);
++      ADD_RANGE(0x42C0, 2);
++      ADD_RANGE(R300_RS_CNTL_0, 2);
++
++      ADD_RANGE(R300_SC_HYPERZ, 2);
++      ADD_RANGE(0x43E8, 1);
++
++      ADD_RANGE(0x46A4, 5);
++
++      ADD_RANGE(R300_RE_FOG_STATE, 1);
++      ADD_RANGE(R300_FOG_COLOR_R, 3);
++      ADD_RANGE(R300_PP_ALPHA_TEST, 2);
++      ADD_RANGE(0x4BD8, 1);
++      ADD_RANGE(R300_PFS_PARAM_0_X, 64);
++      ADD_RANGE(0x4E00, 1);
++      ADD_RANGE(R300_RB3D_CBLEND, 2);
++      ADD_RANGE(R300_RB3D_COLORMASK, 1);
++      ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
++      ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);   /* check offset */
++      ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
++      ADD_RANGE(0x4E50, 9);
++      ADD_RANGE(0x4E88, 1);
++      ADD_RANGE(0x4EA0, 2);
++      ADD_RANGE(R300_ZB_CNTL, 3);
++      ADD_RANGE(R300_ZB_FORMAT, 4);
++      ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);      /* check offset */
++      ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
++      ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
++      ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
++
++      ADD_RANGE(R300_TX_FILTER_0, 16);
++      ADD_RANGE(R300_TX_FILTER1_0, 16);
++      ADD_RANGE(R300_TX_SIZE_0, 16);
++      ADD_RANGE(R300_TX_FORMAT_0, 16);
++      ADD_RANGE(R300_TX_PITCH_0, 16);
++      /* Texture offset is dangerous and needs more checking */
++      ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
++      ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
++      ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
++
++      /* Sporadic registers used as primitives are emitted */
++      ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
++      ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
++      ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
++      ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
++              ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
++              ADD_RANGE(R500_US_CONFIG, 2);
++              ADD_RANGE(R500_US_CODE_ADDR, 3);
++              ADD_RANGE(R500_US_FC_CTRL, 1);
++              ADD_RANGE(R500_RS_IP_0, 16);
++              ADD_RANGE(R500_RS_INST_0, 16);
++              ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
++              ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
++              ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
++      } else {
++              ADD_RANGE(R300_PFS_CNTL_0, 3);
++              ADD_RANGE(R300_PFS_NODE_0, 4);
++              ADD_RANGE(R300_PFS_TEXI_0, 64);
++              ADD_RANGE(R300_PFS_INSTR0_0, 64);
++              ADD_RANGE(R300_PFS_INSTR1_0, 64);
++              ADD_RANGE(R300_PFS_INSTR2_0, 64);
++              ADD_RANGE(R300_PFS_INSTR3_0, 64);
++              ADD_RANGE(R300_RS_INTERP_0, 8);
++              ADD_RANGE(R300_RS_ROUTE_0, 8);
++
++      }
++}
++
++static __inline__ int r300_check_range(unsigned reg, int count)
++{
++      int i;
++      if (reg & ~0xffff)
++              return -1;
++      for (i = (reg >> 2); i < (reg >> 2) + count; i++)
++              if (r300_reg_flags[i] != MARK_SAFE)
++                      return 1;
++      return 0;
++}
++
++static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
++                                                        dev_priv,
++                                                        drm_radeon_kcmd_buffer_t
++                                                        * cmdbuf,
++                                                        drm_r300_cmd_header_t
++                                                        header)
++{
++      int reg;
++      int sz;
++      int i;
++      int values[64];
++      RING_LOCALS;
++
++      sz = header.packet0.count;
++      reg = (header.packet0.reghi << 8) | header.packet0.reglo;
++
++      if ((sz > 64) || (sz < 0)) {
++              DRM_ERROR
++                  ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
++                   reg, sz);
++              return -EINVAL;
++      }
++      for (i = 0; i < sz; i++) {
++              values[i] = ((int *)cmdbuf->buf)[i];
++              switch (r300_reg_flags[(reg >> 2) + i]) {
++              case MARK_SAFE:
++                      break;
++              case MARK_CHECK_OFFSET:
++                      if (!radeon_check_offset(dev_priv, (u32) values[i])) {
++                              DRM_ERROR
++                                  ("Offset failed range check (reg=%04x sz=%d)\n",
++                                   reg, sz);
++                              return -EINVAL;
++                      }
++                      break;
++              default:
++                      DRM_ERROR("Register %04x failed check as flag=%02x\n",
++                                reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
++                      return -EINVAL;
++              }
++      }
++
++      BEGIN_RING(1 + sz);
++      OUT_RING(CP_PACKET0(reg, sz - 1));
++      OUT_RING_TABLE(values, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * 4;
++      cmdbuf->bufsz -= sz * 4;
++
++      return 0;
++}
++
++/**
++ * Emits a packet0 setting arbitrary registers.
++ * Called by r300_do_cp_cmdbuf.
++ *
++ * Note that checks are performed on contents and addresses of the registers
++ */
++static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
++                                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                                      drm_r300_cmd_header_t header)
++{
++      int reg;
++      int sz;
++      RING_LOCALS;
++
++      sz = header.packet0.count;
++      reg = (header.packet0.reghi << 8) | header.packet0.reglo;
++
++      DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz);
++      if (!sz)
++              return 0;
++
++      if (sz * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      if (reg + sz * 4 >= 0x10000) {
++              DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
++                        sz);
++              return -EINVAL;
++      }
++
++      if (r300_check_range(reg, sz)) {
++              /* go and check everything */
++              return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
++                                                         header);
++      }
++      /* the rest of the data is safe to emit, whatever the values the user passed */
++
++      BEGIN_RING(1 + sz);
++      OUT_RING(CP_PACKET0(reg, sz - 1));
++      OUT_RING_TABLE((int *)cmdbuf->buf, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * 4;
++      cmdbuf->bufsz -= sz * 4;
++
++      return 0;
++}
++
++/**
++ * Uploads user-supplied vertex program instructions or parameters onto
++ * the graphics card.
++ * Called by r300_do_cp_cmdbuf.
++ */
++static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
++                                  drm_radeon_kcmd_buffer_t *cmdbuf,
++                                  drm_r300_cmd_header_t header)
++{
++      int sz;
++      int addr;
++      RING_LOCALS;
++
++      sz = header.vpu.count;
++      addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
++
++      if (!sz)
++              return 0;
++      if (sz * 16 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      /* VAP is very sensitive so we purge cache before we program it
++       * and we also flush its state before & after */
++      BEGIN_RING(6);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(R300_RB3D_DC_FLUSH);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++      /* set flush flag */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
++
++      BEGIN_RING(3 + sz * 4);
++      OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
++      OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
++      OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
++      ADVANCE_RING();
++
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * 16;
++      cmdbuf->bufsz -= sz * 16;
++
++      return 0;
++}
++
++/**
++ * Emit a clear packet from userspace.
++ * Called by r300_emit_packet3.
++ */
++static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
++                                    drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      RING_LOCALS;
++
++      if (8 * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      BEGIN_RING(10);
++      OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
++      OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
++               (1 << R300_PRIM_NUM_VERTICES_SHIFT));
++      OUT_RING_TABLE((int *)cmdbuf->buf, 8);
++      ADVANCE_RING();
++
++      BEGIN_RING(4);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(R300_RB3D_DC_FLUSH);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      ADVANCE_RING();
++      /* set flush flag */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
++
++      cmdbuf->buf += 8 * 4;
++      cmdbuf->bufsz -= 8 * 4;
++
++      return 0;
++}
++
++static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
++                                             drm_radeon_kcmd_buffer_t *cmdbuf,
++                                             u32 header)
++{
++      int count, i, k;
++#define MAX_ARRAY_PACKET  64
++      u32 payload[MAX_ARRAY_PACKET];
++      u32 narrays;
++      RING_LOCALS;
++
++      count = (header >> 16) & 0x3fff;
++
++      if ((count + 1) > MAX_ARRAY_PACKET) {
++              DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
++                        count);
++              return -EINVAL;
++      }
++      memset(payload, 0, MAX_ARRAY_PACKET * 4);
++      memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
++
++      /* carefully check packet contents */
++
++      narrays = payload[0];
++      k = 0;
++      i = 1;
++      while ((k < narrays) && (i < (count + 1))) {
++              i++;            /* skip attribute field */
++              if (!radeon_check_offset(dev_priv, payload[i])) {
++                      DRM_ERROR
++                          ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
++                           k, i);
++                      return -EINVAL;
++              }
++              k++;
++              i++;
++              if (k == narrays)
++                      break;
++              /* have one more to process, they come in pairs */
++              if (!radeon_check_offset(dev_priv, payload[i])) {
++                      DRM_ERROR
++                          ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
++                           k, i);
++                      return -EINVAL;
++              }
++              k++;
++              i++;
++      }
++      /* do the counts match what we expect ? */
++      if ((k != narrays) || (i != (count + 1))) {
++              DRM_ERROR
++                  ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
++                   k, i, narrays, count + 1);
++              return -EINVAL;
++      }
++
++      /* all clear, output packet */
++
++      BEGIN_RING(count + 2);
++      OUT_RING(header);
++      OUT_RING_TABLE(payload, count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count + 2) * 4;
++      cmdbuf->bufsz -= (count + 2) * 4;
++
++      return 0;
++}
++
++static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
++                                           drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      u32 *cmd = (u32 *) cmdbuf->buf;
++      int count, ret;
++      RING_LOCALS;
++
++      count=(cmd[0]>>16) & 0x3fff;
++
++      if (cmd[0] & 0x8000) {
++              u32 offset;
++
++              if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
++                            | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[2] << 10;
++                      ret = !radeon_check_offset(dev_priv, offset);
++                      if (ret) {
++                              DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
++                              return -EINVAL;
++                      }
++              }
++
++              if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
++                  (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[3] << 10;
++                      ret = !radeon_check_offset(dev_priv, offset);
++                      if (ret) {
++                              DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
++                              return -EINVAL;
++                      }
++
++              }
++      }
++
++      BEGIN_RING(count+2);
++      OUT_RING(cmd[0]);
++      OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count+2)*4;
++      cmdbuf->bufsz -= (count+2)*4;
++
++      return 0;
++}
++
++static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
++                                          drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      u32 *cmd;
++      int count;
++      int expected_count;
++      RING_LOCALS;
++
++      cmd = (u32 *) cmdbuf->buf;
++      count = (cmd[0]>>16) & 0x3fff;
++      expected_count = cmd[1] >> 16;
++      if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
++              expected_count = (expected_count+1)/2;
++
++      if (count && count != expected_count) {
++              DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
++                      count, expected_count);
++              return -EINVAL;
++      }
++
++      BEGIN_RING(count+2);
++      OUT_RING(cmd[0]);
++      OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count+2)*4;
++      cmdbuf->bufsz -= (count+2)*4;
++
++      if (!count) {
++              drm_r300_cmd_header_t header;
++
++              if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
++                      DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
++                      return -EINVAL;
++              }
++
++              header.u = *(unsigned int *)cmdbuf->buf;
++
++              cmdbuf->buf += sizeof(header);
++              cmdbuf->bufsz -= sizeof(header);
++              cmd = (u32 *) cmdbuf->buf;
++
++              if (header.header.cmd_type != R300_CMD_PACKET3 ||
++                  header.packet3.packet != R300_CMD_PACKET3_RAW ||
++                  cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
++                      DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
++                      return -EINVAL;
++              }
++
++              if ((cmd[1] & 0x8000ffff) != 0x80000810) {
++                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
++                      return -EINVAL;
++              }
++              if (!radeon_check_offset(dev_priv, cmd[2])) {
++                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
++                      return -EINVAL;
++              }
++              if (cmd[3] != expected_count) {
++                      DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
++                              cmd[3], expected_count);
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(4);
++              OUT_RING(cmd[0]);
++              OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
++              ADVANCE_RING();
++
++              cmdbuf->buf += 4*4;
++              cmdbuf->bufsz -= 4*4;
++      }
++
++      return 0;
++}
++
++static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
++                                          drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      u32 header;
++      int count;
++      RING_LOCALS;
++
++      if (4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      /* Fixme !! This simply emits a packet without much checking.
++         We need to be smarter. */
++
++      /* obtain first word - actual packet3 header */
++      header = *(u32 *) cmdbuf->buf;
++
++      /* Is it packet 3 ? */
++      if ((header >> 30) != 0x3) {
++              DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
++              return -EINVAL;
++      }
++
++      count = (header >> 16) & 0x3fff;
++
++      /* Check again now that we know how much data to expect */
++      if ((count + 2) * 4 > cmdbuf->bufsz) {
++              DRM_ERROR
++                  ("Expected packet3 of length %d but have only %d bytes left\n",
++                   (count + 2) * 4, cmdbuf->bufsz);
++              return -EINVAL;
++      }
++
++      /* Is it a packet type we know about ? */
++      switch (header & 0xff00) {
++      case RADEON_3D_LOAD_VBPNTR:     /* load vertex array pointers */
++              return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
++
++      case RADEON_CNTL_BITBLT_MULTI:
++              return r300_emit_bitblt_multi(dev_priv, cmdbuf);
++
++      case RADEON_CP_INDX_BUFFER:
++              DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
++              return -EINVAL;
++      case RADEON_CP_3D_DRAW_IMMD_2:
++              /* triggers drawing using in-packet vertex data */
++      case RADEON_CP_3D_DRAW_VBUF_2:
++              /* triggers drawing of vertex buffers setup elsewhere */
++              dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
++                                         RADEON_PURGE_EMITED);
++              break;
++      case RADEON_CP_3D_DRAW_INDX_2:
++              /* triggers drawing using indices to vertex buffer */
++              /* whenever we send vertex we clear flush & purge */
++              dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
++                                         RADEON_PURGE_EMITED);
++              return r300_emit_draw_indx_2(dev_priv, cmdbuf);
++      case RADEON_WAIT_FOR_IDLE:
++      case RADEON_CP_NOP:
++              /* these packets are safe */
++              break;
++      default:
++              DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
++              return -EINVAL;
++      }
++
++      BEGIN_RING(count + 2);
++      OUT_RING(header);
++      OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count + 2) * 4;
++      cmdbuf->bufsz -= (count + 2) * 4;
++
++      return 0;
++}
++
++/**
++ * Emit a rendering packet3 from userspace.
++ * Called by r300_do_cp_cmdbuf.
++ */
++static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
++                                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                                      drm_r300_cmd_header_t header)
++{
++      int n;
++      int ret;
++      char *orig_buf = cmdbuf->buf;
++      int orig_bufsz = cmdbuf->bufsz;
++
++      /* This is a do-while-loop so that we run the interior at least once,
++       * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
++       */
++      n = 0;
++      do {
++              if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
++                      ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
++                      if (ret)
++                              return ret;
++
++                      cmdbuf->buf = orig_buf;
++                      cmdbuf->bufsz = orig_bufsz;
++              }
++
++              switch (header.packet3.packet) {
++              case R300_CMD_PACKET3_CLEAR:
++                      DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
++                      ret = r300_emit_clear(dev_priv, cmdbuf);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_clear failed\n");
++                              return ret;
++                      }
++                      break;
++
++              case R300_CMD_PACKET3_RAW:
++                      DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
++                      ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_raw_packet3 failed\n");
++                              return ret;
++                      }
++                      break;
++
++              default:
++                      DRM_ERROR("bad packet3 type %i at %p\n",
++                                header.packet3.packet,
++                                cmdbuf->buf - sizeof(header));
++                      return -EINVAL;
++              }
++
++              n += R300_SIMULTANEOUS_CLIPRECTS;
++      } while (n < cmdbuf->nbox);
++
++      return 0;
++}
++
++/* Some of the R300 chips seem to be extremely touchy about the two registers
++ * that are configured in r300_pacify.
++ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
++ * sends a command buffer that contains only state setting commands and a
++ * vertex program/parameter upload sequence, this will eventually lead to a
++ * lockup, unless the sequence is bracketed by calls to r300_pacify.
++ * So we should take great care to *always* call r300_pacify before
++ * *anything* 3D related, and again afterwards. This is what the
++ * call bracket in r300_do_cp_cmdbuf is for.
++ */
++
++/**
++ * Emit the sequence to pacify R300.
++ */
++static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
++{
++      uint32_t cache_z, cache_3d, cache_2d;
++      RING_LOCALS;
++
++      cache_z = R300_ZC_FLUSH;
++      cache_2d = R300_RB2D_DC_FLUSH;
++      cache_3d = R300_RB3D_DC_FLUSH;
++      if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
++              /* we can purge: primitives were drawn since last purge */
++              cache_z |= R300_ZC_FREE;
++              cache_2d |= R300_RB2D_DC_FREE;
++              cache_3d |= R300_RB3D_DC_FREE;
++      }
++
++      /* flush & purge zbuffer */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
++      OUT_RING(cache_z);
++      ADVANCE_RING();
++      /* flush & purge 3d */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(cache_3d);
++      ADVANCE_RING();
++      /* flush & purge texture */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++      /* FIXME: is this one really needed ? */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      ADVANCE_RING();
++      /* flush & purge 2d through E2 as RB2D will trigger lockup */
++      BEGIN_RING(4);
++      OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(cache_2d);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
++               RADEON_WAIT_HOST_IDLECLEAN);
++      ADVANCE_RING();
++      /* set flush & purge flags */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
++}
++
++/**
++ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
++ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
++ * be careful about how this function is called.
++ */
++static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
++
++      buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
++      buf->pending = 1;
++      buf->used = 0;
++}
++
++static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
++                        drm_r300_cmd_header_t header)
++{
++      u32 wait_until;
++      RING_LOCALS;
++
++      if (!header.wait.flags)
++              return;
++
++      wait_until = 0;
++
++      switch(header.wait.flags) {
++      case R300_WAIT_2D:
++              wait_until = RADEON_WAIT_2D_IDLE;
++              break;
++      case R300_WAIT_3D:
++              wait_until = RADEON_WAIT_3D_IDLE;
++              break;
++      case R300_NEW_WAIT_2D_3D:
++              wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
++              break;
++      case R300_NEW_WAIT_2D_2D_CLEAN:
++              wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
++              break;
++      case R300_NEW_WAIT_3D_3D_CLEAN:
++              wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
++              break;
++      case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
++              wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
++              wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
++              break;
++      default:
++              return;
++      }
++
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(wait_until);
++      ADVANCE_RING();
++}
++
++static int r300_scratch(drm_radeon_private_t *dev_priv,
++                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                      drm_r300_cmd_header_t header)
++{
++      u32 *ref_age_base;
++      u32 i, buf_idx, h_pending;
++      RING_LOCALS;
++
++      if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) {
++              return -EINVAL;
++      }
++
++      if (header.scratch.reg >= 5) {
++              return -EINVAL;
++      }
++
++      dev_priv->scratch_ages[header.scratch.reg] ++;
++
++      ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
++
++      cmdbuf->buf += sizeof(uint64_t);
++      cmdbuf->bufsz -= sizeof(uint64_t);
++
++      for (i=0; i < header.scratch.n_bufs; i++) {
++              buf_idx = *(u32 *)cmdbuf->buf;
++              buf_idx *= 2; /* 8 bytes per buf */
++
++              if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
++                      return -EINVAL;
++              }
++
++              if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
++                      return -EINVAL;
++              }
++
++              if (h_pending == 0) {
++                      return -EINVAL;
++              }
++
++              h_pending--;
++
++              if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
++                      return -EINVAL;
++              }
++
++              cmdbuf->buf += sizeof(buf_idx);
++              cmdbuf->bufsz -= sizeof(buf_idx);
++      }
++
++      BEGIN_RING(2);
++      OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
++      OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
++      ADVANCE_RING();
++
++      return 0;
++}
++
++/**
++ * Uploads user-supplied vertex program instructions or parameters onto
++ * the graphics card.
++ * Called by r300_do_cp_cmdbuf.
++ */
++static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
++                                     drm_radeon_kcmd_buffer_t *cmdbuf,
++                                     drm_r300_cmd_header_t header)
++{
++      int sz;
++      int addr;
++      int type;
++      int clamp;
++      int stride;
++      RING_LOCALS;
++
++      sz = header.r500fp.count;
++      /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
++      addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
++
++      type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
++      clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
++
++      addr |= (type << 16);
++      addr |= (clamp << 17);
++
++      stride = type ? 4 : 6;
++
++      DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
++      if (!sz)
++              return 0;
++      if (sz * stride * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      BEGIN_RING(3 + sz * stride);
++      OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
++      OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
++      OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
++
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * stride * 4;
++      cmdbuf->bufsz -= sz * stride * 4;
++
++      return 0;
++}
++
++
++/**
++ * Parses and validates a user-supplied command buffer and emits appropriate
++ * commands on the DMA ring buffer.
++ * Called by the ioctl handler function radeon_cp_cmdbuf.
++ */
++int r300_do_cp_cmdbuf(struct drm_device *dev,
++                    struct drm_file *file_priv,
++                    drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf = NULL;
++      int emit_dispatch_age = 0;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      /* pacify */
++      r300_pacify(dev_priv);
++
++      if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
++              ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
++              if (ret)
++                      goto cleanup;
++      }
++
++      while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
++              int idx;
++              drm_r300_cmd_header_t header;
++
++              header.u = *(unsigned int *)cmdbuf->buf;
++
++              cmdbuf->buf += sizeof(header);
++              cmdbuf->bufsz -= sizeof(header);
++
++              switch (header.header.cmd_type) {
++              case R300_CMD_PACKET0:
++                      ret = r300_emit_packet0(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_packet0 failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_VPU:
++                      DRM_DEBUG("R300_CMD_VPU\n");
++                      ret = r300_emit_vpu(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_vpu failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_PACKET3:
++                      DRM_DEBUG("R300_CMD_PACKET3\n");
++                      ret = r300_emit_packet3(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_packet3 failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_END3D:
++                      DRM_DEBUG("R300_CMD_END3D\n");
++                      /* TODO:
++                         Ideally userspace driver should not need to issue this call,
++                         i.e. the drm driver should issue it automatically and prevent
++                         lockups.
++
++                         In practice, we do not understand why this call is needed and what
++                         it does (except for some vague guesses that it has to do with cache
++                         coherence) and so the user space driver does it.
++
++                         Once we are sure which uses prevent lockups the code could be moved
++                         into the kernel and the userspace driver will not
++                         need to use this command.
++
++                         Note that issuing this command does not hurt anything
++                         except, possibly, performance */
++                      r300_pacify(dev_priv);
++                      break;
++
++              case R300_CMD_CP_DELAY:
++                      /* simple enough, we can do it here */
++                      DRM_DEBUG("R300_CMD_CP_DELAY\n");
++                      {
++                              int i;
++                              RING_LOCALS;
++
++                              BEGIN_RING(header.delay.count);
++                              for (i = 0; i < header.delay.count; i++)
++                                      OUT_RING(RADEON_CP_PACKET2);
++                              ADVANCE_RING();
++                      }
++                      break;
++
++              case R300_CMD_DMA_DISCARD:
++                      DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
++                      idx = header.dma.buf_idx;
++                      if (idx < 0 || idx >= dma->buf_count) {
++                              DRM_ERROR("buffer index %d (of %d max)\n",
++                                        idx, dma->buf_count - 1);
++                              ret = -EINVAL;
++                              goto cleanup;
++                      }
++
++                      buf = dma->buflist[idx];
++                      if (buf->file_priv != file_priv || buf->pending) {
++                              DRM_ERROR("bad buffer %p %p %d\n",
++                                        buf->file_priv, file_priv,
++                                        buf->pending);
++                              ret = -EINVAL;
++                              goto cleanup;
++                      }
++
++                      emit_dispatch_age = 1;
++                      r300_discard_buffer(dev, buf);
++                      break;
++
++              case R300_CMD_WAIT:
++                      DRM_DEBUG("R300_CMD_WAIT\n");
++                      r300_cmd_wait(dev_priv, header);
++                      break;
++
++              case R300_CMD_SCRATCH:
++                      DRM_DEBUG("R300_CMD_SCRATCH\n");
++                      ret = r300_scratch(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_scratch failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_R500FP:
++                      if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
++                              DRM_ERROR("Calling r500 command on r300 card\n");
++                              ret = -EINVAL;
++                              goto cleanup;
++                      }
++                      DRM_DEBUG("R300_CMD_R500FP\n");
++                      ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_r500fp failed\n");
++                              goto cleanup;
++                      }
++                      break;
++              default:
++                      DRM_ERROR("bad cmd_type %i at %p\n",
++                                header.header.cmd_type,
++                                cmdbuf->buf - sizeof(header));
++                      ret = -EINVAL;
++                      goto cleanup;
++              }
++      }
++
++      DRM_DEBUG("END\n");
++
++      cleanup:
++      r300_pacify(dev_priv);
++
++      /* We emit the vertex buffer age here, outside the pacifier "brackets"
++       * for two reasons:
++       *  (1) This may coalesce multiple age emissions into a single one and
++       *  (2) more importantly, some chips lock up hard when scratch registers
++       *      are written inside the pacifier bracket.
++       */
++      if (emit_dispatch_age) {
++              RING_LOCALS;
++
++              /* Emit the vertex buffer age */
++              BEGIN_RING(2);
++              RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
++              ADVANCE_RING();
++      }
++
++      COMMIT_RING();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r300_reg.h git-nokia/drivers/gpu/drm-tungsten/r300_reg.h
+--- git/drivers/gpu/drm-tungsten/r300_reg.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r300_reg.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1778 @@
++/**************************************************************************
++
++Copyright (C) 2004-2005 Nicolai Haehnle et al.
++
++Permission is hereby granted, free of charge, to any person obtaining a
++copy of this software and associated documentation files (the "Software"),
++to deal in the Software without restriction, including without limitation
++on the rights to use, copy, modify, merge, publish, distribute, sub
++license, and/or sell copies of the Software, and to permit persons to whom
++the Software is furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice (including the next
++paragraph) shall be included in all copies or substantial portions of the
++Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
++DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++USE OR OTHER DEALINGS IN THE SOFTWARE.
++
++**************************************************************************/
++
++/* *INDENT-OFF* */
++
++#ifndef _R300_REG_H
++#define _R300_REG_H
++
++#define R300_MC_INIT_MISC_LAT_TIMER   0x180
++#     define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT      0
++#     define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT       4
++#     define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT   8
++#     define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT   12
++#     define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT    16
++#     define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT      20
++#     define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT    24
++#     define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT    28
++
++
++#define R300_MC_INIT_GFX_LAT_TIMER    0x154
++#     define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT    0
++#     define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT    4
++#     define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT    8
++#     define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT    12
++#     define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT     16
++#     define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT     20
++#     define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT    24
++#     define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT    28
++
++/*
++ * This file contains registers and constants for the R300. They have been
++ * found mostly by examining command buffers captured using glxtest, as well
++ * as by extrapolating some known registers and constants from the R200.
++ * I am fairly certain that they are correct unless stated otherwise
++ * in comments.
++ */
++
++#define R300_SE_VPORT_XSCALE                0x1D98
++#define R300_SE_VPORT_XOFFSET               0x1D9C
++#define R300_SE_VPORT_YSCALE                0x1DA0
++#define R300_SE_VPORT_YOFFSET               0x1DA4
++#define R300_SE_VPORT_ZSCALE                0x1DA8
++#define R300_SE_VPORT_ZOFFSET               0x1DAC
++
++
++/*
++ * Vertex Array Processing (VAP) Control
++ * Stolen from r200 code from Christoph Brill (It's a guess!)
++ */
++#define R300_VAP_CNTL 0x2080
++
++/* This register is written directly and also starts data section
++ * in many 3d CP_PACKET3's
++ */
++#define R300_VAP_VF_CNTL      0x2084
++#     define  R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
++#     define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
++
++#     define  R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
++      /* State based - direct writes to registers trigger vertex
++           generation */
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
++
++      /* I don't think I saw these three used.. */
++#     define  R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
++#     define  R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
++#     define  R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
++
++      /* index size - when not set the indices are assumed to be 16 bit */
++#     define  R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
++      /* number of vertices */
++#     define  R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
++
++/* BEGIN: Wild guesses */
++#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
++#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
++#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
++
++#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
++      /* each of the following is 3 bits wide, specifies number
++         of components */
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
++/* END: Wild guesses */
++
++#define R300_SE_VTE_CNTL                  0x20b0
++#     define     R300_VPORT_X_SCALE_ENA                0x00000001
++#     define     R300_VPORT_X_OFFSET_ENA               0x00000002
++#     define     R300_VPORT_Y_SCALE_ENA                0x00000004
++#     define     R300_VPORT_Y_OFFSET_ENA               0x00000008
++#     define     R300_VPORT_Z_SCALE_ENA                0x00000010
++#     define     R300_VPORT_Z_OFFSET_ENA               0x00000020
++#     define     R300_VTX_XY_FMT                       0x00000100
++#     define     R300_VTX_Z_FMT                        0x00000200
++#     define     R300_VTX_W0_FMT                       0x00000400
++#     define     R300_VTX_W0_NORMALIZE                 0x00000800
++#     define     R300_VTX_ST_DENORMALIZED              0x00001000
++
++/* BEGIN: Vertex data assembly - lots of uncertainties */
++
++/* gap */
++
++#define R300_VAP_CNTL_STATUS              0x2140
++#     define R300_VC_NO_SWAP                  (0 << 0)
++#     define R300_VC_16BIT_SWAP               (1 << 0)
++#     define R300_VC_32BIT_SWAP               (2 << 0)
++#     define R300_VAP_TCL_BYPASS              (1 << 8)
++
++/* gap */
++
++/* Where do we get our vertex data?
++ *
++ * Vertex data comes either from immediate mode registers or from
++ * vertex arrays.
++ * There appears to be no mixed mode (though we can force the pitch of
++ * vertex arrays to 0, effectively reusing the same element over and over
++ * again).
++ *
++ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
++ * if these registers influence vertex array processing.
++ *
++ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
++ *
++ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
++ *
++ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
++ * into the vertex processor's input registers.
++ * The first word routes the first input, the second word the second, etc.
++ * The corresponding input is routed into the register with the given index.
++ * The list is ended by a word with INPUT_ROUTE_END set.
++ *
++ * Always set COMPONENTS_4 in immediate mode.
++ */
++
++#define R300_VAP_INPUT_ROUTE_0_0            0x2150
++#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
++#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
++#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
++#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
++#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
++#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
++#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
++#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
++#define R300_VAP_INPUT_ROUTE_0_1            0x2154
++#define R300_VAP_INPUT_ROUTE_0_2            0x2158
++#define R300_VAP_INPUT_ROUTE_0_3            0x215C
++#define R300_VAP_INPUT_ROUTE_0_4            0x2160
++#define R300_VAP_INPUT_ROUTE_0_5            0x2164
++#define R300_VAP_INPUT_ROUTE_0_6            0x2168
++#define R300_VAP_INPUT_ROUTE_0_7            0x216C
++
++/* gap */
++
++/* Notes:
++ *  - always set up to produce at least two attributes:
++ *    if vertex program uses only position, fglrx will set normal, too
++ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
++ */
++#define R300_VAP_INPUT_CNTL_0               0x2180
++#       define R300_INPUT_CNTL_0_COLOR           0x00000001
++#define R300_VAP_INPUT_CNTL_1               0x2184
++#       define R300_INPUT_CNTL_POS               0x00000001
++#       define R300_INPUT_CNTL_NORMAL            0x00000002
++#       define R300_INPUT_CNTL_COLOR             0x00000004
++#       define R300_INPUT_CNTL_TC0               0x00000400
++#       define R300_INPUT_CNTL_TC1               0x00000800
++#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
++#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
++#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
++#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
++#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
++#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
++
++/* gap */
++
++/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
++ * are set to a swizzling bit pattern, other words are 0.
++ *
++ * In immediate mode, the pattern is always set to xyzw. In vertex array
++ * mode, the swizzling pattern is e.g. used to set zw components in texture
++ * coordinates with only two components.
++ */
++#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
++#       define R300_INPUT_ROUTE_SELECT_X    0
++#       define R300_INPUT_ROUTE_SELECT_Y    1
++#       define R300_INPUT_ROUTE_SELECT_Z    2
++#       define R300_INPUT_ROUTE_SELECT_W    3
++#       define R300_INPUT_ROUTE_SELECT_ZERO 4
++#       define R300_INPUT_ROUTE_SELECT_ONE  5
++#       define R300_INPUT_ROUTE_SELECT_MASK 7
++#       define R300_INPUT_ROUTE_X_SHIFT     0
++#       define R300_INPUT_ROUTE_Y_SHIFT     3
++#       define R300_INPUT_ROUTE_Z_SHIFT     6
++#       define R300_INPUT_ROUTE_W_SHIFT     9
++#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
++#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
++#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
++#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
++#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
++#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
++#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
++#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
++
++/* END: Vertex data assembly */
++
++/* gap */
++
++/* BEGIN: Upload vertex program and data */
++
++/*
++ * The programmable vertex shader unit has a memory bank of unknown size
++ * that can be written to in 16 byte units by writing the address into
++ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
++ *
++ * Pointers into the memory bank are always in multiples of 16 bytes.
++ *
++ * The memory bank is divided into areas with fixed meaning.
++ *
++ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
++ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
++ * whereas the difference between known addresses suggests size 512.
++ *
++ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
++ * Native reported limits and the VPI layout suggest size 256, whereas
++ * difference between known addresses suggests size 512.
++ *
++ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
++ * floating point pointsize. The exact purpose of this state is uncertain,
++ * as there is also the R300_RE_POINTSIZE register.
++ *
++ * Multiple vertex programs and parameter sets can be loaded at once,
++ * which could explain the size discrepancy.
++ */
++#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
++#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
++#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
++#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
++
++/* gap */
++
++#define R300_VAP_PVS_UPLOAD_DATA            0x2208
++
++/* END: Upload vertex program and data */
++
++/* gap */
++
++/* I do not know the purpose of this register. However, I do know that
++ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
++ * for normal rendering.
++ */
++#define R300_VAP_UNKNOWN_221C               0x221C
++#       define R300_221C_NORMAL                  0x00000000
++#       define R300_221C_CLEAR                   0x0001C000
++
++/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
++ * plane is per-pixel and the second plane is per-vertex.
++ *
++ * This was determined by experimentation alone but I believe it is correct.
++ *
++ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
++ */
++#define R300_VAP_CLIP_X_0                   0x2220
++#define R300_VAP_CLIP_X_1                   0x2224
++#define R300_VAP_CLIP_Y_0                   0x2228
++#define R300_VAP_CLIP_Y_1                   0x2230
++
++/* gap */
++
++/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
++ * rendering commands and overwriting vertex program parameters.
++ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
++ * avoids bugs caused by still running shaders reading bad data from memory.
++ */
++#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
++
++/* Absolutely no clue what this register is about. */
++#define R300_VAP_UNKNOWN_2288               0x2288
++#       define R300_2288_R300                    0x00750000 /* -- nh */
++#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
++
++/* gap */
++
++/* Addresses are relative to the vertex program instruction area of the
++ * memory bank. PROGRAM_END points to the last instruction of the active
++ * program.
++ *
++ * The meaning of the two UNKNOWN fields is obviously not known. However,
++ * experiments so far have shown that both *must* point to an instruction
++ * inside the vertex program, otherwise the GPU locks up.
++ *
++ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
++ * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to
++ * position takes place.
++ *
++ * Most likely this is used to ignore the rest of the program in cases
++ * where a group of vertices is not visible. For some reason this "section"
++ * sometimes also accepts other instructions that have no relationship with
++ * position calculations.
++ */
++#define R300_VAP_PVS_CNTL_1                 0x22D0
++#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
++#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
++#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
++/* Addresses are relative to the vertex program parameters area. */
++#define R300_VAP_PVS_CNTL_2                 0x22D4
++#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
++#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
++#define R300_VAP_PVS_CNTL_3              0x22D8
++#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
++#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
++
++/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
++ * immediate vertices
++ */
++#define R300_VAP_VTX_COLOR_R                0x2464
++#define R300_VAP_VTX_COLOR_G                0x2468
++#define R300_VAP_VTX_COLOR_B                0x246C
++#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
++#define R300_VAP_VTX_POS_0_Y_1              0x2494
++#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
++#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
++#define R300_VAP_VTX_POS_0_Y_2              0x24A4
++#define R300_VAP_VTX_POS_0_Z_2              0x24A8
++/* write 0 to indicate end of packet? */
++#define R300_VAP_VTX_END_OF_PKT             0x24AC
++
++/* gap */
++
++/* These are values from r300_reg/r300_reg.h - they are known to be correct
++ * and are here so we can use one register file instead of several
++ * - Vladimir
++ */
++#define R300_GB_VAP_RASTER_VTX_FMT_0  0x4000
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT        (1<<0)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT    (1<<1)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT    (1<<2)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT    (1<<3)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT    (1<<4)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE        (0xf<<5)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT    (0x1<<16)
++
++#define R300_GB_VAP_RASTER_VTX_FMT_1  0x4004
++      /* each of the following is 3 bits wide, specifies number
++         of components */
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT       0
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT       3
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT       6
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT       9
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT       12
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT       15
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT       18
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT       21
++
++/* UNK30 seems to enable point-to-quad transformation on textures
++ * (or something closely related to that).
++ * This bit is rather fatal at the time being due to shortcomings on the
++ * pixel shader side.
++ */
++#define R300_GB_ENABLE        0x4008
++#     define R300_GB_POINT_STUFF_ENABLE       (1<<0)
++#     define R300_GB_LINE_STUFF_ENABLE        (1<<1)
++#     define R300_GB_TRIANGLE_STUFF_ENABLE    (1<<2)
++#     define R300_GB_STENCIL_AUTO_ENABLE      (1<<4)
++#     define R300_GB_UNK31                    (1<<31)
++      /* each of the following is 2 bits wide */
++#define R300_GB_TEX_REPLICATE 0
++#define R300_GB_TEX_ST                1
++#define R300_GB_TEX_STR               2
++#     define R300_GB_TEX0_SOURCE_SHIFT        16
++#     define R300_GB_TEX1_SOURCE_SHIFT        18
++#     define R300_GB_TEX2_SOURCE_SHIFT        20
++#     define R300_GB_TEX3_SOURCE_SHIFT        22
++#     define R300_GB_TEX4_SOURCE_SHIFT        24
++#     define R300_GB_TEX5_SOURCE_SHIFT        26
++#     define R300_GB_TEX6_SOURCE_SHIFT        28
++#     define R300_GB_TEX7_SOURCE_SHIFT        30
++
++/* MSPOS - positions for multisample antialiasing (?) */
++#define R300_GB_MSPOS0        0x4010
++      /* shifts - each of the fields is 4 bits */
++#     define R300_GB_MSPOS0__MS_X0_SHIFT      0
++#     define R300_GB_MSPOS0__MS_Y0_SHIFT      4
++#     define R300_GB_MSPOS0__MS_X1_SHIFT      8
++#     define R300_GB_MSPOS0__MS_Y1_SHIFT      12
++#     define R300_GB_MSPOS0__MS_X2_SHIFT      16
++#     define R300_GB_MSPOS0__MS_Y2_SHIFT      20
++#     define R300_GB_MSPOS0__MSBD0_Y          24
++#     define R300_GB_MSPOS0__MSBD0_X          28
++
++#define R300_GB_MSPOS1        0x4014
++#     define R300_GB_MSPOS1__MS_X3_SHIFT      0
++#     define R300_GB_MSPOS1__MS_Y3_SHIFT      4
++#     define R300_GB_MSPOS1__MS_X4_SHIFT      8
++#     define R300_GB_MSPOS1__MS_Y4_SHIFT      12
++#     define R300_GB_MSPOS1__MS_X5_SHIFT      16
++#     define R300_GB_MSPOS1__MS_Y5_SHIFT      20
++#     define R300_GB_MSPOS1__MSBD1            24
++
++
++#define R300_GB_TILE_CONFIG   0x4018
++#     define R300_GB_TILE_ENABLE      (1<<0)
++#     define R300_GB_TILE_PIPE_COUNT_RV300    0
++#     define R300_GB_TILE_PIPE_COUNT_R300     (3<<1)
++#     define R300_GB_TILE_PIPE_COUNT_R420     (7<<1)
++#     define R300_GB_TILE_PIPE_COUNT_RV410    (3<<1)
++#     define R300_GB_TILE_SIZE_8              0
++#     define R300_GB_TILE_SIZE_16             (1<<4)
++#     define R300_GB_TILE_SIZE_32             (2<<4)
++#     define R300_GB_SUPER_SIZE_1             (0<<6)
++#     define R300_GB_SUPER_SIZE_2             (1<<6)
++#     define R300_GB_SUPER_SIZE_4             (2<<6)
++#     define R300_GB_SUPER_SIZE_8             (3<<6)
++#     define R300_GB_SUPER_SIZE_16            (4<<6)
++#     define R300_GB_SUPER_SIZE_32            (5<<6)
++#     define R300_GB_SUPER_SIZE_64            (6<<6)
++#     define R300_GB_SUPER_SIZE_128           (7<<6)
++#     define R300_GB_SUPER_X_SHIFT            9       /* 3 bits wide */
++#     define R300_GB_SUPER_Y_SHIFT            12      /* 3 bits wide */
++#     define R300_GB_SUPER_TILE_A             0
++#     define R300_GB_SUPER_TILE_B             (1<<15)
++#     define R300_GB_SUBPIXEL_1_12            0
++#     define R300_GB_SUBPIXEL_1_16            (1<<16)
++
++#define R300_GB_FIFO_SIZE     0x4024
++      /* each of the following is 2 bits wide */
++#define R300_GB_FIFO_SIZE_32  0
++#define R300_GB_FIFO_SIZE_64  1
++#define R300_GB_FIFO_SIZE_128 2
++#define R300_GB_FIFO_SIZE_256 3
++#     define R300_SC_IFIFO_SIZE_SHIFT 0
++#     define R300_SC_TZFIFO_SIZE_SHIFT        2
++#     define R300_SC_BFIFO_SIZE_SHIFT 4
++
++#     define R300_US_OFIFO_SIZE_SHIFT 12
++#     define R300_US_WFIFO_SIZE_SHIFT 14
++      /* the following use the same constants as above, but the meaning
++         is times 2 (i.e. instead of 32 words it means 64) */
++#     define R300_RS_TFIFO_SIZE_SHIFT 6
++#     define R300_RS_CFIFO_SIZE_SHIFT 8
++#     define R300_US_RAM_SIZE_SHIFT           10
++      /* watermarks, 3 bits wide */
++#     define R300_RS_HIGHWATER_COL_SHIFT      16
++#     define R300_RS_HIGHWATER_TEX_SHIFT      19
++#     define R300_OFIFO_HIGHWATER_SHIFT       22      /* two bits only */
++#     define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT       24
++
++#define R300_GB_SELECT        0x401C
++#     define R300_GB_FOG_SELECT_C0A           0
++#     define R300_GB_FOG_SELECT_C1A           1
++#     define R300_GB_FOG_SELECT_C2A           2
++#     define R300_GB_FOG_SELECT_C3A           3
++#     define R300_GB_FOG_SELECT_1_1_W 4
++#     define R300_GB_FOG_SELECT_Z             5
++#     define R300_GB_DEPTH_SELECT_Z           0
++#     define R300_GB_DEPTH_SELECT_1_1_W       (1<<3)
++#     define R300_GB_W_SELECT_1_W             0
++#     define R300_GB_W_SELECT_1               (1<<4)
++
++#define R300_GB_AA_CONFIG             0x4020
++#     define R300_AA_DISABLE                  0x00
++#     define R300_AA_ENABLE                   0x01
++#     define R300_AA_SUBSAMPLES_2             0
++#     define R300_AA_SUBSAMPLES_3             (1<<1)
++#     define R300_AA_SUBSAMPLES_4             (2<<1)
++#     define R300_AA_SUBSAMPLES_6             (3<<1)
++
++/* gap */
++
++/* Zero to flush caches. */
++#define R300_TX_INVALTAGS                   0x4100
++#define R300_TX_FLUSH                       0x0
++
++/* The upper enable bits are guessed, based on fglrx reported limits. */
++#define R300_TX_ENABLE                      0x4104
++#       define R300_TX_ENABLE_0                  (1 << 0)
++#       define R300_TX_ENABLE_1                  (1 << 1)
++#       define R300_TX_ENABLE_2                  (1 << 2)
++#       define R300_TX_ENABLE_3                  (1 << 3)
++#       define R300_TX_ENABLE_4                  (1 << 4)
++#       define R300_TX_ENABLE_5                  (1 << 5)
++#       define R300_TX_ENABLE_6                  (1 << 6)
++#       define R300_TX_ENABLE_7                  (1 << 7)
++#       define R300_TX_ENABLE_8                  (1 << 8)
++#       define R300_TX_ENABLE_9                  (1 << 9)
++#       define R300_TX_ENABLE_10                 (1 << 10)
++#       define R300_TX_ENABLE_11                 (1 << 11)
++#       define R300_TX_ENABLE_12                 (1 << 12)
++#       define R300_TX_ENABLE_13                 (1 << 13)
++#       define R300_TX_ENABLE_14                 (1 << 14)
++#       define R300_TX_ENABLE_15                 (1 << 15)
++
++/* The pointsize is given in multiples of 6. The pointsize can be
++ * enormous: Clear() renders a single point that fills the entire
++ * framebuffer.
++ */
++#define R300_RE_POINTSIZE                   0x421C
++#       define R300_POINTSIZE_Y_SHIFT            0
++#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
++#       define R300_POINTSIZE_X_SHIFT            16
++#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
++#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
++
++/* The line width is given in multiples of 6.
++ * In default mode lines are classified as vertical lines.
++ * HO: horizontal
++ * VE: vertical or horizontal
++ * HO & VE: no classification
++ */
++#define R300_RE_LINE_CNT                      0x4234
++#       define R300_LINESIZE_SHIFT            0
++#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
++#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
++#       define R300_LINE_CNT_HO               (1 << 16)
++#       define R300_LINE_CNT_VE               (1 << 17)
++
++/* Some sort of scale or clamp value for texcoordless textures. */
++#define R300_RE_UNK4238                       0x4238
++
++/* Something shade related */
++#define R300_RE_SHADE                         0x4274
++
++#define R300_RE_SHADE_MODEL                   0x4278
++#     define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
++#     define R300_RE_SHADE_MODEL_FLAT       0x39595
++
++/* Dangerous */
++#define R300_RE_POLYGON_MODE                  0x4288
++#     define R300_PM_ENABLED                (1 << 0)
++#     define R300_PM_FRONT_POINT            (0 << 0)
++#     define R300_PM_BACK_POINT             (0 << 0)
++#     define R300_PM_FRONT_LINE             (1 << 4)
++#     define R300_PM_FRONT_FILL             (1 << 5)
++#     define R300_PM_BACK_LINE              (1 << 7)
++#     define R300_PM_BACK_FILL              (1 << 8)
++
++/* Fog parameters */
++#define R300_RE_FOG_SCALE                     0x4294
++#define R300_RE_FOG_START                     0x4298
++
++/* Not sure why there are duplicates of the factor and constant values.
++ * My best guess so far is that there are separate zbiases for test and write.
++ * Ordering might be wrong.
++ * Some of the tests indicate that fgl has a fallback implementation of zbias
++ * via pixel shaders.
++ */
++#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
++#define R300_RE_ZBIAS_T_FACTOR                0x42A4
++#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
++#define R300_RE_ZBIAS_W_FACTOR                0x42AC
++#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
++
++/* This register needs to be set to (1<<1) for RV350 to correctly
++ * perform depth test (see --vb-triangles in r300_demo)
++ * Don't know about other chips. - Vladimir
++ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
++ * My guess is that there are two bits for each zbias primitive
++ * (FILL, LINE, POINT).
++ *  One to enable depth test and one for depth write.
++ * Yet this doesn't explain why depth writes work ...
++ */
++#define R300_RE_OCCLUSION_CNTL                    0x42B4
++#     define R300_OCCLUSION_ON                (1<<1)
++
++#define R300_RE_CULL_CNTL                   0x42B8
++#       define R300_CULL_FRONT                   (1 << 0)
++#       define R300_CULL_BACK                    (1 << 1)
++#       define R300_FRONT_FACE_CCW               (0 << 2)
++#       define R300_FRONT_FACE_CW                (1 << 2)
++
++
++/* BEGIN: Rasterization / Interpolators - many guesses */
++
++/* 0_UNKNOWN_18 has always been set except for clear operations.
++ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
++ * on the vertex program, *not* the fragment program)
++ */
++#define R300_RS_CNTL_0                      0x4300
++#       define R300_RS_CNTL_TC_CNT_SHIFT         2
++#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
++      /* number of color interpolators used */
++#     define R300_RS_CNTL_CI_CNT_SHIFT         7
++#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
++      /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
++         register. */
++#define R300_RS_CNTL_1                      0x4304
++
++/* gap */
++
++/* Only used for texture coordinates.
++ * Use the source field to route texture coordinate input from the
++ * vertex program to the desired interpolator. Note that the source
++ * field is relative to the outputs the vertex program *actually*
++ * writes. If a vertex program only writes texcoord[1], this will
++ * be source index 0.
++ * Set INTERP_USED on all interpolators that produce data used by
++ * the fragment program. INTERP_USED looks like a swizzling mask,
++ * but I haven't seen it used that way.
++ *
++ * Note: The _UNKNOWN constants are always set in their respective
++ * register. I don't know if this is necessary.
++ */
++#define R300_RS_INTERP_0                    0x4310
++#define R300_RS_INTERP_1                    0x4314
++#       define R300_RS_INTERP_1_UNKNOWN          0x40
++#define R300_RS_INTERP_2                    0x4318
++#       define R300_RS_INTERP_2_UNKNOWN          0x80
++#define R300_RS_INTERP_3                    0x431C
++#       define R300_RS_INTERP_3_UNKNOWN          0xC0
++#define R300_RS_INTERP_4                    0x4320
++#define R300_RS_INTERP_5                    0x4324
++#define R300_RS_INTERP_6                    0x4328
++#define R300_RS_INTERP_7                    0x432C
++#       define R300_RS_INTERP_SRC_SHIFT          2
++#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
++#       define R300_RS_INTERP_USED               0x00D10000
++
++/* These DWORDs control how vertex data is routed into fragment program
++ * registers, after interpolators.
++ */
++#define R300_RS_ROUTE_0                     0x4330
++#define R300_RS_ROUTE_1                     0x4334
++#define R300_RS_ROUTE_2                     0x4338
++#define R300_RS_ROUTE_3                     0x433C /* GUESS */
++#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
++#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
++#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
++#define R300_RS_ROUTE_7                     0x434C /* GUESS */
++#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
++#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
++#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
++#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
++#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
++#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
++#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
++#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
++#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
++#       define R300_RS_ROUTE_DEST_SHIFT          6
++#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
++
++/* Special handling for color: When the fragment program uses color,
++ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
++ * color register index.
++ *
++ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
++ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
++ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
++ * correct or not. - Oliver.
++ */
++#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
++#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
++#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
++/* As above, but for secondary color */
++#             define R300_RS_ROUTE_1_COLOR1            (1 << 14)
++#             define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
++#             define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
++#             define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
++/* END: Rasterization / Interpolators - many guesses */
++
++/* Hierarchical Z Enable */
++#define R300_SC_HYPERZ                   0x43a4
++#     define R300_SC_HYPERZ_DISABLE     (0 << 0)
++#     define R300_SC_HYPERZ_ENABLE      (1 << 0)
++#     define R300_SC_HYPERZ_MIN         (0 << 1)
++#     define R300_SC_HYPERZ_MAX         (1 << 1)
++#     define R300_SC_HYPERZ_ADJ_256     (0 << 2)
++#     define R300_SC_HYPERZ_ADJ_128     (1 << 2)
++#     define R300_SC_HYPERZ_ADJ_64      (2 << 2)
++#     define R300_SC_HYPERZ_ADJ_32      (3 << 2)
++#     define R300_SC_HYPERZ_ADJ_16      (4 << 2)
++#     define R300_SC_HYPERZ_ADJ_8       (5 << 2)
++#     define R300_SC_HYPERZ_ADJ_4       (6 << 2)
++#     define R300_SC_HYPERZ_ADJ_2       (7 << 2)
++#     define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
++#     define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
++#     define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
++#     define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
++
++#define R300_SC_EDGERULE                 0x43a8
++
++/* BEGIN: Scissors and cliprects */
++
++/* There are four clipping rectangles. Their corner coordinates are inclusive.
++ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
++ * on whether the pixel is inside cliprects 0-3, respectively. For example,
++ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
++ * the number 3 (binary 0011).
++ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
++ * the pixel is rasterized.
++ *
++ * In addition to this, there is a scissors rectangle. Only pixels inside the
++ * scissors rectangle are drawn. (coordinates are inclusive)
++ *
++ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
++ * for the purpose of clipping and scissors.
++ */
++#define R300_RE_CLIPRECT_TL_0               0x43B0
++#define R300_RE_CLIPRECT_BR_0               0x43B4
++#define R300_RE_CLIPRECT_TL_1               0x43B8
++#define R300_RE_CLIPRECT_BR_1               0x43BC
++#define R300_RE_CLIPRECT_TL_2               0x43C0
++#define R300_RE_CLIPRECT_BR_2               0x43C4
++#define R300_RE_CLIPRECT_TL_3               0x43C8
++#define R300_RE_CLIPRECT_BR_3               0x43CC
++#       define R300_CLIPRECT_OFFSET              1440
++#       define R300_CLIPRECT_MASK                0x1FFF
++#       define R300_CLIPRECT_X_SHIFT             0
++#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
++#       define R300_CLIPRECT_Y_SHIFT             13
++#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
++#define R300_RE_CLIPRECT_CNTL               0x43D0
++#       define R300_CLIP_OUT                     (1 << 0)
++#       define R300_CLIP_0                       (1 << 1)
++#       define R300_CLIP_1                       (1 << 2)
++#       define R300_CLIP_10                      (1 << 3)
++#       define R300_CLIP_2                       (1 << 4)
++#       define R300_CLIP_20                      (1 << 5)
++#       define R300_CLIP_21                      (1 << 6)
++#       define R300_CLIP_210                     (1 << 7)
++#       define R300_CLIP_3                       (1 << 8)
++#       define R300_CLIP_30                      (1 << 9)
++#       define R300_CLIP_31                      (1 << 10)
++#       define R300_CLIP_310                     (1 << 11)
++#       define R300_CLIP_32                      (1 << 12)
++#       define R300_CLIP_320                     (1 << 13)
++#       define R300_CLIP_321                     (1 << 14)
++#       define R300_CLIP_3210                    (1 << 15)
++
++/* gap */
++
++#define R300_RE_SCISSORS_TL                 0x43E0
++#define R300_RE_SCISSORS_BR                 0x43E4
++#       define R300_SCISSORS_OFFSET              1440
++#       define R300_SCISSORS_X_SHIFT             0
++#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
++#       define R300_SCISSORS_Y_SHIFT             13
++#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
++/* END: Scissors and cliprects */
++
++/* BEGIN: Texture specification */
++
++/*
++ * The texture specification dwords are grouped by meaning and not by texture
++ * unit. This means that e.g. the offset for texture image unit N is found in
++ * register TX_OFFSET_0 + (4*N)
++ */
++#define R300_TX_FILTER_0                    0x4400
++#       define R300_TX_REPEAT                    0
++#       define R300_TX_MIRRORED                  1
++#       define R300_TX_CLAMP                     4
++#       define R300_TX_CLAMP_TO_EDGE             2
++#       define R300_TX_CLAMP_TO_BORDER           6
++#       define R300_TX_WRAP_S_SHIFT              0
++#       define R300_TX_WRAP_S_MASK               (7 << 0)
++#       define R300_TX_WRAP_T_SHIFT              3
++#       define R300_TX_WRAP_T_MASK               (7 << 3)
++#       define R300_TX_WRAP_Q_SHIFT              6
++#       define R300_TX_WRAP_Q_MASK               (7 << 6)
++#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
++#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
++#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
++#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
++#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
++#     define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
++#     define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
++#     define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
++#     define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
++
++/* NOTE: NEAREST doesn't seem to exist.
++ * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
++ * anisotropy modes because that would void the selected mag filter
++ */
++#     define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
++#     define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
++#     define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
++#     define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
++#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
++#     define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
++#     define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
++#     define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
++#     define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
++#     define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
++#     define R300_TX_MAX_ANISO_MASK    (14 << 21)
++
++#define R300_TX_FILTER1_0                      0x4440
++#     define R300_CHROMA_KEY_MODE_DISABLE    0
++#     define R300_CHROMA_KEY_FORCE           1
++#     define R300_CHROMA_KEY_BLEND           2
++#     define R300_MC_ROUND_NORMAL            (0<<2)
++#     define R300_MC_ROUND_MPEG4             (1<<2)
++#     define R300_LOD_BIAS_MASK           0x1fff
++#     define R300_EDGE_ANISO_EDGE_DIAG       (0<<13)
++#     define R300_EDGE_ANISO_EDGE_ONLY       (1<<13)
++#     define R300_MC_COORD_TRUNCATE_DISABLE  (0<<14)
++#     define R300_MC_COORD_TRUNCATE_MPEG     (1<<14)
++#     define R300_TX_TRI_PERF_0_8            (0<<15)
++#     define R300_TX_TRI_PERF_1_8            (1<<15)
++#     define R300_TX_TRI_PERF_1_4            (2<<15)
++#     define R300_TX_TRI_PERF_3_8            (3<<15)
++#     define R300_ANISO_THRESHOLD_MASK       (7<<17)
++
++#define R300_TX_SIZE_0                      0x4480
++#       define R300_TX_WIDTHMASK_SHIFT           0
++#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
++#       define R300_TX_HEIGHTMASK_SHIFT          11
++#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
++#       define R300_TX_UNK23                     (1 << 23)
++#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
++#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
++#       define R300_TX_SIZE_PROJECTED            (1<<30)
++#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
++#define R300_TX_FORMAT_0                    0x44C0
++      /* The interpretation of the format word by Wladimir van der Laan */
++      /* The X, Y, Z and W refer to the layout of the components.
++         They are given meanings as R, G, B and Alpha by the swizzle
++         specification */
++#     define R300_TX_FORMAT_X8                    0x0
++#     define R300_TX_FORMAT_X16                   0x1
++#     define R300_TX_FORMAT_Y4X4                  0x2
++#     define R300_TX_FORMAT_Y8X8                  0x3
++#     define R300_TX_FORMAT_Y16X16                0x4
++#     define R300_TX_FORMAT_Z3Y3X2                0x5
++#     define R300_TX_FORMAT_Z5Y6X5                0x6
++#     define R300_TX_FORMAT_Z6Y5X5                0x7
++#     define R300_TX_FORMAT_Z11Y11X10             0x8
++#     define R300_TX_FORMAT_Z10Y11X11             0x9
++#     define R300_TX_FORMAT_W4Z4Y4X4              0xA
++#     define R300_TX_FORMAT_W1Z5Y5X5              0xB
++#     define R300_TX_FORMAT_W8Z8Y8X8              0xC
++#     define R300_TX_FORMAT_W2Z10Y10X10           0xD
++#     define R300_TX_FORMAT_W16Z16Y16X16          0xE
++#     define R300_TX_FORMAT_DXT1                  0xF
++#     define R300_TX_FORMAT_DXT3                  0x10
++#     define R300_TX_FORMAT_DXT5                  0x11
++#     define R300_TX_FORMAT_D3DMFT_CxV8U8         0x12     /* no swizzle */
++#     define R300_TX_FORMAT_A8R8G8B8              0x13     /* no swizzle */
++#     define R300_TX_FORMAT_B8G8_B8G8             0x14     /* no swizzle */
++#     define R300_TX_FORMAT_G8R8_G8B8             0x15     /* no swizzle */
++      /* 0x16 - some 16 bit green format.. ?? */
++#     define R300_TX_FORMAT_UNK25                (1 << 25) /* no swizzle */
++#     define R300_TX_FORMAT_CUBIC_MAP            (1 << 26)
++
++      /* gap */
++      /* Floating point formats */
++      /* Note - hardware supports both 16 and 32 bit floating point */
++#     define R300_TX_FORMAT_FL_I16                0x18
++#     define R300_TX_FORMAT_FL_I16A16             0x19
++#     define R300_TX_FORMAT_FL_R16G16B16A16       0x1A
++#     define R300_TX_FORMAT_FL_I32                0x1B
++#     define R300_TX_FORMAT_FL_I32A32             0x1C
++#     define R300_TX_FORMAT_FL_R32G32B32A32       0x1D
++      /* alpha modes, convenience mostly */
++      /* if you have alpha, pick constant appropriate to the
++         number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
++#     define R300_TX_FORMAT_ALPHA_1CH             0x000
++#     define R300_TX_FORMAT_ALPHA_2CH             0x200
++#     define R300_TX_FORMAT_ALPHA_4CH             0x600
++#     define R300_TX_FORMAT_ALPHA_NONE            0xA00
++      /* Swizzling */
++      /* constants */
++#     define R300_TX_FORMAT_X         0
++#     define R300_TX_FORMAT_Y         1
++#     define R300_TX_FORMAT_Z         2
++#     define R300_TX_FORMAT_W         3
++#     define R300_TX_FORMAT_ZERO      4
++#     define R300_TX_FORMAT_ONE       5
++      /* 2.0*Z, everything above 1.0 is set to 0.0 */
++#     define R300_TX_FORMAT_CUT_Z     6
++      /* 2.0*W, everything above 1.0 is set to 0.0 */
++#     define R300_TX_FORMAT_CUT_W     7
++
++#     define R300_TX_FORMAT_B_SHIFT   18
++#     define R300_TX_FORMAT_G_SHIFT   15
++#     define R300_TX_FORMAT_R_SHIFT   12
++#     define R300_TX_FORMAT_A_SHIFT   9
++      /* Convenience macro to take care of layout and swizzling */
++#     define R300_EASY_TX_FORMAT(B, G, R, A, FMT)     (               \
++              ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)          \
++              | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)        \
++              | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)        \
++              | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)        \
++              | (R300_TX_FORMAT_##FMT)                                \
++              )
++      /* These can be ORed with result of R300_EASY_TX_FORMAT()
++         We don't really know what they do. Take values from a
++           constant color ? */
++#     define R300_TX_FORMAT_CONST_X           (1<<5)
++#     define R300_TX_FORMAT_CONST_Y           (2<<5)
++#     define R300_TX_FORMAT_CONST_Z           (4<<5)
++#     define R300_TX_FORMAT_CONST_W           (8<<5)
++
++#     define R300_TX_FORMAT_YUV_MODE          0x00800000
++
++#define R300_TX_PITCH_0                           0x4500 /* obvious missing in gap */
++#define R300_TX_OFFSET_0                    0x4540
++      /* BEGIN: Guess from R200 */
++#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
++#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
++#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
++#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
++#       define R300_TXO_MACRO_TILE               (1 << 2)
++#       define R300_TXO_MICRO_TILE               (1 << 3)
++#       define R300_TXO_OFFSET_MASK              0xffffffe0
++#       define R300_TXO_OFFSET_SHIFT             5
++      /* END: Guess from R200 */
++
++/* 32 bit chroma key */
++#define R300_TX_CHROMA_KEY_0                      0x4580
++/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
++#define R300_TX_BORDER_COLOR_0              0x45C0
++
++/* END: Texture specification */
++
++/* BEGIN: Fragment program instruction set */
++
++/* Fragment programs are written directly into register space.
++ * There are separate instruction streams for texture instructions and ALU
++ * instructions.
++ * In order to synchronize these streams, the program is divided into up
++ * to 4 nodes. Each node begins with a number of TEX operations, followed
++ * by a number of ALU operations.
++ * The first node can have zero TEX ops, all subsequent nodes must have at
++ * least
++ * one TEX ops.
++ * All nodes must have at least one ALU op.
++ *
++ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
++ * 1 node, a value of 3 means 4 nodes.
++ * The total amount of instructions is defined in PFS_CNTL_2. The offsets are
++ * offsets into the respective instruction streams, while *_END points to the
++ * last instruction relative to this offset.
++ */
++#define R300_PFS_CNTL_0                     0x4600
++#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
++#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
++#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
++#define R300_PFS_CNTL_1                     0x4604
++/* There is an unshifted value here which has so far always been equal to the
++ * index of the highest used temporary register.
++ */
++#define R300_PFS_CNTL_2                     0x4608
++#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
++#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
++#       define R300_PFS_CNTL_ALU_END_SHIFT       6
++#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
++#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
++#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
++#       define R300_PFS_CNTL_TEX_END_SHIFT       18
++#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
++
++/* gap */
++
++/* Nodes are stored backwards. The last active node is always stored in
++ * PFS_NODE_3.
++ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
++ * first node is stored in NODE_2, the second node is stored in NODE_3.
++ *
++ * Offsets are relative to the master offset from PFS_CNTL_2.
++ */
++#define R300_PFS_NODE_0                     0x4610
++#define R300_PFS_NODE_1                     0x4614
++#define R300_PFS_NODE_2                     0x4618
++#define R300_PFS_NODE_3                     0x461C
++#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
++#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
++#       define R300_PFS_NODE_ALU_END_SHIFT       6
++#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
++#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
++#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
++#       define R300_PFS_NODE_TEX_END_SHIFT       17
++#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
++#             define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
++#             define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
++
++/* TEX
++ * As far as I can tell, texture instructions cannot write into output
++ * registers directly. A subsequent ALU instruction is always necessary,
++ * even if it's just MAD o0, r0, 1, 0
++ */
++#define R300_PFS_TEXI_0                     0x4620
++#     define R300_FPITX_SRC_SHIFT              0
++#     define R300_FPITX_SRC_MASK               (31 << 0)
++      /* GUESS */
++#     define R300_FPITX_SRC_CONST              (1 << 5)
++#     define R300_FPITX_DST_SHIFT              6
++#     define R300_FPITX_DST_MASK               (31 << 6)
++#     define R300_FPITX_IMAGE_SHIFT            11
++      /* GUESS based on layout and native limits */
++#       define R300_FPITX_IMAGE_MASK             (15 << 11)
++/* Unsure if these are opcodes, or some kind of bitfield, but this is how
++ * they were set when I checked
++ */
++#     define R300_FPITX_OPCODE_SHIFT          15
++#             define R300_FPITX_OP_TEX        1
++#             define R300_FPITX_OP_KIL        2
++#             define R300_FPITX_OP_TXP        3
++#             define R300_FPITX_OP_TXB        4
++#     define R300_FPITX_OPCODE_MASK           (7 << 15)
++
++/* ALU
++ * The ALU instructions register blocks are enumerated according to the order
++ * in which fglrx programs them. I assume there is space for 64 instructions, since
++ * each block has space for a maximum of 64 DWORDs, and this matches reported
++ * native limits.
++ *
++ * The basic functional block seems to be one MAD for each color and alpha,
++ * and an adder that adds all components after the MUL.
++ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
++ *  - DP4: Use OUTC_DP4, OUTA_DP4
++ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
++ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
++ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
++ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
++ *  - FLR: use FRC+MAD
++ *  - XPD: use MAD+MAD
++ *  - SGE, SLT: use MAD+CMP
++ *  - RSQ: use ABS modifier for argument
++ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
++ *    (e.g. RCP) into color register
++ *  - apparently, there's no quick DST operation
++ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
++ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
++ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
++ *
++ * Operand selection
++ * First stage selects three sources from the available registers and
++ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
++ * fglrx sorts the three source fields: Registers before constants,
++ * lower indices before higher indices; I do not know whether this is
++ * necessary.
++ *
++ * fglrx fills unused sources with "read constant 0"
++ * According to specs, you cannot select more than two different constants.
++ *
++ * Second stage selects the operands from the sources. This is defined in
++ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
++ * zero and one.
++ * Swizzling and negation happens in this stage, as well.
++ *
++ * Important: Color and alpha seem to be mostly separate, i.e. their sources
++ * selection appears to be fully independent (the register storage is probably
++ * physically split into a color and an alpha section).
++ * However (because of the apparent physical split), there is some interaction
++ * WRT swizzling. If, for example, you want to load an R component into an
++ * Alpha operand, this R component is taken from a *color* source, not from
++ * an alpha source. The corresponding register doesn't even have to appear in
++ * the alpha sources list. (I hope this all makes sense to you)
++ *
++ * Destination selection
++ * The destination register index is in FPI1 (color) and FPI3 (alpha)
++ * together with enable bits.
++ * There are separate enable bits for writing into temporary registers
++ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
++ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
++ * same index must be used for both).
++ *
++ * Note: There is a special form for LRP
++ *  - Argument order is the same as in ARB_fragment_program.
++ *  - Operation is MAD
++ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
++ *  - Set FPI0/FPI2_SPECIAL_LRP
++ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
++ */
++#define R300_PFS_INSTR1_0                   0x46C0
++#       define R300_FPI1_SRC0C_SHIFT             0
++#       define R300_FPI1_SRC0C_MASK              (31 << 0)
++#       define R300_FPI1_SRC0C_CONST             (1 << 5)
++#       define R300_FPI1_SRC1C_SHIFT             6
++#       define R300_FPI1_SRC1C_MASK              (31 << 6)
++#       define R300_FPI1_SRC1C_CONST             (1 << 11)
++#       define R300_FPI1_SRC2C_SHIFT             12
++#       define R300_FPI1_SRC2C_MASK              (31 << 12)
++#       define R300_FPI1_SRC2C_CONST             (1 << 17)
++#       define R300_FPI1_SRC_MASK                0x0003ffff
++#       define R300_FPI1_DSTC_SHIFT              18
++#       define R300_FPI1_DSTC_MASK               (31 << 18)
++#             define R300_FPI1_DSTC_REG_MASK_SHIFT     23
++#       define R300_FPI1_DSTC_REG_X              (1 << 23)
++#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
++#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
++#             define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT  26
++#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
++#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
++#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
++
++#define R300_PFS_INSTR3_0                   0x47C0
++#       define R300_FPI3_SRC0A_SHIFT             0
++#       define R300_FPI3_SRC0A_MASK              (31 << 0)
++#       define R300_FPI3_SRC0A_CONST             (1 << 5)
++#       define R300_FPI3_SRC1A_SHIFT             6
++#       define R300_FPI3_SRC1A_MASK              (31 << 6)
++#       define R300_FPI3_SRC1A_CONST             (1 << 11)
++#       define R300_FPI3_SRC2A_SHIFT             12
++#       define R300_FPI3_SRC2A_MASK              (31 << 12)
++#       define R300_FPI3_SRC2A_CONST             (1 << 17)
++#       define R300_FPI3_SRC_MASK                0x0003ffff
++#       define R300_FPI3_DSTA_SHIFT              18
++#       define R300_FPI3_DSTA_MASK               (31 << 18)
++#       define R300_FPI3_DSTA_REG                (1 << 23)
++#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
++#             define R300_FPI3_DSTA_DEPTH              (1 << 27)
++
++#define R300_PFS_INSTR0_0                   0x48C0
++#       define R300_FPI0_ARGC_SRC0C_XYZ          0
++#       define R300_FPI0_ARGC_SRC0C_XXX          1
++#       define R300_FPI0_ARGC_SRC0C_YYY          2
++#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
++#       define R300_FPI0_ARGC_SRC1C_XYZ          4
++#       define R300_FPI0_ARGC_SRC1C_XXX          5
++#       define R300_FPI0_ARGC_SRC1C_YYY          6
++#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
++#       define R300_FPI0_ARGC_SRC2C_XYZ          8
++#       define R300_FPI0_ARGC_SRC2C_XXX          9
++#       define R300_FPI0_ARGC_SRC2C_YYY          10
++#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
++#       define R300_FPI0_ARGC_SRC0A              12
++#       define R300_FPI0_ARGC_SRC1A              13
++#       define R300_FPI0_ARGC_SRC2A              14
++#       define R300_FPI0_ARGC_SRC1C_LRP          15
++#       define R300_FPI0_ARGC_ZERO               20
++#       define R300_FPI0_ARGC_ONE                21
++      /* GUESS */
++#       define R300_FPI0_ARGC_HALF               22
++#       define R300_FPI0_ARGC_SRC0C_YZX          23
++#       define R300_FPI0_ARGC_SRC1C_YZX          24
++#       define R300_FPI0_ARGC_SRC2C_YZX          25
++#       define R300_FPI0_ARGC_SRC0C_ZXY          26
++#       define R300_FPI0_ARGC_SRC1C_ZXY          27
++#       define R300_FPI0_ARGC_SRC2C_ZXY          28
++#       define R300_FPI0_ARGC_SRC0CA_WZY         29
++#       define R300_FPI0_ARGC_SRC1CA_WZY         30
++#       define R300_FPI0_ARGC_SRC2CA_WZY         31
++
++#       define R300_FPI0_ARG0C_SHIFT             0
++#       define R300_FPI0_ARG0C_MASK              (31 << 0)
++#       define R300_FPI0_ARG0C_NEG               (1 << 5)
++#       define R300_FPI0_ARG0C_ABS               (1 << 6)
++#       define R300_FPI0_ARG1C_SHIFT             7
++#       define R300_FPI0_ARG1C_MASK              (31 << 7)
++#       define R300_FPI0_ARG1C_NEG               (1 << 12)
++#       define R300_FPI0_ARG1C_ABS               (1 << 13)
++#       define R300_FPI0_ARG2C_SHIFT             14
++#       define R300_FPI0_ARG2C_MASK              (31 << 14)
++#       define R300_FPI0_ARG2C_NEG               (1 << 19)
++#       define R300_FPI0_ARG2C_ABS               (1 << 20)
++#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
++#       define R300_FPI0_OUTC_MAD                (0 << 23)
++#       define R300_FPI0_OUTC_DP3                (1 << 23)
++#       define R300_FPI0_OUTC_DP4                (2 << 23)
++#       define R300_FPI0_OUTC_MIN                (4 << 23)
++#       define R300_FPI0_OUTC_MAX                (5 << 23)
++#       define R300_FPI0_OUTC_CMPH               (7 << 23)
++#       define R300_FPI0_OUTC_CMP                (8 << 23)
++#       define R300_FPI0_OUTC_FRC                (9 << 23)
++#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
++#       define R300_FPI0_OUTC_SAT                (1 << 30)
++#       define R300_FPI0_INSERT_NOP              (1 << 31)
++
++#define R300_PFS_INSTR2_0                   0x49C0
++#       define R300_FPI2_ARGA_SRC0C_X            0
++#       define R300_FPI2_ARGA_SRC0C_Y            1
++#       define R300_FPI2_ARGA_SRC0C_Z            2
++#       define R300_FPI2_ARGA_SRC1C_X            3
++#       define R300_FPI2_ARGA_SRC1C_Y            4
++#       define R300_FPI2_ARGA_SRC1C_Z            5
++#       define R300_FPI2_ARGA_SRC2C_X            6
++#       define R300_FPI2_ARGA_SRC2C_Y            7
++#       define R300_FPI2_ARGA_SRC2C_Z            8
++#       define R300_FPI2_ARGA_SRC0A              9
++#       define R300_FPI2_ARGA_SRC1A              10
++#       define R300_FPI2_ARGA_SRC2A              11
++#       define R300_FPI2_ARGA_SRC1A_LRP          15
++#       define R300_FPI2_ARGA_ZERO               16
++#       define R300_FPI2_ARGA_ONE                17
++      /* GUESS */
++#       define R300_FPI2_ARGA_HALF               18
++#       define R300_FPI2_ARG0A_SHIFT             0
++#       define R300_FPI2_ARG0A_MASK              (31 << 0)
++#       define R300_FPI2_ARG0A_NEG               (1 << 5)
++      /* GUESS */
++#     define R300_FPI2_ARG0A_ABS               (1 << 6)
++#       define R300_FPI2_ARG1A_SHIFT             7
++#       define R300_FPI2_ARG1A_MASK              (31 << 7)
++#       define R300_FPI2_ARG1A_NEG               (1 << 12)
++      /* GUESS */
++#     define R300_FPI2_ARG1A_ABS               (1 << 13)
++#       define R300_FPI2_ARG2A_SHIFT             14
++#       define R300_FPI2_ARG2A_MASK              (31 << 14)
++#       define R300_FPI2_ARG2A_NEG               (1 << 19)
++      /* GUESS */
++#     define R300_FPI2_ARG2A_ABS               (1 << 20)
++#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
++#       define R300_FPI2_OUTA_MAD                (0 << 23)
++#       define R300_FPI2_OUTA_DP4                (1 << 23)
++#       define R300_FPI2_OUTA_MIN                (2 << 23)
++#       define R300_FPI2_OUTA_MAX                (3 << 23)
++#       define R300_FPI2_OUTA_CMP                (6 << 23)
++#       define R300_FPI2_OUTA_FRC                (7 << 23)
++#       define R300_FPI2_OUTA_EX2                (8 << 23)
++#       define R300_FPI2_OUTA_LG2                (9 << 23)
++#       define R300_FPI2_OUTA_RCP                (10 << 23)
++#       define R300_FPI2_OUTA_RSQ                (11 << 23)
++#       define R300_FPI2_OUTA_SAT                (1 << 30)
++#       define R300_FPI2_UNKNOWN_31              (1 << 31)
++/* END: Fragment program instruction set */
++
++/* Fog state and color */
++#define R300_RE_FOG_STATE                   0x4BC0
++#       define R300_FOG_ENABLE                   (1 << 0)
++#     define R300_FOG_MODE_LINEAR              (0 << 1)
++#     define R300_FOG_MODE_EXP                 (1 << 1)
++#     define R300_FOG_MODE_EXP2                (2 << 1)
++#     define R300_FOG_MODE_MASK                (3 << 1)
++#define R300_FOG_COLOR_R                    0x4BC8
++#define R300_FOG_COLOR_G                    0x4BCC
++#define R300_FOG_COLOR_B                    0x4BD0
++
++#define R300_PP_ALPHA_TEST                  0x4BD4
++#       define R300_REF_ALPHA_MASK               0x000000ff
++#       define R300_ALPHA_TEST_FAIL              (0 << 8)
++#       define R300_ALPHA_TEST_LESS              (1 << 8)
++#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
++#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
++#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
++#       define R300_ALPHA_TEST_GREATER           (4 << 8)
++#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
++#       define R300_ALPHA_TEST_PASS              (7 << 8)
++#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
++#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
++
++/* gap */
++
++/* Fragment program parameters in 7.16 floating point */
++#define R300_PFS_PARAM_0_X                  0x4C00
++#define R300_PFS_PARAM_0_Y                  0x4C04
++#define R300_PFS_PARAM_0_Z                  0x4C08
++#define R300_PFS_PARAM_0_W                  0x4C0C
++/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
++#define R300_PFS_PARAM_31_X                 0x4DF0
++#define R300_PFS_PARAM_31_Y                 0x4DF4
++#define R300_PFS_PARAM_31_Z                 0x4DF8
++#define R300_PFS_PARAM_31_W                 0x4DFC
++
++/* Notes:
++ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
++ *   the application
++ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
++ *    are set to the same
++ *   function (both registers are always set up completely in any case)
++ * - Most blend flags are simply copied from R200 and not tested yet
++ */
++#define R300_RB3D_CBLEND                    0x4E04
++#define R300_RB3D_ABLEND                    0x4E08
++/* the following only appear in CBLEND */
++#       define R300_BLEND_ENABLE                     (1 << 0)
++#       define R300_BLEND_UNKNOWN                    (3 << 1)
++#       define R300_BLEND_NO_SEPARATE                (1 << 3)
++/* the following are shared between CBLEND and ABLEND */
++#       define R300_FCN_MASK                         (3  << 12)
++#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
++#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
++#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
++#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
++#       define R300_COMB_FCN_MIN                     (4  << 12)
++#       define R300_COMB_FCN_MAX                     (5  << 12)
++#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
++#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
++#       define R300_BLEND_GL_ZERO                    (32)
++#       define R300_BLEND_GL_ONE                     (33)
++#       define R300_BLEND_GL_SRC_COLOR               (34)
++#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
++#       define R300_BLEND_GL_DST_COLOR               (36)
++#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
++#       define R300_BLEND_GL_SRC_ALPHA               (38)
++#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
++#       define R300_BLEND_GL_DST_ALPHA               (40)
++#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
++#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
++#       define R300_BLEND_GL_CONST_COLOR             (43)
++#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
++#       define R300_BLEND_GL_CONST_ALPHA             (45)
++#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
++#       define R300_BLEND_MASK                       (63)
++#       define R300_SRC_BLEND_SHIFT                  (16)
++#       define R300_DST_BLEND_SHIFT                  (24)
++#define R300_RB3D_BLEND_COLOR               0x4E10
++#define R300_RB3D_COLORMASK                 0x4E0C
++#       define R300_COLORMASK0_B                 (1<<0)
++#       define R300_COLORMASK0_G                 (1<<1)
++#       define R300_COLORMASK0_R                 (1<<2)
++#       define R300_COLORMASK0_A                 (1<<3)
++
++/* gap */
++
++#define R300_RB3D_COLOROFFSET0              0x4E28
++#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
++#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
++#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
++#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
++
++/* gap */
++
++/* Bit 16: Larger tiles
++ * Bit 17: 4x2 tiles
++ * Bit 18: Extremely weird tile like, but some pixels duplicated?
++ */
++#define R300_RB3D_COLORPITCH0               0x4E38
++#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
++#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
++#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
++#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
++#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
++#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
++#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
++#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
++#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
++#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
++#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
++
++#define R300_RB3D_AARESOLVE_CTL             0x4E88
++/* gap */
++
++/* Guess by Vladimir.
++ * Set to 0A before 3D operations, set to 02 afterwards.
++ */
++/*#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C*/
++#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
++#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
++
++/* gap */
++/* There seems to be no "write only" setting, so use Z-test = ALWAYS
++ * for this.
++ * Bit (1<<8) is the "test" bit. so plain write is 6  - vd
++ */
++#define R300_ZB_CNTL                             0x4F00
++#     define R300_STENCIL_ENABLE               (1 << 0)
++#     define R300_Z_ENABLE                     (1 << 1)
++#     define R300_Z_WRITE_ENABLE               (1 << 2)
++#     define R300_Z_SIGNED_COMPARE             (1 << 3)
++#     define R300_STENCIL_FRONT_BACK           (1 << 4)
++
++#define R300_ZB_ZSTENCILCNTL                   0x4f04
++      /* functions */
++#     define R300_ZS_NEVER                    0
++#     define R300_ZS_LESS                     1
++#     define R300_ZS_LEQUAL                   2
++#     define R300_ZS_EQUAL                    3
++#     define R300_ZS_GEQUAL                   4
++#     define R300_ZS_GREATER                  5
++#     define R300_ZS_NOTEQUAL                 6
++#     define R300_ZS_ALWAYS                   7
++#       define R300_ZS_MASK                     7
++      /* operations */
++#     define R300_ZS_KEEP                     0
++#     define R300_ZS_ZERO                     1
++#     define R300_ZS_REPLACE                  2
++#     define R300_ZS_INCR                     3
++#     define R300_ZS_DECR                     4
++#     define R300_ZS_INVERT                   5
++#     define R300_ZS_INCR_WRAP                6
++#     define R300_ZS_DECR_WRAP                7
++#     define R300_Z_FUNC_SHIFT                0
++      /* front and back refer to operations done for front
++         and back faces, i.e. separate stencil function support */
++#     define R300_S_FRONT_FUNC_SHIFT          3
++#     define R300_S_FRONT_SFAIL_OP_SHIFT      6
++#     define R300_S_FRONT_ZPASS_OP_SHIFT      9
++#     define R300_S_FRONT_ZFAIL_OP_SHIFT      12
++#     define R300_S_BACK_FUNC_SHIFT           15
++#     define R300_S_BACK_SFAIL_OP_SHIFT       18
++#     define R300_S_BACK_ZPASS_OP_SHIFT       21
++#     define R300_S_BACK_ZFAIL_OP_SHIFT       24
++
++#define R300_ZB_STENCILREFMASK                        0x4f08
++#     define R300_STENCILREF_SHIFT       0
++#     define R300_STENCILREF_MASK        0x000000ff
++#     define R300_STENCILMASK_SHIFT      8
++#     define R300_STENCILMASK_MASK       0x0000ff00
++#     define R300_STENCILWRITEMASK_SHIFT 16
++#     define R300_STENCILWRITEMASK_MASK  0x00ff0000
++
++/* gap */
++
++#define R300_ZB_FORMAT                             0x4f10
++#     define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
++#     define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
++#     define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
++/* reserved up to (15 << 0) */
++#     define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
++#     define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
++
++#define R300_ZB_ZTOP                             0x4F14
++#     define R300_ZTOP_DISABLE                 (0 << 0)
++#     define R300_ZTOP_ENABLE                  (1 << 0)
++
++/* gap */
++
++#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1 << 31)
++
++#define R300_ZB_BW_CNTL                     0x4f1c
++#     define R300_HIZ_DISABLE                              (0 << 0)
++#     define R300_HIZ_ENABLE                               (1 << 0)
++#     define R300_HIZ_MIN                                  (0 << 1)
++#     define R300_HIZ_MAX                                  (1 << 1)
++#     define R300_FAST_FILL_DISABLE                        (0 << 2)
++#     define R300_FAST_FILL_ENABLE                         (1 << 2)
++#     define R300_RD_COMP_DISABLE                          (0 << 3)
++#     define R300_RD_COMP_ENABLE                           (1 << 3)
++#     define R300_WR_COMP_DISABLE                          (0 << 4)
++#     define R300_WR_COMP_ENABLE                           (1 << 4)
++#     define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
++#     define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
++#     define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
++#     define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
++
++#     define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
++#     define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
++#     define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
++#     define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
++
++#     define R500_BMASK_ENABLE                             (0 << 10)
++#     define R500_BMASK_DISABLE                            (1 << 10)
++#     define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
++#     define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
++#     define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
++#     define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
++#     define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
++#     define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
++#     define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
++#     define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
++#     define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
++#     define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
++#     define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
++#     define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
++#     define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
++#     define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
++#     define R500_PEQ_PACKING_DISABLE                      (0 << 18)
++#     define R500_PEQ_PACKING_ENABLE                       (1 << 18)
++#     define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
++#     define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
++
++
++/* gap */
++
++/* Z Buffer Address Offset.
++ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
++ */
++#define R300_ZB_DEPTHOFFSET               0x4f20
++
++/* Z Buffer Pitch and Endian Control */
++#define R300_ZB_DEPTHPITCH                0x4f24
++#       define R300_DEPTHPITCH_MASK              0x00003FFC
++#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
++#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
++#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
++#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
++#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
++#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
++#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
++#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
++#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
++
++/* Z Buffer Clear Value */
++#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
++
++#define R300_ZB_ZMASK_OFFSET                   0x4f30
++#define R300_ZB_ZMASK_PITCH                    0x4f34
++#define R300_ZB_ZMASK_WRINDEX                  0x4f38
++#define R300_ZB_ZMASK_DWORD                    0x4f3c
++#define R300_ZB_ZMASK_RDINDEX                  0x4f40
++
++/* Hierarchical Z Memory Offset */
++#define R300_ZB_HIZ_OFFSET                       0x4f44
++
++/* Hierarchical Z Write Index */
++#define R300_ZB_HIZ_WRINDEX                      0x4f48
++
++/* Hierarchical Z Data */
++#define R300_ZB_HIZ_DWORD                        0x4f4c
++
++/* Hierarchical Z Read Index */
++#define R300_ZB_HIZ_RDINDEX                      0x4f50
++
++/* Hierarchical Z Pitch */
++#define R300_ZB_HIZ_PITCH                        0x4f54
++
++/* Z Buffer Z Pass Counter Data */
++#define R300_ZB_ZPASS_DATA                       0x4f58
++
++/* Z Buffer Z Pass Counter Address */
++#define R300_ZB_ZPASS_ADDR                       0x4f5c
++
++/* Depth buffer X and Y coordinate offset */
++#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
++#     define R300_DEPTHX_OFFSET_SHIFT  1
++#     define R300_DEPTHX_OFFSET_MASK   0x000007FE
++#     define R300_DEPTHY_OFFSET_SHIFT  17
++#     define R300_DEPTHY_OFFSET_MASK   0x07FE0000
++
++/* Sets the fifo sizes */
++#define R500_ZB_FIFO_SIZE                        0x4fd0
++#     define R500_OP_FIFO_SIZE_FULL   (0 << 0)
++#     define R500_OP_FIFO_SIZE_HALF   (1 << 0)
++#     define R500_OP_FIFO_SIZE_QUATER (2 << 0)
++#     define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
++
++/* Stencil Reference Value and Mask for backfacing quads */
++/* R300_ZB_STENCILREFMASK handles front face */
++#define R500_ZB_STENCILREFMASK_BF                0x4fd4
++#     define R500_STENCILREF_SHIFT       0
++#     define R500_STENCILREF_MASK        0x000000ff
++#     define R500_STENCILMASK_SHIFT      8
++#     define R500_STENCILMASK_MASK       0x0000ff00
++#     define R500_STENCILWRITEMASK_SHIFT 16
++#     define R500_STENCILWRITEMASK_MASK  0x00ff0000
++
++/* BEGIN: Vertex program instruction set */
++
++/* Every instruction is four dwords long:
++ *  DWORD 0: output and opcode
++ *  DWORD 1: first argument
++ *  DWORD 2: second argument
++ *  DWORD 3: third argument
++ *
++ * Notes:
++ *  - ABS r, a is implemented as MAX r, a, -a
++ *  - MOV is implemented as ADD to zero
++ *  - XPD is implemented as MUL + MAD
++ *  - FLR is implemented as FRC + ADD
++ *  - apparently, fglrx tries to schedule instructions so that there is at
++ *    least one instruction between the write to a temporary and the first
++ *    read from said temporary; however, violations of this scheduling are
++ *    allowed
++ *  - register indices seem to be unrelated with OpenGL aliasing to
++ *    conventional state
++ *  - only one attribute and one parameter can be loaded at a time; however,
++ *    the same attribute/parameter can be used for more than one argument
++ *  - the second software argument for POW is the third hardware argument
++ *    (no idea why)
++ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
++ *
++ * There is some magic surrounding LIT:
++ *   The single argument is replicated across all three inputs, but swizzled:
++ *     First argument: xyzy
++ *     Second argument: xyzx
++ *     Third argument: xyzw
++ *   Whenever the result is used later in the fragment program, fglrx forces
++ *   x and w to be 1.0 in the input selection; I don't know whether this is
++ *   strictly necessary
++ */
++#define R300_VPI_OUT_OP_DOT                     (1 << 0)
++#define R300_VPI_OUT_OP_MUL                     (2 << 0)
++#define R300_VPI_OUT_OP_ADD                     (3 << 0)
++#define R300_VPI_OUT_OP_MAD                     (4 << 0)
++#define R300_VPI_OUT_OP_DST                     (5 << 0)
++#define R300_VPI_OUT_OP_FRC                     (6 << 0)
++#define R300_VPI_OUT_OP_MAX                     (7 << 0)
++#define R300_VPI_OUT_OP_MIN                     (8 << 0)
++#define R300_VPI_OUT_OP_SGE                     (9 << 0)
++#define R300_VPI_OUT_OP_SLT                     (10 << 0)
++      /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
++#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
++#define R300_VPI_OUT_OP_ARL                     (13 << 0)
++#define R300_VPI_OUT_OP_EXP                     (65 << 0)
++#define R300_VPI_OUT_OP_LOG                     (66 << 0)
++      /* Used in fog computations, scalar(scalar) */
++#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
++#define R300_VPI_OUT_OP_LIT                     (68 << 0)
++#define R300_VPI_OUT_OP_POW                     (69 << 0)
++#define R300_VPI_OUT_OP_RCP                     (70 << 0)
++#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
++      /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
++#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
++#define R300_VPI_OUT_OP_EX2                     (75 << 0)
++#define R300_VPI_OUT_OP_LG2                     (76 << 0)
++#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
++      /* all temps, vector(scalar, vector, vector) */
++#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
++
++#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
++#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
++#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
++#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
++
++#define R300_VPI_OUT_REG_INDEX_SHIFT            13
++      /* GUESS based on fglrx native limits */
++#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
++
++#define R300_VPI_OUT_WRITE_X                    (1 << 20)
++#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
++#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
++#define R300_VPI_OUT_WRITE_W                    (1 << 23)
++
++#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
++#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
++#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
++#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
++#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
++
++#define R300_VPI_IN_REG_INDEX_SHIFT             5
++      /* GUESS based on fglrx native limits */
++#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
++
++/* The R300 can select components from the input register arbitrarily.
++ * Use the following constants, shifted by the component shift you
++ * want to select
++ */
++#define R300_VPI_IN_SELECT_X    0
++#define R300_VPI_IN_SELECT_Y    1
++#define R300_VPI_IN_SELECT_Z    2
++#define R300_VPI_IN_SELECT_W    3
++#define R300_VPI_IN_SELECT_ZERO 4
++#define R300_VPI_IN_SELECT_ONE  5
++#define R300_VPI_IN_SELECT_MASK 7
++
++#define R300_VPI_IN_X_SHIFT                     13
++#define R300_VPI_IN_Y_SHIFT                     16
++#define R300_VPI_IN_Z_SHIFT                     19
++#define R300_VPI_IN_W_SHIFT                     22
++
++#define R300_VPI_IN_NEG_X                       (1 << 25)
++#define R300_VPI_IN_NEG_Y                       (1 << 26)
++#define R300_VPI_IN_NEG_Z                       (1 << 27)
++#define R300_VPI_IN_NEG_W                       (1 << 28)
++/* END: Vertex program instruction set */
++
++/* BEGIN: Packet 3 commands */
++
++/* A primitive emission dword. */
++#define R300_PRIM_TYPE_NONE                     (0 << 0)
++#define R300_PRIM_TYPE_POINT                    (1 << 0)
++#define R300_PRIM_TYPE_LINE                     (2 << 0)
++#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
++#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
++#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
++#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
++#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
++#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
++#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
++#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
++      /* GUESS (based on r200) */
++#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
++#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
++#define R300_PRIM_TYPE_QUADS                    (13 << 0)
++#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
++#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
++#define R300_PRIM_TYPE_MASK                     0xF
++#define R300_PRIM_WALK_IND                      (1 << 4)
++#define R300_PRIM_WALK_LIST                     (2 << 4)
++#define R300_PRIM_WALK_RING                     (3 << 4)
++#define R300_PRIM_WALK_MASK                     (3 << 4)
++      /* GUESS (based on r200) */
++#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
++#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
++#define R300_PRIM_NUM_VERTICES_SHIFT            16
++#define R300_PRIM_NUM_VERTICES_MASK             0xffff
++
++/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
++ * Two parameter dwords:
++ * 0. The first parameter appears to be always 0
++ * 1. The second parameter is a standard primitive emission dword.
++ */
++#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
++
++/* Specify the full set of vertex arrays as (address, stride).
++ * The first parameter is the number of vertex arrays specified.
++ * The rest of the command is a variable length list of blocks, where
++ * each block is three dwords long and specifies two arrays.
++ * The first dword of a block is split into two words, the lower significant
++ * word refers to the first array, the more significant word to the second
++ * array in the block.
++ * The low byte of each word contains the size of an array entry in dwords,
++ * the high byte contains the stride of the array.
++ * The second dword of a block contains the pointer to the first array,
++ * the third dword of a block contains the pointer to the second array.
++ * Note that if the total number of arrays is odd, the third dword of
++ * the last block is omitted.
++ */
++#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
++
++#define R300_PACKET3_INDX_BUFFER            0x00003300
++#    define R300_EB_UNK1_SHIFT                      24
++#    define R300_EB_UNK1                    (0x80<<24)
++#    define R300_EB_UNK2                        0x0810
++#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
++#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
++
++/* END: Packet 3 commands */
++
++
++/* Color formats for 2d packets
++ */
++#define R300_CP_COLOR_FORMAT_CI8      2
++#define R300_CP_COLOR_FORMAT_ARGB1555 3
++#define R300_CP_COLOR_FORMAT_RGB565   4
++#define R300_CP_COLOR_FORMAT_ARGB8888 6
++#define R300_CP_COLOR_FORMAT_RGB332   7
++#define R300_CP_COLOR_FORMAT_RGB8     9
++#define R300_CP_COLOR_FORMAT_ARGB4444 15
++
++/*
++ * CP type-3 packets
++ */
++#define R300_CP_CMD_BITBLT_MULTI      0xC0009B00
++
++#define R500_VAP_INDEX_OFFSET         0x208c
++
++#define R500_GA_US_VECTOR_INDEX         0x4250
++#define R500_GA_US_VECTOR_DATA          0x4254
++
++#define R500_RS_IP_0                    0x4074
++#define R500_RS_INST_0                  0x4320
++
++#define R500_US_CONFIG                  0x4600
++
++#define R500_US_FC_CTRL                       0x4624
++#define R500_US_CODE_ADDR             0x4630
++
++#define R500_RB3D_COLOR_CLEAR_VALUE_AR  0x46c0
++#define R500_RB3D_CONSTANT_COLOR_AR     0x4ef8
++
++#endif /* _R300_REG_H */
++
++/* *INDENT-ON* */
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_cp.c git-nokia/drivers/gpu/drm-tungsten/radeon_cp.c
+--- git/drivers/gpu/drm-tungsten/radeon_cp.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_cp.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1771 @@
++/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
++/*
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * Copyright 2007 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++#include "r300_reg.h"
++
++#include "radeon_microcode.h"
++#define RADEON_FIFO_DEBUG     0
++
++static int radeon_do_cleanup_cp(struct drm_device * dev);
++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
++
++static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++      u32 ret;
++      RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
++      ret = RADEON_READ(R520_MC_IND_DATA);
++      RADEON_WRITE(R520_MC_IND_INDEX, 0);
++      return ret;
++}
++
++static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++      u32 ret;
++      RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
++      ret = RADEON_READ(RS480_NB_MC_DATA);
++      RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
++      return ret;
++}
++
++static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++      u32 ret;
++      RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
++      ret = RADEON_READ(RS690_MC_DATA);
++      RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
++      return ret;
++}
++
++static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++        if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++          return RS690_READ_MCIND(dev_priv, addr);
++      else
++          return RS480_READ_MCIND(dev_priv, addr);
++}
++
++u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
++{
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
++              return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++              return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
++              return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
++      else
++              return RADEON_READ(RADEON_MC_FB_LOCATION);
++}
++
++static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
++{
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
++              R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++              RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
++              R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
++      else
++              RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
++}
++
++static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
++{
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
++              R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++              RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
++              R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
++      else
++              RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
++}
++
++static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
++{
++      u32 agp_base_hi = upper_32_bits(agp_base);
++      u32 agp_base_lo = agp_base & 0xffffffff;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
++              R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
++              R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
++      } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
++              RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
++              RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
++      } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
++              R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
++              R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
++              RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
++              RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
++      } else {
++              RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
++              if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
++                      RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
++      }
++}
++
++static int RADEON_READ_PLL(struct drm_device * dev, int addr)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
++      return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
++}
++
++static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
++{
++      RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
++      return RADEON_READ(RADEON_PCIE_DATA);
++}
++
++#if RADEON_FIFO_DEBUG
++static void radeon_status(drm_radeon_private_t * dev_priv)
++{
++      printk("%s:\n", __FUNCTION__);
++      printk("RBBM_STATUS = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
++      printk("CP_RB_RTPR = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
++      printk("CP_RB_WTPR = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
++      printk("AIC_CNTL = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
++      printk("AIC_STAT = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_STAT));
++      printk("AIC_PT_BASE = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
++      printk("TLB_ADDR = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
++      printk("TLB_DATA = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
++}
++#endif
++
++/* ================================================================
++ * Engine, FIFO control
++ */
++
++static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
++{
++      u32 tmp;
++      int i;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
++              tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
++              tmp |= RADEON_RB3D_DC_FLUSH_ALL;
++              RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
++
++              for (i = 0; i < dev_priv->usec_timeout; i++) {
++                      if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
++                            & RADEON_RB3D_DC_BUSY)) {
++                              return 0;
++                      }
++                      DRM_UDELAY(1);
++              }
++      } else {
++              /* don't flush or purge cache here or lockup */
++              return 0;
++      }
++
++#if RADEON_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      radeon_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
++{
++      int i;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              int slots = (RADEON_READ(RADEON_RBBM_STATUS)
++                           & RADEON_RBBM_FIFOCNT_MASK);
++              if (slots >= entries)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++      DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
++               RADEON_READ(RADEON_RBBM_STATUS),
++               RADEON_READ(R300_VAP_CNTL_STATUS));
++
++#if RADEON_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      radeon_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
++{
++      int i, ret;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      ret = radeon_do_wait_for_fifo(dev_priv, 64);
++      if (ret)
++              return ret;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(RADEON_READ(RADEON_RBBM_STATUS)
++                    & RADEON_RBBM_ACTIVE)) {
++                      radeon_do_pixcache_flush(dev_priv);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++      DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
++               RADEON_READ(RADEON_RBBM_STATUS),
++               RADEON_READ(R300_VAP_CNTL_STATUS));
++
++#if RADEON_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      radeon_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++static void radeon_init_pipes(drm_radeon_private_t * dev_priv)
++{
++      uint32_t gb_tile_config, gb_pipe_sel = 0;
++
++      /* RS4xx/RS6xx/R4xx/R5xx */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
++              gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
++              dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
++      } else {
++              /* R3xx */
++              if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
++                  ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
++                      dev_priv->num_gb_pipes = 2;
++              } else {
++                      /* R3Vxx */
++                      dev_priv->num_gb_pipes = 1;
++              }
++      }
++      DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
++
++      gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
++
++      switch(dev_priv->num_gb_pipes) {
++      case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
++      case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
++      case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
++      default:
++      case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
++      }
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
++              RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
++              RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
++      }
++      RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
++      radeon_do_wait_for_idle(dev_priv);
++      RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
++      RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
++                                             R300_DC_AUTOFLUSH_ENABLE |
++                                             R300_DC_DC_DISABLE_IGNORE_PE));
++
++
++}
++
++/* ================================================================
++ * CP control, initialization
++ */
++
++/* Load the microcode for the CP */
++static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
++{
++      int i;
++      DRM_DEBUG("\n");
++
++      radeon_do_wait_for_idle(dev_priv);
++
++      RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
++
++      if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
++              DRM_INFO("Loading R100 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R100_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R100_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
++              DRM_INFO("Loading R200 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R200_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R200_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
++              DRM_INFO("Loading R300 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R300_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R300_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
++              DRM_INFO("Loading R400 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R420_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R420_cp_microcode[i][0]);
++              }
++      } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
++              DRM_INFO("Loading RS690 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   RS690_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   RS690_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
++              DRM_INFO("Loading R500 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R520_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R520_cp_microcode[i][0]);
++              }
++      }
++}
++
++/* Flush any pending commands to the CP.  This should only be used just
++ * prior to a wait for idle, as it informs the engine that the command
++ * stream is ending.
++ */
++static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
++{
++      DRM_DEBUG("\n");
++#if 0
++      u32 tmp;
++
++      tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
++      RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
++#endif
++}
++
++/* Wait for the CP to go idle.
++ */
++int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
++{
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(6);
++
++      RADEON_PURGE_CACHE();
++      RADEON_PURGE_ZCACHE();
++      RADEON_WAIT_UNTIL_IDLE();
++
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      return radeon_do_wait_for_idle(dev_priv);
++}
++
++/* Start the Command Processor.
++ */
++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
++{
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      radeon_do_wait_for_idle(dev_priv);
++
++      RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
++
++      dev_priv->cp_running = 1;
++
++      BEGIN_RING(8);
++      /* isync can only be written through cp on r5xx write it here */
++      OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
++      OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
++               RADEON_ISYNC_ANY3D_IDLE2D |
++               RADEON_ISYNC_WAIT_IDLEGUI |
++               RADEON_ISYNC_CPSCRATCH_IDLEGUI);
++      RADEON_PURGE_CACHE();
++      RADEON_PURGE_ZCACHE();
++      RADEON_WAIT_UNTIL_IDLE();
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
++}
++
++/* Reset the Command Processor.  This will not flush any pending
++ * commands, so you must wait for the CP command stream to complete
++ * before calling this routine.
++ */
++static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
++{
++      u32 cur_read_ptr;
++      DRM_DEBUG("\n");
++
++      cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
++      RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
++      SET_RING_HEAD(dev_priv, cur_read_ptr);
++      dev_priv->ring.tail = cur_read_ptr;
++}
++
++/* Stop the Command Processor.  This will not flush any pending
++ * commands, so you must flush the command stream and wait for the CP
++ * to go idle before calling this routine.
++ */
++static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
++{
++      DRM_DEBUG("\n");
++
++      RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
++
++      dev_priv->cp_running = 0;
++}
++
++/* Reset the engine.  This will stop the CP if it is running.
++ */
++static int radeon_do_engine_reset(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
++      DRM_DEBUG("\n");
++
++      radeon_do_pixcache_flush(dev_priv);
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
++              /* may need something similar for newer chips */
++              clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
++              mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
++
++              RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
++                                                  RADEON_FORCEON_MCLKA |
++                                                  RADEON_FORCEON_MCLKB |
++                                                  RADEON_FORCEON_YCLKA |
++                                                  RADEON_FORCEON_YCLKB |
++                                                  RADEON_FORCEON_MC |
++                                                  RADEON_FORCEON_AIC));
++      }
++
++      rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
++
++      RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
++                                            RADEON_SOFT_RESET_CP |
++                                            RADEON_SOFT_RESET_HI |
++                                            RADEON_SOFT_RESET_SE |
++                                            RADEON_SOFT_RESET_RE |
++                                            RADEON_SOFT_RESET_PP |
++                                            RADEON_SOFT_RESET_E2 |
++                                            RADEON_SOFT_RESET_RB));
++      RADEON_READ(RADEON_RBBM_SOFT_RESET);
++      RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
++                                            ~(RADEON_SOFT_RESET_CP |
++                                              RADEON_SOFT_RESET_HI |
++                                              RADEON_SOFT_RESET_SE |
++                                              RADEON_SOFT_RESET_RE |
++                                              RADEON_SOFT_RESET_PP |
++                                              RADEON_SOFT_RESET_E2 |
++                                              RADEON_SOFT_RESET_RB)));
++      RADEON_READ(RADEON_RBBM_SOFT_RESET);
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
++              RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
++              RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
++              RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
++      }
++
++      /* setup the raster pipes */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
++          radeon_init_pipes(dev_priv);
++
++      /* Reset the CP ring */
++      radeon_do_cp_reset(dev_priv);
++
++      /* The CP is no longer running after an engine reset */
++      dev_priv->cp_running = 0;
++
++      /* Reset any pending vertex, indirect buffers */
++      radeon_freelist_reset(dev);
++
++      return 0;
++}
++
++static void radeon_cp_init_ring_buffer(struct drm_device * dev,
++                                     drm_radeon_private_t * dev_priv)
++{
++      u32 ring_start, cur_read_ptr;
++      u32 tmp;
++
++      /* Initialize the memory controller. With new memory map, the fb location
++       * is not changed, it should have been properly initialized already. Part
++       * of the problem is that the code below is bogus, assuming the GART is
++       * always appended to the fb which is not necessarily the case
++       */
++      if (!dev_priv->new_memmap)
++              radeon_write_fb_location(dev_priv,
++                           ((dev_priv->gart_vm_start - 1) & 0xffff0000)
++                           | (dev_priv->fb_location >> 16));
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              radeon_write_agp_base(dev_priv, dev->agp->base);
++
++              radeon_write_agp_location(dev_priv,
++                           (((dev_priv->gart_vm_start - 1 +
++                              dev_priv->gart_size) & 0xffff0000) |
++                            (dev_priv->gart_vm_start >> 16)));
++
++              ring_start = (dev_priv->cp_ring->offset
++                            - dev->agp->base
++                            + dev_priv->gart_vm_start);
++      } else
++#endif
++              ring_start = (dev_priv->cp_ring->offset
++                            - (unsigned long)dev->sg->virtual
++                            + dev_priv->gart_vm_start);
++
++      RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
++
++      /* Set the write pointer delay */
++      RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
++
++      /* Initialize the ring buffer's read and write pointers */
++      cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
++      RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
++      SET_RING_HEAD(dev_priv, cur_read_ptr);
++      dev_priv->ring.tail = cur_read_ptr;
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
++                           dev_priv->ring_rptr->offset
++                           - dev->agp->base + dev_priv->gart_vm_start);
++      } else
++#endif
++      {
++              struct drm_sg_mem *entry = dev->sg;
++              unsigned long tmp_ofs, page_ofs;
++
++              tmp_ofs = dev_priv->ring_rptr->offset -
++                              (unsigned long)dev->sg->virtual;
++              page_ofs = tmp_ofs >> PAGE_SHIFT;
++
++              RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
++              DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
++                        (unsigned long)entry->busaddr[page_ofs],
++                        entry->handle + tmp_ofs);
++      }
++
++      /* Set ring buffer size */
++#ifdef __BIG_ENDIAN
++      RADEON_WRITE(RADEON_CP_RB_CNTL,
++                   RADEON_BUF_SWAP_32BIT |
++                   (dev_priv->ring.fetch_size_l2ow << 18) |
++                   (dev_priv->ring.rptr_update_l2qw << 8) |
++                   dev_priv->ring.size_l2qw);
++#else
++      RADEON_WRITE(RADEON_CP_RB_CNTL,
++                   (dev_priv->ring.fetch_size_l2ow << 18) |
++                   (dev_priv->ring.rptr_update_l2qw << 8) |
++                   dev_priv->ring.size_l2qw);
++#endif
++
++      /* Initialize the scratch register pointer.  This will cause
++       * the scratch register values to be written out to memory
++       * whenever they are updated.
++       *
++       * We simply put this behind the ring read pointer, this works
++       * with PCI GART as well as (whatever kind of) AGP GART
++       */
++      RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
++                   + RADEON_SCRATCH_REG_OFFSET);
++
++      dev_priv->scratch = ((__volatile__ u32 *)
++                           dev_priv->ring_rptr->handle +
++                           (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
++
++      RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
++
++      /* Turn on bus mastering */
++      tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
++      RADEON_WRITE(RADEON_BUS_CNTL, tmp);
++
++      dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
++      RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
++
++      dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
++      RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
++                   dev_priv->sarea_priv->last_dispatch);
++
++      dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
++      RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
++
++      radeon_do_wait_for_idle(dev_priv);
++
++      /* Sync everything up */
++      RADEON_WRITE(RADEON_ISYNC_CNTL,
++                   (RADEON_ISYNC_ANY2D_IDLE3D |
++                    RADEON_ISYNC_ANY3D_IDLE2D |
++                    RADEON_ISYNC_WAIT_IDLEGUI |
++                    RADEON_ISYNC_CPSCRATCH_IDLEGUI));
++
++}
++
++static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
++{
++      u32 tmp;
++
++      /* Writeback doesn't seem to work everywhere, test it here and possibly
++       * enable it if it appears to work
++       */
++      DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
++      RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
++
++      for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
++              if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
++                  0xdeadbeef)
++                      break;
++              DRM_UDELAY(1);
++      }
++
++      if (tmp < dev_priv->usec_timeout) {
++              dev_priv->writeback_works = 1;
++              DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
++      } else {
++              dev_priv->writeback_works = 0;
++              DRM_INFO("writeback test failed\n");
++      }
++      if (radeon_no_wb == 1) {
++              dev_priv->writeback_works = 0;
++              DRM_INFO("writeback forced off\n");
++      }
++
++      if (!dev_priv->writeback_works) {
++              /* Disable writeback to avoid unnecessary bus master transfers */
++              RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE);
++              RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
++      }
++}
++
++/* Enable or disable IGP GART on the chip */
++static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
++{
++      u32 temp;
++
++      if (on) {
++              DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
++                       dev_priv->gart_vm_start,
++                       (long)dev_priv->gart_info.bus_addr,
++                       dev_priv->gart_size);
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
++
++              if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++                      IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
++                                                           RS690_BLOCK_GFX_D3_EN));
++              else
++                      IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
++
++              IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
++                                                             RS480_VA_SIZE_32MB));
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
++              IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
++                                                      RS480_TLB_ENABLE |
++                                                      RS480_GTW_LAC_EN |
++                                                      RS480_1LEVEL_GART));
++
++              temp = dev_priv->gart_info.bus_addr & 0xfffff000;
++              temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
++              IGP_WRITE_MCIND(RS480_GART_BASE, temp);
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
++              IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
++                                                    RS480_REQ_TYPE_SNOOP_DIS));
++
++              radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
++
++              dev_priv->gart_size = 32*1024*1024;
++              temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 
++                      0xffff0000) | (dev_priv->gart_vm_start >> 16));
++
++              radeon_write_agp_location(dev_priv, temp);
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
++              IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
++                                                             RS480_VA_SIZE_32MB));
++
++              do {
++                      temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
++                      if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
++                              break;
++                      DRM_UDELAY(1);
++              } while(1);
++
++              IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
++                              RS480_GART_CACHE_INVALIDATE);
++
++              do {
++                      temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
++                      if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
++                              break;
++                      DRM_UDELAY(1);
++              } while(1);
++
++              IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
++      } else {
++              IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
++      }
++}
++
++static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
++{
++      u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
++      if (on) {
++
++              DRM_DEBUG("programming pcie %08X %08lX %08X\n",
++                        dev_priv->gart_vm_start,
++                        (long)dev_priv->gart_info.bus_addr,
++                        dev_priv->gart_size);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
++                                dev_priv->gart_vm_start);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
++                                dev_priv->gart_info.bus_addr);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
++                                dev_priv->gart_vm_start);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
++                                dev_priv->gart_vm_start +
++                                dev_priv->gart_size - 1);
++
++              radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
++
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
++                                RADEON_PCIE_TX_GART_EN);
++      } else {
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
++                                tmp & ~RADEON_PCIE_TX_GART_EN);
++      }
++}
++
++/* Enable or disable PCI GART on the chip */
++static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
++{
++      u32 tmp;
++
++      if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
++          (dev_priv->flags & RADEON_IS_IGPGART)) {
++              radeon_set_igpgart(dev_priv, on);
++              return;
++      }
++
++      if (dev_priv->flags & RADEON_IS_PCIE) {
++              radeon_set_pciegart(dev_priv, on);
++              return;
++      }
++
++      tmp = RADEON_READ(RADEON_AIC_CNTL);
++
++      if (on) {
++              RADEON_WRITE(RADEON_AIC_CNTL,
++                           tmp | RADEON_PCIGART_TRANSLATE_EN);
++
++              /* set PCI GART page-table base address
++               */
++              RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
++
++              /* set address range for PCI address translate
++               */
++              RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
++              RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
++                           + dev_priv->gart_size - 1);
++
++              /* Turn off AGP aperture -- is this required for PCI GART?
++               */
++              radeon_write_agp_location(dev_priv, 0xffffffc0);
++              RADEON_WRITE(RADEON_AGP_COMMAND, 0);    /* clear AGP_COMMAND */
++      } else {
++              RADEON_WRITE(RADEON_AIC_CNTL,
++                           tmp & ~RADEON_PCIGART_TRANSLATE_EN);
++      }
++}
++
++static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      /* if we require new memory map but we don't have it fail */
++      if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
++              DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP))
++      {
++              DRM_DEBUG("Forcing AGP card to PCI mode\n");
++              dev_priv->flags &= ~RADEON_IS_AGP;
++      }
++      else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
++               && !init->is_pci)
++      {
++              DRM_DEBUG("Restoring AGP flag\n");
++              dev_priv->flags |= RADEON_IS_AGP;
++      }
++
++      if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
++              DRM_ERROR("PCI GART memory not allocated!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->usec_timeout = init->usec_timeout;
++      if (dev_priv->usec_timeout < 1 ||
++          dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
++              DRM_DEBUG("TIMEOUT problem!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      /* Enable vblank on CRTC1 for older X servers
++       */
++      dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
++
++      dev_priv->do_boxes = 0;
++      dev_priv->cp_mode = init->cp_mode;
++
++      /* We don't support anything other than bus-mastering ring mode,
++       * but the ring can be in either AGP or PCI space for the ring
++       * read pointer.
++       */
++      if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
++          (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
++              DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      switch (init->fb_bpp) {
++      case 16:
++              dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
++              break;
++      case 32:
++      default:
++              dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
++              break;
++      }
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      switch (init->depth_bpp) {
++      case 16:
++              dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
++              break;
++      case 32:
++      default:
++              dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
++              break;
++      }
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      /* Hardware state for depth clears.  Remove this if/when we no
++       * longer clear the depth buffer with a 3D rectangle.  Hard-code
++       * all values to prevent unwanted 3D state from slipping through
++       * and screwing with the clear operation.
++       */
++      dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
++                                         (dev_priv->color_fmt << 10) |
++                                         (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));
++
++      dev_priv->depth_clear.rb3d_zstencilcntl =
++          (dev_priv->depth_fmt |
++           RADEON_Z_TEST_ALWAYS |
++           RADEON_STENCIL_TEST_ALWAYS |
++           RADEON_STENCIL_S_FAIL_REPLACE |
++           RADEON_STENCIL_ZPASS_REPLACE |
++           RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
++
++      dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
++                                       RADEON_BFACE_SOLID |
++                                       RADEON_FFACE_SOLID |
++                                       RADEON_FLAT_SHADE_VTX_LAST |
++                                       RADEON_DIFFUSE_SHADE_FLAT |
++                                       RADEON_ALPHA_SHADE_FLAT |
++                                       RADEON_SPECULAR_SHADE_FLAT |
++                                       RADEON_FOG_SHADE_FLAT |
++                                       RADEON_VTX_PIX_CENTER_OGL |
++                                       RADEON_ROUND_MODE_TRUNC |
++                                       RADEON_ROUND_PREC_8TH_PIX);
++
++
++      dev_priv->ring_offset = init->ring_offset;
++      dev_priv->ring_rptr_offset = init->ring_rptr_offset;
++      dev_priv->buffers_offset = init->buffers_offset;
++      dev_priv->gart_textures_offset = init->gart_textures_offset;
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
++      if (!dev_priv->cp_ring) {
++              DRM_ERROR("could not find cp ring region!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++      dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
++      if (!dev_priv->ring_rptr) {
++              DRM_ERROR("could not find ring read pointer!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++      dev->agp_buffer_token = init->buffers_offset;
++      dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
++      if (!dev->agp_buffer_map) {
++              DRM_ERROR("could not find dma buffer region!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      if (init->gart_textures_offset) {
++              dev_priv->gart_textures =
++                  drm_core_findmap(dev, init->gart_textures_offset);
++              if (!dev_priv->gart_textures) {
++                      DRM_ERROR("could not find GART texture region!\n");
++                      radeon_do_cleanup_cp(dev);
++                      return -EINVAL;
++              }
++      }
++
++      dev_priv->sarea_priv =
++          (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                                  init->sarea_priv_offset);
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              drm_core_ioremap(dev_priv->cp_ring, dev);
++              drm_core_ioremap(dev_priv->ring_rptr, dev);
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev_priv->cp_ring->handle ||
++                  !dev_priv->ring_rptr->handle ||
++                  !dev->agp_buffer_map->handle) {
++                      DRM_ERROR("could not find ioremap agp regions!\n");
++                      radeon_do_cleanup_cp(dev);
++                      return -EINVAL;
++              }
++      } else
++#endif
++      {
++              dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
++              dev_priv->ring_rptr->handle =
++                  (void *)dev_priv->ring_rptr->offset;
++              dev->agp_buffer_map->handle =
++                  (void *)dev->agp_buffer_map->offset;
++
++              DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
++                        dev_priv->cp_ring->handle);
++              DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
++                        dev_priv->ring_rptr->handle);
++              DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
++                        dev->agp_buffer_map->handle);
++      }
++
++      dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
++      dev_priv->fb_size =
++              ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
++              - dev_priv->fb_location;
++
++      dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
++                                      ((dev_priv->front_offset
++                                        + dev_priv->fb_location) >> 10));
++
++      dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
++                                     ((dev_priv->back_offset
++                                       + dev_priv->fb_location) >> 10));
++
++      dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
++                                      ((dev_priv->depth_offset
++                                        + dev_priv->fb_location) >> 10));
++
++      dev_priv->gart_size = init->gart_size;
++
++      /* New let's set the memory map ... */
++      if (dev_priv->new_memmap) {
++              u32 base = 0;
++
++              DRM_INFO("Setting GART location based on new memory map\n");
++
++              /* If using AGP, try to locate the AGP aperture at the same
++               * location in the card and on the bus, though we have to
++               * align it down.
++               */
++#if __OS_HAS_AGP
++              if (dev_priv->flags & RADEON_IS_AGP) {
++                      base = dev->agp->base;
++                      /* Check if valid */
++                      if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
++                          base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
++                              DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
++                                       dev->agp->base);
++                              base = 0;
++                      }
++              }
++#endif
++              /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
++              if (base == 0) {
++                      base = dev_priv->fb_location + dev_priv->fb_size;
++                      if (base < dev_priv->fb_location ||
++                          ((base + dev_priv->gart_size) & 0xfffffffful) < base)
++                              base = dev_priv->fb_location
++                                      - dev_priv->gart_size;
++              }
++              dev_priv->gart_vm_start = base & 0xffc00000u;
++              if (dev_priv->gart_vm_start != base)
++                      DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
++                               base, dev_priv->gart_vm_start);
++      } else {
++              DRM_INFO("Setting GART location based on old memory map\n");
++              dev_priv->gart_vm_start = dev_priv->fb_location +
++                      RADEON_READ(RADEON_CONFIG_APER_SIZE);
++      }
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP)
++              dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
++                                               - dev->agp->base
++                                               + dev_priv->gart_vm_start);
++      else
++#endif
++              dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
++                                      - (unsigned long)dev->sg->virtual
++                                      + dev_priv->gart_vm_start);
++
++      DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
++      DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
++      DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
++                dev_priv->gart_buffers_offset);
++
++      dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
++      dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
++                            + init->ring_size / sizeof(u32));
++      dev_priv->ring.size = init->ring_size;
++      dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
++
++      dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
++      dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
++
++      dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
++      dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
++
++      dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
++
++      dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              /* Turn off PCI GART */
++              radeon_set_pcigart(dev_priv, 0);
++      } else
++#endif
++      {
++              dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
++              /* if we have an offset set from userspace */
++              if (dev_priv->pcigart_offset_set) {
++                      dev_priv->gart_info.bus_addr =
++                          dev_priv->pcigart_offset + dev_priv->fb_location;
++                      dev_priv->gart_info.mapping.offset =
++                          dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
++                      dev_priv->gart_info.mapping.size =
++                          dev_priv->gart_info.table_size;
++
++                      drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
++                      dev_priv->gart_info.addr =
++                          dev_priv->gart_info.mapping.handle;
++
++                      if (dev_priv->flags & RADEON_IS_PCIE)
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
++                      else
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++                      dev_priv->gart_info.gart_table_location =
++                          DRM_ATI_GART_FB;
++
++                      DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
++                                dev_priv->gart_info.addr,
++                                dev_priv->pcigart_offset);
++              } else {
++                      if (dev_priv->flags & RADEON_IS_IGPGART)
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
++                      else
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++                      dev_priv->gart_info.gart_table_location =
++                          DRM_ATI_GART_MAIN;
++                      dev_priv->gart_info.addr = NULL;
++                      dev_priv->gart_info.bus_addr = 0;
++                      if (dev_priv->flags & RADEON_IS_PCIE) {
++                              DRM_ERROR
++                                  ("Cannot use PCI Express without GART in FB memory\n");
++                              radeon_do_cleanup_cp(dev);
++                              return -EINVAL;
++                      }
++              }
++
++              if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
++                      DRM_ERROR("failed to init PCI GART!\n");
++                      radeon_do_cleanup_cp(dev);
++                      return -ENOMEM;
++              }
++
++              /* Turn on PCI GART */
++              radeon_set_pcigart(dev_priv, 1);
++      }
++
++      /* Start with assuming that writeback doesn't work */
++      dev_priv->writeback_works = 0;
++
++      radeon_cp_load_microcode(dev_priv);
++      radeon_cp_init_ring_buffer(dev, dev_priv);
++
++      dev_priv->last_buf = 0;
++
++      radeon_do_engine_reset(dev);
++      radeon_test_writeback(dev_priv);
++
++      return 0;
++}
++
++static int radeon_do_cleanup_cp(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              if (dev_priv->cp_ring != NULL) {
++                      drm_core_ioremapfree(dev_priv->cp_ring, dev);
++                      dev_priv->cp_ring = NULL;
++              }
++              if (dev_priv->ring_rptr != NULL) {
++                      drm_core_ioremapfree(dev_priv->ring_rptr, dev);
++                      dev_priv->ring_rptr = NULL;
++              }
++              if (dev->agp_buffer_map != NULL) {
++                      drm_core_ioremapfree(dev->agp_buffer_map, dev);
++                      dev->agp_buffer_map = NULL;
++              }
++      } else
++#endif
++      {
++
++              if (dev_priv->gart_info.bus_addr) {
++                      /* Turn off PCI GART */
++                      radeon_set_pcigart(dev_priv, 0);
++                      if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
++                              DRM_ERROR("failed to cleanup PCI GART!\n");
++              }
++
++              if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
++              {
++                      drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
++                      dev_priv->gart_info.addr = 0;
++              }
++      }
++      /* only clear to the start of flags */
++      memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
++
++      return 0;
++}
++
++/* This code will reinit the Radeon CP hardware after a resume from disc.
++ * AFAIK, it would be very difficult to pickle the state at suspend time, so
++ * here we make sure that all Radeon hardware initialisation is re-done without
++ * affecting running applications.
++ *
++ * Charl P. Botha <http://cpbotha.net>
++ */
++static int radeon_do_resume_cp(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv) {
++              DRM_ERROR("Called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("Starting radeon_do_resume_cp()\n");
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              /* Turn off PCI GART */
++              radeon_set_pcigart(dev_priv, 0);
++      } else
++#endif
++      {
++              /* Turn on PCI GART */
++              radeon_set_pcigart(dev_priv, 1);
++      }
++
++      radeon_cp_load_microcode(dev_priv);
++      radeon_cp_init_ring_buffer(dev, dev_priv);
++
++      radeon_do_engine_reset(dev);
++      radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
++
++      DRM_DEBUG("radeon_do_resume_cp() complete\n");
++
++      return 0;
++}
++
++int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_init_t *init = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (init->func == RADEON_INIT_R300_CP)
++              r300_init_reg_flags(dev);
++
++      switch (init->func) {
++      case RADEON_INIT_CP:
++      case RADEON_INIT_R200_CP:
++      case RADEON_INIT_R300_CP:
++              return radeon_do_init_cp(dev, init);
++      case RADEON_CLEANUP_CP:
++              return radeon_do_cleanup_cp(dev);
++      }
++
++      return -EINVAL;
++}
++
++int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dev_priv->cp_running) {
++              DRM_DEBUG("while CP running\n");
++              return 0;
++      }
++      if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
++              DRM_DEBUG("called with bogus CP mode (%d)\n",
++                        dev_priv->cp_mode);
++              return 0;
++      }
++
++      radeon_do_cp_start(dev_priv);
++
++      return 0;
++}
++
++/* Stop the CP.  The engine must have been idled before calling this
++ * routine.
++ */
++int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_cp_stop_t *stop = data;
++      int ret;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv->cp_running)
++              return 0;
++
++      /* Flush any pending CP commands.  This ensures any outstanding
++       * commands are exectuted by the engine before we turn it off.
++       */
++      if (stop->flush) {
++              radeon_do_cp_flush(dev_priv);
++      }
++
++      /* If we fail to make the engine go idle, we return an error
++       * code so that the DRM ioctl wrapper can try again.
++       */
++      if (stop->idle) {
++              ret = radeon_do_cp_idle(dev_priv);
++              if (ret)
++                      return ret;
++      }
++
++      /* Finally, we can turn off the CP.  If the engine isn't idle,
++       * we will get some dropped triangles as they won't be fully
++       * rendered before the CP is shut down.
++       */
++      radeon_do_cp_stop(dev_priv);
++
++      /* Reset the engine */
++      radeon_do_engine_reset(dev);
++
++      return 0;
++}
++
++void radeon_do_release(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int i, ret;
++
++      if (dev_priv) {
++              if (dev_priv->cp_running) {
++                      /* Stop the cp */
++                      while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
++                              DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
++#ifdef __linux__
++                              schedule();
++#else
++#if defined(__FreeBSD__) && __FreeBSD_version > 500000
++                              mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel",
++                                     1);
++#else
++                              tsleep(&ret, PZERO, "rdnrel", 1);
++#endif
++#endif
++                      }
++                      radeon_do_cp_stop(dev_priv);
++                      radeon_do_engine_reset(dev);
++              }
++
++              /* Disable *all* interrupts */
++              if (dev_priv->mmio)     /* remove this after permanent addmaps */
++                      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++
++              if (dev_priv->mmio) {   /* remove all surfaces */
++                      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++                              RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
++                              RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
++                                           16 * i, 0);
++                              RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
++                                           16 * i, 0);
++                      }
++              }
++
++              /* Free memory heap structures */
++              radeon_mem_takedown(&(dev_priv->gart_heap));
++              radeon_mem_takedown(&(dev_priv->fb_heap));
++
++              /* deallocate kernel resources */
++              radeon_do_cleanup_cp(dev);
++      }
++}
++
++/* Just reset the CP ring.  Called as part of an X Server engine reset.
++ */
++int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_DEBUG("called before init done\n");
++              return -EINVAL;
++      }
++
++      radeon_do_cp_reset(dev_priv);
++
++      /* The CP is no longer running after an engine reset */
++      dev_priv->cp_running = 0;
++
++      return 0;
++}
++
++int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return radeon_do_cp_idle(dev_priv);
++}
++
++/* Added by Charl P. Botha to call radeon_do_resume_cp().
++ */
++int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++
++      return radeon_do_resume_cp(dev);
++}
++
++int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return radeon_do_engine_reset(dev);
++}
++
++/* ================================================================
++ * Fullscreen mode
++ */
++
++/* KW: Deprecated to say the least:
++ */
++int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      return 0;
++}
++
++/* ================================================================
++ * Freelist management
++ */
++
++/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
++ *   bufs until freelist code is used.  Note this hides a problem with
++ *   the scratch register * (used to keep track of last buffer
++ *   completed) being written to before * the last buffer has actually
++ *   completed rendering.
++ *
++ * KW:  It's also a good way to find free buffers quickly.
++ *
++ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
++ * sleep.  However, bugs in older versions of radeon_accel.c mean that
++ * we essentially have to do this, else old clients will break.
++ *
++ * However, it does leave open a potential deadlock where all the
++ * buffers are held by other clients, which can't release them because
++ * they can't get the lock.
++ */
++
++struct drm_buf *radeon_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv;
++      struct drm_buf *buf;
++      int i, t;
++      int start;
++
++      if (++dev_priv->last_buf >= dma->buf_count)
++              dev_priv->last_buf = 0;
++
++      start = dev_priv->last_buf;
++
++      for (t = 0; t < dev_priv->usec_timeout; t++) {
++              u32 done_age = GET_SCRATCH(1);
++              DRM_DEBUG("done_age = %d\n", done_age);
++              for (i = start; i < dma->buf_count; i++) {
++                      buf = dma->buflist[i];
++                      buf_priv = buf->dev_private;
++                      if (buf->file_priv == NULL || (buf->pending &&
++                                                     buf_priv->age <=
++                                                     done_age)) {
++                              dev_priv->stats.requested_bufs++;
++                              buf->pending = 0;
++                              return buf;
++                      }
++                      start = 0;
++              }
++
++              if (t) {
++                      DRM_UDELAY(1);
++                      dev_priv->stats.freelist_loops++;
++              }
++      }
++
++      DRM_DEBUG("returning NULL!\n");
++      return NULL;
++}
++
++#if 0
++struct drm_buf *radeon_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv;
++      struct drm_buf *buf;
++      int i, t;
++      int start;
++      u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
++
++      if (++dev_priv->last_buf >= dma->buf_count)
++              dev_priv->last_buf = 0;
++
++      start = dev_priv->last_buf;
++      dev_priv->stats.freelist_loops++;
++
++      for (t = 0; t < 2; t++) {
++              for (i = start; i < dma->buf_count; i++) {
++                      buf = dma->buflist[i];
++                      buf_priv = buf->dev_private;
++                      if (buf->file_priv == 0 || (buf->pending &&
++                                                  buf_priv->age <=
++                                                  done_age)) {
++                              dev_priv->stats.requested_bufs++;
++                              buf->pending = 0;
++                              return buf;
++                      }
++              }
++              start = 0;
++      }
++
++      return NULL;
++}
++#endif
++
++void radeon_freelist_reset(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      dev_priv->last_buf = 0;
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
++              buf_priv->age = 0;
++      }
++}
++
++/* ================================================================
++ * CP command submission
++ */
++
++int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
++{
++      drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
++      int i;
++      u32 last_head = GET_RING_HEAD(dev_priv);
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              u32 head = GET_RING_HEAD(dev_priv);
++
++              ring->space = (head - ring->tail) * sizeof(u32);
++              if (ring->space <= 0)
++                      ring->space += ring->size;
++              if (ring->space > n)
++                      return 0;
++
++              dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++              if (head != last_head)
++                      i = 0;
++              last_head = head;
++
++              DRM_UDELAY(1);
++      }
++
++      /* FIXME: This return value is ignored in the BEGIN_RING macro! */
++#if RADEON_FIFO_DEBUG
++      radeon_status(dev_priv);
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++static int radeon_cp_get_buffers(struct drm_device *dev,
++                               struct drm_file *file_priv,
++                               struct drm_dma * d)
++{
++      int i;
++      struct drm_buf *buf;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = radeon_freelist_get(dev);
++              if (!buf)
++                      return -EBUSY;  /* NOTE: broken client */
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
++                                   sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
++                                   sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int ret = 0;
++      struct drm_dma *d = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = radeon_cp_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++int radeon_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      drm_radeon_private_t *dev_priv;
++      int ret = 0;
++
++      dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_radeon_private_t));
++      dev->dev_private = (void *)dev_priv;
++      dev_priv->flags = flags;
++
++      switch (flags & RADEON_FAMILY_MASK) {
++      case CHIP_R100:
++      case CHIP_RV200:
++      case CHIP_R200:
++      case CHIP_R300:
++      case CHIP_R350:
++      case CHIP_R420:
++      case CHIP_RV410:
++      case CHIP_RV515:
++      case CHIP_R520:
++      case CHIP_RV570:
++      case CHIP_R580:
++              dev_priv->flags |= RADEON_HAS_HIERZ;
++              break;
++      default:
++              /* all other chips have no hierarchical z buffer */
++              break;
++      }
++
++      dev_priv->chip_family = flags & RADEON_FAMILY_MASK;
++      if (drm_device_is_agp(dev))
++              dev_priv->flags |= RADEON_IS_AGP;
++      else if (drm_device_is_pcie(dev))
++              dev_priv->flags |= RADEON_IS_PCIE;
++      else
++              dev_priv->flags |= RADEON_IS_PCI;
++
++      DRM_DEBUG("%s card detected\n",
++                ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
++      return ret;
++}
++
++/* Create mappings for registers and framebuffer so userland doesn't necessarily
++ * have to find them.
++ */
++int radeon_driver_firstopen(struct drm_device *dev)
++{
++      int ret;
++      drm_local_map_t *map;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
++
++      ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
++                       drm_get_resource_len(dev, 2), _DRM_REGISTERS,
++                       _DRM_READ_ONLY, &dev_priv->mmio);
++      if (ret != 0)
++              return ret;
++
++      dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
++      ret = drm_addmap(dev, dev_priv->fb_aper_offset,
++                       drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
++                       _DRM_WRITE_COMBINING, &map);
++      if (ret != 0)
++              return ret;
++
++      return 0;
++}
++
++int radeon_driver_unload(struct drm_device *dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++
++      dev->dev_private = NULL;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drm.h git-nokia/drivers/gpu/drm-tungsten/radeon_drm.h
+--- git/drivers/gpu/drm-tungsten/radeon_drm.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drm.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,750 @@
++/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
++ *
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++#ifndef __RADEON_DRM_H__
++#define __RADEON_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the X server file (radeon_sarea.h)
++ */
++#ifndef __RADEON_SAREA_DEFINES__
++#define __RADEON_SAREA_DEFINES__
++
++/* Old style state flags, required for sarea interface (1.1 and 1.2
++ * clears) and 1.2 drm_vertex2 ioctl.
++ */
++#define RADEON_UPLOAD_CONTEXT         0x00000001
++#define RADEON_UPLOAD_VERTFMT         0x00000002
++#define RADEON_UPLOAD_LINE            0x00000004
++#define RADEON_UPLOAD_BUMPMAP         0x00000008
++#define RADEON_UPLOAD_MASKS           0x00000010
++#define RADEON_UPLOAD_VIEWPORT                0x00000020
++#define RADEON_UPLOAD_SETUP           0x00000040
++#define RADEON_UPLOAD_TCL             0x00000080
++#define RADEON_UPLOAD_MISC            0x00000100
++#define RADEON_UPLOAD_TEX0            0x00000200
++#define RADEON_UPLOAD_TEX1            0x00000400
++#define RADEON_UPLOAD_TEX2            0x00000800
++#define RADEON_UPLOAD_TEX0IMAGES      0x00001000
++#define RADEON_UPLOAD_TEX1IMAGES      0x00002000
++#define RADEON_UPLOAD_TEX2IMAGES      0x00004000
++#define RADEON_UPLOAD_CLIPRECTS               0x00008000      /* handled client-side */
++#define RADEON_REQUIRE_QUIESCENCE     0x00010000
++#define RADEON_UPLOAD_ZBIAS           0x00020000      /* version 1.2 and newer */
++#define RADEON_UPLOAD_ALL             0x003effff
++#define RADEON_UPLOAD_CONTEXT_ALL       0x003e01ff
++
++/* New style per-packet identifiers for use in cmd_buffer ioctl with
++ * the RADEON_EMIT_PACKET command.  Comments relate new packets to old
++ * state bits and the packet size:
++ */
++#define RADEON_EMIT_PP_MISC                         0 /* context/7 */
++#define RADEON_EMIT_PP_CNTL                         1 /* context/3 */
++#define RADEON_EMIT_RB3D_COLORPITCH                 2 /* context/1 */
++#define RADEON_EMIT_RE_LINE_PATTERN                 3 /* line/2 */
++#define RADEON_EMIT_SE_LINE_WIDTH                   4 /* line/1 */
++#define RADEON_EMIT_PP_LUM_MATRIX                   5 /* bumpmap/1 */
++#define RADEON_EMIT_PP_ROT_MATRIX_0                 6 /* bumpmap/2 */
++#define RADEON_EMIT_RB3D_STENCILREFMASK             7 /* masks/3 */
++#define RADEON_EMIT_SE_VPORT_XSCALE                 8 /* viewport/6 */
++#define RADEON_EMIT_SE_CNTL                         9 /* setup/2 */
++#define RADEON_EMIT_SE_CNTL_STATUS                  10        /* setup/1 */
++#define RADEON_EMIT_RE_MISC                         11        /* misc/1 */
++#define RADEON_EMIT_PP_TXFILTER_0                   12        /* tex0/6 */
++#define RADEON_EMIT_PP_BORDER_COLOR_0               13        /* tex0/1 */
++#define RADEON_EMIT_PP_TXFILTER_1                   14        /* tex1/6 */
++#define RADEON_EMIT_PP_BORDER_COLOR_1               15        /* tex1/1 */
++#define RADEON_EMIT_PP_TXFILTER_2                   16        /* tex2/6 */
++#define RADEON_EMIT_PP_BORDER_COLOR_2               17        /* tex2/1 */
++#define RADEON_EMIT_SE_ZBIAS_FACTOR                 18        /* zbias/2 */
++#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT           19        /* tcl/11 */
++#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED   20        /* material/17 */
++#define R200_EMIT_PP_TXCBLEND_0                     21        /* tex0/4 */
++#define R200_EMIT_PP_TXCBLEND_1                     22        /* tex1/4 */
++#define R200_EMIT_PP_TXCBLEND_2                     23        /* tex2/4 */
++#define R200_EMIT_PP_TXCBLEND_3                     24        /* tex3/4 */
++#define R200_EMIT_PP_TXCBLEND_4                     25        /* tex4/4 */
++#define R200_EMIT_PP_TXCBLEND_5                     26        /* tex5/4 */
++#define R200_EMIT_PP_TXCBLEND_6                     27        /* /4 */
++#define R200_EMIT_PP_TXCBLEND_7                     28        /* /4 */
++#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0             29        /* tcl/7 */
++#define R200_EMIT_TFACTOR_0                         30        /* tf/7 */
++#define R200_EMIT_VTX_FMT_0                         31        /* vtx/5 */
++#define R200_EMIT_VAP_CTL                           32        /* vap/1 */
++#define R200_EMIT_MATRIX_SELECT_0                   33        /* msl/5 */
++#define R200_EMIT_TEX_PROC_CTL_2                    34        /* tcg/5 */
++#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL            35        /* tcl/1 */
++#define R200_EMIT_PP_TXFILTER_0                     36        /* tex0/6 */
++#define R200_EMIT_PP_TXFILTER_1                     37        /* tex1/6 */
++#define R200_EMIT_PP_TXFILTER_2                     38        /* tex2/6 */
++#define R200_EMIT_PP_TXFILTER_3                     39        /* tex3/6 */
++#define R200_EMIT_PP_TXFILTER_4                     40        /* tex4/6 */
++#define R200_EMIT_PP_TXFILTER_5                     41        /* tex5/6 */
++#define R200_EMIT_PP_TXOFFSET_0                     42        /* tex0/1 */
++#define R200_EMIT_PP_TXOFFSET_1                     43        /* tex1/1 */
++#define R200_EMIT_PP_TXOFFSET_2                     44        /* tex2/1 */
++#define R200_EMIT_PP_TXOFFSET_3                     45        /* tex3/1 */
++#define R200_EMIT_PP_TXOFFSET_4                     46        /* tex4/1 */
++#define R200_EMIT_PP_TXOFFSET_5                     47        /* tex5/1 */
++#define R200_EMIT_VTE_CNTL                          48        /* vte/1 */
++#define R200_EMIT_OUTPUT_VTX_COMP_SEL               49        /* vtx/1 */
++#define R200_EMIT_PP_TAM_DEBUG3                     50        /* tam/1 */
++#define R200_EMIT_PP_CNTL_X                         51        /* cst/1 */
++#define R200_EMIT_RB3D_DEPTHXY_OFFSET               52        /* cst/1 */
++#define R200_EMIT_RE_AUX_SCISSOR_CNTL               53        /* cst/1 */
++#define R200_EMIT_RE_SCISSOR_TL_0                   54        /* cst/2 */
++#define R200_EMIT_RE_SCISSOR_TL_1                   55        /* cst/2 */
++#define R200_EMIT_RE_SCISSOR_TL_2                   56        /* cst/2 */
++#define R200_EMIT_SE_VAP_CNTL_STATUS                57        /* cst/1 */
++#define R200_EMIT_SE_VTX_STATE_CNTL                 58        /* cst/1 */
++#define R200_EMIT_RE_POINTSIZE                      59        /* cst/1 */
++#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0       60        /* cst/4 */
++#define R200_EMIT_PP_CUBIC_FACES_0                  61
++#define R200_EMIT_PP_CUBIC_OFFSETS_0                62
++#define R200_EMIT_PP_CUBIC_FACES_1                  63
++#define R200_EMIT_PP_CUBIC_OFFSETS_1                64
++#define R200_EMIT_PP_CUBIC_FACES_2                  65
++#define R200_EMIT_PP_CUBIC_OFFSETS_2                66
++#define R200_EMIT_PP_CUBIC_FACES_3                  67
++#define R200_EMIT_PP_CUBIC_OFFSETS_3                68
++#define R200_EMIT_PP_CUBIC_FACES_4                  69
++#define R200_EMIT_PP_CUBIC_OFFSETS_4                70
++#define R200_EMIT_PP_CUBIC_FACES_5                  71
++#define R200_EMIT_PP_CUBIC_OFFSETS_5                72
++#define RADEON_EMIT_PP_TEX_SIZE_0                   73
++#define RADEON_EMIT_PP_TEX_SIZE_1                   74
++#define RADEON_EMIT_PP_TEX_SIZE_2                   75
++#define R200_EMIT_RB3D_BLENDCOLOR                   76
++#define R200_EMIT_TCL_POINT_SPRITE_CNTL             77
++#define RADEON_EMIT_PP_CUBIC_FACES_0                78
++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0             79
++#define RADEON_EMIT_PP_CUBIC_FACES_1                80
++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1             81
++#define RADEON_EMIT_PP_CUBIC_FACES_2                82
++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2             83
++#define R200_EMIT_PP_TRI_PERF_CNTL                  84
++#define R200_EMIT_PP_AFS_0                          85
++#define R200_EMIT_PP_AFS_1                          86
++#define R200_EMIT_ATF_TFACTOR                       87
++#define R200_EMIT_PP_TXCTLALL_0                     88
++#define R200_EMIT_PP_TXCTLALL_1                     89
++#define R200_EMIT_PP_TXCTLALL_2                     90
++#define R200_EMIT_PP_TXCTLALL_3                     91
++#define R200_EMIT_PP_TXCTLALL_4                     92
++#define R200_EMIT_PP_TXCTLALL_5                     93
++#define R200_EMIT_VAP_PVS_CNTL                      94
++#define RADEON_MAX_STATE_PACKETS                    95
++
++/* Commands understood by cmd_buffer ioctl.  More can be added but
++ * obviously these can't be removed or changed:
++ */
++#define RADEON_CMD_PACKET      1      /* emit one of the register packets above */
++#define RADEON_CMD_SCALARS     2      /* emit scalar data */
++#define RADEON_CMD_VECTORS     3      /* emit vector data */
++#define RADEON_CMD_DMA_DISCARD 4      /* discard current dma buf */
++#define RADEON_CMD_PACKET3     5      /* emit hw packet */
++#define RADEON_CMD_PACKET3_CLIP 6     /* emit hw packet wrapped in cliprects */
++#define RADEON_CMD_SCALARS2     7     /* r200 stopgap */
++#define RADEON_CMD_WAIT         8     /* emit hw wait commands -- note:
++                                       *  doesn't make the cpu wait, just
++                                       *  the graphics hardware */
++#define RADEON_CMD_VECLINEAR  9       /* another r200 stopgap */
++
++typedef union {
++      int i;
++      struct {
++              unsigned char cmd_type, pad0, pad1, pad2;
++      } header;
++      struct {
++              unsigned char cmd_type, packet_id, pad0, pad1;
++      } packet;
++      struct {
++              unsigned char cmd_type, offset, stride, count;
++      } scalars;
++      struct {
++              unsigned char cmd_type, offset, stride, count;
++      } vectors;
++      struct {
++              unsigned char cmd_type, addr_lo, addr_hi, count;
++      } veclinear;
++      struct {
++              unsigned char cmd_type, buf_idx, pad0, pad1;
++      } dma;
++      struct {
++              unsigned char cmd_type, flags, pad0, pad1;
++      } wait;
++} drm_radeon_cmd_header_t;
++
++#define RADEON_WAIT_2D  0x1
++#define RADEON_WAIT_3D  0x2
++
++/* Allowed parameters for R300_CMD_PACKET3
++ */
++#define R300_CMD_PACKET3_CLEAR                0
++#define R300_CMD_PACKET3_RAW          1
++
++/* Commands understood by cmd_buffer ioctl for R300.
++ * The interface has not been stabilized, so some of these may be removed
++ * and eventually reordered before stabilization.
++ */
++#define R300_CMD_PACKET0              1
++#define R300_CMD_VPU                  2       /* emit vertex program upload */
++#define R300_CMD_PACKET3              3       /* emit a packet3 */
++#define R300_CMD_END3D                        4       /* emit sequence ending 3d rendering */
++#define R300_CMD_CP_DELAY             5
++#define R300_CMD_DMA_DISCARD          6
++#define R300_CMD_WAIT                 7
++#     define R300_WAIT_2D             0x1
++#     define R300_WAIT_3D             0x2
++/* these two defines are DOING IT WRONG - however
++ * we have userspace which relies on using these.
++ * The wait interface is backwards compat new 
++ * code should use the NEW_WAIT defines below
++ * THESE ARE NOT BIT FIELDS
++ */
++#     define R300_WAIT_2D_CLEAN       0x3
++#     define R300_WAIT_3D_CLEAN       0x4
++
++#     define R300_NEW_WAIT_2D_3D      0x3
++#     define R300_NEW_WAIT_2D_2D_CLEAN        0x4
++#     define R300_NEW_WAIT_3D_3D_CLEAN        0x6
++#     define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN    0x8
++
++#define R300_CMD_SCRATCH              8
++#define R300_CMD_R500FP                 9
++
++typedef union {
++      unsigned int u;
++      struct {
++              unsigned char cmd_type, pad0, pad1, pad2;
++      } header;
++      struct {
++              unsigned char cmd_type, count, reglo, reghi;
++      } packet0;
++      struct {
++              unsigned char cmd_type, count, adrlo, adrhi;
++      } vpu;
++      struct {
++              unsigned char cmd_type, packet, pad0, pad1;
++      } packet3;
++      struct {
++              unsigned char cmd_type, packet;
++              unsigned short count;   /* amount of packet2 to emit */
++      } delay;
++      struct {
++              unsigned char cmd_type, buf_idx, pad0, pad1;
++      } dma;
++      struct {
++              unsigned char cmd_type, flags, pad0, pad1;
++      } wait;
++      struct {
++              unsigned char cmd_type, reg, n_bufs, flags;
++      } scratch;
++      struct {
++              unsigned char cmd_type, count, adrlo, adrhi_flags;
++      } r500fp;
++} drm_r300_cmd_header_t;
++
++#define RADEON_FRONT                  0x1
++#define RADEON_BACK                   0x2
++#define RADEON_DEPTH                  0x4
++#define RADEON_STENCIL                        0x8
++#define RADEON_CLEAR_FASTZ            0x80000000
++#define RADEON_USE_HIERZ              0x40000000
++#define RADEON_USE_COMP_ZBUF          0x20000000
++
++#define R500FP_CONSTANT_TYPE  (1 << 1)
++#define R500FP_CONSTANT_CLAMP (1 << 2)
++
++/* Primitive types
++ */
++#define RADEON_POINTS                 0x1
++#define RADEON_LINES                  0x2
++#define RADEON_LINE_STRIP             0x3
++#define RADEON_TRIANGLES              0x4
++#define RADEON_TRIANGLE_FAN           0x5
++#define RADEON_TRIANGLE_STRIP         0x6
++
++/* Vertex/indirect buffer size
++ */
++#define RADEON_BUFFER_SIZE            65536
++
++/* Byte offsets for indirect buffer data
++ */
++#define RADEON_INDEX_PRIM_OFFSET      20
++
++#define RADEON_SCRATCH_REG_OFFSET     32
++
++#define RADEON_NR_SAREA_CLIPRECTS     12
++
++/* There are 2 heaps (local/GART).  Each region within a heap is a
++ * minimum of 64k, and there are at most 64 of them per heap.
++ */
++#define RADEON_LOCAL_TEX_HEAP         0
++#define RADEON_GART_TEX_HEAP          1
++#define RADEON_NR_TEX_HEAPS           2
++#define RADEON_NR_TEX_REGIONS         64
++#define RADEON_LOG_TEX_GRANULARITY    16
++
++#define RADEON_MAX_TEXTURE_LEVELS     12
++#define RADEON_MAX_TEXTURE_UNITS      3
++
++#define RADEON_MAX_SURFACES           8
++
++/* Blits have strict offset rules.  All blit offset must be aligned on
++ * a 1K-byte boundary.
++ */
++#define RADEON_OFFSET_SHIFT             10
++#define RADEON_OFFSET_ALIGN             (1 << RADEON_OFFSET_SHIFT)
++#define RADEON_OFFSET_MASK              (RADEON_OFFSET_ALIGN - 1)
++
++#endif                                /* __RADEON_SAREA_DEFINES__ */
++
++typedef struct {
++      unsigned int red;
++      unsigned int green;
++      unsigned int blue;
++      unsigned int alpha;
++} radeon_color_regs_t;
++
++typedef struct {
++      /* Context state */
++      unsigned int pp_misc;   /* 0x1c14 */
++      unsigned int pp_fog_color;
++      unsigned int re_solid_color;
++      unsigned int rb3d_blendcntl;
++      unsigned int rb3d_depthoffset;
++      unsigned int rb3d_depthpitch;
++      unsigned int rb3d_zstencilcntl;
++
++      unsigned int pp_cntl;   /* 0x1c38 */
++      unsigned int rb3d_cntl;
++      unsigned int rb3d_coloroffset;
++      unsigned int re_width_height;
++      unsigned int rb3d_colorpitch;
++      unsigned int se_cntl;
++
++      /* Vertex format state */
++      unsigned int se_coord_fmt;      /* 0x1c50 */
++
++      /* Line state */
++      unsigned int re_line_pattern;   /* 0x1cd0 */
++      unsigned int re_line_state;
++
++      unsigned int se_line_width;     /* 0x1db8 */
++
++      /* Bumpmap state */
++      unsigned int pp_lum_matrix;     /* 0x1d00 */
++
++      unsigned int pp_rot_matrix_0;   /* 0x1d58 */
++      unsigned int pp_rot_matrix_1;
++
++      /* Mask state */
++      unsigned int rb3d_stencilrefmask;       /* 0x1d7c */
++      unsigned int rb3d_ropcntl;
++      unsigned int rb3d_planemask;
++
++      /* Viewport state */
++      unsigned int se_vport_xscale;   /* 0x1d98 */
++      unsigned int se_vport_xoffset;
++      unsigned int se_vport_yscale;
++      unsigned int se_vport_yoffset;
++      unsigned int se_vport_zscale;
++      unsigned int se_vport_zoffset;
++
++      /* Setup state */
++      unsigned int se_cntl_status;    /* 0x2140 */
++
++      /* Misc state */
++      unsigned int re_top_left;       /* 0x26c0 */
++      unsigned int re_misc;
++} drm_radeon_context_regs_t;
++
++typedef struct {
++      /* Zbias state */
++      unsigned int se_zbias_factor;   /* 0x1dac */
++      unsigned int se_zbias_constant;
++} drm_radeon_context2_regs_t;
++
++/* Setup registers for each texture unit
++ */
++typedef struct {
++      unsigned int pp_txfilter;
++      unsigned int pp_txformat;
++      unsigned int pp_txoffset;
++      unsigned int pp_txcblend;
++      unsigned int pp_txablend;
++      unsigned int pp_tfactor;
++      unsigned int pp_border_color;
++} drm_radeon_texture_regs_t;
++
++typedef struct {
++      unsigned int start;
++      unsigned int finish;
++      unsigned int prim:8;
++      unsigned int stateidx:8;
++      unsigned int numverts:16;       /* overloaded as offset/64 for elt prims */
++      unsigned int vc_format; /* vertex format */
++} drm_radeon_prim_t;
++
++typedef struct {
++      drm_radeon_context_regs_t context;
++      drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
++      drm_radeon_context2_regs_t context2;
++      unsigned int dirty;
++} drm_radeon_state_t;
++
++typedef struct {
++      /* The channel for communication of state information to the
++       * kernel on firing a vertex buffer with either of the
++       * obsoleted vertex/index ioctls.
++       */
++      drm_radeon_context_regs_t context_state;
++      drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
++      unsigned int dirty;
++      unsigned int vertsize;
++      unsigned int vc_format;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Counters for client-side throttling of rendering clients.
++       */
++      unsigned int last_frame;
++      unsigned int last_dispatch;
++      unsigned int last_clear;
++
++      struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
++                                                     1];
++      unsigned int tex_age[RADEON_NR_TEX_HEAPS];
++      int ctx_owner;
++      int pfState;            /* number of 3d windows (0,1,2ormore) */
++      int pfCurrentPage;      /* which buffer is being displayed? */
++      int crtc2_base;         /* CRTC2 frame offset */
++      int tiling_enabled;     /* set by drm, read by 2d + 3d clients */
++} drm_radeon_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (xf86drmRadeon.h)
++ *
++ * KW: actually it's illegal to change any of this (backwards compatibility).
++ */
++
++/* Radeon specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_RADEON_CP_INIT    0x00
++#define DRM_RADEON_CP_START   0x01
++#define DRM_RADEON_CP_STOP    0x02
++#define DRM_RADEON_CP_RESET   0x03
++#define DRM_RADEON_CP_IDLE    0x04
++#define DRM_RADEON_RESET      0x05
++#define DRM_RADEON_FULLSCREEN 0x06
++#define DRM_RADEON_SWAP       0x07
++#define DRM_RADEON_CLEAR      0x08
++#define DRM_RADEON_VERTEX     0x09
++#define DRM_RADEON_INDICES    0x0A
++#define DRM_RADEON_NOT_USED
++#define DRM_RADEON_STIPPLE    0x0C
++#define DRM_RADEON_INDIRECT   0x0D
++#define DRM_RADEON_TEXTURE    0x0E
++#define DRM_RADEON_VERTEX2    0x0F
++#define DRM_RADEON_CMDBUF     0x10
++#define DRM_RADEON_GETPARAM   0x11
++#define DRM_RADEON_FLIP       0x12
++#define DRM_RADEON_ALLOC      0x13
++#define DRM_RADEON_FREE       0x14
++#define DRM_RADEON_INIT_HEAP  0x15
++#define DRM_RADEON_IRQ_EMIT   0x16
++#define DRM_RADEON_IRQ_WAIT   0x17
++#define DRM_RADEON_CP_RESUME  0x18
++#define DRM_RADEON_SETPARAM   0x19
++#define DRM_RADEON_SURF_ALLOC 0x1a
++#define DRM_RADEON_SURF_FREE  0x1b
++
++#define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
++#define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
++#define DRM_IOCTL_RADEON_CP_STOP    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
++#define DRM_IOCTL_RADEON_CP_RESET   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
++#define DRM_IOCTL_RADEON_CP_IDLE    DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
++#define DRM_IOCTL_RADEON_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_RESET)
++#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
++#define DRM_IOCTL_RADEON_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_SWAP)
++#define DRM_IOCTL_RADEON_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
++#define DRM_IOCTL_RADEON_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
++#define DRM_IOCTL_RADEON_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
++#define DRM_IOCTL_RADEON_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
++#define DRM_IOCTL_RADEON_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
++#define DRM_IOCTL_RADEON_TEXTURE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
++#define DRM_IOCTL_RADEON_VERTEX2    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
++#define DRM_IOCTL_RADEON_CMDBUF     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
++#define DRM_IOCTL_RADEON_GETPARAM   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
++#define DRM_IOCTL_RADEON_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_FLIP)
++#define DRM_IOCTL_RADEON_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
++#define DRM_IOCTL_RADEON_FREE       DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
++#define DRM_IOCTL_RADEON_INIT_HEAP  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
++#define DRM_IOCTL_RADEON_IRQ_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
++#define DRM_IOCTL_RADEON_IRQ_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
++#define DRM_IOCTL_RADEON_CP_RESUME  DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
++#define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
++#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
++#define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
++
++typedef struct drm_radeon_init {
++      enum {
++              RADEON_INIT_CP = 0x01,
++              RADEON_CLEANUP_CP = 0x02,
++              RADEON_INIT_R200_CP = 0x03,
++              RADEON_INIT_R300_CP = 0x04
++      } func;
++      unsigned long sarea_priv_offset;
++      int is_pci; /* for overriding only */
++      int cp_mode;
++      int gart_size;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      unsigned long fb_offset DEPRECATED;     /* deprecated, driver asks hardware */
++      unsigned long mmio_offset DEPRECATED;   /* deprecated, driver asks hardware */
++      unsigned long ring_offset;
++      unsigned long ring_rptr_offset;
++      unsigned long buffers_offset;
++      unsigned long gart_textures_offset;
++} drm_radeon_init_t;
++
++typedef struct drm_radeon_cp_stop {
++      int flush;
++      int idle;
++} drm_radeon_cp_stop_t;
++
++typedef struct drm_radeon_fullscreen {
++      enum {
++              RADEON_INIT_FULLSCREEN = 0x01,
++              RADEON_CLEANUP_FULLSCREEN = 0x02
++      } func;
++} drm_radeon_fullscreen_t;
++
++#define CLEAR_X1      0
++#define CLEAR_Y1      1
++#define CLEAR_X2      2
++#define CLEAR_Y2      3
++#define CLEAR_DEPTH   4
++
++typedef union drm_radeon_clear_rect {
++      float f[5];
++      unsigned int ui[5];
++} drm_radeon_clear_rect_t;
++
++typedef struct drm_radeon_clear {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;        /* misnamed field:  should be stencil */
++      drm_radeon_clear_rect_t __user *depth_boxes;
++} drm_radeon_clear_t;
++
++typedef struct drm_radeon_vertex {
++      int prim;
++      int idx;                /* Index of vertex buffer */
++      int count;              /* Number of vertices in buffer */
++      int discard;            /* Client finished with buffer? */
++} drm_radeon_vertex_t;
++
++typedef struct drm_radeon_indices {
++      int prim;
++      int idx;
++      int start;
++      int end;
++      int discard;            /* Client finished with buffer? */
++} drm_radeon_indices_t;
++
++/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
++ *      - allows multiple primitives and state changes in a single ioctl
++ *      - supports driver change to emit native primitives
++ */
++typedef struct drm_radeon_vertex2 {
++      int idx;                /* Index of vertex buffer */
++      int discard;            /* Client finished with buffer? */
++      int nr_states;
++      drm_radeon_state_t __user *state;
++      int nr_prims;
++      drm_radeon_prim_t __user *prim;
++} drm_radeon_vertex2_t;
++
++/* v1.3 - obsoletes drm_radeon_vertex2
++ *      - allows arbitarily large cliprect list
++ *      - allows updating of tcl packet, vector and scalar state
++ *      - allows memory-efficient description of state updates
++ *      - allows state to be emitted without a primitive
++ *           (for clears, ctx switches)
++ *      - allows more than one dma buffer to be referenced per ioctl
++ *      - supports tcl driver
++ *      - may be extended in future versions with new cmd types, packets
++ */
++typedef struct drm_radeon_cmd_buffer {
++      int bufsz;
++      char __user *buf;
++      int nbox;
++      struct drm_clip_rect __user *boxes;
++} drm_radeon_cmd_buffer_t;
++
++typedef struct drm_radeon_tex_image {
++      unsigned int x, y;      /* Blit coordinates */
++      unsigned int width, height;
++      const void __user *data;
++} drm_radeon_tex_image_t;
++
++typedef struct drm_radeon_texture {
++      unsigned int offset;
++      int pitch;
++      int format;
++      int width;              /* Texture image coordinates */
++      int height;
++      drm_radeon_tex_image_t __user *image;
++} drm_radeon_texture_t;
++
++typedef struct drm_radeon_stipple {
++      unsigned int __user *mask;
++} drm_radeon_stipple_t;
++
++typedef struct drm_radeon_indirect {
++      int idx;
++      int start;
++      int end;
++      int discard;
++} drm_radeon_indirect_t;
++
++/* enum for card type parameters */
++#define RADEON_CARD_PCI 0
++#define RADEON_CARD_AGP 1
++#define RADEON_CARD_PCIE 2
++
++/* 1.3: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define RADEON_PARAM_GART_BUFFER_OFFSET    1  /* card offset of 1st GART buffer */
++#define RADEON_PARAM_LAST_FRAME            2
++#define RADEON_PARAM_LAST_DISPATCH         3
++#define RADEON_PARAM_LAST_CLEAR            4
++/* Added with DRM version 1.6. */
++#define RADEON_PARAM_IRQ_NR                5
++#define RADEON_PARAM_GART_BASE             6  /* card offset of GART base */
++/* Added with DRM version 1.8. */
++#define RADEON_PARAM_REGISTER_HANDLE       7  /* for drmMap() */
++#define RADEON_PARAM_STATUS_HANDLE         8
++#define RADEON_PARAM_SAREA_HANDLE          9
++#define RADEON_PARAM_GART_TEX_HANDLE       10
++#define RADEON_PARAM_SCRATCH_OFFSET        11
++#define RADEON_PARAM_CARD_TYPE             12
++#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
++#define RADEON_PARAM_FB_LOCATION           14   /* FB location */
++#define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
++
++typedef struct drm_radeon_getparam {
++      int param;
++      void __user *value;
++} drm_radeon_getparam_t;
++
++/* 1.6: Set up a memory manager for regions of shared memory:
++ */
++#define RADEON_MEM_REGION_GART 1
++#define RADEON_MEM_REGION_FB   2
++
++typedef struct drm_radeon_mem_alloc {
++      int region;
++      int alignment;
++      int size;
++      int __user *region_offset;      /* offset from start of fb or GART */
++} drm_radeon_mem_alloc_t;
++
++typedef struct drm_radeon_mem_free {
++      int region;
++      int region_offset;
++} drm_radeon_mem_free_t;
++
++typedef struct drm_radeon_mem_init_heap {
++      int region;
++      int size;
++      int start;
++} drm_radeon_mem_init_heap_t;
++
++/* 1.6: Userspace can request & wait on irq's:
++ */
++typedef struct drm_radeon_irq_emit {
++      int __user *irq_seq;
++} drm_radeon_irq_emit_t;
++
++typedef struct drm_radeon_irq_wait {
++      int irq_seq;
++} drm_radeon_irq_wait_t;
++
++/* 1.10: Clients tell the DRM where they think the framebuffer is located in
++ * the card's address space, via a new generic ioctl to set parameters
++ */
++
++typedef struct drm_radeon_setparam {
++      unsigned int param;
++      int64_t value;
++} drm_radeon_setparam_t;
++
++#define RADEON_SETPARAM_FB_LOCATION    1      /* determined framebuffer location */
++#define RADEON_SETPARAM_SWITCH_TILING  2      /* enable/disable color tiling */
++#define RADEON_SETPARAM_PCIGART_LOCATION 3    /* PCI Gart Location */
++
++#define RADEON_SETPARAM_NEW_MEMMAP 4          /* Use new memory map */
++#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5    /* PCI GART Table Size */
++#define RADEON_SETPARAM_VBLANK_CRTC 6           /* VBLANK CRTC */
++/* 1.14: Clients can allocate/free a surface
++ */
++typedef struct drm_radeon_surface_alloc {
++      unsigned int address;
++      unsigned int size;
++      unsigned int flags;
++} drm_radeon_surface_alloc_t;
++
++typedef struct drm_radeon_surface_free {
++      unsigned int address;
++} drm_radeon_surface_free_t;
++
++#define       DRM_RADEON_VBLANK_CRTC1         1
++#define       DRM_RADEON_VBLANK_CRTC2         2
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drv.c git-nokia/drivers/gpu/drm-tungsten/radeon_drv.c
+--- git/drivers/gpu/drm-tungsten/radeon_drv.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drv.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,157 @@
++/**
++ * \file radeon_drv.c
++ * ATI Radeon driver
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++#include "drm_pciids.h"
++
++int radeon_no_wb;
++
++MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
++module_param_named(no_wb, radeon_no_wb, int, 0444);
++
++static int dri_library_name(struct drm_device * dev, char * buf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int family = dev_priv->flags & RADEON_FAMILY_MASK;
++
++      return snprintf(buf, PAGE_SIZE, "%s\n",
++              (family < CHIP_R200) ? "radeon" :
++              ((family < CHIP_R300) ? "r200" :
++              "r300"));
++}
++
++static int radeon_suspend(struct drm_device *dev, pm_message_t state)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      /* Disable *all* interrupts */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++      return 0;
++}
++
++static int radeon_resume(struct drm_device *dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      /* Restore interrupt registers */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
++      return 0;
++}
++
++static struct pci_device_id pciidlist[] = {
++      radeon_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
++          DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
++      .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
++      .load = radeon_driver_load,
++      .firstopen = radeon_driver_firstopen,
++      .open = radeon_driver_open,
++      .preclose = radeon_driver_preclose,
++      .postclose = radeon_driver_postclose,
++      .lastclose = radeon_driver_lastclose,
++      .unload = radeon_driver_unload,
++      .suspend = radeon_suspend,
++      .resume = radeon_resume,
++      .get_vblank_counter = radeon_get_vblank_counter,
++      .enable_vblank = radeon_enable_vblank,
++      .disable_vblank = radeon_disable_vblank,
++      .dri_library_name = dri_library_name,
++      .irq_preinstall = radeon_driver_irq_preinstall,
++      .irq_postinstall = radeon_driver_irq_postinstall,
++      .irq_uninstall = radeon_driver_irq_uninstall,
++      .irq_handler = radeon_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = radeon_ioctls,
++      .dma_ioctl = radeon_cp_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = radeon_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init radeon_init(void)
++{
++      driver.num_ioctls = radeon_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit radeon_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(radeon_init);
++module_exit(radeon_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drv.h git-nokia/drivers/gpu/drm-tungsten/radeon_drv.h
+--- git/drivers/gpu/drm-tungsten/radeon_drv.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drv.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1443 @@
++/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __RADEON_DRV_H__
++#define __RADEON_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Gareth Hughes, Keith Whitwell, others."
++
++#define DRIVER_NAME           "radeon"
++#define DRIVER_DESC           "ATI Radeon"
++#define DRIVER_DATE           "20080613"
++
++/* Interface history:
++ *
++ * 1.1 - ??
++ * 1.2 - Add vertex2 ioctl (keith)
++ *     - Add stencil capability to clear ioctl (gareth, keith)
++ *     - Increase MAX_TEXTURE_LEVELS (brian)
++ * 1.3 - Add cmdbuf ioctl (keith)
++ *     - Add support for new radeon packets (keith)
++ *     - Add getparam ioctl (keith)
++ *     - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
++ * 1.4 - Add scratch registers to get_param ioctl.
++ * 1.5 - Add r200 packets to cmdbuf ioctl
++ *     - Add r200 function to init ioctl
++ *     - Add 'scalar2' instruction to cmdbuf
++ * 1.6 - Add static GART memory manager
++ *       Add irq handler (won't be turned on unless X server knows to)
++ *       Add irq ioctls and irq_active getparam.
++ *       Add wait command for cmdbuf ioctl
++ *       Add GART offset query for getparam
++ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
++ *       and R200_PP_CUBIC_OFFSET_F1_[0..5].
++ *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
++ *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
++ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
++ *       Add 'GET' queries for starting additional clients on different VT's.
++ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
++ *       Add texture rectangle support for r100.
++ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
++ *       clients use to tell the DRM where they think the framebuffer is
++ *       located in the card's address space
++ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
++ *       and GL_EXT_blend_[func|equation]_separate on r200
++ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
++ *       (No 3D support yet - just microcode loading).
++ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
++ *     - Add hyperz support, add hyperz flags to clear ioctl.
++ * 1.14- Add support for color tiling
++ *     - Add R100/R200 surface allocation/free support
++ * 1.15- Add support for texture micro tiling
++ *     - Add support for r100 cube maps
++ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
++ *       texture filtering on r200
++ * 1.17- Add initial support for R300 (3D).
++ * 1.18- Add support for GL_ATI_fragment_shader, new packets
++ *       R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
++ *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
++ *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6))
++ * 1.19- Add support for gart table in FB memory and PCIE r300
++ * 1.20- Add support for r300 texrect
++ * 1.21- Add support for card type getparam
++ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
++ * 1.23- Add new radeon memory map work from benh
++ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
++ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
++ *       new packet type)
++ * 1.26- Add support for variable size PCI(E) gart aperture
++ * 1.27- Add support for IGP GART
++ * 1.28- Add support for VBL on CRTC2
++ * 1.29- R500 3D cmd buffer support
++ */
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          29
++#define DRIVER_PATCHLEVEL     0
++
++/*
++ * Radeon chip families
++ */
++enum radeon_family {
++      CHIP_R100,
++      CHIP_RV100,
++      CHIP_RS100,
++      CHIP_RV200,
++      CHIP_RS200,
++      CHIP_R200,
++      CHIP_RV250,
++      CHIP_RS300,
++      CHIP_RV280,
++      CHIP_R300,
++      CHIP_R350,
++      CHIP_RV350,
++      CHIP_RV380,
++      CHIP_R420,
++      CHIP_RV410,
++      CHIP_RS400,
++      CHIP_RS480,
++      CHIP_RS690,
++      CHIP_RV515,
++      CHIP_R520,
++      CHIP_RV530,
++      CHIP_RV560,
++      CHIP_RV570,
++      CHIP_R580,
++      CHIP_LAST,
++};
++
++/*
++ * Chip flags
++ */
++enum radeon_chip_flags {
++      RADEON_FAMILY_MASK = 0x0000ffffUL,
++      RADEON_FLAGS_MASK = 0xffff0000UL,
++      RADEON_IS_MOBILITY = 0x00010000UL,
++      RADEON_IS_IGP = 0x00020000UL,
++      RADEON_SINGLE_CRTC = 0x00040000UL,
++      RADEON_IS_AGP = 0x00080000UL,
++      RADEON_HAS_HIERZ = 0x00100000UL,
++      RADEON_IS_PCIE = 0x00200000UL,
++      RADEON_NEW_MEMMAP = 0x00400000UL,
++      RADEON_IS_PCI = 0x00800000UL,
++      RADEON_IS_IGPGART = 0x01000000UL,
++};
++
++#define GET_RING_HEAD(dev_priv)       (dev_priv->writeback_works ? \
++        DRM_READ32(  (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
++#define SET_RING_HEAD(dev_priv,val)   DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
++
++typedef struct drm_radeon_freelist {
++      unsigned int age;
++      struct drm_buf *buf;
++      struct drm_radeon_freelist *next;
++      struct drm_radeon_freelist *prev;
++} drm_radeon_freelist_t;
++
++typedef struct drm_radeon_ring_buffer {
++      u32 *start;
++      u32 *end;
++      int size; /* Double Words */
++      int size_l2qw; /* log2 Quad Words */
++
++      int rptr_update; /* Double Words */
++      int rptr_update_l2qw; /* log2 Quad Words */
++
++      int fetch_size; /* Double Words */
++      int fetch_size_l2ow; /* log2 Oct Words */
++
++      u32 tail;
++      u32 tail_mask;
++      int space;
++
++      int high_mark;
++} drm_radeon_ring_buffer_t;
++
++typedef struct drm_radeon_depth_clear_t {
++      u32 rb3d_cntl;
++      u32 rb3d_zstencilcntl;
++      u32 se_cntl;
++} drm_radeon_depth_clear_t;
++
++struct drm_radeon_driver_file_fields {
++      int64_t radeon_fb_delta;
++};
++
++struct mem_block {
++      struct mem_block *next;
++      struct mem_block *prev;
++      int start;
++      int size;
++      struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++};
++
++struct radeon_surface {
++      int refcount;
++      u32 lower;
++      u32 upper;
++      u32 flags;
++};
++
++struct radeon_virt_surface {
++      int surface_index;
++      u32 lower;
++      u32 upper;
++      u32 flags;
++      struct drm_file *file_priv;
++};
++
++#define RADEON_FLUSH_EMITED   (1 << 0)
++#define RADEON_PURGE_EMITED   (1 << 1)
++
++typedef struct drm_radeon_private {
++
++      drm_radeon_ring_buffer_t ring;
++      drm_radeon_sarea_t *sarea_priv;
++
++      u32 fb_location;
++      u32 fb_size;
++      int new_memmap;
++
++      int gart_size;
++      u32 gart_vm_start;
++      unsigned long gart_buffers_offset;
++
++      int cp_mode;
++      int cp_running;
++
++      drm_radeon_freelist_t *head;
++      drm_radeon_freelist_t *tail;
++      int last_buf;
++      volatile u32 *scratch;
++      int writeback_works;
++
++      int usec_timeout;
++
++      struct {
++              u32 boxes;
++              int freelist_timeouts;
++              int freelist_loops;
++              int requested_bufs;
++              int last_frame_reads;
++              int last_clear_reads;
++              int clears;
++              int texture_uploads;
++      } stats;
++
++      int do_boxes;
++      int page_flipping;
++
++      u32 color_fmt;
++      unsigned int front_offset;
++      unsigned int front_pitch;
++      unsigned int back_offset;
++      unsigned int back_pitch;
++
++      u32 depth_fmt;
++      unsigned int depth_offset;
++      unsigned int depth_pitch;
++
++      u32 front_pitch_offset;
++      u32 back_pitch_offset;
++      u32 depth_pitch_offset;
++
++      drm_radeon_depth_clear_t depth_clear;
++
++      unsigned long ring_offset;
++      unsigned long ring_rptr_offset;
++      unsigned long buffers_offset;
++      unsigned long gart_textures_offset;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *cp_ring;
++      drm_local_map_t *ring_rptr;
++      drm_local_map_t *gart_textures;
++
++      struct mem_block *gart_heap;
++      struct mem_block *fb_heap;
++
++      /* SW interrupt */
++      wait_queue_head_t swi_queue;
++      atomic_t swi_emitted;
++      int vblank_crtc;
++      uint32_t irq_enable_reg;
++      int irq_enabled;
++      uint32_t r500_disp_irq_reg;
++
++      struct radeon_surface surfaces[RADEON_MAX_SURFACES];
++      struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
++
++      unsigned long pcigart_offset;
++      unsigned int pcigart_offset_set;
++      struct drm_ati_pcigart_info gart_info;
++
++      u32 scratch_ages[5];
++
++      unsigned int crtc_last_cnt;
++      unsigned int crtc2_last_cnt;
++
++      /* starting from here on, data is preserved across an open */
++      uint32_t flags;         /* see radeon_chip_flags */
++      unsigned long fb_aper_offset;
++
++      int num_gb_pipes;
++      int track_flush;
++      uint32_t chip_family; /* extract from flags */
++} drm_radeon_private_t;
++
++typedef struct drm_radeon_buf_priv {
++      u32 age;
++} drm_radeon_buf_priv_t;
++
++typedef struct drm_radeon_kcmd_buffer {
++      int bufsz;
++      char *buf;
++      int nbox;
++      struct drm_clip_rect __user *boxes;
++} drm_radeon_kcmd_buffer_t;
++
++extern int radeon_no_wb;
++extern struct drm_ioctl_desc radeon_ioctls[];
++extern int radeon_max_ioctl;
++
++/* Check whether the given hardware address is inside the framebuffer or the
++ * GART area.
++ */
++static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
++                                        u64 off)
++{
++      u32 fb_start = dev_priv->fb_location;
++      u32 fb_end = fb_start + dev_priv->fb_size - 1;
++      u32 gart_start = dev_priv->gart_vm_start;
++      u32 gart_end = gart_start + dev_priv->gart_size - 1;
++
++      return ((off >= fb_start && off <= fb_end) ||
++              (off >= gart_start && off <= gart_end));
++}
++
++                              /* radeon_cp.c */
++extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
++
++extern void radeon_freelist_reset(struct drm_device * dev);
++extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
++
++extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
++
++extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
++
++extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern void radeon_mem_takedown(struct mem_block **heap);
++extern void radeon_mem_release(struct drm_file *file_priv,
++                             struct mem_block *heap);
++
++                              /* radeon_irq.c */
++extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
++extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
++
++extern void radeon_do_release(struct drm_device * dev);
++extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
++extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
++extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
++extern void radeon_driver_irq_preinstall(struct drm_device * dev);
++extern int radeon_driver_irq_postinstall(struct drm_device * dev);
++extern void radeon_driver_irq_uninstall(struct drm_device * dev);
++extern int radeon_vblank_crtc_get(struct drm_device *dev);
++extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
++
++extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
++extern int radeon_driver_unload(struct drm_device *dev);
++extern int radeon_driver_firstopen(struct drm_device *dev);
++extern void radeon_driver_preclose(struct drm_device * dev,
++                                 struct drm_file *file_priv);
++extern void radeon_driver_postclose(struct drm_device * dev,
++                                  struct drm_file *file_priv);
++extern void radeon_driver_lastclose(struct drm_device * dev);
++extern int radeon_driver_open(struct drm_device * dev,
++                            struct drm_file * file_priv);
++extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
++                                       unsigned long arg);
++
++/* r300_cmdbuf.c */
++extern void r300_init_reg_flags(struct drm_device *dev);
++
++extern int r300_do_cp_cmdbuf(struct drm_device *dev,
++                           struct drm_file *file_priv,
++                           drm_radeon_kcmd_buffer_t *cmdbuf);
++
++/* Flags for stats.boxes
++ */
++#define RADEON_BOX_DMA_IDLE      0x1
++#define RADEON_BOX_RING_FULL     0x2
++#define RADEON_BOX_FLIP          0x4
++#define RADEON_BOX_WAIT_IDLE     0x8
++#define RADEON_BOX_TEXTURE_LOAD  0x10
++
++/* Register definitions, register access macros and drmAddMap constants
++ * for Radeon kernel driver.
++ */
++#define RADEON_AGP_COMMAND            0x0f60
++#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060  /* offset in PCI config */
++#       define RADEON_AGP_ENABLE            (1<<8)
++#define RADEON_AUX_SCISSOR_CNTL               0x26f0
++#     define RADEON_EXCLUSIVE_SCISSOR_0       (1 << 24)
++#     define RADEON_EXCLUSIVE_SCISSOR_1       (1 << 25)
++#     define RADEON_EXCLUSIVE_SCISSOR_2       (1 << 26)
++#     define RADEON_SCISSOR_0_ENABLE          (1 << 28)
++#     define RADEON_SCISSOR_1_ENABLE          (1 << 29)
++#     define RADEON_SCISSOR_2_ENABLE          (1 << 30)
++
++#define RADEON_BUS_CNTL                       0x0030
++#     define RADEON_BUS_MASTER_DIS            (1 << 6)
++
++#define RADEON_CLOCK_CNTL_DATA                0x000c
++#     define RADEON_PLL_WR_EN                 (1 << 7)
++#define RADEON_CLOCK_CNTL_INDEX               0x0008
++#define RADEON_CONFIG_APER_SIZE               0x0108
++#define RADEON_CONFIG_MEMSIZE           0x00f8
++#define RADEON_CRTC_OFFSET            0x0224
++#define RADEON_CRTC_OFFSET_CNTL               0x0228
++#     define RADEON_CRTC_TILE_EN              (1 << 15)
++#     define RADEON_CRTC_OFFSET_FLIP_CNTL     (1 << 16)
++#define RADEON_CRTC2_OFFSET           0x0324
++#define RADEON_CRTC2_OFFSET_CNTL      0x0328
++
++#define RADEON_PCIE_INDEX               0x0030
++#define RADEON_PCIE_DATA                0x0034
++#define RADEON_PCIE_TX_GART_CNTL      0x10
++#     define RADEON_PCIE_TX_GART_EN           (1 << 0)
++#     define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
++#     define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
++#     define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
++#     define RADEON_PCIE_TX_GART_MODE_32_128_CACHE    (0 << 3)
++#     define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE   (1 << 3)
++#     define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
++#     define RADEON_PCIE_TX_GART_INVALIDATE_TLB       (1 << 8)
++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
++#define RADEON_PCIE_TX_GART_BASE      0x13
++#define RADEON_PCIE_TX_GART_START_LO  0x14
++#define RADEON_PCIE_TX_GART_START_HI  0x15
++#define RADEON_PCIE_TX_GART_END_LO    0x16
++#define RADEON_PCIE_TX_GART_END_HI    0x17
++
++#define RS480_NB_MC_INDEX               0x168
++#     define RS480_NB_MC_IND_WR_EN    (1 << 8)
++#define RS480_NB_MC_DATA                0x16c
++
++#define RS690_MC_INDEX                  0x78
++#   define RS690_MC_INDEX_MASK          0x1ff
++#   define RS690_MC_INDEX_WR_EN         (1 << 9)
++#   define RS690_MC_INDEX_WR_ACK        0x7f
++#define RS690_MC_DATA                   0x7c
++
++/* MC indirect registers */
++#define RS480_MC_MISC_CNTL              0x18
++#     define RS480_DISABLE_GTW        (1 << 1)
++/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
++#     define RS480_GART_INDEX_REG_EN  (1 << 12)
++#     define RS690_BLOCK_GFX_D3_EN    (1 << 14)
++#define RS480_K8_FB_LOCATION            0x1e
++#define RS480_GART_FEATURE_ID           0x2b
++#     define RS480_HANG_EN            (1 << 11)
++#     define RS480_TLB_ENABLE         (1 << 18)
++#     define RS480_P2P_ENABLE         (1 << 19)
++#     define RS480_GTW_LAC_EN         (1 << 25)
++#     define RS480_2LEVEL_GART        (0 << 30)
++#     define RS480_1LEVEL_GART        (1 << 30)
++#     define RS480_PDC_EN             (1 << 31)
++#define RS480_GART_BASE                 0x2c
++#define RS480_GART_CACHE_CNTRL          0x2e
++#     define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
++#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
++#     define RS480_GART_EN            (1 << 0)
++#     define RS480_VA_SIZE_32MB       (0 << 1)
++#     define RS480_VA_SIZE_64MB       (1 << 1)
++#     define RS480_VA_SIZE_128MB      (2 << 1)
++#     define RS480_VA_SIZE_256MB      (3 << 1)
++#     define RS480_VA_SIZE_512MB      (4 << 1)
++#     define RS480_VA_SIZE_1GB        (5 << 1)
++#     define RS480_VA_SIZE_2GB        (6 << 1)
++#define RS480_AGP_MODE_CNTL             0x39
++#     define RS480_POST_GART_Q_SIZE   (1 << 18)
++#     define RS480_NONGART_SNOOP      (1 << 19)
++#     define RS480_AGP_RD_BUF_SIZE    (1 << 20)
++#     define RS480_REQ_TYPE_SNOOP_SHIFT 22
++#     define RS480_REQ_TYPE_SNOOP_MASK  0x3
++#     define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
++#define RS480_MC_MISC_UMA_CNTL          0x5f
++#define RS480_MC_MCLK_CNTL              0x7a
++#define RS480_MC_UMA_DUALCH_CNTL        0x86
++
++#define RS690_MC_FB_LOCATION            0x100
++#define RS690_MC_AGP_LOCATION           0x101
++#define RS690_MC_AGP_BASE               0x102
++#define RS690_MC_AGP_BASE_2             0x103
++
++#define R520_MC_IND_INDEX 0x70
++#define R520_MC_IND_WR_EN (1 << 24)
++#define R520_MC_IND_DATA  0x74
++
++#define RV515_MC_FB_LOCATION 0x01
++#define RV515_MC_AGP_LOCATION 0x02
++#define RV515_MC_AGP_BASE     0x03
++#define RV515_MC_AGP_BASE_2   0x04
++
++#define R520_MC_FB_LOCATION 0x04
++#define R520_MC_AGP_LOCATION 0x05
++#define R520_MC_AGP_BASE     0x06
++#define R520_MC_AGP_BASE_2   0x07
++
++#define RADEON_MPP_TB_CONFIG          0x01c0
++#define RADEON_MEM_CNTL                       0x0140
++#define RADEON_MEM_SDRAM_MODE_REG     0x0158
++#define RADEON_AGP_BASE_2             0x015c /* r200+ only */
++#define RS480_AGP_BASE_2              0x0164
++#define RADEON_AGP_BASE                       0x0170
++
++/* pipe config regs */
++#define R400_GB_PIPE_SELECT             0x402c
++#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
++#define R500_SU_REG_DEST                0x42c8
++#define R300_GB_TILE_CONFIG             0x4018
++#       define R300_ENABLE_TILING       (1 << 0)
++#       define R300_PIPE_COUNT_RV350    (0 << 1)
++#       define R300_PIPE_COUNT_R300     (3 << 1)
++#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
++#       define R300_PIPE_COUNT_R420     (7 << 1)
++#       define R300_TILE_SIZE_8         (0 << 4)
++#       define R300_TILE_SIZE_16        (1 << 4)
++#       define R300_TILE_SIZE_32        (2 << 4)
++#       define R300_SUBPIXEL_1_12       (0 << 16)
++#       define R300_SUBPIXEL_1_16       (1 << 16)
++#define R300_DST_PIPE_CONFIG            0x170c
++#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
++#define R300_RB2D_DSTCACHE_MODE         0x3428
++#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
++#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
++
++#define RADEON_RB3D_COLOROFFSET               0x1c40
++#define RADEON_RB3D_COLORPITCH                0x1c48
++
++#define       RADEON_SRC_X_Y                  0x1590
++
++#define RADEON_DP_GUI_MASTER_CNTL     0x146c
++#     define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
++#     define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
++#     define RADEON_GMC_BRUSH_SOLID_COLOR     (13 << 4)
++#     define RADEON_GMC_BRUSH_NONE            (15 << 4)
++#     define RADEON_GMC_DST_16BPP             (4 << 8)
++#     define RADEON_GMC_DST_24BPP             (5 << 8)
++#     define RADEON_GMC_DST_32BPP             (6 << 8)
++#     define RADEON_GMC_DST_DATATYPE_SHIFT    8
++#     define RADEON_GMC_SRC_DATATYPE_COLOR    (3 << 12)
++#     define RADEON_DP_SRC_SOURCE_MEMORY      (2 << 24)
++#     define RADEON_DP_SRC_SOURCE_HOST_DATA   (3 << 24)
++#     define RADEON_GMC_CLR_CMP_CNTL_DIS      (1 << 28)
++#     define RADEON_GMC_WR_MSK_DIS            (1 << 30)
++#     define RADEON_ROP3_S                    0x00cc0000
++#     define RADEON_ROP3_P                    0x00f00000
++#define RADEON_DP_WRITE_MASK          0x16cc
++#define RADEON_SRC_PITCH_OFFSET               0x1428
++#define RADEON_DST_PITCH_OFFSET               0x142c
++#define RADEON_DST_PITCH_OFFSET_C     0x1c80
++#     define RADEON_DST_TILE_LINEAR           (0 << 30)
++#     define RADEON_DST_TILE_MACRO            (1 << 30)
++#     define RADEON_DST_TILE_MICRO            (2 << 30)
++#     define RADEON_DST_TILE_BOTH             (3 << 30)
++
++#define RADEON_SCRATCH_REG0           0x15e0
++#define RADEON_SCRATCH_REG1           0x15e4
++#define RADEON_SCRATCH_REG2           0x15e8
++#define RADEON_SCRATCH_REG3           0x15ec
++#define RADEON_SCRATCH_REG4           0x15f0
++#define RADEON_SCRATCH_REG5           0x15f4
++#define RADEON_SCRATCH_UMSK           0x0770
++#define RADEON_SCRATCH_ADDR           0x0774
++
++#define RADEON_SCRATCHOFF( x )                (RADEON_SCRATCH_REG_OFFSET + 4*(x))
++
++#define GET_SCRATCH( x )      (dev_priv->writeback_works                      \
++                              ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
++                              : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
++
++#define RADEON_CRTC_CRNT_FRAME 0x0214
++#define RADEON_CRTC2_CRNT_FRAME 0x0314
++
++#define RADEON_CRTC_STATUS            0x005c
++#define RADEON_CRTC2_STATUS           0x03fc
++
++#define RADEON_GEN_INT_CNTL           0x0040
++#     define RADEON_CRTC_VBLANK_MASK          (1 << 0)
++#     define RADEON_CRTC2_VBLANK_MASK         (1 << 9)
++#     define RADEON_GUI_IDLE_INT_ENABLE       (1 << 19)
++#     define RADEON_SW_INT_ENABLE             (1 << 25)
++
++#define RADEON_GEN_INT_STATUS         0x0044
++#     define RADEON_CRTC_VBLANK_STAT          (1 << 0)
++#     define RADEON_CRTC_VBLANK_STAT_ACK      (1 << 0)
++#     define RADEON_CRTC2_VBLANK_STAT         (1 << 9)
++#     define RADEON_CRTC2_VBLANK_STAT_ACK     (1 << 9)
++#     define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
++#     define RADEON_SW_INT_TEST               (1 << 25)
++#     define RADEON_SW_INT_TEST_ACK           (1 << 25)
++#     define RADEON_SW_INT_FIRE               (1 << 26)
++#       define R500_DISPLAY_INT_STATUS          (1 << 0)
++
++
++#define RADEON_HOST_PATH_CNTL         0x0130
++#     define RADEON_HDP_SOFT_RESET            (1 << 26)
++#     define RADEON_HDP_WC_TIMEOUT_MASK       (7 << 28)
++#     define RADEON_HDP_WC_TIMEOUT_28BCLK     (7 << 28)
++
++#define RADEON_ISYNC_CNTL             0x1724
++#     define RADEON_ISYNC_ANY2D_IDLE3D        (1 << 0)
++#     define RADEON_ISYNC_ANY3D_IDLE2D        (1 << 1)
++#     define RADEON_ISYNC_TRIG2D_IDLE3D       (1 << 2)
++#     define RADEON_ISYNC_TRIG3D_IDLE2D       (1 << 3)
++#     define RADEON_ISYNC_WAIT_IDLEGUI        (1 << 4)
++#     define RADEON_ISYNC_CPSCRATCH_IDLEGUI   (1 << 5)
++
++#define RADEON_RBBM_GUICNTL           0x172c
++#     define RADEON_HOST_DATA_SWAP_NONE       (0 << 0)
++#     define RADEON_HOST_DATA_SWAP_16BIT      (1 << 0)
++#     define RADEON_HOST_DATA_SWAP_32BIT      (2 << 0)
++#     define RADEON_HOST_DATA_SWAP_HDW        (3 << 0)
++
++#define RADEON_MC_AGP_LOCATION                0x014c
++#define RADEON_MC_FB_LOCATION         0x0148
++#define RADEON_MCLK_CNTL              0x0012
++#     define RADEON_FORCEON_MCLKA             (1 << 16)
++#     define RADEON_FORCEON_MCLKB             (1 << 17)
++#     define RADEON_FORCEON_YCLKA             (1 << 18)
++#     define RADEON_FORCEON_YCLKB             (1 << 19)
++#     define RADEON_FORCEON_MC                (1 << 20)
++#     define RADEON_FORCEON_AIC               (1 << 21)
++
++#define RADEON_PP_BORDER_COLOR_0      0x1d40
++#define RADEON_PP_BORDER_COLOR_1      0x1d44
++#define RADEON_PP_BORDER_COLOR_2      0x1d48
++#define RADEON_PP_CNTL                        0x1c38
++#     define RADEON_SCISSOR_ENABLE            (1 <<  1)
++#define RADEON_PP_LUM_MATRIX          0x1d00
++#define RADEON_PP_MISC                        0x1c14
++#define RADEON_PP_ROT_MATRIX_0                0x1d58
++#define RADEON_PP_TXFILTER_0          0x1c54
++#define RADEON_PP_TXOFFSET_0          0x1c5c
++#define RADEON_PP_TXFILTER_1          0x1c6c
++#define RADEON_PP_TXFILTER_2          0x1c84
++
++#define R300_RB2D_DSTCACHE_CTLSTAT    0x342c /* use R300_DSTCACHE_CTLSTAT */
++#define R300_DSTCACHE_CTLSTAT         0x1714
++#     define R300_RB2D_DC_FLUSH               (3 << 0)
++#     define R300_RB2D_DC_FREE                (3 << 2)
++#     define R300_RB2D_DC_FLUSH_ALL           0xf
++#     define R300_RB2D_DC_BUSY                (1 << 31)
++#define RADEON_RB3D_CNTL              0x1c3c
++#     define RADEON_ALPHA_BLEND_ENABLE        (1 << 0)
++#     define RADEON_PLANE_MASK_ENABLE         (1 << 1)
++#     define RADEON_DITHER_ENABLE             (1 << 2)
++#     define RADEON_ROUND_ENABLE              (1 << 3)
++#     define RADEON_SCALE_DITHER_ENABLE       (1 << 4)
++#     define RADEON_DITHER_INIT               (1 << 5)
++#     define RADEON_ROP_ENABLE                (1 << 6)
++#     define RADEON_STENCIL_ENABLE            (1 << 7)
++#     define RADEON_Z_ENABLE                  (1 << 8)
++#     define RADEON_ZBLOCK16                  (1 << 15)
++#define RADEON_RB3D_DEPTHOFFSET               0x1c24
++#define RADEON_RB3D_DEPTHCLEARVALUE   0x3230
++#define RADEON_RB3D_DEPTHPITCH                0x1c28
++#define RADEON_RB3D_PLANEMASK         0x1d84
++#define RADEON_RB3D_STENCILREFMASK    0x1d7c
++#define RADEON_RB3D_ZCACHE_MODE               0x3250
++#define RADEON_RB3D_ZCACHE_CTLSTAT    0x3254
++#     define RADEON_RB3D_ZC_FLUSH             (1 << 0)
++#     define RADEON_RB3D_ZC_FREE              (1 << 2)
++#     define RADEON_RB3D_ZC_FLUSH_ALL         0x5
++#     define RADEON_RB3D_ZC_BUSY              (1 << 31)
++#define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
++#     define R300_ZC_FLUSH                    (1 << 0)
++#     define R300_ZC_FREE                     (1 << 1)
++#     define R300_ZC_BUSY                     (1 << 31)
++#define RADEON_RB3D_DSTCACHE_CTLSTAT            0x325c
++#     define RADEON_RB3D_DC_FLUSH             (3 << 0)
++#     define RADEON_RB3D_DC_FREE              (3 << 2)
++#     define RADEON_RB3D_DC_FLUSH_ALL         0xf
++#     define RADEON_RB3D_DC_BUSY              (1 << 31)
++#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
++#     define R300_RB3D_DC_FLUSH               (2 << 0)
++#     define R300_RB3D_DC_FREE                (2 << 2)
++#     define R300_RB3D_DC_FINISH              (1 << 4)
++#define RADEON_RB3D_ZSTENCILCNTL      0x1c2c
++#     define RADEON_Z_TEST_MASK               (7 << 4)
++#     define RADEON_Z_TEST_ALWAYS             (7 << 4)
++#     define RADEON_Z_HIERARCHY_ENABLE        (1 << 8)
++#     define RADEON_STENCIL_TEST_ALWAYS       (7 << 12)
++#     define RADEON_STENCIL_S_FAIL_REPLACE    (2 << 16)
++#     define RADEON_STENCIL_ZPASS_REPLACE     (2 << 20)
++#     define RADEON_STENCIL_ZFAIL_REPLACE     (2 << 24)
++#     define RADEON_Z_COMPRESSION_ENABLE      (1 << 28)
++#     define RADEON_FORCE_Z_DIRTY             (1 << 29)
++#     define RADEON_Z_WRITE_ENABLE            (1 << 30)
++#     define RADEON_Z_DECOMPRESSION_ENABLE    (1 << 31)
++#define RADEON_RBBM_SOFT_RESET                0x00f0
++#     define RADEON_SOFT_RESET_CP             (1 <<  0)
++#     define RADEON_SOFT_RESET_HI             (1 <<  1)
++#     define RADEON_SOFT_RESET_SE             (1 <<  2)
++#     define RADEON_SOFT_RESET_RE             (1 <<  3)
++#     define RADEON_SOFT_RESET_PP             (1 <<  4)
++#     define RADEON_SOFT_RESET_E2             (1 <<  5)
++#     define RADEON_SOFT_RESET_RB             (1 <<  6)
++#     define RADEON_SOFT_RESET_HDP            (1 <<  7)
++/*
++ *   6:0  Available slots in the FIFO
++ *   8    Host Interface active
++ *   9    CP request active
++ *   10   FIFO request active
++ *   11   Host Interface retry active
++ *   12   CP retry active
++ *   13   FIFO retry active
++ *   14   FIFO pipeline busy
++ *   15   Event engine busy
++ *   16   CP command stream busy
++ *   17   2D engine busy
++ *   18   2D portion of render backend busy
++ *   20   3D setup engine busy
++ *   26   GA engine busy
++ *   27   CBA 2D engine busy
++ *   31   2D engine busy or 3D engine busy or FIFO not empty or CP busy or
++ *           command stream queue not empty or Ring Buffer not empty
++ */
++#define RADEON_RBBM_STATUS            0x0e40
++/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register.  */
++/* #define RADEON_RBBM_STATUS         0x1740 */
++/* bits 6:0 are dword slots available in the cmd fifo */
++#     define RADEON_RBBM_FIFOCNT_MASK         0x007f
++#     define RADEON_HIRQ_ON_RBB       (1 <<  8)
++#     define RADEON_CPRQ_ON_RBB       (1 <<  9)
++#     define RADEON_CFRQ_ON_RBB       (1 << 10)
++#     define RADEON_HIRQ_IN_RTBUF     (1 << 11)
++#     define RADEON_CPRQ_IN_RTBUF     (1 << 12)
++#     define RADEON_CFRQ_IN_RTBUF     (1 << 13)
++#     define RADEON_PIPE_BUSY         (1 << 14)
++#     define RADEON_ENG_EV_BUSY       (1 << 15)
++#     define RADEON_CP_CMDSTRM_BUSY   (1 << 16)
++#     define RADEON_E2_BUSY           (1 << 17)
++#     define RADEON_RB2D_BUSY         (1 << 18)
++#     define RADEON_RB3D_BUSY         (1 << 19) /* not used on r300 */
++#     define RADEON_VAP_BUSY          (1 << 20)
++#     define RADEON_RE_BUSY           (1 << 21) /* not used on r300 */
++#     define RADEON_TAM_BUSY          (1 << 22) /* not used on r300 */
++#     define RADEON_TDM_BUSY          (1 << 23) /* not used on r300 */
++#     define RADEON_PB_BUSY           (1 << 24) /* not used on r300 */
++#     define RADEON_TIM_BUSY          (1 << 25) /* not used on r300 */
++#     define RADEON_GA_BUSY           (1 << 26)
++#     define RADEON_CBA2D_BUSY        (1 << 27)
++#     define RADEON_RBBM_ACTIVE       (1 << 31)
++#define RADEON_RE_LINE_PATTERN                0x1cd0
++#define RADEON_RE_MISC                        0x26c4
++#define RADEON_RE_TOP_LEFT            0x26c0
++#define RADEON_RE_WIDTH_HEIGHT                0x1c44
++#define RADEON_RE_STIPPLE_ADDR                0x1cc8
++#define RADEON_RE_STIPPLE_DATA                0x1ccc
++
++#define RADEON_SCISSOR_TL_0           0x1cd8
++#define RADEON_SCISSOR_BR_0           0x1cdc
++#define RADEON_SCISSOR_TL_1           0x1ce0
++#define RADEON_SCISSOR_BR_1           0x1ce4
++#define RADEON_SCISSOR_TL_2           0x1ce8
++#define RADEON_SCISSOR_BR_2           0x1cec
++#define RADEON_SE_COORD_FMT           0x1c50
++#define RADEON_SE_CNTL                        0x1c4c
++#     define RADEON_FFACE_CULL_CW             (0 << 0)
++#     define RADEON_BFACE_SOLID               (3 << 1)
++#     define RADEON_FFACE_SOLID               (3 << 3)
++#     define RADEON_FLAT_SHADE_VTX_LAST       (3 << 6)
++#     define RADEON_DIFFUSE_SHADE_FLAT        (1 << 8)
++#     define RADEON_DIFFUSE_SHADE_GOURAUD     (2 << 8)
++#     define RADEON_ALPHA_SHADE_FLAT          (1 << 10)
++#     define RADEON_ALPHA_SHADE_GOURAUD       (2 << 10)
++#     define RADEON_SPECULAR_SHADE_FLAT       (1 << 12)
++#     define RADEON_SPECULAR_SHADE_GOURAUD    (2 << 12)
++#     define RADEON_FOG_SHADE_FLAT            (1 << 14)
++#     define RADEON_FOG_SHADE_GOURAUD         (2 << 14)
++#     define RADEON_VPORT_XY_XFORM_ENABLE     (1 << 24)
++#     define RADEON_VPORT_Z_XFORM_ENABLE      (1 << 25)
++#     define RADEON_VTX_PIX_CENTER_OGL        (1 << 27)
++#     define RADEON_ROUND_MODE_TRUNC          (0 << 28)
++#     define RADEON_ROUND_PREC_8TH_PIX        (1 << 30)
++#define RADEON_SE_CNTL_STATUS         0x2140
++#define RADEON_SE_LINE_WIDTH          0x1db8
++#define RADEON_SE_VPORT_XSCALE                0x1d98
++#define RADEON_SE_ZBIAS_FACTOR                0x1db0
++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
++#define RADEON_SE_TCL_OUTPUT_VTX_FMT         0x2254
++#define RADEON_SE_TCL_VECTOR_INDX_REG        0x2200
++#       define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT  16
++#       define RADEON_VEC_INDX_DWORD_COUNT_SHIFT     28
++#define RADEON_SE_TCL_VECTOR_DATA_REG       0x2204
++#define RADEON_SE_TCL_SCALAR_INDX_REG       0x2208
++#       define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT  16
++#define RADEON_SE_TCL_SCALAR_DATA_REG       0x220C
++#define RADEON_SURFACE_ACCESS_FLAGS   0x0bf8
++#define RADEON_SURFACE_ACCESS_CLR     0x0bfc
++#define RADEON_SURFACE_CNTL           0x0b00
++#     define RADEON_SURF_TRANSLATION_DIS      (1 << 8)
++#     define RADEON_NONSURF_AP0_SWP_MASK      (3 << 20)
++#     define RADEON_NONSURF_AP0_SWP_LITTLE    (0 << 20)
++#     define RADEON_NONSURF_AP0_SWP_BIG16     (1 << 20)
++#     define RADEON_NONSURF_AP0_SWP_BIG32     (2 << 20)
++#     define RADEON_NONSURF_AP1_SWP_MASK      (3 << 22)
++#     define RADEON_NONSURF_AP1_SWP_LITTLE    (0 << 22)
++#     define RADEON_NONSURF_AP1_SWP_BIG16     (1 << 22)
++#     define RADEON_NONSURF_AP1_SWP_BIG32     (2 << 22)
++#define RADEON_SURFACE0_INFO          0x0b0c
++#     define RADEON_SURF_PITCHSEL_MASK        (0x1ff << 0)
++#     define RADEON_SURF_TILE_MODE_MASK       (3 << 16)
++#     define RADEON_SURF_TILE_MODE_MACRO      (0 << 16)
++#     define RADEON_SURF_TILE_MODE_MICRO      (1 << 16)
++#     define RADEON_SURF_TILE_MODE_32BIT_Z    (2 << 16)
++#     define RADEON_SURF_TILE_MODE_16BIT_Z    (3 << 16)
++#define RADEON_SURFACE0_LOWER_BOUND   0x0b04
++#define RADEON_SURFACE0_UPPER_BOUND   0x0b08
++#     define RADEON_SURF_ADDRESS_FIXED_MASK   (0x3ff << 0)
++#define RADEON_SURFACE1_INFO          0x0b1c
++#define RADEON_SURFACE1_LOWER_BOUND   0x0b14
++#define RADEON_SURFACE1_UPPER_BOUND   0x0b18
++#define RADEON_SURFACE2_INFO          0x0b2c
++#define RADEON_SURFACE2_LOWER_BOUND   0x0b24
++#define RADEON_SURFACE2_UPPER_BOUND   0x0b28
++#define RADEON_SURFACE3_INFO          0x0b3c
++#define RADEON_SURFACE3_LOWER_BOUND   0x0b34
++#define RADEON_SURFACE3_UPPER_BOUND   0x0b38
++#define RADEON_SURFACE4_INFO          0x0b4c
++#define RADEON_SURFACE4_LOWER_BOUND   0x0b44
++#define RADEON_SURFACE4_UPPER_BOUND   0x0b48
++#define RADEON_SURFACE5_INFO          0x0b5c
++#define RADEON_SURFACE5_LOWER_BOUND   0x0b54
++#define RADEON_SURFACE5_UPPER_BOUND   0x0b58
++#define RADEON_SURFACE6_INFO          0x0b6c
++#define RADEON_SURFACE6_LOWER_BOUND   0x0b64
++#define RADEON_SURFACE6_UPPER_BOUND   0x0b68
++#define RADEON_SURFACE7_INFO          0x0b7c
++#define RADEON_SURFACE7_LOWER_BOUND   0x0b74
++#define RADEON_SURFACE7_UPPER_BOUND   0x0b78
++#define RADEON_SW_SEMAPHORE           0x013c
++
++#define RADEON_WAIT_UNTIL             0x1720
++#     define RADEON_WAIT_CRTC_PFLIP           (1 << 0)
++#     define RADEON_WAIT_2D_IDLE              (1 << 14)
++#     define RADEON_WAIT_3D_IDLE              (1 << 15)
++#     define RADEON_WAIT_2D_IDLECLEAN         (1 << 16)
++#     define RADEON_WAIT_3D_IDLECLEAN         (1 << 17)
++#     define RADEON_WAIT_HOST_IDLECLEAN       (1 << 18)
++
++#define RADEON_RB3D_ZMASKOFFSET               0x3234
++#define RADEON_RB3D_ZSTENCILCNTL      0x1c2c
++#     define RADEON_DEPTH_FORMAT_16BIT_INT_Z  (0 << 0)
++#     define RADEON_DEPTH_FORMAT_24BIT_INT_Z  (2 << 0)
++
++/* CP registers */
++#define RADEON_CP_ME_RAM_ADDR         0x07d4
++#define RADEON_CP_ME_RAM_RADDR                0x07d8
++#define RADEON_CP_ME_RAM_DATAH                0x07dc
++#define RADEON_CP_ME_RAM_DATAL                0x07e0
++
++#define RADEON_CP_RB_BASE             0x0700
++#define RADEON_CP_RB_CNTL             0x0704
++#     define RADEON_BUF_SWAP_32BIT            (2 << 16)
++#     define RADEON_RB_NO_UPDATE              (1 << 27)
++#define RADEON_CP_RB_RPTR_ADDR                0x070c
++#define RADEON_CP_RB_RPTR             0x0710
++#define RADEON_CP_RB_WPTR             0x0714
++
++#define RADEON_CP_RB_WPTR_DELAY               0x0718
++#     define RADEON_PRE_WRITE_TIMER_SHIFT     0
++#     define RADEON_PRE_WRITE_LIMIT_SHIFT     23
++
++#define RADEON_CP_IB_BASE             0x0738
++
++#define RADEON_CP_CSQ_CNTL            0x0740
++#     define RADEON_CSQ_CNT_PRIMARY_MASK      (0xff << 0)
++#     define RADEON_CSQ_PRIDIS_INDDIS         (0 << 28)
++#     define RADEON_CSQ_PRIPIO_INDDIS         (1 << 28)
++#     define RADEON_CSQ_PRIBM_INDDIS          (2 << 28)
++#     define RADEON_CSQ_PRIPIO_INDBM          (3 << 28)
++#     define RADEON_CSQ_PRIBM_INDBM           (4 << 28)
++#     define RADEON_CSQ_PRIPIO_INDPIO         (15 << 28)
++
++#define RADEON_AIC_CNTL                       0x01d0
++#     define RADEON_PCIGART_TRANSLATE_EN      (1 << 0)
++#define RADEON_AIC_STAT                       0x01d4
++#define RADEON_AIC_PT_BASE            0x01d8
++#define RADEON_AIC_LO_ADDR            0x01dc
++#define RADEON_AIC_HI_ADDR            0x01e0
++#define RADEON_AIC_TLB_ADDR           0x01e4
++#define RADEON_AIC_TLB_DATA           0x01e8
++
++/* CP command packets */
++#define RADEON_CP_PACKET0             0x00000000
++#     define RADEON_ONE_REG_WR                (1 << 15)
++#define RADEON_CP_PACKET1             0x40000000
++#define RADEON_CP_PACKET2             0x80000000
++#define RADEON_CP_PACKET3             0xC0000000
++#       define RADEON_CP_NOP                    0x00001000
++#       define RADEON_CP_NEXT_CHAR              0x00001900
++#       define RADEON_CP_PLY_NEXTSCAN           0x00001D00
++#       define RADEON_CP_SET_SCISSORS           0x00001E00
++             /* GEN_INDX_PRIM is unsupported starting with R300 */
++#     define RADEON_3D_RNDR_GEN_INDX_PRIM     0x00002300
++#     define RADEON_WAIT_FOR_IDLE             0x00002600
++#     define RADEON_3D_DRAW_VBUF              0x00002800
++#     define RADEON_3D_DRAW_IMMD              0x00002900
++#     define RADEON_3D_DRAW_INDX              0x00002A00
++#       define RADEON_CP_LOAD_PALETTE           0x00002C00
++#     define RADEON_3D_LOAD_VBPNTR            0x00002F00
++#     define RADEON_MPEG_IDCT_MACROBLOCK      0x00003000
++#     define RADEON_MPEG_IDCT_MACROBLOCK_REV  0x00003100
++#     define RADEON_3D_CLEAR_ZMASK            0x00003200
++#     define RADEON_CP_INDX_BUFFER            0x00003300
++#       define RADEON_CP_3D_DRAW_VBUF_2         0x00003400
++#       define RADEON_CP_3D_DRAW_IMMD_2         0x00003500
++#       define RADEON_CP_3D_DRAW_INDX_2         0x00003600
++#     define RADEON_3D_CLEAR_HIZ              0x00003700
++#       define RADEON_CP_3D_CLEAR_CMASK         0x00003802
++#     define RADEON_CNTL_HOSTDATA_BLT         0x00009400
++#     define RADEON_CNTL_PAINT_MULTI          0x00009A00
++#     define RADEON_CNTL_BITBLT_MULTI         0x00009B00
++#     define RADEON_CNTL_SET_SCISSORS         0xC0001E00
++
++#define RADEON_CP_PACKET_MASK         0xC0000000
++#define RADEON_CP_PACKET_COUNT_MASK   0x3fff0000
++#define RADEON_CP_PACKET0_REG_MASK    0x000007ff
++#define RADEON_CP_PACKET1_REG0_MASK   0x000007ff
++#define RADEON_CP_PACKET1_REG1_MASK   0x003ff800
++
++#define RADEON_VTX_Z_PRESENT                  (1 << 31)
++#define RADEON_VTX_PKCOLOR_PRESENT            (1 << 3)
++
++#define RADEON_PRIM_TYPE_NONE                 (0 << 0)
++#define RADEON_PRIM_TYPE_POINT                        (1 << 0)
++#define RADEON_PRIM_TYPE_LINE                 (2 << 0)
++#define RADEON_PRIM_TYPE_LINE_STRIP           (3 << 0)
++#define RADEON_PRIM_TYPE_TRI_LIST             (4 << 0)
++#define RADEON_PRIM_TYPE_TRI_FAN              (5 << 0)
++#define RADEON_PRIM_TYPE_TRI_STRIP            (6 << 0)
++#define RADEON_PRIM_TYPE_TRI_TYPE2            (7 << 0)
++#define RADEON_PRIM_TYPE_RECT_LIST            (8 << 0)
++#define RADEON_PRIM_TYPE_3VRT_POINT_LIST      (9 << 0)
++#define RADEON_PRIM_TYPE_3VRT_LINE_LIST               (10 << 0)
++#define RADEON_PRIM_TYPE_MASK                   0xf
++#define RADEON_PRIM_WALK_IND                  (1 << 4)
++#define RADEON_PRIM_WALK_LIST                 (2 << 4)
++#define RADEON_PRIM_WALK_RING                 (3 << 4)
++#define RADEON_COLOR_ORDER_BGRA                       (0 << 6)
++#define RADEON_COLOR_ORDER_RGBA                       (1 << 6)
++#define RADEON_MAOS_ENABLE                    (1 << 7)
++#define RADEON_VTX_FMT_R128_MODE              (0 << 8)
++#define RADEON_VTX_FMT_RADEON_MODE            (1 << 8)
++#define RADEON_NUM_VERTICES_SHIFT             16
++
++#define RADEON_COLOR_FORMAT_CI8               2
++#define RADEON_COLOR_FORMAT_ARGB1555  3
++#define RADEON_COLOR_FORMAT_RGB565    4
++#define RADEON_COLOR_FORMAT_ARGB8888  6
++#define RADEON_COLOR_FORMAT_RGB332    7
++#define RADEON_COLOR_FORMAT_RGB8      9
++#define RADEON_COLOR_FORMAT_ARGB4444  15
++
++#define RADEON_TXFORMAT_I8            0
++#define RADEON_TXFORMAT_AI88          1
++#define RADEON_TXFORMAT_RGB332                2
++#define RADEON_TXFORMAT_ARGB1555      3
++#define RADEON_TXFORMAT_RGB565                4
++#define RADEON_TXFORMAT_ARGB4444      5
++#define RADEON_TXFORMAT_ARGB8888      6
++#define RADEON_TXFORMAT_RGBA8888      7
++#define RADEON_TXFORMAT_Y8            8
++#define RADEON_TXFORMAT_VYUY422         10
++#define RADEON_TXFORMAT_YVYU422         11
++#define RADEON_TXFORMAT_DXT1            12
++#define RADEON_TXFORMAT_DXT23           14
++#define RADEON_TXFORMAT_DXT45           15
++
++#define R200_PP_TXCBLEND_0                0x2f00
++#define R200_PP_TXCBLEND_1                0x2f10
++#define R200_PP_TXCBLEND_2                0x2f20
++#define R200_PP_TXCBLEND_3                0x2f30
++#define R200_PP_TXCBLEND_4                0x2f40
++#define R200_PP_TXCBLEND_5                0x2f50
++#define R200_PP_TXCBLEND_6                0x2f60
++#define R200_PP_TXCBLEND_7                0x2f70
++#define R200_SE_TCL_LIGHT_MODEL_CTL_0     0x2268
++#define R200_PP_TFACTOR_0                 0x2ee0
++#define R200_SE_VTX_FMT_0                 0x2088
++#define R200_SE_VAP_CNTL                  0x2080
++#define R200_SE_TCL_MATRIX_SEL_0          0x2230
++#define R200_SE_TCL_TEX_PROC_CTL_2        0x22a8
++#define R200_SE_TCL_UCP_VERT_BLEND_CTL    0x22c0
++#define R200_PP_TXFILTER_5                0x2ca0
++#define R200_PP_TXFILTER_4                0x2c80
++#define R200_PP_TXFILTER_3                0x2c60
++#define R200_PP_TXFILTER_2                0x2c40
++#define R200_PP_TXFILTER_1                0x2c20
++#define R200_PP_TXFILTER_0                0x2c00
++#define R200_PP_TXOFFSET_5                0x2d78
++#define R200_PP_TXOFFSET_4                0x2d60
++#define R200_PP_TXOFFSET_3                0x2d48
++#define R200_PP_TXOFFSET_2                0x2d30
++#define R200_PP_TXOFFSET_1                0x2d18
++#define R200_PP_TXOFFSET_0                0x2d00
++
++#define R200_PP_CUBIC_FACES_0             0x2c18
++#define R200_PP_CUBIC_FACES_1             0x2c38
++#define R200_PP_CUBIC_FACES_2             0x2c58
++#define R200_PP_CUBIC_FACES_3             0x2c78
++#define R200_PP_CUBIC_FACES_4             0x2c98
++#define R200_PP_CUBIC_FACES_5             0x2cb8
++#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
++#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
++#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
++#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
++#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
++#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
++#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
++#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
++#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
++#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
++#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
++#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
++#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
++#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
++#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
++#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
++#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
++#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
++#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
++#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
++#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
++#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
++#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
++#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
++#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
++#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
++#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
++#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
++#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
++#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
++
++#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
++#define R200_SE_VTE_CNTL                  0x20b0
++#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL   0x2250
++#define R200_PP_TAM_DEBUG3                0x2d9c
++#define R200_PP_CNTL_X                    0x2cc4
++#define R200_SE_VAP_CNTL_STATUS           0x2140
++#define R200_RE_SCISSOR_TL_0              0x1cd8
++#define R200_RE_SCISSOR_TL_1              0x1ce0
++#define R200_RE_SCISSOR_TL_2              0x1ce8
++#define R200_RB3D_DEPTHXY_OFFSET          0x1d60
++#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
++#define R200_SE_VTX_STATE_CNTL            0x2180
++#define R200_RE_POINTSIZE                 0x2648
++#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
++
++#define RADEON_PP_TEX_SIZE_0                0x1d04    /* NPOT */
++#define RADEON_PP_TEX_SIZE_1                0x1d0c
++#define RADEON_PP_TEX_SIZE_2                0x1d14
++
++#define RADEON_PP_CUBIC_FACES_0             0x1d24
++#define RADEON_PP_CUBIC_FACES_1             0x1d28
++#define RADEON_PP_CUBIC_FACES_2             0x1d2c
++#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0    /* bits [31:5] */
++#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
++#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
++
++#define RADEON_SE_TCL_STATE_FLUSH           0x2284
++
++#define SE_VAP_CNTL__TCL_ENA_MASK                          0x00000001
++#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK                   0x00010000
++#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT                 0x00000012
++#define SE_VTE_CNTL__VTX_XY_FMT_MASK                       0x00000100
++#define SE_VTE_CNTL__VTX_Z_FMT_MASK                        0x00000200
++#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK                  0x00000001
++#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK                  0x00000002
++#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT               0x0000000b
++#define R200_3D_DRAW_IMMD_2      0xC0003500
++#define R200_SE_VTX_FMT_1                 0x208c
++#define R200_RE_CNTL                      0x1c50
++
++#define R200_RB3D_BLENDCOLOR              0x3218
++
++#define R200_SE_TCL_POINT_SPRITE_CNTL     0x22c4
++
++#define R200_PP_TRI_PERF                  0x2cf8
++
++#define R200_PP_AFS_0                     0x2f80
++#define R200_PP_AFS_1                     0x2f00 /* same as txcblend_0 */
++
++#define R200_VAP_PVS_CNTL_1               0x22D0
++
++/* MPEG settings from VHA code */
++#define RADEON_VHA_SETTO16_1                       0x2694
++#define RADEON_VHA_SETTO16_2                       0x2680
++#define RADEON_VHA_SETTO0_1                        0x1840
++#define RADEON_VHA_FB_OFFSET                       0x19e4
++#define RADEON_VHA_SETTO1AND70S                    0x19d8
++#define RADEON_VHA_DST_PITCH                       0x1408
++
++// set as reference header
++#define RADEON_VHA_BACKFRAME0_OFF_Y              0x1840
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y        0x1844
++#define RADEON_VHA_BACKFRAME0_OFF_U              0x1848
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U        0x184c
++#define RADOEN_VHA_BACKFRAME0_OFF_V              0x1850
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V        0x1854
++#define RADEON_VHA_FORWFRAME0_OFF_Y              0x1858
++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y        0x185c
++#define RADEON_VHA_FORWFRAME0_OFF_U              0x1860
++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U        0x1864
++#define RADEON_VHA_FORWFRAME0_OFF_V              0x1868
++#define RADEON_VHA_FORWFRAME0_OFF_PITCH_V        0x1880
++#define RADEON_VHA_BACKFRAME0_OFF_Y_2            0x1884
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2      0x1888
++#define RADEON_VHA_BACKFRAME0_OFF_U_2            0x188c
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2      0x1890
++#define RADEON_VHA_BACKFRAME0_OFF_V_2            0x1894
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2      0x1898
++
++#define R500_D1CRTC_STATUS 0x609c
++#define R500_D2CRTC_STATUS 0x689c
++#define R500_CRTC_V_BLANK (1<<0)
++
++#define R500_D1CRTC_FRAME_COUNT 0x60a4
++#define R500_D2CRTC_FRAME_COUNT 0x68a4
++
++#define R500_D1MODE_V_COUNTER 0x6530
++#define R500_D2MODE_V_COUNTER 0x6d30
++
++#define R500_D1MODE_VBLANK_STATUS 0x6534
++#define R500_D2MODE_VBLANK_STATUS 0x6d34
++#define R500_VBLANK_OCCURED (1<<0)
++#define R500_VBLANK_ACK     (1<<4)
++#define R500_VBLANK_STAT    (1<<12)
++#define R500_VBLANK_INT     (1<<16)
++
++#define R500_DxMODE_INT_MASK 0x6540
++#define R500_D1MODE_INT_MASK (1<<0)
++#define R500_D2MODE_INT_MASK (1<<8)
++
++#define R500_DISP_INTERRUPT_STATUS 0x7edc
++#define R500_D1_VBLANK_INTERRUPT (1 << 4)
++#define R500_D2_VBLANK_INTERRUPT (1 << 5)
++
++/* Constants */
++#define RADEON_MAX_USEC_TIMEOUT               100000  /* 100 ms */
++
++#define RADEON_LAST_FRAME_REG         RADEON_SCRATCH_REG0
++#define RADEON_LAST_DISPATCH_REG      RADEON_SCRATCH_REG1
++#define RADEON_LAST_CLEAR_REG         RADEON_SCRATCH_REG2
++#define RADEON_LAST_SWI_REG           RADEON_SCRATCH_REG3
++#define RADEON_LAST_DISPATCH          1
++
++#define RADEON_MAX_VB_AGE             0x7fffffff
++#define RADEON_MAX_VB_VERTS           (0xffff)
++
++#define RADEON_RING_HIGH_MARK         128
++
++#define RADEON_PCIGART_TABLE_SIZE      (32*1024)
++
++#define RADEON_READ(reg)    DRM_READ32(  dev_priv->mmio, (reg) )
++#define RADEON_WRITE(reg,val)  DRM_WRITE32( dev_priv->mmio, (reg), (val) )
++#define RADEON_READ8(reg)     DRM_READ8(  dev_priv->mmio, (reg) )
++#define RADEON_WRITE8(reg,val)        DRM_WRITE8( dev_priv->mmio, (reg), (val) )
++
++#define RADEON_WRITE_PLL( addr, val )                                 \
++do {                                                                  \
++      RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX,                         \
++                     ((addr) & 0x1f) | RADEON_PLL_WR_EN );            \
++      RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) );                  \
++} while (0)
++
++#define RADEON_WRITE_PCIE( addr, val )                                        \
++do {                                                                  \
++      RADEON_WRITE8( RADEON_PCIE_INDEX,                               \
++                      ((addr) & 0xff));                               \
++      RADEON_WRITE( RADEON_PCIE_DATA, (val) );                        \
++} while (0)
++
++#define R500_WRITE_MCIND( addr, val )                                 \
++do {                                                          \
++      RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));    \
++      RADEON_WRITE(R520_MC_IND_DATA, (val));                  \
++      RADEON_WRITE(R520_MC_IND_INDEX, 0);     \
++} while (0)
++
++#define RS480_WRITE_MCIND( addr, val )                                \
++do {                                                                  \
++      RADEON_WRITE( RS480_NB_MC_INDEX,                                \
++                      ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);       \
++      RADEON_WRITE( RS480_NB_MC_DATA, (val) );                        \
++      RADEON_WRITE( RS480_NB_MC_INDEX, 0xff );                        \
++} while (0)
++
++#define RS690_WRITE_MCIND( addr, val )                                        \
++do {                                                          \
++      RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));    \
++      RADEON_WRITE(RS690_MC_DATA, val);                       \
++      RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);    \
++} while (0)
++
++#define IGP_WRITE_MCIND( addr, val )                          \
++do {                                                                  \
++        if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
++              RS690_WRITE_MCIND( addr, val );                         \
++      else                                                            \
++              RS480_WRITE_MCIND( addr, val );                         \
++} while (0)
++
++#define CP_PACKET0( reg, n )                                          \
++      (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
++#define CP_PACKET0_TABLE( reg, n )                                    \
++      (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
++#define CP_PACKET1( reg0, reg1 )                                      \
++      (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
++#define CP_PACKET2()                                                  \
++      (RADEON_CP_PACKET2)
++#define CP_PACKET3( pkt, n )                                          \
++      (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
++
++/* ================================================================
++ * Engine control helper macros
++ */
++
++#define RADEON_WAIT_UNTIL_2D_IDLE() do {                              \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |                           \
++                 RADEON_WAIT_HOST_IDLECLEAN) );                       \
++} while (0)
++
++#define RADEON_WAIT_UNTIL_3D_IDLE() do {                              \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |                           \
++                 RADEON_WAIT_HOST_IDLECLEAN) );                       \
++} while (0)
++
++#define RADEON_WAIT_UNTIL_IDLE() do {                                 \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |                           \
++                 RADEON_WAIT_3D_IDLECLEAN |                           \
++                 RADEON_WAIT_HOST_IDLECLEAN) );                       \
++} while (0)
++
++#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {                         \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( RADEON_WAIT_CRTC_PFLIP );                             \
++} while (0)
++
++#define RADEON_FLUSH_CACHE() do {                                     \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));  \
++              OUT_RING(RADEON_RB3D_DC_FLUSH);                         \
++      } else {                                                        \
++              OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
++              OUT_RING(R300_RB3D_DC_FLUSH);                           \
++        }                                                               \
++} while (0)
++
++#define RADEON_PURGE_CACHE() do {                                     \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
++              OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);   \
++      } else {                                                        \
++              OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
++              OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE );      \
++        }                                                               \
++} while (0)
++
++#define RADEON_FLUSH_ZCACHE() do {                                    \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
++              OUT_RING( RADEON_RB3D_ZC_FLUSH );                       \
++      } else {                                                        \
++              OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) );    \
++              OUT_RING( R300_ZC_FLUSH );                              \
++        }                                                               \
++} while (0)
++
++#define RADEON_PURGE_ZCACHE() do {                                    \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));    \
++              OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);   \
++      } else {                                                        \
++              OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));        \
++              OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);                 \
++        }                                                               \
++} while (0)
++
++/* ================================================================
++ * Misc helper macros
++ */
++
++/* Perfbox functionality only.
++ */
++#define RING_SPACE_TEST_WITH_RETURN( dev_priv )                               \
++do {                                                                  \
++      if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) {           \
++              u32 head = GET_RING_HEAD( dev_priv );                   \
++              if (head == dev_priv->ring.tail)                        \
++                      dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE;   \
++      }                                                               \
++} while (0)
++
++#define VB_AGE_TEST_WITH_RETURN( dev_priv )                           \
++do {                                                                  \
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;          \
++      if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {         \
++              int __ret = radeon_do_cp_idle( dev_priv );              \
++              if ( __ret ) return __ret;                              \
++              sarea_priv->last_dispatch = 0;                          \
++              radeon_freelist_reset( dev );                           \
++      }                                                               \
++} while (0)
++
++#define RADEON_DISPATCH_AGE( age ) do {                                       \
++      OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) );          \
++      OUT_RING( age );                                                \
++} while (0)
++
++#define RADEON_FRAME_AGE( age ) do {                                  \
++      OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) );             \
++      OUT_RING( age );                                                \
++} while (0)
++
++#define RADEON_CLEAR_AGE( age ) do {                                  \
++      OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) );             \
++      OUT_RING( age );                                                \
++} while (0)
++
++/* ================================================================
++ * Ring control
++ */
++
++#define RADEON_VERBOSE        0
++
++#define RING_LOCALS   int write, _nr; unsigned int mask; u32 *ring;
++
++#define BEGIN_RING( n ) do {                                          \
++      if ( RADEON_VERBOSE ) {                                         \
++              DRM_INFO( "BEGIN_RING( %d )\n", (n));                   \
++      }                                                               \
++      if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {              \
++              COMMIT_RING();                                          \
++              radeon_wait_ring( dev_priv, (n) * sizeof(u32) );        \
++      }                                                               \
++      _nr = n; dev_priv->ring.space -= (n) * sizeof(u32);             \
++      ring = dev_priv->ring.start;                                    \
++      write = dev_priv->ring.tail;                                    \
++      mask = dev_priv->ring.tail_mask;                                \
++} while (0)
++
++#define ADVANCE_RING() do {                                           \
++      if ( RADEON_VERBOSE ) {                                         \
++              DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
++                        write, dev_priv->ring.tail );                 \
++      }                                                               \
++      if (((dev_priv->ring.tail + _nr) & mask) != write) {            \
++              DRM_ERROR(                                              \
++                      "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",        \
++                      ((dev_priv->ring.tail + _nr) & mask),           \
++                      write, __LINE__);                                               \
++      } else                                                          \
++              dev_priv->ring.tail = write;                            \
++} while (0)
++
++#define COMMIT_RING() do {                                            \
++      /* Flush writes to ring */                                      \
++      DRM_MEMORYBARRIER();                                            \
++      GET_RING_HEAD( dev_priv );                                      \
++      RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail );         \
++      /* read from PCI bus to ensure correct posting */               \
++      RADEON_READ( RADEON_CP_RB_RPTR );                               \
++} while (0)
++
++#define OUT_RING( x ) do {                                            \
++      if ( RADEON_VERBOSE ) {                                         \
++              DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",            \
++                         (unsigned int)(x), write );                  \
++      }                                                               \
++      ring[write++] = (x);                                            \
++      write &= mask;                                                  \
++} while (0)
++
++#define OUT_RING_REG( reg, val ) do {                                 \
++      OUT_RING( CP_PACKET0( reg, 0 ) );                               \
++      OUT_RING( val );                                                \
++} while (0)
++
++#define OUT_RING_TABLE( tab, sz ) do {                                \
++      int _size = (sz);                                       \
++      int *_tab = (int *)(tab);                               \
++                                                              \
++      if (write + _size > mask) {                             \
++              int _i = (mask+1) - write;                      \
++              _size -= _i;                                    \
++              while (_i > 0) {                                \
++                      *(int *)(ring + write) = *_tab++;       \
++                      write++;                                \
++                      _i--;                                   \
++              }                                               \
++              write = 0;                                      \
++              _tab += _i;                                     \
++      }                                                       \
++      while (_size > 0) {                                     \
++              *(ring + write) = *_tab++;                      \
++              write++;                                        \
++              _size--;                                        \
++      }                                                       \
++      write &= mask;                                          \
++} while (0)
++
++#endif                                /* __RADEON_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_ioc32.c git-nokia/drivers/gpu/drm-tungsten/radeon_ioc32.c
+--- git/drivers/gpu/drm-tungsten/radeon_ioc32.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_ioc32.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,424 @@
++/**
++ * \file radeon_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the Radeon DRM.
++ *
++ * \author Paul Mackerras <paulus@samba.org>
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++typedef struct drm_radeon_init32 {
++      int func;
++      u32 sarea_priv_offset;
++      int is_pci;
++      int cp_mode;
++      int gart_size;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      u32 fb_offset;
++      u32 mmio_offset;
++      u32 ring_offset;
++      u32 ring_rptr_offset;
++      u32 buffers_offset;
++      u32 gart_textures_offset;
++} drm_radeon_init32_t;
++
++static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_radeon_init32_t init32;
++      drm_radeon_init_t __user *init;
++
++      if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++              return -EFAULT;
++
++      init = compat_alloc_user_space(sizeof(*init));
++      if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++          || __put_user(init32.func, &init->func)
++          || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++          || __put_user(init32.is_pci, &init->is_pci)
++          || __put_user(init32.cp_mode, &init->cp_mode)
++          || __put_user(init32.gart_size, &init->gart_size)
++          || __put_user(init32.ring_size, &init->ring_size)
++          || __put_user(init32.usec_timeout, &init->usec_timeout)
++          || __put_user(init32.fb_bpp, &init->fb_bpp)
++          || __put_user(init32.front_offset, &init->front_offset)
++          || __put_user(init32.front_pitch, &init->front_pitch)
++          || __put_user(init32.back_offset, &init->back_offset)
++          || __put_user(init32.back_pitch, &init->back_pitch)
++          || __put_user(init32.depth_bpp, &init->depth_bpp)
++          || __put_user(init32.depth_offset, &init->depth_offset)
++          || __put_user(init32.depth_pitch, &init->depth_pitch)
++          || __put_user(init32.fb_offset, &init->fb_offset)
++          || __put_user(init32.mmio_offset, &init->mmio_offset)
++          || __put_user(init32.ring_offset, &init->ring_offset)
++          || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
++          || __put_user(init32.buffers_offset, &init->buffers_offset)
++          || __put_user(init32.gart_textures_offset,
++                        &init->gart_textures_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init);
++}
++
++typedef struct drm_radeon_clear32 {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;   /* misnamed field:  should be stencil */
++      u32          depth_boxes;
++} drm_radeon_clear32_t;
++
++static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_radeon_clear32_t clr32;
++      drm_radeon_clear_t __user *clr;
++
++      if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
++              return -EFAULT;
++
++      clr = compat_alloc_user_space(sizeof(*clr));
++      if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
++          || __put_user(clr32.flags, &clr->flags)
++          || __put_user(clr32.clear_color, &clr->clear_color)
++          || __put_user(clr32.clear_depth, &clr->clear_depth)
++          || __put_user(clr32.color_mask, &clr->color_mask)
++          || __put_user(clr32.depth_mask, &clr->depth_mask)
++          || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
++                        &clr->depth_boxes))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr);
++}
++
++typedef struct drm_radeon_stipple32 {
++      u32 mask;
++} drm_radeon_stipple32_t;
++
++static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_radeon_stipple32_t __user *argp = (void __user *)arg;
++      drm_radeon_stipple_t __user *request;
++      u32 mask;
++
++      if (get_user(mask, &argp->mask))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user((unsigned int __user *)(unsigned long) mask,
++                        &request->mask))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request);
++}
++
++typedef struct drm_radeon_tex_image32 {
++      unsigned int x, y;              /* Blit coordinates */
++      unsigned int width, height;
++      u32 data;
++} drm_radeon_tex_image32_t;
++
++typedef struct drm_radeon_texture32 {
++      unsigned int offset;
++      int pitch;
++      int format;
++      int width;                      /* Texture image coordinates */
++      int height;
++      u32 image;
++} drm_radeon_texture32_t;
++
++static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_radeon_texture32_t req32;
++      drm_radeon_texture_t __user *request;
++      drm_radeon_tex_image32_t img32;
++      drm_radeon_tex_image_t __user *image;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++      if (req32.image == 0)
++              return -EINVAL;
++      if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
++                         sizeof(img32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
++      if (!access_ok(VERIFY_WRITE, request,
++                     sizeof(*request) + sizeof(*image)))
++              return -EFAULT;
++      image = (drm_radeon_tex_image_t __user *) (request + 1);
++
++      if (__put_user(req32.offset, &request->offset)
++          || __put_user(req32.pitch, &request->pitch)
++          || __put_user(req32.format, &request->format)
++          || __put_user(req32.width, &request->width)
++          || __put_user(req32.height, &request->height)
++          || __put_user(image, &request->image)
++          || __put_user(img32.x, &image->x)
++          || __put_user(img32.y, &image->y)
++          || __put_user(img32.width, &image->width)
++          || __put_user(img32.height, &image->height)
++          || __put_user((const void __user *)(unsigned long)img32.data,
++                        &image->data))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request);
++}
++
++typedef struct drm_radeon_vertex2_32 {
++      int idx;                        /* Index of vertex buffer */
++      int discard;                    /* Client finished with buffer? */
++      int nr_states;
++      u32 state;
++      int nr_prims;
++      u32 prim;
++} drm_radeon_vertex2_32_t;
++
++static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_radeon_vertex2_32_t req32;
++      drm_radeon_vertex2_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.idx, &request->idx)
++          || __put_user(req32.discard, &request->discard)
++          || __put_user(req32.nr_states, &request->nr_states)
++          || __put_user((void __user *)(unsigned long)req32.state,
++                        &request->state)
++          || __put_user(req32.nr_prims, &request->nr_prims)
++          || __put_user((void __user *)(unsigned long)req32.prim,
++                        &request->prim))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request);
++}
++
++typedef struct drm_radeon_cmd_buffer32 {
++      int bufsz;
++      u32 buf;
++      int nbox;
++      u32 boxes;
++} drm_radeon_cmd_buffer32_t;
++
++static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
++                                 unsigned long arg)
++{
++      drm_radeon_cmd_buffer32_t req32;
++      drm_radeon_cmd_buffer_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.bufsz, &request->bufsz)
++          || __put_user((void __user *)(unsigned long)req32.buf,
++                        &request->buf)
++          || __put_user(req32.nbox, &request->nbox)
++          || __put_user((void __user *)(unsigned long)req32.boxes,
++                        &request->boxes))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request);
++}
++
++typedef struct drm_radeon_getparam32 {
++      int param;
++      u32 value;
++} drm_radeon_getparam32_t;
++
++static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
++                                   unsigned long arg)
++{
++      drm_radeon_getparam32_t req32;
++      drm_radeon_getparam_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.param, &request->param)
++          || __put_user((void __user *)(unsigned long)req32.value,
++                        &request->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request);
++}
++
++typedef struct drm_radeon_mem_alloc32 {
++      int region;
++      int alignment;
++      int size;
++      u32 region_offset;      /* offset from start of fb or GART */
++} drm_radeon_mem_alloc32_t;
++
++static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
++                                 unsigned long arg)
++{
++      drm_radeon_mem_alloc32_t req32;
++      drm_radeon_mem_alloc_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.region, &request->region)
++          || __put_user(req32.alignment, &request->alignment)
++          || __put_user(req32.size, &request->size)
++          || __put_user((int __user *)(unsigned long)req32.region_offset,
++                        &request->region_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_ALLOC, (unsigned long) request);
++}
++
++typedef struct drm_radeon_irq_emit32 {
++      u32 irq_seq;
++} drm_radeon_irq_emit32_t;
++
++static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_radeon_irq_emit32_t req32;
++      drm_radeon_irq_emit_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user((int __user *)(unsigned long)req32.irq_seq,
++                        &request->irq_seq))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request);
++}
++
++/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
++#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
++typedef struct drm_radeon_setparam32 {
++      int param;
++      u64 value;
++} __attribute__((packed)) drm_radeon_setparam32_t;
++
++static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
++                                   unsigned long arg)
++{
++      drm_radeon_setparam32_t req32;
++      drm_radeon_setparam_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.param, &request->param)
++          || __put_user((void __user *)(unsigned long)req32.value,
++                        &request->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
++}
++#else
++#define compat_radeon_cp_setparam NULL
++#endif /* X86_64 || IA64 */
++
++drm_ioctl_compat_t *radeon_compat_ioctls[] = {
++      [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
++      [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
++      [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
++      [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
++      [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
++      [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
++      [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
++      [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
++      [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
++      [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
++              fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_irq.c git-nokia/drivers/gpu/drm-tungsten/radeon_irq.c
+--- git/drivers/gpu/drm-tungsten/radeon_irq.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_irq.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,390 @@
++/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Michel Dänzer <michel@daenzer.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (state)
++              dev_priv->irq_enable_reg |= mask;
++      else
++              dev_priv->irq_enable_reg &= ~mask;
++
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
++}
++
++static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (state)
++              dev_priv->r500_disp_irq_reg |= mask;
++      else
++              dev_priv->r500_disp_irq_reg &= ~mask;
++
++      RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
++}
++
++int radeon_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {     
++              switch (crtc) {
++              case 0:
++                      r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
++                      break;
++              case 1:
++                      r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
++                      break;
++              default:
++                      DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                                crtc);
++                      return EINVAL;
++              }
++      } else {
++              switch (crtc) {
++              case 0:
++                      radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
++                      break;
++              case 1:
++                      radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
++                      break;
++              default:
++                      DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                                crtc);
++                      return EINVAL;
++              }
++      }
++
++      return 0;
++}
++
++void radeon_disable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {     
++              switch (crtc) {
++              case 0:
++                      r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
++                      break;
++              case 1:
++                      r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
++                      break;
++              default:
++                      DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                                crtc);
++                      break;
++              }
++      } else {
++              switch (crtc) {
++              case 0:
++                      radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
++                      break;
++              case 1:
++                      radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
++                      break;
++              default:
++                      DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                                crtc);
++                      break;
++              }
++      }
++}
++
++static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, u32 *r500_disp_int)
++{
++      u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
++      u32 irq_mask = RADEON_SW_INT_TEST;
++
++      *r500_disp_int = 0;
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              /* vbl interrupts in a different place */
++
++              if (irqs & R500_DISPLAY_INT_STATUS) {
++                      /* if a display interrupt */
++                      u32 disp_irq;
++
++                      disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
++
++                      *r500_disp_int = disp_irq;
++                      if (disp_irq & R500_D1_VBLANK_INTERRUPT) {
++                              RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
++                      }
++                      if (disp_irq & R500_D2_VBLANK_INTERRUPT) {
++                              RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
++                      }
++              }
++              irq_mask |= R500_DISPLAY_INT_STATUS;
++      } else
++              irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
++
++      irqs &= irq_mask;
++
++      if (irqs)
++              RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
++      
++      return irqs;
++}
++
++/* Interrupts - Used for device synchronization and flushing in the
++ * following circumstances:
++ *
++ * - Exclusive FB access with hw idle:
++ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
++ *
++ * - Frame throttling, NV_fence:
++ *    - Drop marker irq's into command stream ahead of time.
++ *    - Wait on irq's with lock *not held*
++ *    - Check each for termination condition
++ *
++ * - Internally in cp_getbuffer, etc:
++ *    - as above, but wait with lock held???
++ *
++ * NOTE: These functions are misleadingly named -- the irq's aren't
++ * tied to dma at all, this is just a hangover from dri prehistory.
++ */
++
++irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      u32 stat;
++      u32 r500_disp_int;
++
++      /* Only consider the bits we're interested in - others could be used
++       * outside the DRM
++       */
++      stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
++      if (!stat)
++              return IRQ_NONE;
++
++      stat &= dev_priv->irq_enable_reg;
++
++      /* SW interrupt */
++      if (stat & RADEON_SW_INT_TEST)
++              DRM_WAKEUP(&dev_priv->swi_queue);
++
++      /* VBLANK interrupt */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
++                      drm_handle_vblank(dev, 0);
++              if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
++                      drm_handle_vblank(dev, 1);
++      } else {
++              if (stat & RADEON_CRTC_VBLANK_STAT)
++                      drm_handle_vblank(dev, 0);
++              if (stat & RADEON_CRTC2_VBLANK_STAT)
++                      drm_handle_vblank(dev, 1);
++      }
++      return IRQ_HANDLED;
++}
++
++static int radeon_emit_irq(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      unsigned int ret;
++      RING_LOCALS;
++
++      atomic_inc(&dev_priv->swi_emitted);
++      ret = atomic_read(&dev_priv->swi_emitted);
++
++      BEGIN_RING(4);
++      OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
++      OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      return ret;
++}
++
++static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      int ret = 0;
++
++      if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
++              return 0;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
++                  RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
++
++      return ret;
++}
++
++u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (crtc < 0 || crtc > 1) {
++              DRM_ERROR("Invalid crtc %d\n", crtc);
++              return -EINVAL;
++      }
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              if (crtc == 0)
++                      return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
++              else
++                      return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
++      } else {
++              if (crtc == 0)
++                      return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
++              else
++                      return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
++      }
++}
++
++/* Needs the lock as it touches the ring.
++ */
++int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_irq_emit_t *emit = data;
++      int result;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      result = radeon_emit_irq(dev);
++
++      if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++/* Doesn't need the hardware lock.
++ */
++int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_irq_wait_t *irqwait = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      return radeon_wait_irq(dev, irqwait->irq_seq);
++}
++
++/* drm_dma.h hooks
++*/
++void radeon_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      u32 dummy;
++
++      /* Disable *all* interrupts */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++
++      /* Clear bits if they're already high */
++      radeon_acknowledge_irqs(dev_priv, &dummy);
++}
++
++int radeon_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      int ret;
++
++      atomic_set(&dev_priv->swi_emitted, 0);
++      DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
++
++      ret = drm_vblank_init(dev, 2);
++      if (ret)
++              return ret;
++
++      dev->max_vblank_count = 0x001fffff;
++
++      radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
++
++      return 0;
++}
++
++void radeon_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      dev_priv->irq_enabled = 0;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
++      /* Disable *all* interrupts */
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++}
++
++
++int radeon_vblank_crtc_get(struct drm_device *dev)
++{
++      drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
++
++      return dev_priv->vblank_crtc;
++}
++
++int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
++{
++      drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
++      if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
++              DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
++              return -EINVAL;
++      }
++      dev_priv->vblank_crtc = (unsigned int)value;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_mem.c git-nokia/drivers/gpu/drm-tungsten/radeon_mem.c
+--- git/drivers/gpu/drm-tungsten/radeon_mem.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_mem.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,302 @@
++/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++/* Very simple allocator for GART memory, working on a static range
++ * already mapped into each client's address space.
++ */
++
++static struct mem_block *split_block(struct mem_block *p, int start, int size,
++                                   struct drm_file *file_priv)
++{
++      /* Maybe cut off the start of an existing block */
++      if (start > p->start) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start;
++              newblock->size = p->size - (start - p->start);
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size -= newblock->size;
++              p = newblock;
++      }
++
++      /* Maybe cut off the end of an existing block */
++      if (size < p->size) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start + size;
++              newblock->size = p->size - size;
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size = size;
++      }
++
++      out:
++      /* Our block is in the middle */
++      p->file_priv = file_priv;
++      return p;
++}
++
++static struct mem_block *alloc_block(struct mem_block *heap, int size,
++                                   int align2, struct drm_file *file_priv)
++{
++      struct mem_block *p;
++      int mask = (1 << align2) - 1;
++
++      list_for_each(p, heap) {
++              int start = (p->start + mask) & ~mask;
++              if (p->file_priv == NULL && start + size <= p->start + p->size)
++                      return split_block(p, start, size, file_priv);
++      }
++
++      return NULL;
++}
++
++static struct mem_block *find_block(struct mem_block *heap, int start)
++{
++      struct mem_block *p;
++
++      list_for_each(p, heap)
++              if (p->start == start)
++                      return p;
++
++      return NULL;
++}
++
++static void free_block(struct mem_block *p)
++{
++      p->file_priv = NULL;
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      if (p->next->file_priv == NULL) {
++              struct mem_block *q = p->next;
++              p->size += q->size;
++              p->next = q->next;
++              p->next->prev = p;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFS);
++      }
++
++      if (p->prev->file_priv == NULL) {
++              struct mem_block *q = p->prev;
++              q->size += p->size;
++              q->next = p->next;
++              q->next->prev = q;
++              drm_free(p, sizeof(*q), DRM_MEM_BUFS);
++      }
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++static int init_heap(struct mem_block **heap, int start, int size)
++{
++      struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
++
++      if (!blocks)
++              return -ENOMEM;
++
++      *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
++      if (!*heap) {
++              drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++
++      blocks->start = start;
++      blocks->size = size;
++      blocks->file_priv = NULL;
++      blocks->next = blocks->prev = *heap;
++
++      memset(*heap, 0, sizeof(**heap));
++      (*heap)->file_priv = (struct drm_file *) - 1;
++      (*heap)->next = (*heap)->prev = blocks;
++      return 0;
++}
++
++/* Free all blocks associated with the releasing file.
++ */
++void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
++{
++      struct mem_block *p;
++
++      if (!heap || !heap->next)
++              return;
++
++      list_for_each(p, heap) {
++              if (p->file_priv == file_priv)
++                      p->file_priv = NULL;
++      }
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      list_for_each(p, heap) {
++              while (p->file_priv == NULL && p->next->file_priv == NULL) {
++                      struct mem_block *q = p->next;
++                      p->size += q->size;
++                      p->next = q->next;
++                      p->next->prev = p;
++                      drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++              }
++      }
++}
++
++/* Shutdown.
++ */
++void radeon_mem_takedown(struct mem_block **heap)
++{
++      struct mem_block *p;
++
++      if (!*heap)
++              return;
++
++      for (p = (*heap)->next; p != *heap;) {
++              struct mem_block *q = p;
++              p = p->next;
++              drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++      }
++
++      drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
++      *heap = NULL;
++}
++
++/* IOCTL HANDLERS */
++
++static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
++{
++      switch (region) {
++      case RADEON_MEM_REGION_GART:
++              return &dev_priv->gart_heap;
++      case RADEON_MEM_REGION_FB:
++              return &dev_priv->fb_heap;
++      default:
++              return NULL;
++      }
++}
++
++int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_mem_alloc_t *alloc = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, alloc->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      /* Make things easier on ourselves: all allocations at least
++       * 4k aligned.
++       */
++      if (alloc->alignment < 12)
++              alloc->alignment = 12;
++
++      block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
++
++      if (!block)
++              return -ENOMEM;
++
++      if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
++                           sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_mem_free_t *memfree = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, memfree->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      block = find_block(*heap, memfree->region_offset);
++      if (!block)
++              return -EFAULT;
++
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      free_block(block);
++      return 0;
++}
++
++int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_mem_init_heap_t *initheap = data;
++      struct mem_block **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, initheap->region);
++      if (!heap)
++              return -EFAULT;
++
++      if (*heap) {
++              DRM_ERROR("heap already initialized?");
++              return -EFAULT;
++      }
++
++      return init_heap(heap, initheap->start, initheap->size);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_microcode.h git-nokia/drivers/gpu/drm-tungsten/radeon_microcode.h
+--- git/drivers/gpu/drm-tungsten/radeon_microcode.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_microcode.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1844 @@
++/*
++ * Copyright 2007 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef RADEON_MICROCODE_H
++#define RADEON_MICROCODE_H
++
++/* production radeon ucode r1xx-r6xx */
++static const u32 R100_cp_microcode[][2]={
++    { 0x21007000, 0000000000 },
++    { 0x20007000, 0000000000 },
++    { 0x000000b4, 0x00000004 },
++    { 0x000000b8, 0x00000004 },
++    { 0x6f5b4d4c, 0000000000 },
++    { 0x4c4c427f, 0000000000 },
++    { 0x5b568a92, 0000000000 },
++    { 0x4ca09c6d, 0000000000 },
++    { 0xad4c4c4c, 0000000000 },
++    { 0x4ce1af3d, 0000000000 },
++    { 0xd8afafaf, 0000000000 },
++    { 0xd64c4cdc, 0000000000 },
++    { 0x4cd10d10, 0000000000 },
++    { 0x000f0000, 0x00000016 },
++    { 0x362f242d, 0000000000 },
++    { 0x00000012, 0x00000004 },
++    { 0x000f0000, 0x00000016 },
++    { 0x362f282d, 0000000000 },
++    { 0x000380e7, 0x00000002 },
++    { 0x04002c97, 0x00000002 },
++    { 0x000f0001, 0x00000016 },
++    { 0x333a3730, 0000000000 },
++    { 0x000077ef, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000021, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000021, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000021, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00000017, 0x00000004 },
++    { 0x0003802b, 0x00000002 },
++    { 0x040067e0, 0x00000002 },
++    { 0x00000017, 0x00000004 },
++    { 0x000077e0, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x000037e1, 0x00000002 },
++    { 0x040067e1, 0x00000006 },
++    { 0x000077e0, 0x00000002 },
++    { 0x000077e1, 0x00000002 },
++    { 0x000077e1, 0x00000006 },
++    { 0xffffffff, 0000000000 },
++    { 0x10000000, 0000000000 },
++    { 0x0003802b, 0x00000002 },
++    { 0x040067e0, 0x00000006 },
++    { 0x00007675, 0x00000002 },
++    { 0x00007676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0000002f, 0x00000018 },
++    { 0x0000002f, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x00000030, 0x00000018 },
++    { 0x00000030, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x01605000, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00098000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x64c0603e, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00080000, 0x00000016 },
++    { 0000000000, 0000000000 },
++    { 0x0400251d, 0x00000002 },
++    { 0x00007580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x04002580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x00000049, 0x00000004 },
++    { 0x00005000, 0000000000 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x00019000, 0x00000002 },
++    { 0x00011055, 0x00000014 },
++    { 0x00000055, 0x00000012 },
++    { 0x0400250f, 0x00000002 },
++    { 0x0000504f, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00007565, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x00000058, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x01e655b4, 0x00000002 },
++    { 0x4401b0e4, 0x00000002 },
++    { 0x01c110e4, 0x00000002 },
++    { 0x26667066, 0x00000018 },
++    { 0x040c2565, 0x00000002 },
++    { 0x00000066, 0x00000018 },
++    { 0x04002564, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x0000005d, 0x00000004 },
++    { 0x00401069, 0x00000008 },
++    { 0x00101000, 0x00000002 },
++    { 0x000d80ff, 0x00000002 },
++    { 0x0080006c, 0x00000008 },
++    { 0x000f9000, 0x00000002 },
++    { 0x000e00ff, 0x00000002 },
++    { 0000000000, 0x00000006 },
++    { 0x0000008f, 0x00000018 },
++    { 0x0000005b, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00007576, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00009000, 0x00000002 },
++    { 0x00041000, 0x00000002 },
++    { 0x0c00350e, 0x00000002 },
++    { 0x00049000, 0x00000002 },
++    { 0x00051000, 0x00000002 },
++    { 0x01e785f8, 0x00000002 },
++    { 0x00200000, 0x00000002 },
++    { 0x0060007e, 0x0000000c },
++    { 0x00007563, 0x00000002 },
++    { 0x006075f0, 0x00000021 },
++    { 0x20007073, 0x00000004 },
++    { 0x00005073, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00007576, 0x00000002 },
++    { 0x00007577, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x0000750f, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00600083, 0x0000000c },
++    { 0x006075f0, 0x00000021 },
++    { 0x000075f8, 0x00000002 },
++    { 0x00000083, 0x00000004 },
++    { 0x000a750e, 0x00000002 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x0020750f, 0x00000002 },
++    { 0x00600086, 0x00000004 },
++    { 0x00007570, 0x00000002 },
++    { 0x00007571, 0x00000002 },
++    { 0x00007572, 0x00000006 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00005000, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00007568, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000095, 0x0000000c },
++    { 0x00058000, 0x00000002 },
++    { 0x0c607562, 0x00000002 },
++    { 0x00000097, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00600096, 0x00000004 },
++    { 0x400070e5, 0000000000 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x000380e5, 0x00000002 },
++    { 0x000000a8, 0x0000001c },
++    { 0x000650aa, 0x00000018 },
++    { 0x040025bb, 0x00000002 },
++    { 0x000610ab, 0x00000018 },
++    { 0x040075bc, 0000000000 },
++    { 0x000075bb, 0x00000002 },
++    { 0x000075bc, 0000000000 },
++    { 0x00090000, 0x00000006 },
++    { 0x00090000, 0x00000002 },
++    { 0x000d8002, 0x00000006 },
++    { 0x00007832, 0x00000002 },
++    { 0x00005000, 0x00000002 },
++    { 0x000380e7, 0x00000002 },
++    { 0x04002c97, 0x00000002 },
++    { 0x00007820, 0x00000002 },
++    { 0x00007821, 0x00000002 },
++    { 0x00007800, 0000000000 },
++    { 0x01200000, 0x00000002 },
++    { 0x20077000, 0x00000002 },
++    { 0x01200000, 0x00000002 },
++    { 0x20007000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x0120751b, 0x00000002 },
++    { 0x8040750a, 0x00000002 },
++    { 0x8040750b, 0x00000002 },
++    { 0x00110000, 0x00000002 },
++    { 0x000380e5, 0x00000002 },
++    { 0x000000c6, 0x0000001c },
++    { 0x000610ab, 0x00000018 },
++    { 0x844075bd, 0x00000002 },
++    { 0x000610aa, 0x00000018 },
++    { 0x840075bb, 0x00000002 },
++    { 0x000610ab, 0x00000018 },
++    { 0x844075bc, 0x00000002 },
++    { 0x000000c9, 0x00000004 },
++    { 0x804075bd, 0x00000002 },
++    { 0x800075bb, 0x00000002 },
++    { 0x804075bc, 0x00000002 },
++    { 0x00108000, 0x00000002 },
++    { 0x01400000, 0x00000002 },
++    { 0x006000cd, 0x0000000c },
++    { 0x20c07000, 0x00000020 },
++    { 0x000000cf, 0x00000012 },
++    { 0x00800000, 0x00000006 },
++    { 0x0080751d, 0x00000006 },
++    { 0000000000, 0000000000 },
++    { 0x0000775c, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00661000, 0x00000002 },
++    { 0x0460275d, 0x00000020 },
++    { 0x00004000, 0000000000 },
++    { 0x01e00830, 0x00000002 },
++    { 0x21007000, 0000000000 },
++    { 0x6464614d, 0000000000 },
++    { 0x69687420, 0000000000 },
++    { 0x00000073, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x00005000, 0x00000002 },
++    { 0x000380d0, 0x00000002 },
++    { 0x040025e0, 0x00000002 },
++    { 0x000075e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000380e0, 0x00000002 },
++    { 0x04002394, 0x00000002 },
++    { 0x00005000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x00000008, 0000000000 },
++    { 0x00000004, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R200_cp_microcode[][2]={
++    { 0x21007000, 0000000000 },
++    { 0x20007000, 0000000000 },
++    { 0x000000bf, 0x00000004 },
++    { 0x000000c3, 0x00000004 },
++    { 0x7a685e5d, 0000000000 },
++    { 0x5d5d5588, 0000000000 },
++    { 0x68659197, 0000000000 },
++    { 0x5da19f78, 0000000000 },
++    { 0x5d5d5d5d, 0000000000 },
++    { 0x5dee5d50, 0000000000 },
++    { 0xf2acacac, 0000000000 },
++    { 0xe75df9e9, 0000000000 },
++    { 0xb1dd0e11, 0000000000 },
++    { 0xe2afafaf, 0000000000 },
++    { 0x000f0000, 0x00000016 },
++    { 0x452f232d, 0000000000 },
++    { 0x00000013, 0x00000004 },
++    { 0x000f0000, 0x00000016 },
++    { 0x452f272d, 0000000000 },
++    { 0x000f0001, 0x00000016 },
++    { 0x3e4d4a37, 0000000000 },
++    { 0x000077ef, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000020, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000020, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000020, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00000016, 0x00000004 },
++    { 0x0003802a, 0x00000002 },
++    { 0x040067e0, 0x00000002 },
++    { 0x00000016, 0x00000004 },
++    { 0x000077e0, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x000037e1, 0x00000002 },
++    { 0x040067e1, 0x00000006 },
++    { 0x000077e0, 0x00000002 },
++    { 0x000077e1, 0x00000002 },
++    { 0x000077e1, 0x00000006 },
++    { 0xffffffff, 0000000000 },
++    { 0x10000000, 0000000000 },
++    { 0x07f007f0, 0000000000 },
++    { 0x0003802a, 0x00000002 },
++    { 0x040067e0, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007675, 0x00000002 },
++    { 0x00007676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802b, 0x00000002 },
++    { 0x04002676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0000002f, 0x00000018 },
++    { 0x0000002f, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x00000037, 0x00000018 },
++    { 0x00000037, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x01605000, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00098000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x64c06051, 0x00000004 },
++    { 0x00080000, 0x00000016 },
++    { 0000000000, 0000000000 },
++    { 0x0400251d, 0x00000002 },
++    { 0x00007580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x04002580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x0000005a, 0x00000004 },
++    { 0x00005000, 0000000000 },
++    { 0x00061000, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x00019000, 0x00000002 },
++    { 0x00011064, 0x00000014 },
++    { 0x00000064, 0x00000012 },
++    { 0x0400250f, 0x00000002 },
++    { 0x0000505e, 0x00000004 },
++    { 0x00007565, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x00000065, 0x00000004 },
++    { 0x01e655b4, 0x00000002 },
++    { 0x4401b0f0, 0x00000002 },
++    { 0x01c110f0, 0x00000002 },
++    { 0x26667071, 0x00000018 },
++    { 0x040c2565, 0x00000002 },
++    { 0x00000071, 0x00000018 },
++    { 0x04002564, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x00000068, 0x00000004 },
++    { 0x00401074, 0x00000008 },
++    { 0x00101000, 0x00000002 },
++    { 0x000d80ff, 0x00000002 },
++    { 0x00800077, 0x00000008 },
++    { 0x000f9000, 0x00000002 },
++    { 0x000e00ff, 0x00000002 },
++    { 0000000000, 0x00000006 },
++    { 0x00000094, 0x00000018 },
++    { 0x00000068, 0x00000004 },
++    { 0x00007576, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00009000, 0x00000002 },
++    { 0x00041000, 0x00000002 },
++    { 0x0c00350e, 0x00000002 },
++    { 0x00049000, 0x00000002 },
++    { 0x00051000, 0x00000002 },
++    { 0x01e785f8, 0x00000002 },
++    { 0x00200000, 0x00000002 },
++    { 0x00600087, 0x0000000c },
++    { 0x00007563, 0x00000002 },
++    { 0x006075f0, 0x00000021 },
++    { 0x2000707c, 0x00000004 },
++    { 0x0000507c, 0x00000004 },
++    { 0x00007576, 0x00000002 },
++    { 0x00007577, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x0000750f, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x0060008a, 0x0000000c },
++    { 0x006075f0, 0x00000021 },
++    { 0x000075f8, 0x00000002 },
++    { 0x0000008a, 0x00000004 },
++    { 0x000a750e, 0x00000002 },
++    { 0x0020750f, 0x00000002 },
++    { 0x0060008d, 0x00000004 },
++    { 0x00007570, 0x00000002 },
++    { 0x00007571, 0x00000002 },
++    { 0x00007572, 0x00000006 },
++    { 0x00005000, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00007568, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000098, 0x0000000c },
++    { 0x00058000, 0x00000002 },
++    { 0x0c607562, 0x00000002 },
++    { 0x0000009a, 0x00000004 },
++    { 0x00600099, 0x00000004 },
++    { 0x400070f1, 0000000000 },
++    { 0x000380f1, 0x00000002 },
++    { 0x000000a7, 0x0000001c },
++    { 0x000650a9, 0x00000018 },
++    { 0x040025bb, 0x00000002 },
++    { 0x000610aa, 0x00000018 },
++    { 0x040075bc, 0000000000 },
++    { 0x000075bb, 0x00000002 },
++    { 0x000075bc, 0000000000 },
++    { 0x00090000, 0x00000006 },
++    { 0x00090000, 0x00000002 },
++    { 0x000d8002, 0x00000006 },
++    { 0x00005000, 0x00000002 },
++    { 0x00007821, 0x00000002 },
++    { 0x00007800, 0000000000 },
++    { 0x00007821, 0x00000002 },
++    { 0x00007800, 0000000000 },
++    { 0x01665000, 0x00000002 },
++    { 0x000a0000, 0x00000002 },
++    { 0x000671cc, 0x00000002 },
++    { 0x0286f1cd, 0x00000002 },
++    { 0x000000b7, 0x00000010 },
++    { 0x21007000, 0000000000 },
++    { 0x000000be, 0x0000001c },
++    { 0x00065000, 0x00000002 },
++    { 0x000a0000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x000b0000, 0x00000002 },
++    { 0x38067000, 0x00000002 },
++    { 0x000a00ba, 0x00000004 },
++    { 0x20007000, 0000000000 },
++    { 0x01200000, 0x00000002 },
++    { 0x20077000, 0x00000002 },
++    { 0x01200000, 0x00000002 },
++    { 0x20007000, 0000000000 },
++    { 0x00061000, 0x00000002 },
++    { 0x0120751b, 0x00000002 },
++    { 0x8040750a, 0x00000002 },
++    { 0x8040750b, 0x00000002 },
++    { 0x00110000, 0x00000002 },
++    { 0x000380f1, 0x00000002 },
++    { 0x000000d1, 0x0000001c },
++    { 0x000610aa, 0x00000018 },
++    { 0x844075bd, 0x00000002 },
++    { 0x000610a9, 0x00000018 },
++    { 0x840075bb, 0x00000002 },
++    { 0x000610aa, 0x00000018 },
++    { 0x844075bc, 0x00000002 },
++    { 0x000000d4, 0x00000004 },
++    { 0x804075bd, 0x00000002 },
++    { 0x800075bb, 0x00000002 },
++    { 0x804075bc, 0x00000002 },
++    { 0x00108000, 0x00000002 },
++    { 0x01400000, 0x00000002 },
++    { 0x006000d8, 0x0000000c },
++    { 0x20c07000, 0x00000020 },
++    { 0x000000da, 0x00000012 },
++    { 0x00800000, 0x00000006 },
++    { 0x0080751d, 0x00000006 },
++    { 0x000025bb, 0x00000002 },
++    { 0x000040d4, 0x00000004 },
++    { 0x0000775c, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00661000, 0x00000002 },
++    { 0x0460275d, 0x00000020 },
++    { 0x00004000, 0000000000 },
++    { 0x00007999, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00661000, 0x00000002 },
++    { 0x0460299b, 0x00000020 },
++    { 0x00004000, 0000000000 },
++    { 0x01e00830, 0x00000002 },
++    { 0x21007000, 0000000000 },
++    { 0x00005000, 0x00000002 },
++    { 0x00038056, 0x00000002 },
++    { 0x040025e0, 0x00000002 },
++    { 0x000075e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000380ed, 0x00000002 },
++    { 0x04007394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000078c4, 0x00000002 },
++    { 0x000078c5, 0x00000002 },
++    { 0x000078c6, 0x00000002 },
++    { 0x00007924, 0x00000002 },
++    { 0x00007925, 0x00000002 },
++    { 0x00007926, 0x00000002 },
++    { 0x000000f2, 0x00000004 },
++    { 0x00007924, 0x00000002 },
++    { 0x00007925, 0x00000002 },
++    { 0x00007926, 0x00000002 },
++    { 0x000000f9, 0x00000004 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R300_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x000000ae, 0x00000008 },
++    { 0x000000b2, 0x00000008 },
++    { 0x67554b4a, 0000000000 },
++    { 0x4a4a4475, 0000000000 },
++    { 0x55527d83, 0000000000 },
++    { 0x4a8c8b65, 0000000000 },
++    { 0x4aef4af6, 0000000000 },
++    { 0x4ae14a4a, 0000000000 },
++    { 0xe4979797, 0000000000 },
++    { 0xdb4aebdd, 0000000000 },
++    { 0x9ccc4a4a, 0000000000 },
++    { 0xd1989898, 0000000000 },
++    { 0x4a0f9ad6, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000080, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00012000, 0x00000004 },
++    { 0x00082000, 0x00000004 },
++    { 0x1800650e, 0x00000004 },
++    { 0x00092000, 0x00000004 },
++    { 0x000a2000, 0x00000004 },
++    { 0x000f0000, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x00000074, 0x00000018 },
++    { 0x0000e563, 0x00000004 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0000a069, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000077, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000077, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0007a, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000084, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000086, 0x00000008 },
++    { 0x00c00085, 0x00000008 },
++    { 0x000700e3, 0x00000004 },
++    { 0x00000092, 0x00000038 },
++    { 0x000ca094, 0x00000030 },
++    { 0x080045bb, 0x00000004 },
++    { 0x000c2095, 0x00000030 },
++    { 0x0800e5bc, 0000000000 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x00120000, 0x0000000c },
++    { 0x00120000, 0x00000004 },
++    { 0x001b0002, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x000000a4, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x000000a1, 0x00000008 },
++    { 0x000000a6, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x000000ad, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x001400a9, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700e3, 0x00000004 },
++    { 0x000000c0, 0x00000038 },
++    { 0x000c2095, 0x00000030 },
++    { 0x0880e5bd, 0x00000005 },
++    { 0x000c2094, 0x00000030 },
++    { 0x0800e5bb, 0x00000005 },
++    { 0x000c2095, 0x00000030 },
++    { 0x0880e5bc, 0x00000005 },
++    { 0x000000c3, 0x00000008 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000c7, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000c9, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080c3, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700e0, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000e4, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000eb, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000f3, 0x00000034 },
++    { 0x000000f0, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R420_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x00000099, 0x00000008 },
++    { 0x0000009d, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0xd9d3dff6, 0000000000 },
++    { 0x4ac54a4a, 0000000000 },
++    { 0xc8828282, 0000000000 },
++    { 0xbf4acfc1, 0000000000 },
++    { 0x87b04a4a, 0000000000 },
++    { 0xb5838383, 0000000000 },
++    { 0x4a0f85ba, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700c7, 0x00000004 },
++    { 0x00000080, 0x00000038 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x0000008f, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x0000008c, 0x00000008 },
++    { 0x00000091, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x00000098, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x00140094, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700c7, 0x00000004 },
++    { 0x000000a4, 0x00000038 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000ab, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000ad, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080a7, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700c4, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000c8, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000cf, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000d7, 0x00000034 },
++    { 0x000000d4, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0x0000e1cc, 0x00000004 },
++    { 0x0500e1cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000de, 0x00000034 },
++    { 0x000000da, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x0019e1cc, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x0500a000, 0x00000004 },
++    { 0x080041cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 RS600_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x000000a0, 0x00000008 },
++    { 0x000000a4, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0x4ae74af6, 0000000000 },
++    { 0x4ad34a4a, 0000000000 },
++    { 0xd6898989, 0000000000 },
++    { 0xcd4addcf, 0000000000 },
++    { 0x8ebe4ae2, 0000000000 },
++    { 0xc38a8a8a, 0000000000 },
++    { 0x4a0f8cc8, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700d5, 0x00000004 },
++    { 0x00000084, 0x00000038 },
++    { 0x000ca086, 0x00000030 },
++    { 0x080045bb, 0x00000004 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0800e5bc, 0000000000 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x00120000, 0x0000000c },
++    { 0x00120000, 0x00000004 },
++    { 0x001b0002, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x00000096, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x00000093, 0x00000008 },
++    { 0x00000098, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000009f, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x0014009b, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700d5, 0x00000004 },
++    { 0x000000b2, 0x00000038 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bd, 0x00000005 },
++    { 0x000c2086, 0x00000030 },
++    { 0x0800e5bb, 0x00000005 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bc, 0x00000005 },
++    { 0x000000b5, 0x00000008 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000b9, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000bb, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080b5, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700d2, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000d6, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000dd, 0x00000008 },
++    { 0x00e00116, 0000000000 },
++    { 0x000700e1, 0x00000004 },
++    { 0x0800401c, 0x00000004 },
++    { 0x200050e7, 0x00000004 },
++    { 0x0000e01d, 0x00000004 },
++    { 0x000000e4, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000eb, 0x00000034 },
++    { 0x000000e8, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 RS690_cp_microcode[][2]={
++    { 0x000000dd, 0x00000008 },
++    { 0x000000df, 0x00000008 },
++    { 0x000000a0, 0x00000008 },
++    { 0x000000a4, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0x4ad74af6, 0000000000 },
++    { 0x4ac94a4a, 0000000000 },
++    { 0xcc898989, 0000000000 },
++    { 0xc34ad3c5, 0000000000 },
++    { 0x8e4a4a4a, 0000000000 },
++    { 0x4a8a8a8a, 0000000000 },
++    { 0x4a0f8c4a, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700cb, 0x00000004 },
++    { 0x00000084, 0x00000038 },
++    { 0x000ca086, 0x00000030 },
++    { 0x080045bb, 0x00000004 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0800e5bc, 0000000000 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x00120000, 0x0000000c },
++    { 0x00120000, 0x00000004 },
++    { 0x001b0002, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x00000096, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x00000093, 0x00000008 },
++    { 0x00000098, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000009f, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x0014009b, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x00100000, 0x0000002c },
++    { 0x00004000, 0000000000 },
++    { 0x080045c8, 0x00000004 },
++    { 0x00240005, 0x00000004 },
++    { 0x08004d0b, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700cb, 0x00000004 },
++    { 0x000000b7, 0x00000038 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bd, 0x00000005 },
++    { 0x000c2086, 0x00000030 },
++    { 0x0800e5bb, 0x00000005 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bc, 0x00000005 },
++    { 0x000000ba, 0x00000008 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000be, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000c0, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080ba, 0x00000008 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700c8, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000cc, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000d3, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000db, 0x00000034 },
++    { 0x000000d8, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0x000000e1, 0x00000030 },
++    { 0x4200e000, 0000000000 },
++    { 0x000000e1, 0x00000030 },
++    { 0x4000e000, 0000000000 },
++    { 0x0025001b, 0x00000004 },
++    { 0x00230000, 0x00000004 },
++    { 0x00250005, 0x00000004 },
++    { 0x000000e6, 0x00000034 },
++    { 0000000000, 0x0000000c },
++    { 0x00244000, 0x00000004 },
++    { 0x080045c8, 0x00000004 },
++    { 0x00240005, 0x00000004 },
++    { 0x08004d0b, 0x0000000c },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R520_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x00000099, 0x00000008 },
++    { 0x0000009d, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0xe0dae6f6, 0000000000 },
++    { 0x4ac54a4a, 0000000000 },
++    { 0xc8828282, 0000000000 },
++    { 0xbf4acfc1, 0000000000 },
++    { 0x87b04ad5, 0000000000 },
++    { 0xb5838383, 0000000000 },
++    { 0x4a0f85ba, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700c7, 0x00000004 },
++    { 0x00000080, 0x00000038 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x0000008f, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x0000008c, 0x00000008 },
++    { 0x00000091, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x00000098, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x00140094, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700c7, 0x00000004 },
++    { 0x000000a4, 0x00000038 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000ab, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000ad, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080a7, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700c4, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000c8, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000cf, 0x00000008 },
++    { 0xdeadbeef, 0000000000 },
++    { 0x00000116, 0000000000 },
++    { 0x000700d3, 0x00000004 },
++    { 0x080050e7, 0x00000004 },
++    { 0x000700d4, 0x00000004 },
++    { 0x0800401c, 0x00000004 },
++    { 0x0000e01d, 0000000000 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000de, 0x00000034 },
++    { 0x000000db, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0x0000e1cc, 0x00000004 },
++    { 0x0500e1cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000e5, 0x00000034 },
++    { 0x000000e1, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x0019e1cc, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x0500a000, 0x00000004 },
++    { 0x080041cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_state.c git-nokia/drivers/gpu/drm-tungsten/radeon_state.c
+--- git/drivers/gpu/drm-tungsten/radeon_state.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_state.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,3263 @@
++/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Kevin E. Martin <martin@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++/* ================================================================
++ * Helper functions for client state checking and fixup
++ */
++
++static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
++                                                  dev_priv,
++                                                  struct drm_file *file_priv,
++                                                  u32 * offset)
++{
++      u64 off = *offset;
++      u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
++      struct drm_radeon_driver_file_fields *radeon_priv;
++
++      /* Hrm ... the story of the offset ... So this function converts
++       * the various ideas of what userland clients might have for an
++       * offset in the card address space into an offset into the card
++       * address space :) So with a sane client, it should just keep
++       * the value intact and just do some boundary checking. However,
++       * not all clients are sane. Some older clients pass us 0 based
++       * offsets relative to the start of the framebuffer and some may
++       * assume the AGP aperture it appended to the framebuffer, so we
++       * try to detect those cases and fix them up.
++       *
++       * Note: It might be a good idea here to make sure the offset lands
++       * in some "allowed" area to protect things like the PCIE GART...
++       */
++
++      /* First, the best case, the offset already lands in either the
++       * framebuffer or the GART mapped space
++       */
++      if (radeon_check_offset(dev_priv, off))
++              return 0;
++
++      /* Ok, that didn't happen... now check if we have a zero based
++       * offset that fits in the framebuffer + gart space, apply the
++       * magic offset we get from SETPARAM or calculated from fb_location
++       */
++      if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
++              radeon_priv = file_priv->driver_priv;
++              off += radeon_priv->radeon_fb_delta;
++      }
++
++      /* Finally, assume we aimed at a GART offset if beyond the fb */
++      if (off > fb_end)
++              off = off - fb_end - 1 + dev_priv->gart_vm_start;
++
++      /* Now recheck and fail if out of bounds */
++      if (radeon_check_offset(dev_priv, off)) {
++              DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
++              *offset = off;
++              return 0;
++      }
++      return -EINVAL;
++}
++
++static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
++                                                   dev_priv,
++                                                   struct drm_file *file_priv,
++                                                   int id, u32 *data)
++{
++      switch (id) {
++
++      case RADEON_EMIT_PP_MISC:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                  &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
++                      DRM_ERROR("Invalid depth buffer offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_EMIT_PP_CNTL:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                  &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
++                      DRM_ERROR("Invalid colour buffer offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case R200_EMIT_PP_TXOFFSET_0:
++      case R200_EMIT_PP_TXOFFSET_1:
++      case R200_EMIT_PP_TXOFFSET_2:
++      case R200_EMIT_PP_TXOFFSET_3:
++      case R200_EMIT_PP_TXOFFSET_4:
++      case R200_EMIT_PP_TXOFFSET_5:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &data[0])) {
++                      DRM_ERROR("Invalid R200 texture offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_EMIT_PP_TXFILTER_0:
++      case RADEON_EMIT_PP_TXFILTER_1:
++      case RADEON_EMIT_PP_TXFILTER_2:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                  &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
++                      DRM_ERROR("Invalid R100 texture offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case R200_EMIT_PP_CUBIC_OFFSETS_0:
++      case R200_EMIT_PP_CUBIC_OFFSETS_1:
++      case R200_EMIT_PP_CUBIC_OFFSETS_2:
++      case R200_EMIT_PP_CUBIC_OFFSETS_3:
++      case R200_EMIT_PP_CUBIC_OFFSETS_4:
++      case R200_EMIT_PP_CUBIC_OFFSETS_5:{
++                      int i;
++                      for (i = 0; i < 5; i++) {
++                              if (radeon_check_and_fixup_offset(dev_priv,
++                                                                file_priv,
++                                                                &data[i])) {
++                                      DRM_ERROR
++                                          ("Invalid R200 cubic texture offset\n");
++                                      return -EINVAL;
++                              }
++                      }
++                      break;
++              }
++
++      case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
++      case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
++      case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
++                      int i;
++                      for (i = 0; i < 5; i++) {
++                              if (radeon_check_and_fixup_offset(dev_priv,
++                                                                file_priv,
++                                                                &data[i])) {
++                                      DRM_ERROR
++                                          ("Invalid R100 cubic texture offset\n");
++                                      return -EINVAL;
++                              }
++                      }
++              }
++              break;
++
++      case R200_EMIT_VAP_CTL: {
++                      RING_LOCALS;
++                      BEGIN_RING(2);
++                      OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
++                      ADVANCE_RING();
++              }
++              break;
++
++      case RADEON_EMIT_RB3D_COLORPITCH:
++      case RADEON_EMIT_RE_LINE_PATTERN:
++      case RADEON_EMIT_SE_LINE_WIDTH:
++      case RADEON_EMIT_PP_LUM_MATRIX:
++      case RADEON_EMIT_PP_ROT_MATRIX_0:
++      case RADEON_EMIT_RB3D_STENCILREFMASK:
++      case RADEON_EMIT_SE_VPORT_XSCALE:
++      case RADEON_EMIT_SE_CNTL:
++      case RADEON_EMIT_SE_CNTL_STATUS:
++      case RADEON_EMIT_RE_MISC:
++      case RADEON_EMIT_PP_BORDER_COLOR_0:
++      case RADEON_EMIT_PP_BORDER_COLOR_1:
++      case RADEON_EMIT_PP_BORDER_COLOR_2:
++      case RADEON_EMIT_SE_ZBIAS_FACTOR:
++      case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
++      case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
++      case R200_EMIT_PP_TXCBLEND_0:
++      case R200_EMIT_PP_TXCBLEND_1:
++      case R200_EMIT_PP_TXCBLEND_2:
++      case R200_EMIT_PP_TXCBLEND_3:
++      case R200_EMIT_PP_TXCBLEND_4:
++      case R200_EMIT_PP_TXCBLEND_5:
++      case R200_EMIT_PP_TXCBLEND_6:
++      case R200_EMIT_PP_TXCBLEND_7:
++      case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
++      case R200_EMIT_TFACTOR_0:
++      case R200_EMIT_VTX_FMT_0:
++      case R200_EMIT_MATRIX_SELECT_0:
++      case R200_EMIT_TEX_PROC_CTL_2:
++      case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
++      case R200_EMIT_PP_TXFILTER_0:
++      case R200_EMIT_PP_TXFILTER_1:
++      case R200_EMIT_PP_TXFILTER_2:
++      case R200_EMIT_PP_TXFILTER_3:
++      case R200_EMIT_PP_TXFILTER_4:
++      case R200_EMIT_PP_TXFILTER_5:
++      case R200_EMIT_VTE_CNTL:
++      case R200_EMIT_OUTPUT_VTX_COMP_SEL:
++      case R200_EMIT_PP_TAM_DEBUG3:
++      case R200_EMIT_PP_CNTL_X:
++      case R200_EMIT_RB3D_DEPTHXY_OFFSET:
++      case R200_EMIT_RE_AUX_SCISSOR_CNTL:
++      case R200_EMIT_RE_SCISSOR_TL_0:
++      case R200_EMIT_RE_SCISSOR_TL_1:
++      case R200_EMIT_RE_SCISSOR_TL_2:
++      case R200_EMIT_SE_VAP_CNTL_STATUS:
++      case R200_EMIT_SE_VTX_STATE_CNTL:
++      case R200_EMIT_RE_POINTSIZE:
++      case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
++      case R200_EMIT_PP_CUBIC_FACES_0:
++      case R200_EMIT_PP_CUBIC_FACES_1:
++      case R200_EMIT_PP_CUBIC_FACES_2:
++      case R200_EMIT_PP_CUBIC_FACES_3:
++      case R200_EMIT_PP_CUBIC_FACES_4:
++      case R200_EMIT_PP_CUBIC_FACES_5:
++      case RADEON_EMIT_PP_TEX_SIZE_0:
++      case RADEON_EMIT_PP_TEX_SIZE_1:
++      case RADEON_EMIT_PP_TEX_SIZE_2:
++      case R200_EMIT_RB3D_BLENDCOLOR:
++      case R200_EMIT_TCL_POINT_SPRITE_CNTL:
++      case RADEON_EMIT_PP_CUBIC_FACES_0:
++      case RADEON_EMIT_PP_CUBIC_FACES_1:
++      case RADEON_EMIT_PP_CUBIC_FACES_2:
++      case R200_EMIT_PP_TRI_PERF_CNTL:
++      case R200_EMIT_PP_AFS_0:
++      case R200_EMIT_PP_AFS_1:
++      case R200_EMIT_ATF_TFACTOR:
++      case R200_EMIT_PP_TXCTLALL_0:
++      case R200_EMIT_PP_TXCTLALL_1:
++      case R200_EMIT_PP_TXCTLALL_2:
++      case R200_EMIT_PP_TXCTLALL_3:
++      case R200_EMIT_PP_TXCTLALL_4:
++      case R200_EMIT_PP_TXCTLALL_5:
++      case R200_EMIT_VAP_PVS_CNTL:
++              /* These packets don't contain memory offsets */
++              break;
++
++      default:
++              DRM_ERROR("Unknown state packet ID %d\n", id);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
++                                                   dev_priv,
++                                                   struct drm_file *file_priv,
++                                                   drm_radeon_kcmd_buffer_t *
++                                                   cmdbuf,
++                                                   unsigned int *cmdsz)
++{
++      u32 *cmd = (u32 *) cmdbuf->buf;
++      u32 offset, narrays;
++      int count, i, k;
++
++      *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
++
++      if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
++              DRM_ERROR("Not a type 3 packet\n");
++              return -EINVAL;
++      }
++
++      if (4 * *cmdsz > cmdbuf->bufsz) {
++              DRM_ERROR("Packet size larger than size of data provided\n");
++              return -EINVAL;
++      }
++
++      switch(cmd[0] & 0xff00) {
++      /* XXX Are there old drivers needing other packets? */
++
++      case RADEON_3D_DRAW_IMMD:
++      case RADEON_3D_DRAW_VBUF:
++      case RADEON_3D_DRAW_INDX:
++      case RADEON_WAIT_FOR_IDLE:
++      case RADEON_CP_NOP:
++      case RADEON_3D_CLEAR_ZMASK:
++/*    case RADEON_CP_NEXT_CHAR:
++      case RADEON_CP_PLY_NEXTSCAN:
++      case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
++              /* these packets are safe */
++              break;
++
++      case RADEON_CP_3D_DRAW_IMMD_2:
++      case RADEON_CP_3D_DRAW_VBUF_2:
++      case RADEON_CP_3D_DRAW_INDX_2:
++      case RADEON_3D_CLEAR_HIZ:
++              /* safe but r200 only */
++              if ((dev_priv->chip_family < CHIP_R200) ||
++                  (dev_priv->chip_family > CHIP_RV280)) {
++                      DRM_ERROR("Invalid 3d packet for non r200-class chip\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_3D_LOAD_VBPNTR:
++              count = (cmd[0] >> 16) & 0x3fff;
++
++              if (count > 18) { /* 12 arrays max */
++                      DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
++                                count);
++                      return -EINVAL;
++              }
++
++              /* carefully check packet contents */
++              narrays = cmd[1] & ~0xc000;
++              k = 0;
++              i = 2;
++              while ((k < narrays) && (i < (count + 2))) {
++                      i++;            /* skip attribute field */
++                      if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                        &cmd[i])) {
++                              DRM_ERROR
++                                  ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
++                                   k, i);
++                              return -EINVAL;
++                      }
++                      k++;
++                      i++;
++                      if (k == narrays)
++                              break;
++                      /* have one more to process, they come in pairs */
++                      if (radeon_check_and_fixup_offset(dev_priv,
++                                                        file_priv, &cmd[i]))
++                      {
++                              DRM_ERROR
++                                  ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
++                                   k, i);
++                              return -EINVAL;
++                      }
++                      k++;
++                      i++;
++              }
++              /* do the counts match what we expect ? */
++              if ((k != narrays) || (i != (count + 2))) {
++                      DRM_ERROR
++                          ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
++                            k, i, narrays, count + 1);
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_3D_RNDR_GEN_INDX_PRIM:
++              if (dev_priv->chip_family > CHIP_RS200) {
++                      DRM_ERROR("Invalid 3d packet for non-r100-class chip\n");
++                      return -EINVAL;
++              }
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
++                              DRM_ERROR("Invalid rndr_gen_indx offset\n");
++                              return -EINVAL;
++              }
++              break;
++
++      case RADEON_CP_INDX_BUFFER:
++              /* safe but r200 only */
++              if ((dev_priv->chip_family < CHIP_R200) ||
++                  (dev_priv->chip_family > CHIP_RV280)) {
++                      DRM_ERROR("Invalid 3d packet for non-r200-class chip\n");
++                      return -EINVAL;
++              }
++              if ((cmd[1] & 0x8000ffff) != 0x80000810) {
++                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
++                      return -EINVAL;
++              }
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
++                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_CNTL_HOSTDATA_BLT:
++      case RADEON_CNTL_PAINT_MULTI:
++      case RADEON_CNTL_BITBLT_MULTI:
++              /* MSB of opcode: next DWORD GUI_CNTL */
++              if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
++                            | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[2] << 10;
++                      if (radeon_check_and_fixup_offset
++                          (dev_priv, file_priv, &offset)) {
++                              DRM_ERROR("Invalid first packet offset\n");
++                              return -EINVAL;
++                      }
++                      cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
++              }
++
++              if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
++                  (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[3] << 10;
++                      if (radeon_check_and_fixup_offset
++                          (dev_priv, file_priv, &offset)) {
++                              DRM_ERROR("Invalid second packet offset\n");
++                              return -EINVAL;
++                      }
++                      cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
++              }
++              break;
++
++      default:
++              DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* ================================================================
++ * CP hardware state programming functions
++ */
++
++static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
++                                           struct drm_clip_rect * box)
++{
++      RING_LOCALS;
++
++      DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
++                box->x1, box->y1, box->x2, box->y2);
++
++      BEGIN_RING(4);
++      OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
++      OUT_RING((box->y1 << 16) | box->x1);
++      OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
++      OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
++      ADVANCE_RING();
++}
++
++/* Emit 1.1 state
++ */
++static int radeon_emit_state(drm_radeon_private_t * dev_priv,
++                           struct drm_file *file_priv,
++                           drm_radeon_context_regs_t * ctx,
++                           drm_radeon_texture_regs_t * tex,
++                           unsigned int dirty)
++{
++      RING_LOCALS;
++      DRM_DEBUG("dirty=0x%08x\n", dirty);
++
++      if (dirty & RADEON_UPLOAD_CONTEXT) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &ctx->rb3d_depthoffset)) {
++                      DRM_ERROR("Invalid depth buffer offset\n");
++                      return -EINVAL;
++              }
++
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &ctx->rb3d_coloroffset)) {
++                      DRM_ERROR("Invalid depth buffer offset\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(14);
++              OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
++              OUT_RING(ctx->pp_misc);
++              OUT_RING(ctx->pp_fog_color);
++              OUT_RING(ctx->re_solid_color);
++              OUT_RING(ctx->rb3d_blendcntl);
++              OUT_RING(ctx->rb3d_depthoffset);
++              OUT_RING(ctx->rb3d_depthpitch);
++              OUT_RING(ctx->rb3d_zstencilcntl);
++              OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
++              OUT_RING(ctx->pp_cntl);
++              OUT_RING(ctx->rb3d_cntl);
++              OUT_RING(ctx->rb3d_coloroffset);
++              OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
++              OUT_RING(ctx->rb3d_colorpitch);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_VERTFMT) {
++              BEGIN_RING(2);
++              OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
++              OUT_RING(ctx->se_coord_fmt);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_LINE) {
++              BEGIN_RING(5);
++              OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
++              OUT_RING(ctx->re_line_pattern);
++              OUT_RING(ctx->re_line_state);
++              OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
++              OUT_RING(ctx->se_line_width);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_BUMPMAP) {
++              BEGIN_RING(5);
++              OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
++              OUT_RING(ctx->pp_lum_matrix);
++              OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
++              OUT_RING(ctx->pp_rot_matrix_0);
++              OUT_RING(ctx->pp_rot_matrix_1);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_MASKS) {
++              BEGIN_RING(4);
++              OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
++              OUT_RING(ctx->rb3d_stencilrefmask);
++              OUT_RING(ctx->rb3d_ropcntl);
++              OUT_RING(ctx->rb3d_planemask);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_VIEWPORT) {
++              BEGIN_RING(7);
++              OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
++              OUT_RING(ctx->se_vport_xscale);
++              OUT_RING(ctx->se_vport_xoffset);
++              OUT_RING(ctx->se_vport_yscale);
++              OUT_RING(ctx->se_vport_yoffset);
++              OUT_RING(ctx->se_vport_zscale);
++              OUT_RING(ctx->se_vport_zoffset);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_SETUP) {
++              BEGIN_RING(4);
++              OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
++              OUT_RING(ctx->se_cntl);
++              OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
++              OUT_RING(ctx->se_cntl_status);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_MISC) {
++              BEGIN_RING(2);
++              OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
++              OUT_RING(ctx->re_misc);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_TEX0) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &tex[0].pp_txoffset)) {
++                      DRM_ERROR("Invalid texture offset for unit 0\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
++              OUT_RING(tex[0].pp_txfilter);
++              OUT_RING(tex[0].pp_txformat);
++              OUT_RING(tex[0].pp_txoffset);
++              OUT_RING(tex[0].pp_txcblend);
++              OUT_RING(tex[0].pp_txablend);
++              OUT_RING(tex[0].pp_tfactor);
++              OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
++              OUT_RING(tex[0].pp_border_color);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_TEX1) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &tex[1].pp_txoffset)) {
++                      DRM_ERROR("Invalid texture offset for unit 1\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
++              OUT_RING(tex[1].pp_txfilter);
++              OUT_RING(tex[1].pp_txformat);
++              OUT_RING(tex[1].pp_txoffset);
++              OUT_RING(tex[1].pp_txcblend);
++              OUT_RING(tex[1].pp_txablend);
++              OUT_RING(tex[1].pp_tfactor);
++              OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
++              OUT_RING(tex[1].pp_border_color);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_TEX2) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &tex[2].pp_txoffset)) {
++                      DRM_ERROR("Invalid texture offset for unit 2\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
++              OUT_RING(tex[2].pp_txfilter);
++              OUT_RING(tex[2].pp_txformat);
++              OUT_RING(tex[2].pp_txoffset);
++              OUT_RING(tex[2].pp_txcblend);
++              OUT_RING(tex[2].pp_txablend);
++              OUT_RING(tex[2].pp_tfactor);
++              OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
++              OUT_RING(tex[2].pp_border_color);
++              ADVANCE_RING();
++      }
++
++      return 0;
++}
++
++/* Emit 1.2 state
++ *
++ * Drm-minor-1.2 variant of state emission: flushes the z-bias pair when
++ * RADEON_UPLOAD_ZBIAS is set in state->dirty, then delegates the rest of
++ * the dirty bits to radeon_emit_state() for the context and texture
++ * registers.  Returns 0 on success or the error from radeon_emit_state().
++ */
++static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
++                            struct drm_file *file_priv,
++                            drm_radeon_state_t * state)
++{
++      RING_LOCALS;
++
++      if (state->dirty & RADEON_UPLOAD_ZBIAS) {
++              /* CP_PACKET0 with count 1 writes two consecutive registers:
++               * SE_ZBIAS_FACTOR followed by SE_ZBIAS_CONSTANT.
++               */
++              BEGIN_RING(3);
++              OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
++              OUT_RING(state->context2.se_zbias_factor);
++              OUT_RING(state->context2.se_zbias_constant);
++              ADVANCE_RING();
++      }
++
++      return radeon_emit_state(dev_priv, file_priv, &state->context,
++                               state->tex, state->dirty);
++}
++
++/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
++ * 1.3 cmdbuffers allow all previous state to be updated as well as
++ * the tcl scalar and vector areas.
++ *
++ * Lookup table indexed by the state-packet id from the command buffer.
++ * The trailing /* NN *​/ comments in some entries mark the table index.
++ */
++static struct {
++      int start;              /* first hardware register written */
++      int len;                /* NOTE(review): looks like the dword payload
++                                 count for the packet — confirm against the
++                                 command-buffer parser */
++      const char *name;       /* register-range name, for error reporting */
++} packet[RADEON_MAX_STATE_PACKETS] = {
++      {RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
++      {RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
++      {RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
++      {RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
++      {RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
++      {RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
++      {RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
++      {RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
++      {RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
++      {RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
++      {RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
++      {RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
++      {RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
++      {RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
++      {RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
++      {RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
++      {RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
++      {RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
++      {RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
++      {RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
++      {RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
++                  "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
++      {R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
++      {R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
++      {R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
++      {R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
++      {R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
++      {R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
++      {R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
++      {R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
++      {R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
++      {R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
++      {R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
++      {R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
++      {R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
++      {R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
++      {R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
++      {R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
++      {R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
++      {R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
++      {R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
++      {R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
++      {R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
++      {R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
++      {R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
++      {R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
++      {R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
++      {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
++      {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
++      {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
++      {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
++       "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
++      {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
++      {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
++      {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
++      {R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
++      {R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
++      {R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
++      {R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
++      {R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
++      {R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
++      {R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
++      {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
++                  "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
++      {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},    /* 61 */
++      {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
++      {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
++      {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
++      {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
++      {R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
++      {R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
++      {R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
++      {R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
++      {R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
++      {R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
++      {R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
++      {RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
++      {RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
++      {RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
++      {R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
++      {R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
++      {RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
++      {RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
++      {RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
++      {RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
++      {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
++      {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
++      {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
++      {R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
++      {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
++      {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
++      {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
++      {R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
++      {R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
++      {R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
++      {R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
++      {R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
++      {R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
++};
++
++/* ================================================================
++ * Performance monitoring functions
++ */
++
++/* Fill a small w x h rectangle at (x, y), offset by the first clip box,
++ * with the given 8-bit r/g/b colour using a CNTL_PAINT_MULTI solid-brush
++ * blit.  Used by radeon_cp_performance_boxes() to draw debug overlays.
++ */
++static void radeon_clear_box(drm_radeon_private_t * dev_priv,
++                           int x, int y, int w, int h, int r, int g, int b)
++{
++      u32 color;
++      RING_LOCALS;
++
++      /* Offset into the client's first clip rect so the box lands
++       * inside its drawable.
++       */
++      x += dev_priv->sarea_priv->boxes[0].x1;
++      y += dev_priv->sarea_priv->boxes[0].y1;
++
++      /* Pack the 8-bit components to match the framebuffer format. */
++      switch (dev_priv->color_fmt) {
++      case RADEON_COLOR_FORMAT_RGB565:
++              color = (((r & 0xf8) << 8) |
++                       ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
++              break;
++      case RADEON_COLOR_FORMAT_ARGB8888:
++      default:
++              /* Opaque alpha in the top byte. */
++              color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
++              break;
++      }
++
++      /* Open the full write mask before the 2D fill. */
++      BEGIN_RING(4);
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
++      OUT_RING(0xffffffff);
++      ADVANCE_RING();
++
++      BEGIN_RING(6);
++
++      OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
++      OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++               RADEON_GMC_BRUSH_SOLID_COLOR |
++               (dev_priv->color_fmt << 8) |
++               RADEON_GMC_SRC_DATATYPE_COLOR |
++               RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
++
++      /* NOTE(review): pfCurrentPage selects which buffer is drawn to;
++       * presumably this targets whichever buffer is currently visible
++       * after a page flip -- confirm against the flip logic.
++       */
++      if (dev_priv->sarea_priv->pfCurrentPage == 1) {
++              OUT_RING(dev_priv->front_pitch_offset);
++      } else {
++              OUT_RING(dev_priv->back_pitch_offset);
++      }
++
++      OUT_RING(color);
++
++      /* Destination rectangle: packed (x, y) then (w, h). */
++      OUT_RING((x << 16) | y);
++      OUT_RING((w << 16) | h);
++
++      ADVANCE_RING();
++}
++
++/* Draw small coloured boxes on screen that visualise per-frame driver
++ * statistics (waits, flips, texture uploads, buffer usage), then reset
++ * the stats so the next frame starts fresh.
++ */
++static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
++{
++      /* Collapse various things into a wait flag -- trying to
++       * guess if userspace slept -- better just to have them tell us.
++       */
++      if (dev_priv->stats.last_frame_reads > 1 ||
++          dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
++              dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++      }
++
++      /* Any freelist spinning also counts as an idle wait. */
++      if (dev_priv->stats.freelist_loops) {
++              dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++      }
++
++      /* Purple box for page flipping
++       */
++      if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
++              radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
++
++      /* Red box if we have to wait for idle at any point
++       */
++      if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
++              radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
++
++      /* Blue box: lost context?
++       */
++
++      /* Yellow box for texture swaps
++       */
++      if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
++              radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
++
++      /* Green box if hardware never idles (as far as we can tell)
++       */
++      if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
++              radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
++
++      /* Draw bars indicating number of buffers allocated
++       * (not a great measure, easily confused)
++       */
++      if (dev_priv->stats.requested_bufs) {
++              /* Clamp so the bar never exceeds 100 pixels. */
++              if (dev_priv->stats.requested_bufs > 100)
++                      dev_priv->stats.requested_bufs = 100;
++
++              radeon_clear_box(dev_priv, 4, 16,
++                               dev_priv->stats.requested_bufs, 4,
++                               196, 128, 128);
++      }
++
++      /* Stats are consumed once drawn; zero them for the next frame. */
++      memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
++
++}
++
++/* ================================================================
++ * CP command dispatch functions
++ */
++
++static void radeon_cp_dispatch_clear(struct drm_device * dev,
++                                   drm_radeon_clear_t * clear,
++                                   drm_radeon_clear_rect_t * depth_boxes)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      unsigned int flags = clear->flags;
++      u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("flags = 0x%x\n", flags);
++
++      dev_priv->stats.clears++;
++
++      if (dev_priv->sarea_priv->pfCurrentPage == 1) {
++              unsigned int tmp = flags;
++
++              flags &= ~(RADEON_FRONT | RADEON_BACK);
++              if (tmp & RADEON_FRONT)
++                      flags |= RADEON_BACK;
++              if (tmp & RADEON_BACK)
++                      flags |= RADEON_FRONT;
++      }
++
++      if (flags & (RADEON_FRONT | RADEON_BACK)) {
++
++              BEGIN_RING(4);
++
++              /* Ensure the 3D stream is idle before doing a
++               * 2D fill to clear the front or back buffer.
++               */
++              RADEON_WAIT_UNTIL_3D_IDLE();
++
++              OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
++              OUT_RING(clear->color_mask);
++
++              ADVANCE_RING();
++
++              /* Make sure we restore the 3D state next time.
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              for (i = 0; i < nbox; i++) {
++                      int x = pbox[i].x1;
++                      int y = pbox[i].y1;
++                      int w = pbox[i].x2 - x;
++                      int h = pbox[i].y2 - y;
++
++                      DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
++                                x, y, w, h, flags);
++
++                      if (flags & RADEON_FRONT) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CP_PACKET3
++                                       (RADEON_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                                       RADEON_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->
++                                        color_fmt << 8) |
++                                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                                       RADEON_ROP3_P |
++                                       RADEON_GMC_CLR_CMP_CNTL_DIS);
++
++                              OUT_RING(dev_priv->front_pitch_offset);
++                              OUT_RING(clear->clear_color);
++
++                              OUT_RING((x << 16) | y);
++                              OUT_RING((w << 16) | h);
++
++                              ADVANCE_RING();
++                      }
++
++                      if (flags & RADEON_BACK) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CP_PACKET3
++                                       (RADEON_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                                       RADEON_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->
++                                        color_fmt << 8) |
++                                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                                       RADEON_ROP3_P |
++                                       RADEON_GMC_CLR_CMP_CNTL_DIS);
++
++                              OUT_RING(dev_priv->back_pitch_offset);
++                              OUT_RING(clear->clear_color);
++
++                              OUT_RING((x << 16) | y);
++                              OUT_RING((w << 16) | h);
++
++                              ADVANCE_RING();
++                      }
++              }
++      }
++
++      /* hyper z clear */
++      /* no docs available, based on reverse engineering by Stephane Marchesin */
++      if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
++          && (flags & RADEON_CLEAR_FASTZ)) {
++
++              int i;
++              int depthpixperline =
++                  dev_priv->depth_fmt ==
++                  RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
++                                                     2) : (dev_priv->
++                                                           depth_pitch / 4);
++
++              u32 clearmask;
++
++              u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
++                  ((clear->depth_mask & 0xff) << 24);
++
++              /* Make sure we restore the 3D state next time.
++               * we haven't touched any "normal" state - still need this?
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              if ((dev_priv->flags & RADEON_HAS_HIERZ)
++                  && (flags & RADEON_USE_HIERZ)) {
++                      /* FIXME : reverse engineer that for Rx00 cards */
++                      /* FIXME : the mask supposedly contains low-res z values. So can't set
++                         just to the max (0xff? or actually 0x3fff?), need to take z clear
++                         value into account? */
++                      /* pattern seems to work for r100, though get slight
++                         rendering errors with glxgears. If hierz is not enabled for r100,
++                         only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
++                         other ones are ignored, and the same clear mask can be used. That's
++                         very different behaviour than R200 which needs different clear mask
++                         and different number of tiles to clear if hierz is enabled or not !?!
++                       */
++                      clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
++              } else {
++                      /* clear mask : chooses the clearing pattern.
++                         rv250: could be used to clear only parts of macrotiles
++                         (but that would get really complicated...)?
++                         bit 0 and 1 (either or both of them ?!?!) are used to
++                         not clear tile (or maybe one of the bits indicates if the tile is
++                         compressed or not), bit 2 and 3 to not clear tile 1,...,.
++                         Pattern is as follows:
++                         | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
++                         bits -------------------------------------------------
++                         | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
++                         rv100: clearmask covers 2x8 4x1 tiles, but one clear still
++                         covers 256 pixels ?!?
++                       */
++                      clearmask = 0x0;
++              }
++
++              BEGIN_RING(8);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++              OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
++                           tempRB3D_DEPTHCLEARVALUE);
++              /* what offset is this exactly ? */
++              OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
++              /* need ctlstat, otherwise get some strange black flickering */
++              OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
++                           RADEON_RB3D_ZC_FLUSH_ALL);
++              ADVANCE_RING();
++
++              for (i = 0; i < nbox; i++) {
++                      int tileoffset, nrtilesx, nrtilesy, j;
++                      /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
++                      if ((dev_priv->flags & RADEON_HAS_HIERZ)
++                          && (dev_priv->chip_family < CHIP_R200)) {
++                              /* FIXME : figure this out for r200 (when hierz is enabled). Or
++                                 maybe r200 actually doesn't need to put the low-res z value into
++                                 the tile cache like r100, but just needs to clear the hi-level z-buffer?
++                                 Works for R100, both with hierz and without.
++                                 R100 seems to operate on 2x1 8x8 tiles, but...
++                                 odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
++                                 problematic with resolutions which are not 64 pix aligned? */
++                              tileoffset =
++                                  ((pbox[i].y1 >> 3) * depthpixperline +
++                                   pbox[i].x1) >> 6;
++                              nrtilesx =
++                                  ((pbox[i].x2 & ~63) -
++                                   (pbox[i].x1 & ~63)) >> 4;
++                              nrtilesy =
++                                  (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
++                              for (j = 0; j <= nrtilesy; j++) {
++                                      BEGIN_RING(4);
++                                      OUT_RING(CP_PACKET3
++                                               (RADEON_3D_CLEAR_ZMASK, 2));
++                                      /* first tile */
++                                      OUT_RING(tileoffset * 8);
++                                      /* the number of tiles to clear */
++                                      OUT_RING(nrtilesx + 4);
++                                      /* clear mask : chooses the clearing pattern. */
++                                      OUT_RING(clearmask);
++                                      ADVANCE_RING();
++                                      tileoffset += depthpixperline >> 6;
++                              }
++                      } else if ((dev_priv->chip_family >= CHIP_R200) &&
++                                 (dev_priv->chip_family <= CHIP_RV280)) {
++                              /* works for rv250. */
++                              /* find first macro tile (8x2 4x4 z-pixels on rv250) */
++                              tileoffset =
++                                  ((pbox[i].y1 >> 3) * depthpixperline +
++                                   pbox[i].x1) >> 5;
++                              nrtilesx =
++                                  (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
++                              nrtilesy =
++                                  (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
++                              for (j = 0; j <= nrtilesy; j++) {
++                                      BEGIN_RING(4);
++                                      OUT_RING(CP_PACKET3
++                                               (RADEON_3D_CLEAR_ZMASK, 2));
++                                      /* first tile */
++                                      /* judging by the first tile offset needed, could possibly
++                                         directly address/clear 4x4 tiles instead of 8x2 * 4x4
++                                         macro tiles, though would still need clear mask for
++                                         right/bottom if truly 4x4 granularity is desired ? */
++                                      OUT_RING(tileoffset * 16);
++                                      /* the number of tiles to clear */
++                                      OUT_RING(nrtilesx + 1);
++                                      /* clear mask : chooses the clearing pattern. */
++                                      OUT_RING(clearmask);
++                                      ADVANCE_RING();
++                                      tileoffset += depthpixperline >> 5;
++                              }
++                      } else {        /* rv 100 */
++                              /* rv100 might not need 64 pix alignment, who knows */
++                              /* offsets are, hmm, weird */
++                              tileoffset =
++                                  ((pbox[i].y1 >> 4) * depthpixperline +
++                                   pbox[i].x1) >> 6;
++                              nrtilesx =
++                                  ((pbox[i].x2 & ~63) -
++                                   (pbox[i].x1 & ~63)) >> 4;
++                              nrtilesy =
++                                  (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
++                              for (j = 0; j <= nrtilesy; j++) {
++                                      BEGIN_RING(4);
++                                      OUT_RING(CP_PACKET3
++                                               (RADEON_3D_CLEAR_ZMASK, 2));
++                                      OUT_RING(tileoffset * 128);
++                                      /* the number of tiles to clear */
++                                      OUT_RING(nrtilesx + 4);
++                                      /* clear mask : chooses the clearing pattern. */
++                                      OUT_RING(clearmask);
++                                      ADVANCE_RING();
++                                      tileoffset += depthpixperline >> 6;
++                              }
++                      }
++              }
++
++              /* TODO don't always clear all hi-level z tiles */
++              if ((dev_priv->flags & RADEON_HAS_HIERZ)
++                  && ((dev_priv->chip_family >= CHIP_R200) &&
++                      (dev_priv->chip_family <= CHIP_RV280))
++                  && (flags & RADEON_USE_HIERZ))
++                      /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
++                      /* FIXME : the mask supposedly contains low-res z values. So can't set
++                         just to the max (0xff? or actually 0x3fff?), need to take z clear
++                         value into account? */
++              {
++                      BEGIN_RING(4);
++                      OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
++                      OUT_RING(0x0);  /* First tile */
++                      OUT_RING(0x3cc0);
++                      OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
++                      ADVANCE_RING();
++              }
++      }
++
++      /* We have to clear the depth and/or stencil buffers by
++       * rendering a quad into just those buffers.  Thus, we have to
++       * make sure the 3D engine is configured correctly.
++       */
++      else if ((dev_priv->chip_family >= CHIP_R200) &&
++               (dev_priv->chip_family <= CHIP_RV280) &&
++               (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
++
++              int tempPP_CNTL;
++              int tempRE_CNTL;
++              int tempRB3D_CNTL;
++              int tempRB3D_ZSTENCILCNTL;
++              int tempRB3D_STENCILREFMASK;
++              int tempRB3D_PLANEMASK;
++              int tempSE_CNTL;
++              int tempSE_VTE_CNTL;
++              int tempSE_VTX_FMT_0;
++              int tempSE_VTX_FMT_1;
++              int tempSE_VAP_CNTL;
++              int tempRE_AUX_SCISSOR_CNTL;
++
++              tempPP_CNTL = 0;
++              tempRE_CNTL = 0;
++
++              tempRB3D_CNTL = depth_clear->rb3d_cntl;
++
++              tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
++              tempRB3D_STENCILREFMASK = 0x0;
++
++              tempSE_CNTL = depth_clear->se_cntl;
++
++              /* Disable TCL */
++
++              tempSE_VAP_CNTL = (     /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK |  */
++                                        (0x9 <<
++                                         SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
++
++              tempRB3D_PLANEMASK = 0x0;
++
++              tempRE_AUX_SCISSOR_CNTL = 0x0;
++
++              tempSE_VTE_CNTL =
++                  SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
++
++              /* Vertex format (X, Y, Z, W) */
++              tempSE_VTX_FMT_0 =
++                  SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
++                  SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
++              tempSE_VTX_FMT_1 = 0x0;
++
++              /*
++               * Depth buffer specific enables
++               */
++              if (flags & RADEON_DEPTH) {
++                      /* Enable depth buffer */
++                      tempRB3D_CNTL |= RADEON_Z_ENABLE;
++              } else {
++                      /* Disable depth buffer */
++                      tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
++              }
++
++              /*
++               * Stencil buffer specific enables
++               */
++              if (flags & RADEON_STENCIL) {
++                      tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
++                      tempRB3D_STENCILREFMASK = clear->depth_mask;
++              } else {
++                      tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
++                      tempRB3D_STENCILREFMASK = 0x00000000;
++              }
++
++              if (flags & RADEON_USE_COMP_ZBUF) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
++                          RADEON_Z_DECOMPRESSION_ENABLE;
++              }
++              if (flags & RADEON_USE_HIERZ) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
++              }
++
++              BEGIN_RING(26);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++
++              OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
++              OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
++              OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
++              OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
++              OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
++                           tempRB3D_STENCILREFMASK);
++              OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
++              OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
++              OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
++              OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
++              OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
++              OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
++              OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
++              ADVANCE_RING();
++
++              /* Make sure we restore the 3D state next time.
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              for (i = 0; i < nbox; i++) {
++
++                      /* Funny that this should be required --
++                       *  sets top-left?
++                       */
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++
++                      BEGIN_RING(14);
++                      OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
++                      OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
++                                RADEON_PRIM_WALK_RING |
++                                (3 << RADEON_NUM_VERTICES_SHIFT)));
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x3f800000);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x3f800000);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x3f800000);
++                      ADVANCE_RING();
++              }
++      } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
++
++              int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
++
++              rb3d_cntl = depth_clear->rb3d_cntl;
++
++              if (flags & RADEON_DEPTH) {
++                      rb3d_cntl |= RADEON_Z_ENABLE;
++              } else {
++                      rb3d_cntl &= ~RADEON_Z_ENABLE;
++              }
++
++              if (flags & RADEON_STENCIL) {
++                      rb3d_cntl |= RADEON_STENCIL_ENABLE;
++                      rb3d_stencilrefmask = clear->depth_mask;        /* misnamed field */
++              } else {
++                      rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
++                      rb3d_stencilrefmask = 0x00000000;
++              }
++
++              if (flags & RADEON_USE_COMP_ZBUF) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
++                          RADEON_Z_DECOMPRESSION_ENABLE;
++              }
++              if (flags & RADEON_USE_HIERZ) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
++              }
++
++              BEGIN_RING(13);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++
++              OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
++              OUT_RING(0x00000000);
++              OUT_RING(rb3d_cntl);
++
++              OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
++              OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
++              OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
++              OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
++              ADVANCE_RING();
++
++              /* Make sure we restore the 3D state next time.
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              for (i = 0; i < nbox; i++) {
++
++                      /* Funny that this should be required --
++                       *  sets top-left?
++                       */
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++
++                      BEGIN_RING(15);
++
++                      OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
++                      OUT_RING(RADEON_VTX_Z_PRESENT |
++                               RADEON_VTX_PKCOLOR_PRESENT);
++                      OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
++                                RADEON_PRIM_WALK_RING |
++                                RADEON_MAOS_ENABLE |
++                                RADEON_VTX_FMT_RADEON_MODE |
++                                (3 << RADEON_NUM_VERTICES_SHIFT)));
++
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x0);
++
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x0);
++
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x0);
++
++                      ADVANCE_RING();
++              }
++      }
++
++      /* Increment the clear counter.  The client-side 3D driver must
++       * wait on this value before performing the clear ioctl.  We
++       * need this because the card's so damned fast...
++       */
++      dev_priv->sarea_priv->last_clear++;
++
++      BEGIN_RING(4);
++
++      RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
++      RADEON_WAIT_UNTIL_IDLE();
++
++      ADVANCE_RING();
++}
++
++static void radeon_cp_dispatch_swap(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* Do some trivial performance monitoring...
++       */
++      if (dev_priv->do_boxes)
++              radeon_cp_performance_boxes(dev_priv);
++
++      /* Wait for the 3D stream to idle before dispatching the bitblt.
++       * This will prevent data corruption between the two streams.
++       */
++      BEGIN_RING(2);
++
++      RADEON_WAIT_UNTIL_3D_IDLE();
++
++      ADVANCE_RING();
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
++
++              BEGIN_RING(9);
++
++              OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
++              OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_BRUSH_NONE |
++                       (dev_priv->color_fmt << 8) |
++                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                       RADEON_ROP3_S |
++                       RADEON_DP_SRC_SOURCE_MEMORY |
++                       RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
++
++              /* Make this work even if front & back are flipped:
++               */
++              OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
++              if (dev_priv->sarea_priv->pfCurrentPage == 0) {
++                      OUT_RING(dev_priv->back_pitch_offset);
++                      OUT_RING(dev_priv->front_pitch_offset);
++              } else {
++                      OUT_RING(dev_priv->front_pitch_offset);
++                      OUT_RING(dev_priv->back_pitch_offset);
++              }
++
++              OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
++              OUT_RING((x << 16) | y);
++              OUT_RING((x << 16) | y);
++              OUT_RING((w << 16) | h);
++
++              ADVANCE_RING();
++      }
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++
++      BEGIN_RING(4);
++
++      RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
++      RADEON_WAIT_UNTIL_2D_IDLE();
++
++      ADVANCE_RING();
++}
++
++static void radeon_cp_dispatch_flip(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
++      int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
++          ? dev_priv->front_offset : dev_priv->back_offset;
++      RING_LOCALS;
++      DRM_DEBUG("pfCurrentPage=%d\n",
++                dev_priv->sarea_priv->pfCurrentPage);
++
++      /* Do some trivial performance monitoring...
++       */
++      if (dev_priv->do_boxes) {
++              dev_priv->stats.boxes |= RADEON_BOX_FLIP;
++              radeon_cp_performance_boxes(dev_priv);
++      }
++
++      /* Update the frame offsets for both CRTCs
++       */
++      BEGIN_RING(6);
++
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      OUT_RING_REG(RADEON_CRTC_OFFSET,
++                   ((sarea->frame.y * dev_priv->front_pitch +
++                     sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
++                   + offset);
++      OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
++                   + offset);
++
++      ADVANCE_RING();
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++      dev_priv->sarea_priv->pfCurrentPage =
++              1 - dev_priv->sarea_priv->pfCurrentPage;
++
++      BEGIN_RING(2);
++
++      RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
++
++      ADVANCE_RING();
++}
++
++static int bad_prim_vertex_nr(int primitive, int nr)
++{
++      switch (primitive & RADEON_PRIM_TYPE_MASK) {
++      case RADEON_PRIM_TYPE_NONE:
++      case RADEON_PRIM_TYPE_POINT:
++              return nr < 1;
++      case RADEON_PRIM_TYPE_LINE:
++              return (nr & 1) || nr == 0;
++      case RADEON_PRIM_TYPE_LINE_STRIP:
++              return nr < 2;
++      case RADEON_PRIM_TYPE_TRI_LIST:
++      case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
++      case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
++      case RADEON_PRIM_TYPE_RECT_LIST:
++              return nr % 3 || nr == 0;
++      case RADEON_PRIM_TYPE_TRI_FAN:
++      case RADEON_PRIM_TYPE_TRI_STRIP:
++              return nr < 3;
++      default:
++              return 1;
++      }
++}
++
++typedef struct {
++      unsigned int start;
++      unsigned int finish;
++      unsigned int prim;
++      unsigned int numverts;
++      unsigned int offset;
++      unsigned int vc_format;
++} drm_radeon_tcl_prim_t;
++
++static void radeon_cp_dispatch_vertex(struct drm_device * dev,
++                                    struct drm_buf * buf,
++                                    drm_radeon_tcl_prim_t * prim)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
++      int numverts = (int)prim->numverts;
++      int nbox = sarea_priv->nbox;
++      int i = 0;
++      RING_LOCALS;
++
++      DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
++                prim->prim,
++                prim->vc_format, prim->start, prim->finish, prim->numverts);
++
++      if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
++              DRM_ERROR("bad prim %x numverts %d\n",
++                        prim->prim, prim->numverts);
++              return;
++      }
++
++      do {
++              /* Emit the next cliprect */
++              if (i < nbox) {
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++              }
++
++              /* Emit the vertex buffer rendering commands */
++              BEGIN_RING(5);
++
++              OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
++              OUT_RING(offset);
++              OUT_RING(numverts);
++              OUT_RING(prim->vc_format);
++              OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
++                       RADEON_COLOR_ORDER_RGBA |
++                       RADEON_VTX_FMT_RADEON_MODE |
++                       (numverts << RADEON_NUM_VERTICES_SHIFT));
++
++              ADVANCE_RING();
++
++              i++;
++      } while (i < nbox);
++}
++
++static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
++      RING_LOCALS;
++
++      buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
++
++      /* Emit the vertex buffer age */
++      BEGIN_RING(2);
++      RADEON_DISPATCH_AGE(buf_priv->age);
++      ADVANCE_RING();
++
++      buf->pending = 1;
++      buf->used = 0;
++}
++
++static void radeon_cp_dispatch_indirect(struct drm_device * dev,
++                                      struct drm_buf * buf, int start, int end)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++      DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
++
++      if (start != end) {
++              int offset = (dev_priv->gart_buffers_offset
++                            + buf->offset + start);
++              int dwords = (end - start + 3) / sizeof(u32);
++
++              /* Indirect buffer data must be an even number of
++               * dwords, so if we've been given an odd number we must
++               * pad the data with a Type-2 CP packet.
++               */
++              if (dwords & 1) {
++                      u32 *data = (u32 *)
++                          ((char *)dev->agp_buffer_map->handle
++                           + buf->offset + start);
++                      data[dwords++] = RADEON_CP_PACKET2;
++              }
++
++              /* Fire off the indirect buffer */
++              BEGIN_RING(3);
++
++              OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
++              OUT_RING(offset);
++              OUT_RING(dwords);
++
++              ADVANCE_RING();
++      }
++}
++
++static void radeon_cp_dispatch_indices(struct drm_device * dev,
++                                     struct drm_buf * elt_buf,
++                                     drm_radeon_tcl_prim_t * prim)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int offset = dev_priv->gart_buffers_offset + prim->offset;
++      u32 *data;
++      int dwords;
++      int i = 0;
++      int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
++      int count = (prim->finish - start) / sizeof(u16);
++      int nbox = sarea_priv->nbox;
++
++      DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
++                prim->prim,
++                prim->vc_format,
++                prim->start, prim->finish, prim->offset, prim->numverts);
++
++      if (bad_prim_vertex_nr(prim->prim, count)) {
++              DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
++              return;
++      }
++
++      if (start >= prim->finish || (prim->start & 0x7)) {
++              DRM_ERROR("buffer prim %d\n", prim->prim);
++              return;
++      }
++
++      dwords = (prim->finish - prim->start + 3) / sizeof(u32);
++
++      data = (u32 *) ((char *)dev->agp_buffer_map->handle +
++                      elt_buf->offset + prim->start);
++
++      data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
++      data[1] = offset;
++      data[2] = prim->numverts;
++      data[3] = prim->vc_format;
++      data[4] = (prim->prim |
++                 RADEON_PRIM_WALK_IND |
++                 RADEON_COLOR_ORDER_RGBA |
++                 RADEON_VTX_FMT_RADEON_MODE |
++                 (count << RADEON_NUM_VERTICES_SHIFT));
++
++      do {
++              if (i < nbox)
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++
++              radeon_cp_dispatch_indirect(dev, elt_buf,
++                                          prim->start, prim->finish);
++
++              i++;
++      } while (i < nbox);
++
++}
++
++#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
++
++static int radeon_cp_dispatch_texture(struct drm_device * dev,
++                                    struct drm_file *file_priv,
++                                    drm_radeon_texture_t * tex,
++                                    drm_radeon_tex_image_t * image)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_buf *buf;
++      u32 format;
++      u32 *buffer;
++      const u8 __user *data;
++      int size, dwords, tex_width, blit_width, spitch;
++      u32 height;
++      int i;
++      u32 texpitch, microtile;
++      u32 offset, byte_offset;
++      RING_LOCALS;
++
++      if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
++              DRM_ERROR("Invalid destination offset\n");
++              return -EINVAL;
++      }
++
++      dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
++
++      /* Flush the pixel cache.  This ensures no pixel data gets mixed
++       * up with the texture data from the host data blit, otherwise
++       * part of the texture image may be corrupted.
++       */
++      BEGIN_RING(4);
++      RADEON_FLUSH_CACHE();
++      RADEON_WAIT_UNTIL_IDLE();
++      ADVANCE_RING();
++
++      /* The compiler won't optimize away a division by a variable,
++       * even if the only legal values are powers of two.  Thus, we'll
++       * use a shift instead.
++       */
++      switch (tex->format) {
++      case RADEON_TXFORMAT_ARGB8888:
++      case RADEON_TXFORMAT_RGBA8888:
++              format = RADEON_COLOR_FORMAT_ARGB8888;
++              tex_width = tex->width * 4;
++              blit_width = image->width * 4;
++              break;
++      case RADEON_TXFORMAT_AI88:
++      case RADEON_TXFORMAT_ARGB1555:
++      case RADEON_TXFORMAT_RGB565:
++      case RADEON_TXFORMAT_ARGB4444:
++      case RADEON_TXFORMAT_VYUY422:
++      case RADEON_TXFORMAT_YVYU422:
++              format = RADEON_COLOR_FORMAT_RGB565;
++              tex_width = tex->width * 2;
++              blit_width = image->width * 2;
++              break;
++      case RADEON_TXFORMAT_I8:
++      case RADEON_TXFORMAT_RGB332:
++              format = RADEON_COLOR_FORMAT_CI8;
++              tex_width = tex->width * 1;
++              blit_width = image->width * 1;
++              break;
++      default:
++              DRM_ERROR("invalid texture format %d\n", tex->format);
++              return -EINVAL;
++      }
++      spitch = blit_width >> 6;
++      if (spitch == 0 && image->height > 1)
++              return -EINVAL;
++
++      texpitch = tex->pitch;
++      if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
++              microtile = 1;
++              if (tex_width < 64) {
++                      texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
++                      /* we got tiled coordinates, untile them */
++                      image->x *= 2;
++              }
++      } else
++              microtile = 0;
++
++      /* this might fail for zero-sized uploads - are those illegal? */
++      if (!radeon_check_offset(dev_priv, tex->offset + image->height *
++                              blit_width - 1)) {
++              DRM_ERROR("Invalid final destination offset\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
++
++      do {
++              DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
++                        tex->offset >> 10, tex->pitch, tex->format,
++                        image->x, image->y, image->width, image->height);
++
++              /* Make a copy of some parameters in case we have to
++               * update them for a multi-pass texture blit.
++               */
++              height = image->height;
++              data = (const u8 __user *)image->data;
++
++              size = height * blit_width;
++
++              if (size > RADEON_MAX_TEXTURE_SIZE) {
++                      height = RADEON_MAX_TEXTURE_SIZE / blit_width;
++                      size = height * blit_width;
++              } else if (size < 4 && size > 0) {
++                      size = 4;
++              } else if (size == 0) {
++                      return 0;
++              }
++
++              buf = radeon_freelist_get(dev);
++              if (0 && !buf) {
++                      radeon_do_cp_idle(dev_priv);
++                      buf = radeon_freelist_get(dev);
++              }
++              if (!buf) {
++                      DRM_DEBUG("EAGAIN\n");
++                      if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
++                              return -EFAULT;
++                      return -EAGAIN;
++              }
++
++              /* Dispatch the indirect buffer.
++               */
++              buffer =
++                  (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
++              dwords = size / 4;
++
++#define RADEON_COPY_MT(_buf, _data, _width) \
++      do { \
++              if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
++                      DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
++                      return -EFAULT; \
++              } \
++      } while(0)
++
++              if (microtile) {
++                      /* texture micro tiling in use, minimum texture width is thus 16 bytes.
++                         however, we cannot use blitter directly for texture width < 64 bytes,
++                         since minimum tex pitch is 64 bytes and we need this to match
++                         the texture width, otherwise the blitter will tile it wrong.
++                         Thus, tiling manually in this case. Additionally, need to special
++                         case tex height = 1, since our actual image will have height 2
++                         and we need to ensure we don't read beyond the texture size
++                         from user space. */
++                      if (tex->height == 1) {
++                              if (tex_width >= 64 || tex_width <= 16) {
++                                      RADEON_COPY_MT(buffer, data,
++                                              (int)(tex_width * sizeof(u32)));
++                              } else if (tex_width == 32) {
++                                      RADEON_COPY_MT(buffer, data, 16);
++                                      RADEON_COPY_MT(buffer + 8,
++                                                     data + 16, 16);
++                              }
++                      } else if (tex_width >= 64 || tex_width == 16) {
++                              RADEON_COPY_MT(buffer, data,
++                                             (int)(dwords * sizeof(u32)));
++                      } else if (tex_width < 16) {
++                              for (i = 0; i < tex->height; i++) {
++                                      RADEON_COPY_MT(buffer, data, tex_width);
++                                      buffer += 4;
++                                      data += tex_width;
++                              }
++                      } else if (tex_width == 32) {
++                              /* TODO: make sure this works when not fitting in one buffer
++                                 (i.e. 32bytes x 2048...) */
++                              for (i = 0; i < tex->height; i += 2) {
++                                      RADEON_COPY_MT(buffer, data, 16);
++                                      data += 16;
++                                      RADEON_COPY_MT(buffer + 8, data, 16);
++                                      data += 16;
++                                      RADEON_COPY_MT(buffer + 4, data, 16);
++                                      data += 16;
++                                      RADEON_COPY_MT(buffer + 12, data, 16);
++                                      data += 16;
++                                      buffer += 16;
++                              }
++                      }
++              } else {
++                      if (tex_width >= 32) {
++                              /* Texture image width is larger than the minimum, so we
++                               * can upload it directly.
++                               */
++                              RADEON_COPY_MT(buffer, data,
++                                             (int)(dwords * sizeof(u32)));
++                      } else {
++                              /* Texture image width is less than the minimum, so we
++                               * need to pad out each image scanline to the minimum
++                               * width.
++                               */
++                              for (i = 0; i < tex->height; i++) {
++                                      RADEON_COPY_MT(buffer, data, tex_width);
++                                      buffer += 8;
++                                      data += tex_width;
++                              }
++                      }
++              }
++
++#undef RADEON_COPY_MT
++              byte_offset = (image->y & ~2047) * blit_width;
++              buf->file_priv = file_priv;
++              buf->used = size;
++              offset = dev_priv->gart_buffers_offset + buf->offset;
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
++              OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_BRUSH_NONE |
++                       (format << 8) |
++                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                       RADEON_ROP3_S |
++                       RADEON_DP_SRC_SOURCE_MEMORY |
++                       RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
++              OUT_RING((spitch << 22) | (offset >> 10));
++              OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
++              OUT_RING(0);
++              OUT_RING((image->x << 16) | (image->y % 2048));
++              OUT_RING((image->width << 16) | height);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++              ADVANCE_RING();
++              COMMIT_RING();
++
++              radeon_cp_discard_buffer(dev, buf);
++
++              /* Update the input parameters for next time */
++              image->y += height;
++              image->height -= height;
++              image->data = (const u8 __user *)image->data + size;
++      } while (image->height > 0);
++
++      /* Flush the pixel cache after the blit completes.  This ensures
++       * the texture data is written out to memory before rendering
++       * continues.
++       */
++      BEGIN_RING(4);
++      RADEON_FLUSH_CACHE();
++      RADEON_WAIT_UNTIL_2D_IDLE();
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      return 0;
++}
++
++static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(35);
++
++      OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
++      OUT_RING(0x00000000);
++
++      OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
++      for (i = 0; i < 32; i++) {
++              OUT_RING(stipple[i]);
++      }
++
++      ADVANCE_RING();
++}
++
++static void radeon_apply_surface_regs(int surf_index,
++                                    drm_radeon_private_t *dev_priv)
++{
++      if (!dev_priv->mmio)
++              return;
++
++      radeon_do_cp_idle(dev_priv);
++
++      RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
++                   dev_priv->surfaces[surf_index].flags);
++      RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
++                   dev_priv->surfaces[surf_index].lower);
++      RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
++                   dev_priv->surfaces[surf_index].upper);
++}
++
++/* Allocates a virtual surface
++ * doesn't always allocate a real surface, will stretch an existing
++ * surface when possible.
++ *
++ * Note that refcount can be at most 2, since during a free refcount=3
++ * might mean we have to allocate a new surface which might not always
++ * be available.
++ * For example : we allocate three contigous surfaces ABC. If B is
++ * freed, we suddenly need two surfaces to store A and C, which might
++ * not always be available.
++ */
++static int alloc_surface(drm_radeon_surface_alloc_t *new,
++                       drm_radeon_private_t *dev_priv,
++                       struct drm_file *file_priv)
++{
++      struct radeon_virt_surface *s;
++      int i;
++      int virt_surface_index;
++      uint32_t new_upper, new_lower;
++
++      new_lower = new->address;
++      new_upper = new_lower + new->size - 1;
++
++      /* sanity check */
++      if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
++          ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
++           RADEON_SURF_ADDRESS_FIXED_MASK)
++          || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
++              return -1;
++
++      /* make sure there is no overlap with existing surfaces */
++      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++              if ((dev_priv->surfaces[i].refcount != 0) &&
++                  (((new_lower >= dev_priv->surfaces[i].lower) &&
++                    (new_lower < dev_priv->surfaces[i].upper)) ||
++                   ((new_lower < dev_priv->surfaces[i].lower) &&
++                    (new_upper > dev_priv->surfaces[i].lower)))) {
++                      return -1;
++              }
++      }
++
++      /* find a virtual surface */
++      for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
++              if (dev_priv->virt_surfaces[i].file_priv == 0)
++                      break;
++      if (i == 2 * RADEON_MAX_SURFACES) {
++              return -1;
++      }
++      virt_surface_index = i;
++
++      /* try to reuse an existing surface */
++      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++              /* extend before */
++              if ((dev_priv->surfaces[i].refcount == 1) &&
++                  (new->flags == dev_priv->surfaces[i].flags) &&
++                  (new_upper + 1 == dev_priv->surfaces[i].lower)) {
++                      s = &(dev_priv->virt_surfaces[virt_surface_index]);
++                      s->surface_index = i;
++                      s->lower = new_lower;
++                      s->upper = new_upper;
++                      s->flags = new->flags;
++                      s->file_priv = file_priv;
++                      dev_priv->surfaces[i].refcount++;
++                      dev_priv->surfaces[i].lower = s->lower;
++                      radeon_apply_surface_regs(s->surface_index, dev_priv);
++                      return virt_surface_index;
++              }
++
++              /* extend after */
++              if ((dev_priv->surfaces[i].refcount == 1) &&
++                  (new->flags == dev_priv->surfaces[i].flags) &&
++                  (new_lower == dev_priv->surfaces[i].upper + 1)) {
++                      s = &(dev_priv->virt_surfaces[virt_surface_index]);
++                      s->surface_index = i;
++                      s->lower = new_lower;
++                      s->upper = new_upper;
++                      s->flags = new->flags;
++                      s->file_priv = file_priv;
++                      dev_priv->surfaces[i].refcount++;
++                      dev_priv->surfaces[i].upper = s->upper;
++                      radeon_apply_surface_regs(s->surface_index, dev_priv);
++                      return virt_surface_index;
++              }
++      }
++
++      /* okay, we need a new one */
++      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++              if (dev_priv->surfaces[i].refcount == 0) {
++                      s = &(dev_priv->virt_surfaces[virt_surface_index]);
++                      s->surface_index = i;
++                      s->lower = new_lower;
++                      s->upper = new_upper;
++                      s->flags = new->flags;
++                      s->file_priv = file_priv;
++                      dev_priv->surfaces[i].refcount = 1;
++                      dev_priv->surfaces[i].lower = s->lower;
++                      dev_priv->surfaces[i].upper = s->upper;
++                      dev_priv->surfaces[i].flags = s->flags;
++                      radeon_apply_surface_regs(s->surface_index, dev_priv);
++                      return virt_surface_index;
++              }
++      }
++
++      /* we didn't find anything */
++      return -1;
++}
++
++static int free_surface(struct drm_file *file_priv,
++                      drm_radeon_private_t * dev_priv,
++                      int lower)
++{
++      struct radeon_virt_surface *s;
++      int i;
++      /* find the virtual surface */
++      for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
++              s = &(dev_priv->virt_surfaces[i]);
++              if (s->file_priv) {
++                      if ((lower == s->lower) && (file_priv == s->file_priv))
++                      {
++                              if (dev_priv->surfaces[s->surface_index].
++                                  lower == s->lower)
++                                      dev_priv->surfaces[s->surface_index].
++                                          lower = s->upper;
++
++                              if (dev_priv->surfaces[s->surface_index].
++                                  upper == s->upper)
++                                      dev_priv->surfaces[s->surface_index].
++                                          upper = s->lower;
++
++                              dev_priv->surfaces[s->surface_index].refcount--;
++                              if (dev_priv->surfaces[s->surface_index].
++                                  refcount == 0)
++                                      dev_priv->surfaces[s->surface_index].
++                                          flags = 0;
++                              s->file_priv = NULL;
++                              radeon_apply_surface_regs(s->surface_index,
++                                                        dev_priv);
++                              return 0;
++                      }
++              }
++      }
++      return 1;
++}
++
++static void radeon_surfaces_release(struct drm_file *file_priv,
++                                  drm_radeon_private_t * dev_priv)
++{
++      int i;
++      for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
++              if (dev_priv->virt_surfaces[i].file_priv == file_priv)
++                      free_surface(file_priv, dev_priv,
++                                   dev_priv->virt_surfaces[i].lower);
++      }
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_surface_alloc_t *alloc = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (alloc_surface(alloc, dev_priv, file_priv) == -1)
++              return -EINVAL;
++      else
++              return 0;
++}
++
++static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_surface_free_t *memfree = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (free_surface(file_priv, dev_priv, memfree->address))
++              return -EINVAL;
++      else
++              return 0;
++}
++
++static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_radeon_clear_t *clear = data;
++      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
++
++      if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
++                             sarea_priv->nbox * sizeof(depth_boxes[0])))
++              return -EFAULT;
++
++      radeon_cp_dispatch_clear(dev, clear, depth_boxes);
++
++      COMMIT_RING();
++      return 0;
++}
++
++/* Not sure why this isn't set all the time:
++ */
++static int radeon_do_init_pageflip(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(6);
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
++      OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
++               RADEON_CRTC_OFFSET_FLIP_CNTL);
++      OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
++      OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
++               RADEON_CRTC_OFFSET_FLIP_CNTL);
++      ADVANCE_RING();
++
++      dev_priv->page_flipping = 1;
++
++      if (dev_priv->sarea_priv->pfCurrentPage != 1)
++              dev_priv->sarea_priv->pfCurrentPage = 0;
++
++      return 0;
++}
++
++/* Swapping and flipping are different operations, need different ioctls.
++ * They can & should be intermixed to support multiple 3d windows.
++ */
++static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (!dev_priv->page_flipping)
++              radeon_do_init_pageflip(dev);
++
++      radeon_cp_dispatch_flip(dev);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
++
++      radeon_cp_dispatch_swap(dev);
++      dev_priv->sarea_priv->ctx_owner = 0;
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_vertex_t *vertex = data;
++      drm_radeon_tcl_prim_t prim;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      sarea_priv = dev_priv->sarea_priv;
++
++      DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
++                DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        vertex->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
++              DRM_ERROR("buffer prim %d\n", vertex->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[vertex->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", vertex->idx);
++              return -EINVAL;
++      }
++
++      /* Build up a prim_t record:
++       */
++      if (vertex->count) {
++              buf->used = vertex->count;      /* not used? */
++
++              if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
++                      if (radeon_emit_state(dev_priv, file_priv,
++                                            &sarea_priv->context_state,
++                                            sarea_priv->tex_state,
++                                            sarea_priv->dirty)) {
++                              DRM_ERROR("radeon_emit_state failed\n");
++                              return -EINVAL;
++                      }
++
++                      sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
++                                             RADEON_UPLOAD_TEX1IMAGES |
++                                             RADEON_UPLOAD_TEX2IMAGES |
++                                             RADEON_REQUIRE_QUIESCENCE);
++              }
++
++              prim.start = 0;
++              prim.finish = vertex->count;    /* unused */
++              prim.prim = vertex->prim;
++              prim.numverts = vertex->count;
++              prim.vc_format = dev_priv->sarea_priv->vc_format;
++
++              radeon_cp_dispatch_vertex(dev, buf, &prim);
++      }
++
++      if (vertex->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_indices_t *elts = data;
++      drm_radeon_tcl_prim_t prim;
++      int count;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++      sarea_priv = dev_priv->sarea_priv;
++
++      DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
++                DRM_CURRENTPID, elts->idx, elts->start, elts->end,
++                elts->discard);
++
++      if (elts->idx < 0 || elts->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        elts->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
++              DRM_ERROR("buffer prim %d\n", elts->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[elts->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", elts->idx);
++              return -EINVAL;
++      }
++
++      count = (elts->end - elts->start) / sizeof(u16);
++      elts->start -= RADEON_INDEX_PRIM_OFFSET;
++
++      if (elts->start & 0x7) {
++              DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
++              return -EINVAL;
++      }
++      if (elts->start < buf->used) {
++              DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
++              return -EINVAL;
++      }
++
++      buf->used = elts->end;
++
++      if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
++              if (radeon_emit_state(dev_priv, file_priv,
++                                    &sarea_priv->context_state,
++                                    sarea_priv->tex_state,
++                                    sarea_priv->dirty)) {
++                      DRM_ERROR("radeon_emit_state failed\n");
++                      return -EINVAL;
++              }
++
++              sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
++                                     RADEON_UPLOAD_TEX1IMAGES |
++                                     RADEON_UPLOAD_TEX2IMAGES |
++                                     RADEON_REQUIRE_QUIESCENCE);
++      }
++
++      /* Build up a prim_t record:
++       */
++      prim.start = elts->start;
++      prim.finish = elts->end;
++      prim.prim = elts->prim;
++      prim.offset = 0;        /* offset from start of dma buffers */
++      prim.numverts = RADEON_MAX_VB_VERTS;    /* duh */
++      prim.vc_format = dev_priv->sarea_priv->vc_format;
++
++      radeon_cp_dispatch_indices(dev, buf, &prim);
++      if (elts->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_texture_t *tex = data;
++      drm_radeon_tex_image_t image;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (tex->image == NULL) {
++              DRM_ERROR("null texture image!\n");
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_FROM_USER(&image,
++                             (drm_radeon_tex_image_t __user *) tex->image,
++                             sizeof(image)))
++              return -EFAULT;
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
++
++      return ret;
++}
++
++static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_stipple_t *stipple = data;
++      u32 mask[32];
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
++              return -EFAULT;
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      radeon_cp_dispatch_stipple(dev, mask);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_indirect_t *indirect = data;
++      RING_LOCALS;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
++                indirect->idx, indirect->start, indirect->end,
++                indirect->discard);
++
++      if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        indirect->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      buf = dma->buflist[indirect->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", indirect->idx);
++              return -EINVAL;
++      }
++
++      if (indirect->start < buf->used) {
++              DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
++                        indirect->start, buf->used);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf->used = indirect->end;
++
++      /* Wait for the 3D stream to idle before the indirect buffer
++       * containing 2D acceleration commands is processed.
++       */
++      BEGIN_RING(2);
++
++      RADEON_WAIT_UNTIL_3D_IDLE();
++
++      ADVANCE_RING();
++
++      /* Dispatch the indirect buffer full of commands from the
++       * X server.  This is insecure and is thus only available to
++       * privileged clients.
++       */
++      radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
++      if (indirect->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_vertex2_t *vertex = data;
++      int i;
++      unsigned char laststate;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      sarea_priv = dev_priv->sarea_priv;
++
++      DRM_DEBUG("pid=%d index=%d discard=%d\n",
++                DRM_CURRENTPID, vertex->idx, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        vertex->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[vertex->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", vertex->idx);
++              return -EINVAL;
++      }
++
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
++              return -EINVAL;
++
++      for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
++              drm_radeon_prim_t prim;
++              drm_radeon_tcl_prim_t tclprim;
++
++              if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
++                      return -EFAULT;
++
++              if (prim.stateidx != laststate) {
++                      drm_radeon_state_t state;
++
++                      if (DRM_COPY_FROM_USER(&state,
++                                             &vertex->state[prim.stateidx],
++                                             sizeof(state)))
++                              return -EFAULT;
++
++                      if (radeon_emit_state2(dev_priv, file_priv, &state)) {
++                              DRM_ERROR("radeon_emit_state2 failed\n");
++                              return -EINVAL;
++                      }
++
++                      laststate = prim.stateidx;
++              }
++
++              tclprim.start = prim.start;
++              tclprim.finish = prim.finish;
++              tclprim.prim = prim.prim;
++              tclprim.vc_format = prim.vc_format;
++
++              if (prim.prim & RADEON_PRIM_WALK_IND) {
++                      tclprim.offset = prim.numverts * 64;
++                      tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
++
++                      radeon_cp_dispatch_indices(dev, buf, &tclprim);
++              } else {
++                      tclprim.numverts = prim.numverts;
++                      tclprim.offset = 0;     /* not used */
++
++                      radeon_cp_dispatch_vertex(dev, buf, &tclprim);
++              }
++
++              if (sarea_priv->nbox == 1)
++                      sarea_priv->nbox = 0;
++      }
++
++      if (vertex->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
++                             struct drm_file *file_priv,
++                             drm_radeon_cmd_header_t header,
++                             drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int id = (int)header.packet.packet_id;
++      int sz, reg;
++      int *data = (int *)cmdbuf->buf;
++      RING_LOCALS;
++
++      if (id >= RADEON_MAX_STATE_PACKETS)
++              return -EINVAL;
++
++      sz = packet[id].len;
++      reg = packet[id].start;
++
++      if (sz * sizeof(int) > cmdbuf->bufsz) {
++              DRM_ERROR("Packet size provided larger than data provided\n");
++              return -EINVAL;
++      }
++
++      if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
++              DRM_ERROR("Packet verification failed\n");
++              return -EINVAL;
++      }
++
++      BEGIN_RING(sz + 1);
++      OUT_RING(CP_PACKET0(reg, (sz - 1)));
++      OUT_RING_TABLE(data, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
++                                        drm_radeon_cmd_header_t header,
++                                        drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.scalars.count;
++      int start = header.scalars.offset;
++      int stride = header.scalars.stride;
++      RING_LOCALS;
++
++      BEGIN_RING(3 + sz);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
++      OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++/* God this is ugly
++ */
++static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
++                                         drm_radeon_cmd_header_t header,
++                                         drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.scalars.count;
++      int start = ((unsigned int)header.scalars.offset) + 0x100;
++      int stride = header.scalars.stride;
++      RING_LOCALS;
++
++      BEGIN_RING(3 + sz);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
++      OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
++                                        drm_radeon_cmd_header_t header,
++                                        drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.vectors.count;
++      int start = header.vectors.offset;
++      int stride = header.vectors.stride;
++      RING_LOCALS;
++
++      BEGIN_RING(5 + sz);
++      OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
++      OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
++                                        drm_radeon_cmd_header_t header,
++                                        drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.veclinear.count * 4;
++      int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
++      RING_LOCALS;
++
++      if (!sz)
++              return 0;
++      if (sz * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      BEGIN_RING(5 + sz);
++      OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
++      OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static int radeon_emit_packet3(struct drm_device * dev,
++                             struct drm_file *file_priv,
++                             drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      unsigned int cmdsz;
++      int ret;
++      RING_LOCALS;
++
++      DRM_DEBUG("\n");
++
++      if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
++                                                cmdbuf, &cmdsz))) {
++              DRM_ERROR("Packet verification failed\n");
++              return ret;
++      }
++
++      BEGIN_RING(cmdsz);
++      OUT_RING_TABLE(cmdbuf->buf, cmdsz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += cmdsz * 4;
++      cmdbuf->bufsz -= cmdsz * 4;
++      return 0;
++}
++
++static int radeon_emit_packet3_cliprect(struct drm_device *dev,
++                                      struct drm_file *file_priv,
++                                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                                      int orig_nbox)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect box;
++      unsigned int cmdsz;
++      int ret;
++      struct drm_clip_rect __user *boxes = cmdbuf->boxes;
++      int i = 0;
++      RING_LOCALS;
++
++      DRM_DEBUG("\n");
++
++      if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
++                                                cmdbuf, &cmdsz))) {
++              DRM_ERROR("Packet verification failed\n");
++              return ret;
++      }
++
++      if (!orig_nbox)
++              goto out;
++
++      do {
++              if (i < cmdbuf->nbox) {
++                      if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
++                              return -EFAULT;
++                      /* FIXME The second and subsequent times round
++                       * this loop, send a WAIT_UNTIL_3D_IDLE before
++                       * calling emit_clip_rect(). This fixes a
++                       * lockup on fast machines when sending
++                       * several cliprects with a cmdbuf, as when
++                       * waving a 2D window over a 3D
++                       * window. Something in the commands from user
++                       * space seems to hang the card when they're
++                       * sent several times in a row. That would be
++                       * the correct place to fix it but this works
++                       * around it until I can figure that out - Tim
++                       * Smith */
++                      if (i) {
++                              BEGIN_RING(2);
++                              RADEON_WAIT_UNTIL_3D_IDLE();
++                              ADVANCE_RING();
++                      }
++                      radeon_emit_clip_rect(dev_priv, &box);
++              }
++
++              BEGIN_RING(cmdsz);
++              OUT_RING_TABLE(cmdbuf->buf, cmdsz);
++              ADVANCE_RING();
++
++      } while (++i < cmdbuf->nbox);
++      if (cmdbuf->nbox == 1)
++              cmdbuf->nbox = 0;
++
++      out:
++      cmdbuf->buf += cmdsz * 4;
++      cmdbuf->bufsz -= cmdsz * 4;
++      return 0;
++}
++
++static int radeon_emit_wait(struct drm_device * dev, int flags)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      DRM_DEBUG("%x\n", flags);
++      switch (flags) {
++      case RADEON_WAIT_2D:
++              BEGIN_RING(2);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++              ADVANCE_RING();
++              break;
++      case RADEON_WAIT_3D:
++              BEGIN_RING(2);
++              RADEON_WAIT_UNTIL_3D_IDLE();
++              ADVANCE_RING();
++              break;
++      case RADEON_WAIT_2D | RADEON_WAIT_3D:
++              BEGIN_RING(2);
++              RADEON_WAIT_UNTIL_IDLE();
++              ADVANCE_RING();
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf = NULL;
++      int idx;
++      drm_radeon_kcmd_buffer_t *cmdbuf = data;
++      drm_radeon_cmd_header_t header;
++      int orig_nbox, orig_bufsz;
++      char *kbuf = NULL;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
++              return -EINVAL;
++      }
++
++      /* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
++       * races between checking values and using those values in other code,
++       * and simply to avoid a lot of function calls to copy in data.
++       */
++      orig_bufsz = cmdbuf->bufsz;
++      if (orig_bufsz != 0) {
++              kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
++              if (kbuf == NULL)
++                      return -ENOMEM;
++              if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
++                                     cmdbuf->bufsz)) {
++                      drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++                      return -EFAULT;
++              }
++              cmdbuf->buf = kbuf;
++      }
++
++      orig_nbox = cmdbuf->nbox;
++
++      if (dev_priv->chip_family >= CHIP_R300) {
++              int temp;
++              temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
++
++              if (orig_bufsz != 0)
++                      drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++
++              return temp;
++      }
++
++      /* microcode_version != r300 */
++      while (cmdbuf->bufsz >= sizeof(header)) {
++
++              header.i = *(int *)cmdbuf->buf;
++              cmdbuf->buf += sizeof(header);
++              cmdbuf->bufsz -= sizeof(header);
++
++              switch (header.header.cmd_type) {
++              case RADEON_CMD_PACKET:
++                      DRM_DEBUG("RADEON_CMD_PACKET\n");
++                      if (radeon_emit_packets
++                          (dev_priv, file_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_packets failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_SCALARS:
++                      DRM_DEBUG("RADEON_CMD_SCALARS\n");
++                      if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_scalars failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_VECTORS:
++                      DRM_DEBUG("RADEON_CMD_VECTORS\n");
++                      if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_vectors failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_DMA_DISCARD:
++                      DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
++                      idx = header.dma.buf_idx;
++                      if (idx < 0 || idx >= dma->buf_count) {
++                              DRM_ERROR("buffer index %d (of %d max)\n",
++                                        idx, dma->buf_count - 1);
++                              goto err;
++                      }
++
++                      buf = dma->buflist[idx];
++                      if (buf->file_priv != file_priv || buf->pending) {
++                              DRM_ERROR("bad buffer %p %p %d\n",
++                                        buf->file_priv, file_priv,
++                                        buf->pending);
++                              goto err;
++                      }
++
++                      radeon_cp_discard_buffer(dev, buf);
++                      break;
++
++              case RADEON_CMD_PACKET3:
++                      DRM_DEBUG("RADEON_CMD_PACKET3\n");
++                      if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_packet3 failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_PACKET3_CLIP:
++                      DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
++                      if (radeon_emit_packet3_cliprect
++                          (dev, file_priv, cmdbuf, orig_nbox)) {
++                              DRM_ERROR("radeon_emit_packet3_clip failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_SCALARS2:
++                      DRM_DEBUG("RADEON_CMD_SCALARS2\n");
++                      if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_scalars2 failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_WAIT:
++                      DRM_DEBUG("RADEON_CMD_WAIT\n");
++                      if (radeon_emit_wait(dev, header.wait.flags)) {
++                              DRM_ERROR("radeon_emit_wait failed\n");
++                              goto err;
++                      }
++                      break;
++              case RADEON_CMD_VECLINEAR:
++                      DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
++                      if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_veclinear failed\n");
++                              goto err;
++                      }
++                      break;
++
++              default:
++                      DRM_ERROR("bad cmd_type %d at %p\n",
++                                header.header.cmd_type,
++                                cmdbuf->buf - sizeof(header));
++                      goto err;
++              }
++      }
++
++      if (orig_bufsz != 0)
++              drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++
++      DRM_DEBUG("DONE\n");
++      COMMIT_RING();
++      return 0;
++
++      err:
++      if (orig_bufsz != 0)
++              drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++      return -EINVAL;
++}
++
++static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      switch (param->param) {
++      case RADEON_PARAM_GART_BUFFER_OFFSET:
++              value = dev_priv->gart_buffers_offset;
++              break;
++      case RADEON_PARAM_LAST_FRAME:
++              dev_priv->stats.last_frame_reads++;
++              value = GET_SCRATCH(0);
++              break;
++      case RADEON_PARAM_LAST_DISPATCH:
++              value = GET_SCRATCH(1);
++              break;
++      case RADEON_PARAM_LAST_CLEAR:
++              dev_priv->stats.last_clear_reads++;
++              value = GET_SCRATCH(2);
++              break;
++      case RADEON_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      case RADEON_PARAM_GART_BASE:
++              value = dev_priv->gart_vm_start;
++              break;
++      case RADEON_PARAM_REGISTER_HANDLE:
++              value = dev_priv->mmio->offset;
++              break;
++      case RADEON_PARAM_STATUS_HANDLE:
++              value = dev_priv->ring_rptr_offset;
++              break;
++#ifndef __LP64__
++              /*
++               * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
++               * pointer which can't fit into an int-sized variable.  According to
++               * Michel Dänzer, the ioctl() is only used on embedded platforms, so
++               * not supporting it shouldn't be a problem.  If the same functionality
++               * is needed on 64-bit platforms, a new ioctl() would have to be added,
++               * so backwards-compatibility for the embedded platforms can be
++               * maintained.  --davidm 4-Feb-2004.
++               */
++      case RADEON_PARAM_SAREA_HANDLE:
++              /* The lock is the first dword in the sarea. */
++              value = (long)dev->lock.hw_lock;
++              break;
++#endif
++      case RADEON_PARAM_GART_TEX_HANDLE:
++              value = dev_priv->gart_textures_offset;
++              break;
++      case RADEON_PARAM_SCRATCH_OFFSET:
++              if (!dev_priv->writeback_works)
++                      return -EINVAL;
++              value = RADEON_SCRATCH_REG_OFFSET;
++              break;
++
++      case RADEON_PARAM_CARD_TYPE:
++              if (dev_priv->flags & RADEON_IS_PCIE)
++                      value = RADEON_CARD_PCIE;
++              else if (dev_priv->flags & RADEON_IS_AGP)
++                      value = RADEON_CARD_AGP;
++              else
++                      value = RADEON_CARD_PCI;
++              break;
++      case RADEON_PARAM_VBLANK_CRTC:
++              value = radeon_vblank_crtc_get(dev);
++              break;
++      case RADEON_PARAM_FB_LOCATION:
++              value = radeon_read_fb_location(dev_priv);
++              break;
++      case RADEON_PARAM_NUM_GB_PIPES:
++              value = dev_priv->num_gb_pipes;
++              break;
++      default:
++              DRM_DEBUG( "Invalid parameter %d\n", param->param );
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_setparam_t *sp = data;
++      struct drm_radeon_driver_file_fields *radeon_priv;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (sp->param) {
++      case RADEON_SETPARAM_FB_LOCATION:
++              radeon_priv = file_priv->driver_priv;
++              radeon_priv->radeon_fb_delta = dev_priv->fb_location -
++                  sp->value;
++              break;
++      case RADEON_SETPARAM_SWITCH_TILING:
++              if (sp->value == 0) {
++                      DRM_DEBUG("color tiling disabled\n");
++                      dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
++                      dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
++                      if (dev_priv->sarea_priv)
++                              dev_priv->sarea_priv->tiling_enabled = 0;
++              } else if (sp->value == 1) {
++                      DRM_DEBUG("color tiling enabled\n");
++                      dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
++                      dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
++                      if (dev_priv->sarea_priv)
++                              dev_priv->sarea_priv->tiling_enabled = 1;
++              }
++              break;
++      case RADEON_SETPARAM_PCIGART_LOCATION:
++              dev_priv->pcigart_offset = sp->value;
++              dev_priv->pcigart_offset_set = 1;
++              break;
++      case RADEON_SETPARAM_NEW_MEMMAP:
++              dev_priv->new_memmap = sp->value;
++              break;
++      case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
++              dev_priv->gart_info.table_size = sp->value;
++              if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
++                      dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
++              break;
++      case RADEON_SETPARAM_VBLANK_CRTC:
++              return radeon_vblank_crtc_set(dev, sp->value);
++              break;
++      default:
++              DRM_DEBUG("Invalid parameter %d\n", sp->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* When a client dies:
++ *    - Check for and clean up flipped page state
++ *    - Free any alloced GART memory.
++ *    - Free any alloced radeon surfaces.
++ *
++ * DRM infrastructure takes care of reclaiming dma buffers.
++ */
++void radeon_driver_preclose(struct drm_device *dev,
++                          struct drm_file *file_priv)
++{
++      if (dev->dev_private) {
++              drm_radeon_private_t *dev_priv = dev->dev_private;
++              dev_priv->page_flipping = 0;
++              radeon_mem_release(file_priv, dev_priv->gart_heap);
++              radeon_mem_release(file_priv, dev_priv->fb_heap);
++              radeon_surfaces_release(file_priv, dev_priv);
++      }
++}
++
++void radeon_driver_lastclose(struct drm_device *dev)
++{
++      if (dev->dev_private) {
++              drm_radeon_private_t *dev_priv = dev->dev_private;
++
++              if (dev_priv->sarea_priv &&
++                  dev_priv->sarea_priv->pfCurrentPage != 0)
++                      radeon_cp_dispatch_flip(dev);
++      }
++
++      radeon_do_release(dev);
++}
++
++int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_radeon_driver_file_fields *radeon_priv;
++
++      DRM_DEBUG("\n");
++      radeon_priv =
++          (struct drm_radeon_driver_file_fields *)
++          drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
++
++      if (!radeon_priv)
++              return -ENOMEM;
++
++      file_priv->driver_priv = radeon_priv;
++
++      if (dev_priv)
++              radeon_priv->radeon_fb_delta = dev_priv->fb_location;
++      else
++              radeon_priv->radeon_fb_delta = 0;
++      return 0;
++}
++
++void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_radeon_driver_file_fields *radeon_priv =
++          file_priv->driver_priv;
++
++      drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
++}
++
++struct drm_ioctl_desc radeon_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
++};
++
++int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_bci.c git-nokia/drivers/gpu/drm-tungsten/savage_bci.c
+--- git/drivers/gpu/drm-tungsten/savage_bci.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_bci.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1092 @@
++/* savage_bci.c -- BCI support for Savage
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "savage_drm.h"
++#include "savage_drv.h"
++
++/* Need a long timeout for shadow status updates can take a while
++ * and so can waiting for events when the queue is full. */
++#define SAVAGE_DEFAULT_USEC_TIMEOUT   1000000 /* 1s */
++#define SAVAGE_EVENT_USEC_TIMEOUT     5000000 /* 5s */
++#define SAVAGE_FREELIST_DEBUG         0
++
++static int savage_do_cleanup_bci(struct drm_device *dev);
++
++static int
++savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      uint32_t mask = dev_priv->status_used_mask;
++      uint32_t threshold = dev_priv->bci_threshold_hi;
++      uint32_t status;
++      int i;
++
++#if SAVAGE_BCI_DEBUG
++      if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
++              DRM_ERROR("Trying to emit %d words "
++                        "(more than guaranteed space in COB)\n", n);
++#endif
++
++      for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++              DRM_MEMORYBARRIER();
++              status = dev_priv->status_ptr[0];
++              if ((status & mask) < threshold)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
++#endif
++      return -EBUSY;
++}
++
++static int
++savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++              status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
++              if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x\n", status);
++#endif
++      return -EBUSY;
++}
++
++static int
++savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++              status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
++              if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x\n", status);
++#endif
++      return -EBUSY;
++}
++
++/*
++ * Waiting for events.
++ *
++ * The BIOSresets the event tag to 0 on mode changes. Therefore we
++ * never emit 0 to the event tag. If we find a 0 event tag we know the
++ * BIOS stomped on it and return success assuming that the BIOS waited
++ * for engine idle.
++ *
++ * Note: if the Xserver uses the event tag it has to follow the same
++ * rule. Otherwise there may be glitches every 2^16 events.
++ */
++static int
++savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
++{
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
++              DRM_MEMORYBARRIER();
++              status = dev_priv->status_ptr[1];
++              if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
++                  (status & 0xffff) == 0)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
++#endif
++
++      return -EBUSY;
++}
++
++static int
++savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
++{
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
++              status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
++              if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
++                  (status & 0xffff) == 0)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
++#endif
++
++      return -EBUSY;
++}
++
++uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
++                             unsigned int flags)
++{
++      uint16_t count;
++      BCI_LOCALS;
++
++      if (dev_priv->status_ptr) {
++              /* coordinate with Xserver */
++              count = dev_priv->status_ptr[1023];
++              if (count < dev_priv->event_counter)
++                      dev_priv->event_wrap++;
++      } else {
++              count = dev_priv->event_counter;
++      }
++      count = (count + 1) & 0xffff;
++      if (count == 0) {
++              count++; /* See the comment above savage_wait_event_*. */
++              dev_priv->event_wrap++;
++      }
++      dev_priv->event_counter = count;
++      if (dev_priv->status_ptr)
++              dev_priv->status_ptr[1023] = (uint32_t)count;
++
++      if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
++              unsigned int wait_cmd = BCI_CMD_WAIT;
++              if ((flags & SAVAGE_WAIT_2D))
++                      wait_cmd |= BCI_CMD_WAIT_2D;
++              if ((flags & SAVAGE_WAIT_3D))
++                      wait_cmd |= BCI_CMD_WAIT_3D;
++              BEGIN_BCI(2);
++              BCI_WRITE(wait_cmd);
++      } else {
++              BEGIN_BCI(1);
++      }
++      BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
++
++      return count;
++}
++
++/*
++ * Freelist management
++ */
++static int savage_freelist_init(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_savage_buf_priv_t *entry;
++      int i;
++      DRM_DEBUG("count=%d\n", dma->buf_count);
++
++      dev_priv->head.next = &dev_priv->tail;
++      dev_priv->head.prev = NULL;
++      dev_priv->head.buf = NULL;
++
++      dev_priv->tail.next = NULL;
++      dev_priv->tail.prev = &dev_priv->head;
++      dev_priv->tail.buf = NULL;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              entry = buf->dev_private;
++
++              SET_AGE(&entry->age, 0, 0);
++              entry->buf = buf;
++
++              entry->next = dev_priv->head.next;
++              entry->prev = &dev_priv->head;
++              dev_priv->head.next->prev = entry;
++              dev_priv->head.next = entry;
++      }
++
++      return 0;
++}
++
++static struct drm_buf *savage_freelist_get(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
++      uint16_t event;
++      unsigned int wrap;
++      DRM_DEBUG("\n");
++
++      UPDATE_EVENT_COUNTER();
++      if (dev_priv->status_ptr)
++              event = dev_priv->status_ptr[1] & 0xffff;
++      else
++              event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++      wrap = dev_priv->event_wrap;
++      if (event > dev_priv->event_counter)
++              wrap--; /* hardware hasn't passed the last wrap yet */
++
++      DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
++      DRM_DEBUG("   head=0x%04x %d\n", event, wrap);
++
++      if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
++              drm_savage_buf_priv_t *next = tail->next;
++              drm_savage_buf_priv_t *prev = tail->prev;
++              prev->next = next;
++              next->prev = prev;
++              tail->next = tail->prev = NULL;
++              return tail->buf;
++      }
++
++      DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
++      return NULL;
++}
++
++void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
++
++      DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
++
++      if (entry->next != NULL || entry->prev != NULL) {
++              DRM_ERROR("entry already on freelist.\n");
++              return;
++      }
++
++      prev = &dev_priv->head;
++      next = prev->next;
++      prev->next = entry;
++      next->prev = entry;
++      entry->prev = prev;
++      entry->next = next;
++}
++
++/*
++ * Command DMA
++ */
++static int savage_dma_init(drm_savage_private_t *dev_priv)
++{
++      unsigned int i;
++
++      dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
++              (SAVAGE_DMA_PAGE_SIZE*4);
++      dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
++                                      dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
++      if (dev_priv->dma_pages == NULL)
++              return -ENOMEM;
++
++      for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
++              SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
++              dev_priv->dma_pages[i].used = 0;
++              dev_priv->dma_pages[i].flushed = 0;
++      }
++      SET_AGE(&dev_priv->last_dma_age, 0, 0);
++
++      dev_priv->first_dma_page = 0;
++      dev_priv->current_dma_page = 0;
++
++      return 0;
++}
++
++void savage_dma_reset(drm_savage_private_t *dev_priv)
++{
++      uint16_t event;
++      unsigned int wrap, i;
++      event = savage_bci_emit_event(dev_priv, 0);
++      wrap = dev_priv->event_wrap;
++      for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
++              SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
++              dev_priv->dma_pages[i].used = 0;
++              dev_priv->dma_pages[i].flushed = 0;
++      }
++      SET_AGE(&dev_priv->last_dma_age, event, wrap);
++      dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
++}
++
++void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
++{
++      uint16_t event;
++      unsigned int wrap;
++
++      /* Faked DMA buffer pages don't age. */
++      if (dev_priv->cmd_dma == &dev_priv->fake_dma)
++              return;
++
++      UPDATE_EVENT_COUNTER();
++      if (dev_priv->status_ptr)
++              event = dev_priv->status_ptr[1] & 0xffff;
++      else
++              event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++      wrap = dev_priv->event_wrap;
++      if (event > dev_priv->event_counter)
++              wrap--; /* hardware hasn't passed the last wrap yet */
++
++      if (dev_priv->dma_pages[page].age.wrap > wrap ||
++          (dev_priv->dma_pages[page].age.wrap == wrap &&
++           dev_priv->dma_pages[page].age.event > event)) {
++              if (dev_priv->wait_evnt(dev_priv,
++                                      dev_priv->dma_pages[page].age.event)
++                  < 0)
++                      DRM_ERROR("wait_evnt failed!\n");
++      }
++}
++
++uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      unsigned int cur = dev_priv->current_dma_page;
++      unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
++              dev_priv->dma_pages[cur].used;
++      unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
++              SAVAGE_DMA_PAGE_SIZE;
++      uint32_t *dma_ptr;
++      unsigned int i;
++
++      DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
++                cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
++
++      if (cur + nr_pages < dev_priv->nr_dma_pages) {
++              dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++                  cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
++              if (n < rest)
++                      rest = n;
++              dev_priv->dma_pages[cur].used += rest;
++              n -= rest;
++              cur++;
++      } else {
++              dev_priv->dma_flush(dev_priv);
++              nr_pages =
++                  (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
++              for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
++                      dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
++                      dev_priv->dma_pages[i].used = 0;
++                      dev_priv->dma_pages[i].flushed = 0;
++              }
++              dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
++              dev_priv->first_dma_page = cur = 0;
++      }
++      for (i = cur; nr_pages > 0; ++i, --nr_pages) {
++#if SAVAGE_DMA_DEBUG
++              if (dev_priv->dma_pages[i].used) {
++                      DRM_ERROR("unflushed page %u: used=%u\n",
++                                i, dev_priv->dma_pages[i].used);
++              }
++#endif
++              if (n > SAVAGE_DMA_PAGE_SIZE)
++                      dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
++              else
++                      dev_priv->dma_pages[i].used = n;
++              n -= SAVAGE_DMA_PAGE_SIZE;
++      }
++      dev_priv->current_dma_page = --i;
++
++      DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
++                i, dev_priv->dma_pages[i].used, n);
++
++      savage_dma_wait(dev_priv, dev_priv->current_dma_page);
++
++      return dma_ptr;
++}
++
++static void savage_dma_flush(drm_savage_private_t *dev_priv)
++{
++      unsigned int first = dev_priv->first_dma_page;
++      unsigned int cur = dev_priv->current_dma_page;
++      uint16_t event;
++      unsigned int wrap, pad, align, len, i;
++      unsigned long phys_addr;
++      BCI_LOCALS;
++
++      if (first == cur &&
++          dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
++              return;
++
++      /* pad length to multiples of 2 entries
++       * align start of next DMA block to multiles of 8 entries */
++      pad = -dev_priv->dma_pages[cur].used & 1;
++      align = -(dev_priv->dma_pages[cur].used + pad) & 7;
++
++      DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
++                "pad=%u, align=%u\n",
++                first, cur, dev_priv->dma_pages[first].flushed,
++                dev_priv->dma_pages[cur].used, pad, align);
++
++      /* pad with noops */
++      if (pad) {
++              uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++                  cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
++              dev_priv->dma_pages[cur].used += pad;
++              while (pad != 0) {
++                      *dma_ptr++ = BCI_CMD_WAIT;
++                      pad--;
++              }
++      }
++
++      DRM_MEMORYBARRIER();
++
++      /* do flush ... */
++      phys_addr = dev_priv->cmd_dma->offset +
++              (first * SAVAGE_DMA_PAGE_SIZE +
++               dev_priv->dma_pages[first].flushed) * 4;
++      len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
++          dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
++
++      DRM_DEBUG("phys_addr=%lx, len=%u\n",
++                phys_addr | dev_priv->dma_type, len);
++
++      BEGIN_BCI(3);
++      BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
++      BCI_WRITE(phys_addr | dev_priv->dma_type);
++      BCI_DMA(len);
++
++      /* fix alignment of the start of the next block */
++      dev_priv->dma_pages[cur].used += align;
++
++      /* age DMA pages */
++      event = savage_bci_emit_event(dev_priv, 0);
++      wrap = dev_priv->event_wrap;
++      for (i = first; i < cur; ++i) {
++              SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
++              dev_priv->dma_pages[i].used = 0;
++              dev_priv->dma_pages[i].flushed = 0;
++      }
++      /* age the current page only when it's full */
++      if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
++              SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
++              dev_priv->dma_pages[cur].used = 0;
++              dev_priv->dma_pages[cur].flushed = 0;
++              /* advance to next page */
++              cur++;
++              if (cur == dev_priv->nr_dma_pages)
++                      cur = 0;
++              dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
++      } else {
++              dev_priv->first_dma_page = cur;
++              dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
++      }
++      SET_AGE(&dev_priv->last_dma_age, event, wrap);
++
++      DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
++                dev_priv->dma_pages[cur].used,
++                dev_priv->dma_pages[cur].flushed);
++}
++
++static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
++{
++      unsigned int i, j;
++      BCI_LOCALS;
++
++      if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
++          dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
++              return;
++
++      DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
++                dev_priv->first_dma_page, dev_priv->current_dma_page,
++                dev_priv->dma_pages[dev_priv->current_dma_page].used);
++
++      for (i = dev_priv->first_dma_page;
++           i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
++           ++i) {
++              uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++                      i * SAVAGE_DMA_PAGE_SIZE;
++#if SAVAGE_DMA_DEBUG
++              /* Sanity check: all pages except the last one must be full. */
++              if (i < dev_priv->current_dma_page &&
++                  dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
++                      DRM_ERROR("partial DMA page %u: used=%u",
++                                i, dev_priv->dma_pages[i].used);
++              }
++#endif
++              BEGIN_BCI(dev_priv->dma_pages[i].used);
++              for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
++                      BCI_WRITE(dma_ptr[j]);
++              }
++              dev_priv->dma_pages[i].used = 0;
++      }
++
++      /* reset to first page */
++      dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
++}
++
++int savage_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++      drm_savage_private_t *dev_priv;
++
++      dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_savage_private_t));
++      dev->dev_private = (void *)dev_priv;
++
++      dev_priv->chipset = (enum savage_family)chipset;
++
++      return 0;
++}
++
++/*
++ * Initalize mappings. On Savage4 and SavageIX the alignment
++ * and size of the aperture is not suitable for automatic MTRR setup
++ * in drm_addmap. Therefore we add them manually before the maps are
++ * initialized, and tear them down on last close.
++ */
++int savage_driver_firstopen(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      unsigned long mmio_base, fb_base, fb_size, aperture_base;
++      /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
++       * in case we decide we need information on the BAR for BSD in the
++       * future.
++       */
++      unsigned int fb_rsrc, aper_rsrc;
++      int ret = 0;
++
++      dev_priv->mtrr[0].handle = -1;
++      dev_priv->mtrr[1].handle = -1;
++      dev_priv->mtrr[2].handle = -1;
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              fb_rsrc = 0;
++              fb_base = drm_get_resource_start(dev, 0);
++              fb_size = SAVAGE_FB_SIZE_S3;
++              mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
++              aper_rsrc = 0;
++              aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
++              /* this should always be true */
++              if (drm_get_resource_len(dev, 0) == 0x08000000) {
++                      /* Don't make MMIO write-cobining! We need 3
++                       * MTRRs. */
++                      dev_priv->mtrr[0].base = fb_base;
++                      dev_priv->mtrr[0].size = 0x01000000;
++                      dev_priv->mtrr[0].handle =
++                          drm_mtrr_add(dev_priv->mtrr[0].base,
++                                       dev_priv->mtrr[0].size, DRM_MTRR_WC);
++                      dev_priv->mtrr[1].base = fb_base + 0x02000000;
++                      dev_priv->mtrr[1].size = 0x02000000;
++                      dev_priv->mtrr[1].handle =
++                          drm_mtrr_add(dev_priv->mtrr[1].base,
++                                       dev_priv->mtrr[1].size, DRM_MTRR_WC);
++                      dev_priv->mtrr[2].base = fb_base + 0x04000000;
++                      dev_priv->mtrr[2].size = 0x04000000;
++                      dev_priv->mtrr[2].handle =
++                          drm_mtrr_add(dev_priv->mtrr[2].base,
++                                       dev_priv->mtrr[2].size, DRM_MTRR_WC);
++              } else {
++                      DRM_ERROR("strange pci_resource_len %08lx\n",
++                                drm_get_resource_len(dev, 0));
++              }
++      } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
++                 dev_priv->chipset != S3_SAVAGE2000) {
++              mmio_base = drm_get_resource_start(dev, 0);
++              fb_rsrc = 1;
++              fb_base = drm_get_resource_start(dev, 1);
++              fb_size = SAVAGE_FB_SIZE_S4;
++              aper_rsrc = 1;
++              aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
++              /* this should always be true */
++              if (drm_get_resource_len(dev, 1) == 0x08000000) {
++                      /* Can use one MTRR to cover both fb and
++                       * aperture. */
++                      dev_priv->mtrr[0].base = fb_base;
++                      dev_priv->mtrr[0].size = 0x08000000;
++                      dev_priv->mtrr[0].handle =
++                          drm_mtrr_add(dev_priv->mtrr[0].base,
++                                       dev_priv->mtrr[0].size, DRM_MTRR_WC);
++              } else {
++                      DRM_ERROR("strange pci_resource_len %08lx\n",
++                                drm_get_resource_len(dev, 1));
++              }
++      } else {
++              mmio_base = drm_get_resource_start(dev, 0);
++              fb_rsrc = 1;
++              fb_base = drm_get_resource_start(dev, 1);
++              fb_size = drm_get_resource_len(dev, 1);
++              aper_rsrc = 2;
++              aperture_base = drm_get_resource_start(dev, 2);
++              /* Automatic MTRR setup will do the right thing. */
++      }
++
++      ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
++                       _DRM_READ_ONLY, &dev_priv->mmio);
++      if (ret)
++              return ret;
++
++      ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
++                       _DRM_WRITE_COMBINING, &dev_priv->fb);
++      if (ret)
++              return ret;
++
++      ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
++                       _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
++                       &dev_priv->aperture);
++      if (ret)
++              return ret;
++
++      return ret;
++}
++
++/*
++ * Delete MTRRs and free device-private data.
++ */
++void savage_driver_lastclose(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      for (i = 0; i < 3; ++i)
++              if (dev_priv->mtrr[i].handle >= 0)
++                      drm_mtrr_del(dev_priv->mtrr[i].handle,
++                                   dev_priv->mtrr[i].base,
++                                   dev_priv->mtrr[i].size, DRM_MTRR_WC);
++}
++
++int savage_driver_unload(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++
++      drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
++
++      return 0;
++}
++
++static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++
++      if (init->fb_bpp != 16 && init->fb_bpp != 32) {
++              DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
++              return -EINVAL;
++      }
++      if (init->depth_bpp != 16 && init->depth_bpp != 32) {
++              DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
++              return -EINVAL;
++      }
++      if (init->dma_type != SAVAGE_DMA_AGP &&
++          init->dma_type != SAVAGE_DMA_PCI) {
++              DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
++              return -EINVAL;
++      }
++
++      dev_priv->cob_size = init->cob_size;
++      dev_priv->bci_threshold_lo = init->bci_threshold_lo;
++      dev_priv->bci_threshold_hi = init->bci_threshold_hi;
++      dev_priv->dma_type = init->dma_type;
++
++      dev_priv->fb_bpp = init->fb_bpp;
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++      dev_priv->depth_bpp = init->depth_bpp;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      dev_priv->texture_offset = init->texture_offset;
++      dev_priv->texture_size = init->texture_size;
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              savage_do_cleanup_bci(dev);
++              return -EINVAL;
++      }
++      if (init->status_offset != 0) {
++              dev_priv->status = drm_core_findmap(dev, init->status_offset);
++              if (!dev_priv->status) {
++                      DRM_ERROR("could not find shadow status region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++      } else {
++              dev_priv->status = NULL;
++      }
++      if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
++              dev->agp_buffer_token = init->buffers_offset;
++              dev->agp_buffer_map = drm_core_findmap(dev,
++                                                     init->buffers_offset);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("could not find DMA buffer region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("failed to ioremap DMA buffer region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -ENOMEM;
++              }
++      }
++      if (init->agp_textures_offset) {
++              dev_priv->agp_textures =
++                      drm_core_findmap(dev, init->agp_textures_offset);
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("could not find agp texture region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++      } else {
++              dev_priv->agp_textures = NULL;
++      }
++
++      if (init->cmd_dma_offset) {
++              if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      DRM_ERROR("command DMA not supported on "
++                                "Savage3D/MX/IX.\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              if (dev->dma && dev->dma->buflist) {
++                      DRM_ERROR("command and vertex DMA not supported "
++                                "at the same time.\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
++              if (!dev_priv->cmd_dma) {
++                      DRM_ERROR("could not find command DMA region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
++                      if (dev_priv->cmd_dma->type != _DRM_AGP) {
++                              DRM_ERROR("AGP command DMA region is not a "
++                                        "_DRM_AGP map!\n");
++                              savage_do_cleanup_bci(dev);
++                              return -EINVAL;
++                      }
++                      drm_core_ioremap(dev_priv->cmd_dma, dev);
++                      if (!dev_priv->cmd_dma->handle) {
++                              DRM_ERROR("failed to ioremap command "
++                                        "DMA region!\n");
++                              savage_do_cleanup_bci(dev);
++                              return -ENOMEM;
++                      }
++              } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
++                      DRM_ERROR("PCI command DMA region is not a "
++                                "_DRM_CONSISTENT map!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++      } else {
++              dev_priv->cmd_dma = NULL;
++      }
++
++      dev_priv->dma_flush = savage_dma_flush;
++      if (!dev_priv->cmd_dma) {
++              DRM_DEBUG("falling back to faked command DMA.\n");
++              dev_priv->fake_dma.offset = 0;
++              dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
++              dev_priv->fake_dma.type = _DRM_SHM;
++              dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
++                                                    DRM_MEM_DRIVER);
++              if (!dev_priv->fake_dma.handle) {
++                      DRM_ERROR("could not allocate faked DMA buffer!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -ENOMEM;
++              }
++              dev_priv->cmd_dma = &dev_priv->fake_dma;
++              dev_priv->dma_flush = savage_fake_dma_flush;
++      }
++
++      dev_priv->sarea_priv =
++              (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
++                                     init->sarea_priv_offset);
++
++      /* setup bitmap descriptors */
++      {
++              unsigned int color_tile_format;
++              unsigned int depth_tile_format;
++              unsigned int front_stride, back_stride, depth_stride;
++              if (dev_priv->chipset <= S3_SAVAGE4) {
++                      color_tile_format = dev_priv->fb_bpp == 16 ?
++                              SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
++                      depth_tile_format = dev_priv->depth_bpp == 16 ?
++                              SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
++              } else {
++                      color_tile_format = SAVAGE_BD_TILE_DEST;
++                      depth_tile_format = SAVAGE_BD_TILE_DEST;
++              }
++              front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
++              back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
++              depth_stride =
++                  dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
++
++              dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
++                      (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
++                      (color_tile_format << SAVAGE_BD_TILE_SHIFT);
++
++              dev_priv-> back_bd =  back_stride | SAVAGE_BD_BW_DISABLE |
++                      (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
++                      (color_tile_format << SAVAGE_BD_TILE_SHIFT);
++
++              dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
++                      (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
++                      (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
++      }
++
++      /* setup status and bci ptr */
++      dev_priv->event_counter = 0;
++      dev_priv->event_wrap = 0;
++      dev_priv->bci_ptr = (volatile uint32_t *)
++          ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
++      } else {
++              dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
++      }
++      if (dev_priv->status != NULL) {
++              dev_priv->status_ptr =
++                      (volatile uint32_t *)dev_priv->status->handle;
++              dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
++              dev_priv->wait_evnt = savage_bci_wait_event_shadow;
++              dev_priv->status_ptr[1023] = dev_priv->event_counter;
++      } else {
++              dev_priv->status_ptr = NULL;
++              if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
++              } else {
++                      dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
++              }
++              dev_priv->wait_evnt = savage_bci_wait_event_reg;
++      }
++
++      /* cliprect functions */
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
++              dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
++      else
++              dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
++
++      if (savage_freelist_init(dev) < 0) {
++              DRM_ERROR("could not initialize freelist\n");
++              savage_do_cleanup_bci(dev);
++              return -ENOMEM;
++      }
++
++      if (savage_dma_init(dev_priv) < 0) {
++              DRM_ERROR("could not initialize command DMA\n");
++              savage_do_cleanup_bci(dev);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static int savage_do_cleanup_bci(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++
++      if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
++              if (dev_priv->fake_dma.handle)
++                      drm_free(dev_priv->fake_dma.handle,
++                               SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
++      } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
++                 dev_priv->cmd_dma->type == _DRM_AGP &&
++                 dev_priv->dma_type == SAVAGE_DMA_AGP)
++              drm_core_ioremapfree(dev_priv->cmd_dma, dev);
++
++      if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
++          dev->agp_buffer_map && dev->agp_buffer_map->handle) {
++              drm_core_ioremapfree(dev->agp_buffer_map, dev);
++              /* make sure the next instance (which may be running
++               * in PCI mode) doesn't try to use an old
++               * agp_buffer_map. */
++              dev->agp_buffer_map = NULL;
++      }
++
++      if (dev_priv->dma_pages)
++              drm_free(dev_priv->dma_pages,
++                       sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
++                       DRM_MEM_DRIVER);
++
++      return 0;
++}
++
++static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_init_t *init = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case SAVAGE_INIT_BCI:
++              return savage_do_init_bci(dev, init);
++      case SAVAGE_CLEANUP_BCI:
++              return savage_do_cleanup_bci(dev);
++      }
++
++      return -EINVAL;
++}
++
++static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_event_emit_t *event = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      event->count = savage_bci_emit_event(dev_priv, event->flags);
++      event->count |= dev_priv->event_wrap << 16;
++
++      return 0;
++}
++
++static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_event_wait_t *event = data;
++      unsigned int event_e, hw_e;
++      unsigned int event_w, hw_w;
++
++      DRM_DEBUG("\n");
++
++      UPDATE_EVENT_COUNTER();
++      if (dev_priv->status_ptr)
++              hw_e = dev_priv->status_ptr[1] & 0xffff;
++      else
++              hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++      hw_w = dev_priv->event_wrap;
++      if (hw_e > dev_priv->event_counter)
++              hw_w--; /* hardware hasn't passed the last wrap yet */
++
++      event_e = event->count & 0xffff;
++      event_w = event->count >> 16;
++
++      /* Don't need to wait if
++       * - event counter wrapped since the event was emitted or
++       * - the hardware has advanced up to or over the event to wait for.
++       */
++      if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
++              return 0;
++      else
++              return dev_priv->wait_evnt(dev_priv, event_e);
++}
++
++/*
++ * DMA buffer management
++ */
++
++static int savage_bci_get_buffers(struct drm_device *dev,
++                                struct drm_file *file_priv,
++                                struct drm_dma *d)
++{
++      struct drm_buf *buf;
++      int i;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = savage_freelist_get(dev);
++              if (!buf)
++                      return -EAGAIN;
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i],
++                                   &buf->idx, sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i],
++                                   &buf->total, sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_dma *d = data;
++      int ret = 0;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = savage_bci_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      if (!dma)
++              return;
++      if (!dev_priv)
++              return;
++      if (!dma->buflist)
++              return;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_savage_buf_priv_t *buf_priv = buf->dev_private;
++
++              if (buf->file_priv == file_priv && buf_priv &&
++                  buf_priv->next == NULL && buf_priv->prev == NULL) {
++                      uint16_t event;
++                      DRM_DEBUG("reclaimed from client\n");
++                      event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
++                      SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
++                      savage_freelist_put(dev, buf);
++              }
++      }
++
++      drm_core_reclaim_buffers(dev, file_priv);
++}
++
++struct drm_ioctl_desc savage_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
++};
++
++int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drm.h git-nokia/drivers/gpu/drm-tungsten/savage_drm.h
+--- git/drivers/gpu/drm-tungsten/savage_drm.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drm.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,209 @@
++/* savage_drm.h -- Public header for the savage driver
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __SAVAGE_DRM_H__
++#define __SAVAGE_DRM_H__
++
++#ifndef __SAVAGE_SAREA_DEFINES__
++#define __SAVAGE_SAREA_DEFINES__
++
++/* 2 heaps (1 for card, 1 for agp), each divided into upto 128
++ * regions, subject to a minimum region size of (1<<16) == 64k.
++ *
++ * Clients may subdivide regions internally, but when sharing between
++ * clients, the region size is the minimum granularity.
++ */
++
++#define SAVAGE_CARD_HEAP              0
++#define SAVAGE_AGP_HEAP                       1
++#define SAVAGE_NR_TEX_HEAPS           2
++#define SAVAGE_NR_TEX_REGIONS         16
++#define SAVAGE_LOG_MIN_TEX_REGION_SIZE        16
++
++#endif /* __SAVAGE_SAREA_DEFINES__ */
++
++typedef struct _drm_savage_sarea {
++      /* LRU lists for texture memory in agp space and on the card.
++       */
++      struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
++      unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
++
++      /* Mechanism to validate card state.
++       */
++      int ctxOwner;
++} drm_savage_sarea_t, *drm_savage_sarea_ptr;
++
++/* Savage-specific ioctls
++ */
++#define DRM_SAVAGE_BCI_INIT           0x00
++#define DRM_SAVAGE_BCI_CMDBUF           0x01
++#define DRM_SAVAGE_BCI_EVENT_EMIT     0x02
++#define DRM_SAVAGE_BCI_EVENT_WAIT     0x03
++
++#define DRM_IOCTL_SAVAGE_INIT         DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
++#define DRM_IOCTL_SAVAGE_CMDBUF               DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
++#define DRM_IOCTL_SAVAGE_EVENT_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
++#define DRM_IOCTL_SAVAGE_EVENT_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
++
++#define SAVAGE_DMA_PCI        1
++#define SAVAGE_DMA_AGP        3
++typedef struct drm_savage_init {
++      enum {
++              SAVAGE_INIT_BCI = 1,
++              SAVAGE_CLEANUP_BCI = 2
++      } func;
++      unsigned int sarea_priv_offset;
++
++      /* some parameters */
++      unsigned int cob_size;
++      unsigned int bci_threshold_lo, bci_threshold_hi;
++      unsigned int dma_type;
++
++      /* frame buffer layout */
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      /* local textures */
++      unsigned int texture_offset;
++      unsigned int texture_size;
++
++      /* physical locations of non-permanent maps */
++      unsigned long status_offset;
++      unsigned long buffers_offset;
++      unsigned long agp_textures_offset;
++      unsigned long cmd_dma_offset;
++} drm_savage_init_t;
++
++typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
++typedef struct drm_savage_cmdbuf {
++                              /* command buffer in client's address space */
++      drm_savage_cmd_header_t __user *cmd_addr;
++      unsigned int size;      /* size of the command buffer in 64bit units */
++
++      unsigned int dma_idx;   /* DMA buffer index to use */
++      int discard;            /* discard DMA buffer when done */
++                              /* vertex buffer in client's address space */
++      unsigned int __user *vb_addr;
++      unsigned int vb_size;   /* size of client vertex buffer in bytes */
++      unsigned int vb_stride; /* stride of vertices in 32bit words */
++                              /* boxes in client's address space */
++      struct drm_clip_rect __user *box_addr;
++      unsigned int nbox;      /* number of clipping boxes */
++} drm_savage_cmdbuf_t;
++
++#define SAVAGE_WAIT_2D  0x1 /* wait for 2D idle before updating event tag */
++#define SAVAGE_WAIT_3D  0x2 /* wait for 3D idle before updating event tag */
++#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
++typedef struct drm_savage_event {
++      unsigned int count;
++      unsigned int flags;
++} drm_savage_event_emit_t, drm_savage_event_wait_t;
++
++/* Commands for the cmdbuf ioctl
++ */
++#define SAVAGE_CMD_STATE      0  /* a range of state registers */
++#define SAVAGE_CMD_DMA_PRIM   1  /* vertices from DMA buffer */
++#define SAVAGE_CMD_VB_PRIM    2  /* vertices from client vertex buffer */
++#define SAVAGE_CMD_DMA_IDX    3  /* indexed vertices from DMA buffer */
++#define SAVAGE_CMD_VB_IDX     4  /* indexed vertices client vertex buffer */
++#define SAVAGE_CMD_CLEAR      5  /* clear buffers */
++#define SAVAGE_CMD_SWAP               6  /* swap buffers */
++
++/* Primitive types
++*/
++#define SAVAGE_PRIM_TRILIST   0  /* triangle list */
++#define SAVAGE_PRIM_TRISTRIP  1  /* triangle strip */
++#define SAVAGE_PRIM_TRIFAN    2  /* triangle fan */
++#define SAVAGE_PRIM_TRILIST_201       3  /* reorder verts for correct flat
++                                  * shading on s3d */
++
++/* Skip flags (vertex format)
++ */
++#define SAVAGE_SKIP_Z         0x01
++#define SAVAGE_SKIP_W         0x02
++#define SAVAGE_SKIP_C0                0x04
++#define SAVAGE_SKIP_C1                0x08
++#define SAVAGE_SKIP_S0                0x10
++#define SAVAGE_SKIP_T0                0x20
++#define SAVAGE_SKIP_ST0               0x30
++#define SAVAGE_SKIP_S1                0x40
++#define SAVAGE_SKIP_T1                0x80
++#define SAVAGE_SKIP_ST1               0xc0
++#define SAVAGE_SKIP_ALL_S3D   0x3f
++#define SAVAGE_SKIP_ALL_S4    0xff
++
++/* Buffer names for clear command
++ */
++#define SAVAGE_FRONT          0x1
++#define SAVAGE_BACK           0x2
++#define SAVAGE_DEPTH          0x4
++
++/* 64-bit command header
++ */
++union drm_savage_cmd_header {
++      struct {
++              unsigned char cmd;      /* command */
++              unsigned char pad0;
++              unsigned short pad1;
++              unsigned short pad2;
++              unsigned short pad3;
++      } cmd; /* generic */
++      struct {
++              unsigned char cmd;
++              unsigned char global;   /* need idle engine? */
++              unsigned short count;   /* number of consecutive registers */
++              unsigned short start;   /* first register */
++              unsigned short pad3;
++      } state; /* SAVAGE_CMD_STATE */
++      struct {
++              unsigned char cmd;
++              unsigned char prim;     /* primitive type */
++              unsigned short skip;    /* vertex format (skip flags) */
++              unsigned short count;   /* number of vertices */
++              unsigned short start;   /* first vertex in DMA/vertex buffer */
++      } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
++      struct {
++              unsigned char cmd;
++              unsigned char prim;
++              unsigned short skip;
++              unsigned short count;   /* number of indices that follow */
++              unsigned short pad3;
++      } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
++      struct {
++              unsigned char cmd;
++              unsigned char pad0;
++              unsigned short pad1;
++              unsigned int flags;
++      } clear0; /* SAVAGE_CMD_CLEAR */
++      struct {
++              unsigned int mask;
++              unsigned int value;
++      } clear1; /* SAVAGE_CMD_CLEAR data */
++};
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drv.c git-nokia/drivers/gpu/drm-tungsten/savage_drv.c
+--- git/drivers/gpu/drm-tungsten/savage_drv.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drv.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,96 @@
++/* savage_drv.c -- Savage driver for Linux
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "savage_drm.h"
++#include "savage_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      savage_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR |
++          DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
++      .dev_priv_size = sizeof(drm_savage_buf_priv_t),
++      .load = savage_driver_load,
++      .firstopen = savage_driver_firstopen,
++      .lastclose = savage_driver_lastclose,
++      .unload = savage_driver_unload,
++      .reclaim_buffers = savage_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = savage_ioctls,
++      .dma_ioctl = savage_bci_buffers,
++      .fops = {
++              .owner   = THIS_MODULE,
++              .open    = drm_open,
++              .release = drm_release,
++              .ioctl   = drm_ioctl,
++              .mmap    = drm_mmap,
++              .poll = drm_poll,
++              .fasync  = drm_fasync,
++      },
++      .pci_driver = {
++              .name          = DRIVER_NAME,
++              .id_table      = pciidlist,
++              .probe         = probe,
++              .remove        = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init savage_init(void)
++{
++      driver.num_ioctls = savage_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit savage_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(savage_init);
++module_exit(savage_exit);
++
++MODULE_AUTHOR( DRIVER_AUTHOR );
++MODULE_DESCRIPTION( DRIVER_DESC );
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drv.h git-nokia/drivers/gpu/drm-tungsten/savage_drv.h
+--- git/drivers/gpu/drm-tungsten/savage_drv.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drv.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,575 @@
++/* savage_drv.h -- Private header for the savage driver */
++/*
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __SAVAGE_DRV_H__
++#define __SAVAGE_DRV_H__
++
++#define DRIVER_AUTHOR "Felix Kuehling"
++
++#define DRIVER_NAME   "savage"
++#define DRIVER_DESC   "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
++#define DRIVER_DATE   "20050313"
++
++#define DRIVER_MAJOR          2
++#define DRIVER_MINOR          4
++#define DRIVER_PATCHLEVEL     1
++/* Interface history:
++ *
++ * 1.x   The DRM driver from the VIA/S3 code drop, basically a dummy
++ * 2.0   The first real DRM
++ * 2.1   Scissors registers managed by the DRM, 3D operations clipped by
++ *       cliprects of the cmdbuf ioctl
++ * 2.2   Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
++ * 2.3   Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
++ *       wide and thus very long lived (unlikely to ever wrap). The size
++ *       in the struct was 32 bits before, but only 16 bits were used
++ * 2.4   Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
++ *       actually used
++ */
++
++typedef struct drm_savage_age {
++      uint16_t event;
++      unsigned int wrap;
++} drm_savage_age_t;
++
++typedef struct drm_savage_buf_priv {
++      struct drm_savage_buf_priv *next;
++      struct drm_savage_buf_priv *prev;
++      drm_savage_age_t age;
++      struct drm_buf *buf;
++} drm_savage_buf_priv_t;
++
++typedef struct drm_savage_dma_page {
++      drm_savage_age_t age;
++      unsigned int used, flushed;
++} drm_savage_dma_page_t;
++#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
++/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
++ * size of 16kbytes or 4k entries. Minimum requirement would be
++ * 10kbytes for 255 40-byte vertices in one drawing command. */
++#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
++
++/* interesting bits of hardware state that are saved in dev_priv */
++typedef union {
++      struct drm_savage_common_state {
++              uint32_t vbaddr;
++      } common;
++      struct {
++              unsigned char pad[sizeof(struct drm_savage_common_state)];
++              uint32_t texctrl, texaddr;
++              uint32_t scstart, new_scstart;
++              uint32_t scend, new_scend;
++      } s3d;
++      struct {
++              unsigned char pad[sizeof(struct drm_savage_common_state)];
++              uint32_t texdescr, texaddr0, texaddr1;
++              uint32_t drawctrl0, new_drawctrl0;
++              uint32_t drawctrl1, new_drawctrl1;
++      } s4;
++} drm_savage_state_t;
++
++/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
++enum savage_family {
++      S3_UNKNOWN = 0,
++      S3_SAVAGE3D,
++      S3_SAVAGE_MX,
++      S3_SAVAGE4,
++      S3_PROSAVAGE,
++      S3_TWISTER,
++      S3_PROSAVAGEDDR,
++      S3_SUPERSAVAGE,
++      S3_SAVAGE2000,
++      S3_LAST
++};
++
++extern struct drm_ioctl_desc savage_ioctls[];
++extern int savage_max_ioctl;
++
++#define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
++
++#define S3_SAVAGE4_SERIES(chip)  ((chip==S3_SAVAGE4)            \
++                                  || (chip==S3_PROSAVAGE)       \
++                                  || (chip==S3_TWISTER)         \
++                                  || (chip==S3_PROSAVAGEDDR))
++
++#define       S3_SAVAGE_MOBILE_SERIES(chip)   ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
++
++#define S3_SAVAGE_SERIES(chip)    ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
++
++#define S3_MOBILE_TWISTER_SERIES(chip)   ((chip==S3_TWISTER)    \
++                                          ||(chip==S3_PROSAVAGEDDR))
++
++/* flags */
++#define SAVAGE_IS_AGP 1
++
++typedef struct drm_savage_private {
++      drm_savage_sarea_t *sarea_priv;
++
++      drm_savage_buf_priv_t head, tail;
++
++      /* who am I? */
++      enum savage_family chipset;
++
++      unsigned int cob_size;
++      unsigned int bci_threshold_lo, bci_threshold_hi;
++      unsigned int dma_type;
++
++      /* frame buffer layout */
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      /* bitmap descriptors for swap and clear */
++      unsigned int front_bd, back_bd, depth_bd;
++
++      /* local textures */
++      unsigned int texture_offset;
++      unsigned int texture_size;
++
++      /* memory regions in physical memory */
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *fb;
++      drm_local_map_t *aperture;
++      drm_local_map_t *status;
++      drm_local_map_t *agp_textures;
++      drm_local_map_t *cmd_dma;
++      drm_local_map_t fake_dma;
++
++      struct {
++              int handle;
++              unsigned long base, size;
++      } mtrr[3];
++
++      /* BCI and status-related stuff */
++      volatile uint32_t *status_ptr, *bci_ptr;
++      uint32_t status_used_mask;
++      uint16_t event_counter;
++      unsigned int event_wrap;
++
++      /* Savage4 command DMA */
++      drm_savage_dma_page_t *dma_pages;
++      unsigned int nr_dma_pages, first_dma_page, current_dma_page;
++      drm_savage_age_t last_dma_age;
++
++      /* saved hw state for global/local check on S3D */
++      uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
++      /* and for scissors (global, so don't emit if not changed) */
++      uint32_t hw_scissors_start, hw_scissors_end;
++
++      drm_savage_state_t state;
++
++      /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
++      unsigned int waiting;
++
++      /* config/hardware-dependent function pointers */
++      int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
++      int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
++      /* Err, there is a macro wait_event in include/linux/wait.h.
++       * Avoid unwanted macro expansion. */
++      void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
++                             const struct drm_clip_rect *pbox);
++      void (*dma_flush)(struct drm_savage_private *dev_priv);
++} drm_savage_private_t;
++
++/* ioctls */
++extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
++
++/* BCI functions */
++extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
++                                    unsigned int flags);
++extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf);
++extern void savage_dma_reset(drm_savage_private_t *dev_priv);
++extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
++extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
++                                unsigned int n);
++extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
++extern int savage_driver_firstopen(struct drm_device *dev);
++extern void savage_driver_lastclose(struct drm_device *dev);
++extern int savage_driver_unload(struct drm_device *dev);
++extern void savage_reclaim_buffers(struct drm_device *dev,
++                                 struct drm_file *file_priv);
++
++/* state functions */
++extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
++                                    const struct drm_clip_rect *pbox);
++extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
++                                   const struct drm_clip_rect *pbox);
++
++#define SAVAGE_FB_SIZE_S3     0x01000000      /*  16MB */
++#define SAVAGE_FB_SIZE_S4     0x02000000      /*  32MB */
++#define SAVAGE_MMIO_SIZE        0x00080000    /* 512kB */
++#define SAVAGE_APERTURE_OFFSET  0x02000000    /*  32MB */
++#define SAVAGE_APERTURE_SIZE    0x05000000    /* 5 tiled surfaces, 16MB each */
++
++#define SAVAGE_BCI_OFFSET       0x00010000      /* offset of the BCI region
++                                               * inside the MMIO region */
++#define SAVAGE_BCI_FIFO_SIZE  32              /* number of entries in on-chip
++                                               * BCI FIFO */
++
++/*
++ * MMIO registers
++ */
++#define SAVAGE_STATUS_WORD0           0x48C00
++#define SAVAGE_STATUS_WORD1           0x48C04
++#define SAVAGE_ALT_STATUS_WORD0               0x48C60
++
++#define SAVAGE_FIFO_USED_MASK_S3D     0x0001ffff
++#define SAVAGE_FIFO_USED_MASK_S4      0x001fffff
++
++/* Copied from savage_bci.h in the 2D driver with some renaming. */
++
++/* Bitmap descriptors */
++#define SAVAGE_BD_STRIDE_SHIFT 0
++#define SAVAGE_BD_BPP_SHIFT   16
++#define SAVAGE_BD_TILE_SHIFT  24
++#define SAVAGE_BD_BW_DISABLE  (1<<28)
++/* common: */
++#define       SAVAGE_BD_TILE_LINEAR           0
++/* savage4, MX, IX, 3D */
++#define       SAVAGE_BD_TILE_16BPP            2
++#define       SAVAGE_BD_TILE_32BPP            3
++/* twister, prosavage, DDR, supersavage, 2000 */
++#define       SAVAGE_BD_TILE_DEST             1
++#define       SAVAGE_BD_TILE_TEXTURE          2
++/* GBD - BCI enable */
++/* savage4, MX, IX, 3D */
++#define SAVAGE_GBD_BCI_ENABLE                    8
++/* twister, prosavage, DDR, supersavage, 2000 */
++#define SAVAGE_GBD_BCI_ENABLE_TWISTER            0
++
++#define SAVAGE_GBD_BIG_ENDIAN                    4
++#define SAVAGE_GBD_LITTLE_ENDIAN                 0
++#define SAVAGE_GBD_64                            1
++
++/*  Global Bitmap Descriptor */
++#define SAVAGE_BCI_GLB_BD_LOW             0x8168
++#define SAVAGE_BCI_GLB_BD_HIGH            0x816C
++
++/*
++ * BCI registers
++ */
++/* Savage4/Twister/ProSavage 3D registers */
++#define SAVAGE_DRAWLOCALCTRL_S4               0x1e
++#define SAVAGE_TEXPALADDR_S4          0x1f
++#define SAVAGE_TEXCTRL0_S4            0x20
++#define SAVAGE_TEXCTRL1_S4            0x21
++#define SAVAGE_TEXADDR0_S4            0x22
++#define SAVAGE_TEXADDR1_S4            0x23
++#define SAVAGE_TEXBLEND0_S4           0x24
++#define SAVAGE_TEXBLEND1_S4           0x25
++#define SAVAGE_TEXXPRCLR_S4           0x26 /* never used */
++#define SAVAGE_TEXDESCR_S4            0x27
++#define SAVAGE_FOGTABLE_S4            0x28
++#define SAVAGE_FOGCTRL_S4             0x30
++#define SAVAGE_STENCILCTRL_S4         0x31
++#define SAVAGE_ZBUFCTRL_S4            0x32
++#define SAVAGE_ZBUFOFF_S4             0x33
++#define SAVAGE_DESTCTRL_S4            0x34
++#define SAVAGE_DRAWCTRL0_S4           0x35
++#define SAVAGE_DRAWCTRL1_S4           0x36
++#define SAVAGE_ZWATERMARK_S4          0x37
++#define SAVAGE_DESTTEXRWWATERMARK_S4  0x38
++#define SAVAGE_TEXBLENDCOLOR_S4               0x39
++/* Savage3D/MX/IX 3D registers */
++#define SAVAGE_TEXPALADDR_S3D         0x18
++#define SAVAGE_TEXXPRCLR_S3D          0x19 /* never used */
++#define SAVAGE_TEXADDR_S3D            0x1A
++#define SAVAGE_TEXDESCR_S3D           0x1B
++#define SAVAGE_TEXCTRL_S3D            0x1C
++#define SAVAGE_FOGTABLE_S3D           0x20
++#define SAVAGE_FOGCTRL_S3D            0x30
++#define SAVAGE_DRAWCTRL_S3D           0x31
++#define SAVAGE_ZBUFCTRL_S3D           0x32
++#define SAVAGE_ZBUFOFF_S3D            0x33
++#define SAVAGE_DESTCTRL_S3D           0x34
++#define SAVAGE_SCSTART_S3D            0x35
++#define SAVAGE_SCEND_S3D              0x36
++#define SAVAGE_ZWATERMARK_S3D         0x37
++#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
++/* common stuff */
++#define SAVAGE_VERTBUFADDR            0x3e
++#define SAVAGE_BITPLANEWTMASK         0xd7
++#define SAVAGE_DMABUFADDR             0x51
++
++/* texture enable bits (needed for tex addr checking) */
++#define SAVAGE_TEXCTRL_TEXEN_MASK     0x00010000 /* S3D */
++#define SAVAGE_TEXDESCR_TEX0EN_MASK   0x02000000 /* S4 */
++#define SAVAGE_TEXDESCR_TEX1EN_MASK   0x04000000 /* S4 */
++
++/* Global fields in Savage4/Twister/ProSavage 3D registers:
++ *
++ * All texture registers and DrawLocalCtrl are local. All other
++ * registers are global. */
++
++/* Global fields in Savage3D/MX/IX 3D registers:
++ *
++ * All texture registers are local. DrawCtrl and ZBufCtrl are
++ * partially local. All other registers are global.
++ *
++ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
++ * ZBufCtrl global fields: zCmpFunc, zBufEn
++ */
++#define SAVAGE_DRAWCTRL_S3D_GLOBAL    0x03f3c00c
++#define SAVAGE_ZBUFCTRL_S3D_GLOBAL    0x00000027
++
++/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
++ */
++#define SAVAGE_SCISSOR_MASK_S4                0x00fff7ff
++#define SAVAGE_SCISSOR_MASK_S3D               0x07ff07ff
++
++/*
++ * BCI commands
++ */
++#define BCI_CMD_NOP                  0x40000000
++#define BCI_CMD_RECT                 0x48000000
++#define BCI_CMD_RECT_XP              0x01000000
++#define BCI_CMD_RECT_YP              0x02000000
++#define BCI_CMD_SCANLINE             0x50000000
++#define BCI_CMD_LINE                 0x5C000000
++#define BCI_CMD_LINE_LAST_PIXEL      0x58000000
++#define BCI_CMD_BYTE_TEXT            0x63000000
++#define BCI_CMD_NT_BYTE_TEXT         0x67000000
++#define BCI_CMD_BIT_TEXT             0x6C000000
++#define BCI_CMD_GET_ROP(cmd)         (((cmd) >> 16) & 0xFF)
++#define BCI_CMD_SET_ROP(cmd, rop)    ((cmd) |= (((rop) & 0xFF) << 16))
++#define BCI_CMD_SEND_COLOR           0x00008000
++
++#define BCI_CMD_CLIP_NONE            0x00000000
++#define BCI_CMD_CLIP_CURRENT         0x00002000
++#define BCI_CMD_CLIP_LR              0x00004000
++#define BCI_CMD_CLIP_NEW             0x00006000
++
++#define BCI_CMD_DEST_GBD             0x00000000
++#define BCI_CMD_DEST_PBD             0x00000800
++#define BCI_CMD_DEST_PBD_NEW         0x00000C00
++#define BCI_CMD_DEST_SBD             0x00001000
++#define BCI_CMD_DEST_SBD_NEW         0x00001400
++
++#define BCI_CMD_SRC_TRANSPARENT      0x00000200
++#define BCI_CMD_SRC_SOLID            0x00000000
++#define BCI_CMD_SRC_GBD              0x00000020
++#define BCI_CMD_SRC_COLOR            0x00000040
++#define BCI_CMD_SRC_MONO             0x00000060
++#define BCI_CMD_SRC_PBD_COLOR        0x00000080
++#define BCI_CMD_SRC_PBD_MONO         0x000000A0
++#define BCI_CMD_SRC_PBD_COLOR_NEW    0x000000C0
++#define BCI_CMD_SRC_PBD_MONO_NEW     0x000000E0
++#define BCI_CMD_SRC_SBD_COLOR        0x00000100
++#define BCI_CMD_SRC_SBD_MONO         0x00000120
++#define BCI_CMD_SRC_SBD_COLOR_NEW    0x00000140
++#define BCI_CMD_SRC_SBD_MONO_NEW     0x00000160
++
++#define BCI_CMD_PAT_TRANSPARENT      0x00000010
++#define BCI_CMD_PAT_NONE             0x00000000
++#define BCI_CMD_PAT_COLOR            0x00000002
++#define BCI_CMD_PAT_MONO             0x00000003
++#define BCI_CMD_PAT_PBD_COLOR        0x00000004
++#define BCI_CMD_PAT_PBD_MONO         0x00000005
++#define BCI_CMD_PAT_PBD_COLOR_NEW    0x00000006
++#define BCI_CMD_PAT_PBD_MONO_NEW     0x00000007
++#define BCI_CMD_PAT_SBD_COLOR        0x00000008
++#define BCI_CMD_PAT_SBD_MONO         0x00000009
++#define BCI_CMD_PAT_SBD_COLOR_NEW    0x0000000A
++#define BCI_CMD_PAT_SBD_MONO_NEW     0x0000000B
++
++#define BCI_BD_BW_DISABLE            0x10000000
++#define BCI_BD_TILE_MASK             0x03000000
++#define BCI_BD_TILE_NONE             0x00000000
++#define BCI_BD_TILE_16               0x02000000
++#define BCI_BD_TILE_32               0x03000000
++#define BCI_BD_GET_BPP(bd)           (((bd) >> 16) & 0xFF)
++#define BCI_BD_SET_BPP(bd, bpp)      ((bd) |= (((bpp) & 0xFF) << 16))
++#define BCI_BD_GET_STRIDE(bd)        ((bd) & 0xFFFF)
++#define BCI_BD_SET_STRIDE(bd, st)    ((bd) |= ((st) & 0xFFFF))
++
++#define BCI_CMD_SET_REGISTER            0x96000000
++
++#define BCI_CMD_WAIT                    0xC0000000
++#define BCI_CMD_WAIT_3D                 0x00010000
++#define BCI_CMD_WAIT_2D                 0x00020000
++
++#define BCI_CMD_UPDATE_EVENT_TAG        0x98000000
++
++#define BCI_CMD_DRAW_PRIM               0x80000000
++#define BCI_CMD_DRAW_INDEXED_PRIM       0x88000000
++#define BCI_CMD_DRAW_CONT               0x01000000
++#define BCI_CMD_DRAW_TRILIST            0x00000000
++#define BCI_CMD_DRAW_TRISTRIP           0x02000000
++#define BCI_CMD_DRAW_TRIFAN             0x04000000
++#define BCI_CMD_DRAW_SKIPFLAGS          0x000000ff
++#define BCI_CMD_DRAW_NO_Z             0x00000001
++#define BCI_CMD_DRAW_NO_W             0x00000002
++#define BCI_CMD_DRAW_NO_CD            0x00000004
++#define BCI_CMD_DRAW_NO_CS            0x00000008
++#define BCI_CMD_DRAW_NO_U0            0x00000010
++#define BCI_CMD_DRAW_NO_V0            0x00000020
++#define BCI_CMD_DRAW_NO_UV0           0x00000030
++#define BCI_CMD_DRAW_NO_U1            0x00000040
++#define BCI_CMD_DRAW_NO_V1            0x00000080
++#define BCI_CMD_DRAW_NO_UV1           0x000000c0
++
++#define BCI_CMD_DMA                   0xa8000000
++
++#define BCI_W_H(w, h)                ((((h) << 16) | (w)) & 0x0FFF0FFF)
++#define BCI_X_Y(x, y)                ((((y) << 16) | (x)) & 0x0FFF0FFF)
++#define BCI_X_W(x, w)                ((((w) << 16) | (x)) & 0x0FFF0FFF)
++#define BCI_CLIP_LR(l, r)            ((((r) << 16) | (l)) & 0x0FFF0FFF)
++#define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
++#define BCI_CLIP_BR(b, r)            ((((b) << 16) | (r)) & 0x0FFF0FFF)
++
++#define BCI_LINE_X_Y(x, y)           (((y) << 16) | ((x) & 0xFFFF))
++#define BCI_LINE_STEPS(diag, axi)    (((axi) << 16) | ((diag) & 0xFFFF))
++#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
++      (((maj) & 0x1FFF) | \
++      ((ym) ? 1<<13 : 0) | \
++      ((xp) ? 1<<14 : 0) | \
++      ((yp) ? 1<<15 : 0) | \
++      ((err) << 16))
++
++/*
++ * common commands
++ */
++#define BCI_SET_REGISTERS( first, n )                 \
++      BCI_WRITE(BCI_CMD_SET_REGISTER |                \
++                ((uint32_t)(n) & 0xff) << 16 |        \
++                ((uint32_t)(first) & 0xffff))
++#define DMA_SET_REGISTERS( first, n )                 \
++      DMA_WRITE(BCI_CMD_SET_REGISTER |                \
++                ((uint32_t)(n) & 0xff) << 16 |        \
++                ((uint32_t)(first) & 0xffff))
++
++#define BCI_DRAW_PRIMITIVE(n, type, skip)         \
++        BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
++                ((n) << 16))
++#define DMA_DRAW_PRIMITIVE(n, type, skip)         \
++        DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
++                ((n) << 16))
++
++#define BCI_DRAW_INDICES_S3D(n, type, i0)         \
++        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
++                ((n) << 16) | (i0))
++
++#define BCI_DRAW_INDICES_S4(n, type, skip)        \
++        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
++                  (skip) | ((n) << 16))
++
++#define BCI_DMA(n)    \
++      BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
++
++/*
++ * access to MMIO
++ */
++#define SAVAGE_READ(reg)      DRM_READ32(  dev_priv->mmio, (reg) )
++#define SAVAGE_WRITE(reg)     DRM_WRITE32( dev_priv->mmio, (reg) )
++
++/*
++ * access to the burst command interface (BCI)
++ */
++#define SAVAGE_BCI_DEBUG 1
++
++#define BCI_LOCALS    volatile uint32_t *bci_ptr;
++
++#define BEGIN_BCI( n ) do {                   \
++      dev_priv->wait_fifo(dev_priv, (n));     \
++      bci_ptr = dev_priv->bci_ptr;            \
++} while(0)
++
++#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
++
++/*
++ * command DMA support
++ */
++#define SAVAGE_DMA_DEBUG 1
++
++#define DMA_LOCALS   uint32_t *dma_ptr;
++
++#define BEGIN_DMA( n ) do {                                           \
++      unsigned int cur = dev_priv->current_dma_page;                  \
++      unsigned int rest = SAVAGE_DMA_PAGE_SIZE -                      \
++              dev_priv->dma_pages[cur].used;                          \
++      if ((n) > rest) {                                               \
++              dma_ptr = savage_dma_alloc(dev_priv, (n));              \
++      } else { /* fast path for small allocations */                  \
++              dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +       \
++                      cur * SAVAGE_DMA_PAGE_SIZE +                    \
++                      dev_priv->dma_pages[cur].used;                  \
++              if (dev_priv->dma_pages[cur].used == 0)                 \
++                      savage_dma_wait(dev_priv, cur);                 \
++              dev_priv->dma_pages[cur].used += (n);                   \
++      }                                                               \
++} while(0)
++
++#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
++
++#define DMA_COPY(src, n) do {                                 \
++      memcpy(dma_ptr, (src), (n)*4);                          \
++      dma_ptr += n;                                           \
++} while(0)
++
++#if SAVAGE_DMA_DEBUG
++#define DMA_COMMIT() do {                                             \
++      unsigned int cur = dev_priv->current_dma_page;                  \
++      uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle +    \
++                      cur * SAVAGE_DMA_PAGE_SIZE +                    \
++                      dev_priv->dma_pages[cur].used;                  \
++      if (dma_ptr != expected) {                                      \
++              DRM_ERROR("DMA allocation and use don't match: "        \
++                        "%p != %p\n", expected, dma_ptr);             \
++              savage_dma_reset(dev_priv);                             \
++      }                                                               \
++} while(0)
++#else
++#define DMA_COMMIT() do {/* nothing */} while(0)
++#endif
++
++#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
++
++/* Buffer aging via event tag
++ */
++
++#define UPDATE_EVENT_COUNTER( ) do {                  \
++      if (dev_priv->status_ptr) {                     \
++              uint16_t count;                         \
++              /* coordinate with Xserver */           \
++              count = dev_priv->status_ptr[1023];     \
++              if (count < dev_priv->event_counter)    \
++                      dev_priv->event_wrap++;         \
++              dev_priv->event_counter = count;        \
++      }                                               \
++} while(0)
++
++#define SET_AGE( age, e, w ) do {     \
++      (age)->event = e;               \
++      (age)->wrap = w;                \
++} while(0)
++
++#define TEST_AGE( age, e, w )                         \
++      ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
++
++#endif /* __SAVAGE_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_state.c git-nokia/drivers/gpu/drm-tungsten/savage_state.c
+--- git/drivers/gpu/drm-tungsten/savage_state.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_state.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1165 @@
++/* savage_state.c -- State and drawing support for Savage
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "savage_drm.h"
++#include "savage_drv.h"
++
++void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
++                             const struct drm_clip_rect *pbox)
++{
++      uint32_t scstart = dev_priv->state.s3d.new_scstart;
++      uint32_t scend = dev_priv->state.s3d.new_scend;
++      scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
++              ((uint32_t)pbox->x1 & 0x000007ff) |
++              (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
++      scend   = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
++              (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
++              ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);
++      if (scstart != dev_priv->state.s3d.scstart ||
++          scend   != dev_priv->state.s3d.scend) {
++              DMA_LOCALS;
++              BEGIN_DMA(4);
++              DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
++              DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
++              DMA_WRITE(scstart);
++              DMA_WRITE(scend);
++              dev_priv->state.s3d.scstart = scstart;
++              dev_priv->state.s3d.scend = scend;
++              dev_priv->waiting = 1;
++              DMA_COMMIT();
++      }
++}
++
++void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
++                            const struct drm_clip_rect *pbox)
++{
++      uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
++      uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
++      drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
++              ((uint32_t)pbox->x1 & 0x000007ff) |
++              (((uint32_t)pbox->y1 << 12) & 0x00fff000);
++      drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
++              (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
++              ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);
++      if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
++          drawctrl1 != dev_priv->state.s4.drawctrl1) {
++              DMA_LOCALS;
++              BEGIN_DMA(4);
++              DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
++              DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
++              DMA_WRITE(drawctrl0);
++              DMA_WRITE(drawctrl1);
++              dev_priv->state.s4.drawctrl0 = drawctrl0;
++              dev_priv->state.s4.drawctrl1 = drawctrl1;
++              dev_priv->waiting = 1;
++              DMA_COMMIT();
++      }
++}
++
++static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
++                               uint32_t addr)
++{
++      if ((addr & 6) != 2) { /* reserved bits */
++              DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
++              return -EINVAL;
++      }
++      if (!(addr & 1)) { /* local */
++              addr &= ~7;
++              if (addr < dev_priv->texture_offset ||
++                  addr >= dev_priv->texture_offset + dev_priv->texture_size) {
++                      DRM_ERROR
++                          ("bad texAddr%d %08x (local addr out of range)\n",
++                           unit, addr);
++                      return -EINVAL;
++              }
++      } else { /* AGP */
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
++                                unit, addr);
++                      return -EINVAL;
++              }
++              addr &= ~7;
++              if (addr < dev_priv->agp_textures->offset ||
++                  addr >= (dev_priv->agp_textures->offset +
++                           dev_priv->agp_textures->size)) {
++                      DRM_ERROR
++                          ("bad texAddr%d %08x (AGP addr out of range)\n",
++                           unit, addr);
++                      return -EINVAL;
++              }
++      }
++      return 0;
++}
++
++#define SAVE_STATE(reg,where)                 \
++      if(start <= reg && start + count > reg) \
++              dev_priv->state.where = regs[reg - start]
++#define SAVE_STATE_MASK(reg,where,mask) do {                  \
++      if(start <= reg && start + count > reg) {                       \
++              uint32_t tmp;                                   \
++              tmp = regs[reg - start];                        \
++              dev_priv->state.where = (tmp & (mask)) |        \
++                      (dev_priv->state.where & ~(mask));      \
++      }                                                       \
++} while (0)
++static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
++                                 unsigned int start, unsigned int count,
++                                 const uint32_t *regs)
++{
++      if (start < SAVAGE_TEXPALADDR_S3D ||
++          start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
++              DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
++                        start, start + count - 1);
++              return -EINVAL;
++      }
++
++      SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
++                      ~SAVAGE_SCISSOR_MASK_S3D);
++      SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
++                      ~SAVAGE_SCISSOR_MASK_S3D);
++
++      /* if any texture regs were changed ... */
++      if (start <= SAVAGE_TEXCTRL_S3D &&
++          start + count > SAVAGE_TEXPALADDR_S3D) {
++              /* ... check texture state */
++              SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
++              SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
++              if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
++                      return savage_verify_texaddr(dev_priv, 0,
++                                              dev_priv->state.s3d.texaddr);
++      }
++
++      return 0;
++}
++
++static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
++                                unsigned int start, unsigned int count,
++                                const uint32_t *regs)
++{
++      int ret = 0;
++
++      if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
++          start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
++              DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
++                        start, start + count - 1);
++              return -EINVAL;
++      }
++
++      SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
++                      ~SAVAGE_SCISSOR_MASK_S4);
++      SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
++                      ~SAVAGE_SCISSOR_MASK_S4);
++
++      /* if any texture regs were changed ... */
++      if (start <= SAVAGE_TEXDESCR_S4 &&
++          start + count > SAVAGE_TEXPALADDR_S4) {
++              /* ... check texture state */
++              SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
++              SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
++              SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
++              if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
++                      ret |= savage_verify_texaddr(dev_priv, 0,
++                                              dev_priv->state.s4.texaddr0);
++              if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
++                      ret |= savage_verify_texaddr(dev_priv, 1,
++                                              dev_priv->state.s4.texaddr1);
++      }
++
++      return ret;
++}
++#undef SAVE_STATE
++#undef SAVE_STATE_MASK
++
++static int savage_dispatch_state(drm_savage_private_t *dev_priv,
++                               const drm_savage_cmd_header_t *cmd_header,
++                               const uint32_t *regs)
++{
++      unsigned int count = cmd_header->state.count;
++      unsigned int start = cmd_header->state.start;
++      unsigned int count2 = 0;
++      unsigned int bci_size;
++      int ret;
++      DMA_LOCALS;
++
++      if (!count)
++              return 0;
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              ret = savage_verify_state_s3d(dev_priv, start, count, regs);
++              if (ret != 0)
++                      return ret;
++              /* scissor regs are emitted in savage_dispatch_draw */
++              if (start < SAVAGE_SCSTART_S3D) {
++                      if (start + count > SAVAGE_SCEND_S3D + 1)
++                              count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
++                      if (start + count > SAVAGE_SCSTART_S3D)
++                              count = SAVAGE_SCSTART_S3D - start;
++              } else if (start <= SAVAGE_SCEND_S3D) {
++                      if (start + count > SAVAGE_SCEND_S3D + 1) {
++                              count -= SAVAGE_SCEND_S3D + 1 - start;
++                              start = SAVAGE_SCEND_S3D + 1;
++                      } else
++                              return 0;
++              }
++      } else {
++              ret = savage_verify_state_s4(dev_priv, start, count, regs);
++              if (ret != 0)
++                      return ret;
++              /* scissor regs are emitted in savage_dispatch_draw */
++              if (start < SAVAGE_DRAWCTRL0_S4) {
++                      if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
++                              count2 = count -
++                                       (SAVAGE_DRAWCTRL1_S4 + 1 - start);
++                      if (start + count > SAVAGE_DRAWCTRL0_S4)
++                              count = SAVAGE_DRAWCTRL0_S4 - start;
++              } else if (start <= SAVAGE_DRAWCTRL1_S4) {
++                      if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
++                              count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
++                              start = SAVAGE_DRAWCTRL1_S4 + 1;
++                      } else
++                              return 0;
++              }
++      }
++
++      bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
++
++      if (cmd_header->state.global) {
++              BEGIN_DMA(bci_size + 1);
++              DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
++              dev_priv->waiting = 1;
++      } else {
++              BEGIN_DMA(bci_size);
++      }
++
++      do {
++              while (count > 0) {
++                      unsigned int n = count < 255 ? count : 255;
++                      DMA_SET_REGISTERS(start, n);
++                      DMA_COPY(regs, n);
++                      count -= n;
++                      start += n;
++                      regs += n;
++              }
++              start += 2;
++              regs += 2;
++              count = count2;
++              count2 = 0;
++      } while (count);
++
++      DMA_COMMIT();
++
++      return 0;
++}
++
++static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
++                                  const drm_savage_cmd_header_t *cmd_header,
++                                  const struct drm_buf *dmabuf)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->prim.prim;
++      unsigned int skip = cmd_header->prim.skip;
++      unsigned int n = cmd_header->prim.count;
++      unsigned int start = cmd_header->prim.start;
++      unsigned int i;
++      BCI_LOCALS;
++
++      if (!dmabuf) {
++              DRM_ERROR("called without dma buffers!\n");
++              return -EINVAL;
++      }
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of vertices %u in TRILIST\n",
++                                n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                         ("wrong number of vertices %u in TRIFAN/STRIP\n",
++                          n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip != 0) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++      } else {
++              unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
++                      (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
++                      (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
++              if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++              if (reorder) {
++                      DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
++                      return -EINVAL;
++              }
++      }
++
++      if (start + n > dmabuf->total / 32) {
++              DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
++                        start, start + n - 1, dmabuf->total / 32);
++              return -EINVAL;
++      }
++
++      /* Vertex DMA doesn't work with command DMA at the same time,
++       * so we use BCI_... to submit commands here. Flush buffered
++       * faked DMA first. */
++      DMA_FLUSH();
++
++      if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
++              BEGIN_BCI(2);
++              BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
++              BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
++              dev_priv->state.common.vbaddr = dmabuf->bus_address;
++      }
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
++              /* Workaround for what looks like a hardware bug. If a
++               * WAIT_3D_IDLE was emitted some time before the
++               * indexed drawing command then the engine will lock
++               * up. There are two known workarounds:
++               * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
++              BEGIN_BCI(63);
++              for (i = 0; i < 63; ++i)
++                      BCI_WRITE(BCI_CMD_WAIT);
++              dev_priv->waiting = 0;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 indices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++              if (reorder) {
++                      /* Need to reorder indices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { -1, -1, -1 };
++                      reorder[start % 3] = 2;
++
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, start + 2);
++
++                      for (i = start + 1; i + 1 < start + count; i += 2)
++                              BCI_WRITE((i + reorder[i % 3]) |
++                                        ((i + 1 +
++                                          reorder[(i + 1) % 3]) << 16));
++                      if (i < start + count)
++                              BCI_WRITE(i + reorder[i % 3]);
++              } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, start);
++
++                      for (i = start + 1; i + 1 < start + count; i += 2)
++                              BCI_WRITE(i | ((i + 1) << 16));
++                      if (i < start + count)
++                              BCI_WRITE(i);
++              } else {
++                      BEGIN_BCI((count + 2 + 1) / 2);
++                      BCI_DRAW_INDICES_S4(count, prim, skip);
++
++                      for (i = start; i + 1 < start + count; i += 2)
++                              BCI_WRITE(i | ((i + 1) << 16));
++                      if (i < start + count)
++                              BCI_WRITE(i);
++              }
++
++              start += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
++                                 const drm_savage_cmd_header_t *cmd_header,
++                                 const uint32_t *vtxbuf, unsigned int vb_size,
++                                 unsigned int vb_stride)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->prim.prim;
++      unsigned int skip = cmd_header->prim.skip;
++      unsigned int n = cmd_header->prim.count;
++      unsigned int start = cmd_header->prim.start;
++      unsigned int vtx_size;
++      unsigned int i;
++      DMA_LOCALS;
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of vertices %u in TRILIST\n",
++                                n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                          ("wrong number of vertices %u in TRIFAN/STRIP\n",
++                           n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip > SAVAGE_SKIP_ALL_S3D) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 8; /* full vertex */
++      } else {
++              if (skip > SAVAGE_SKIP_ALL_S4) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 10; /* full vertex */
++      }
++
++      vtx_size -= (skip & 1) + (skip >> 1 & 1) +
++              (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
++              (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
++
++      if (vtx_size > vb_stride) {
++              DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
++                        vtx_size, vb_stride);
++              return -EINVAL;
++      }
++
++      if (start + n > vb_size / (vb_stride * 4)) {
++              DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
++                        start, start + n - 1, vb_size / (vb_stride * 4));
++              return -EINVAL;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 vertices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++              if (reorder) {
++                      /* Need to reorder vertices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { -1, -1, -1 };
++                      reorder[start % 3] = 2;
++
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      for (i = start; i < start + count; ++i) {
++                              unsigned int j = i + reorder[i % 3];
++                              DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
++                      }
++
++                      DMA_COMMIT();
++              } else {
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      if (vb_stride == vtx_size) {
++                              DMA_COPY(&vtxbuf[vb_stride * start],
++                                       vtx_size * count);
++                      } else {
++                              for (i = start; i < start + count; ++i) {
++                                      DMA_COPY(&vtxbuf[vb_stride * i],
++                                               vtx_size);
++                              }
++                      }
++
++                      DMA_COMMIT();
++              }
++
++              start += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
++                                 const drm_savage_cmd_header_t *cmd_header,
++                                 const uint16_t *idx,
++                                 const struct drm_buf *dmabuf)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->idx.prim;
++      unsigned int skip = cmd_header->idx.skip;
++      unsigned int n = cmd_header->idx.count;
++      unsigned int i;
++      BCI_LOCALS;
++
++      if (!dmabuf) {
++              DRM_ERROR("called without dma buffers!\n");
++              return -EINVAL;
++      }
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                          ("wrong number of indices %u in TRIFAN/STRIP\n", n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip != 0) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++      } else {
++              unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
++                      (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
++                      (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
++              if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++              if (reorder) {
++                      DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
++                      return -EINVAL;
++              }
++      }
++
++      /* Vertex DMA doesn't work with command DMA at the same time,
++       * so we use BCI_... to submit commands here. Flush buffered
++       * faked DMA first. */
++      DMA_FLUSH();
++
++      if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
++              BEGIN_BCI(2);
++              BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
++              BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
++              dev_priv->state.common.vbaddr = dmabuf->bus_address;
++      }
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
++              /* Workaround for what looks like a hardware bug. If a
++               * WAIT_3D_IDLE was emitted some time before the
++               * indexed drawing command then the engine will lock
++               * up. There are two known workarounds:
++               * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
++              BEGIN_BCI(63);
++              for (i = 0; i < 63; ++i)
++                      BCI_WRITE(BCI_CMD_WAIT);
++              dev_priv->waiting = 0;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 indices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++
++              /* check indices */
++              for (i = 0; i < count; ++i) {
++                      if (idx[i] > dmabuf->total / 32) {
++                              DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
++                                        i, idx[i], dmabuf->total / 32);
++                              return -EINVAL;
++                      }
++              }
++
++              if (reorder) {
++                      /* Need to reorder indices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { 2, -1, -1 };
++
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
++
++                      for (i = 1; i + 1 < count; i += 2)
++                              BCI_WRITE(idx[i + reorder[i % 3]] |
++                                        (idx[i + 1 +
++                                         reorder[(i + 1) % 3]] << 16));
++                      if (i < count)
++                              BCI_WRITE(idx[i + reorder[i % 3]]);
++              } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
++
++                      for (i = 1; i + 1 < count; i += 2)
++                              BCI_WRITE(idx[i] | (idx[i + 1] << 16));
++                      if (i < count)
++                              BCI_WRITE(idx[i]);
++              } else {
++                      BEGIN_BCI((count + 2 + 1) / 2);
++                      BCI_DRAW_INDICES_S4(count, prim, skip);
++
++                      for (i = 0; i + 1 < count; i += 2)
++                              BCI_WRITE(idx[i] | (idx[i + 1] << 16));
++                      if (i < count)
++                              BCI_WRITE(idx[i]);
++              }
++
++              idx += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
++                                const drm_savage_cmd_header_t *cmd_header,
++                                const uint16_t *idx,
++                                const uint32_t *vtxbuf,
++                                unsigned int vb_size, unsigned int vb_stride)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->idx.prim;
++      unsigned int skip = cmd_header->idx.skip;
++      unsigned int n = cmd_header->idx.count;
++      unsigned int vtx_size;
++      unsigned int i;
++      DMA_LOCALS;
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                          ("wrong number of indices %u in TRIFAN/STRIP\n", n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip > SAVAGE_SKIP_ALL_S3D) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 8; /* full vertex */
++      } else {
++              if (skip > SAVAGE_SKIP_ALL_S4) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 10; /* full vertex */
++      }
++
++      vtx_size -= (skip & 1) + (skip >> 1 & 1) +
++              (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
++              (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
++
++      if (vtx_size > vb_stride) {
++              DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
++                        vtx_size, vb_stride);
++              return -EINVAL;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 vertices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++
++              /* Check indices */
++              for (i = 0; i < count; ++i) {
++                      if (idx[i] > vb_size / (vb_stride * 4)) {
++                              DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
++                                        i, idx[i],  vb_size / (vb_stride * 4));
++                              return -EINVAL;
++                      }
++              }
++
++              if (reorder) {
++                      /* Need to reorder vertices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { 2, -1, -1 };
++
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      for (i = 0; i < count; ++i) {
++                              unsigned int j = idx[i + reorder[i % 3]];
++                              DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
++                      }
++
++                      DMA_COMMIT();
++              } else {
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      for (i = 0; i < count; ++i) {
++                              unsigned int j = idx[i];
++                              DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
++                      }
++
++                      DMA_COMMIT();
++              }
++
++              idx += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
++                               const drm_savage_cmd_header_t *cmd_header,
++                               const drm_savage_cmd_header_t *data,
++                               unsigned int nbox,
++                               const struct drm_clip_rect *boxes)
++{
++      unsigned int flags = cmd_header->clear0.flags;
++      unsigned int clear_cmd;
++      unsigned int i, nbufs;
++      DMA_LOCALS;
++
++      if (nbox == 0)
++              return 0;
++
++      clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
++              BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
++      BCI_CMD_SET_ROP(clear_cmd,0xCC);
++
++      nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
++          ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
++      if (nbufs == 0)
++              return 0;
++
++      if (data->clear1.mask != 0xffffffff) {
++              /* set mask */
++              BEGIN_DMA(2);
++              DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
++              DMA_WRITE(data->clear1.mask);
++              DMA_COMMIT();
++      }
++      for (i = 0; i < nbox; ++i) {
++              unsigned int x, y, w, h;
++              unsigned int buf;
++
++              x = boxes[i].x1, y = boxes[i].y1;
++              w = boxes[i].x2 - boxes[i].x1;
++              h = boxes[i].y2 - boxes[i].y1;
++              BEGIN_DMA(nbufs * 6);
++              for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
++                      if (!(flags & buf))
++                              continue;
++                      DMA_WRITE(clear_cmd);
++                      switch (buf) {
++                      case SAVAGE_FRONT:
++                              DMA_WRITE(dev_priv->front_offset);
++                              DMA_WRITE(dev_priv->front_bd);
++                              break;
++                      case SAVAGE_BACK:
++                              DMA_WRITE(dev_priv->back_offset);
++                              DMA_WRITE(dev_priv->back_bd);
++                              break;
++                      case SAVAGE_DEPTH:
++                              DMA_WRITE(dev_priv->depth_offset);
++                              DMA_WRITE(dev_priv->depth_bd);
++                              break;
++                      }
++                      DMA_WRITE(data->clear1.value);
++                      DMA_WRITE(BCI_X_Y(x, y));
++                      DMA_WRITE(BCI_W_H(w, h));
++              }
++              DMA_COMMIT();
++      }
++      if (data->clear1.mask != 0xffffffff) {
++              /* reset mask */
++              BEGIN_DMA(2);
++              DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
++              DMA_WRITE(0xffffffff);
++              DMA_COMMIT();
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
++                              unsigned int nbox, const struct drm_clip_rect *boxes)
++{
++      unsigned int swap_cmd;
++      unsigned int i;
++      DMA_LOCALS;
++
++      if (nbox == 0)
++              return 0;
++
++      swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
++              BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
++      BCI_CMD_SET_ROP(swap_cmd,0xCC);
++
++      for (i = 0; i < nbox; ++i) {
++              BEGIN_DMA(6);
++              DMA_WRITE(swap_cmd);
++              DMA_WRITE(dev_priv->back_offset);
++              DMA_WRITE(dev_priv->back_bd);
++              DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
++              DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
++              DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
++                                boxes[i].y2 - boxes[i].y1));
++              DMA_COMMIT();
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
++                              const drm_savage_cmd_header_t *start,
++                              const drm_savage_cmd_header_t *end,
++                              const struct drm_buf *dmabuf,
++                              const unsigned int *vtxbuf,
++                              unsigned int vb_size, unsigned int vb_stride,
++                              unsigned int nbox,
++                              const struct drm_clip_rect *boxes)
++{
++      unsigned int i, j;
++      int ret;
++
++      for (i = 0; i < nbox; ++i) {
++              const drm_savage_cmd_header_t *cmdbuf;
++              dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
++
++              cmdbuf = start;
++              while (cmdbuf < end) {
++                      drm_savage_cmd_header_t cmd_header;
++                      cmd_header = *cmdbuf;
++                      cmdbuf++;
++                      switch (cmd_header.cmd.cmd) {
++                      case SAVAGE_CMD_DMA_PRIM:
++                              ret = savage_dispatch_dma_prim(
++                                      dev_priv, &cmd_header, dmabuf);
++                              break;
++                      case SAVAGE_CMD_VB_PRIM:
++                              ret = savage_dispatch_vb_prim(
++                                      dev_priv, &cmd_header,
++                                      vtxbuf, vb_size, vb_stride);
++                              break;
++                      case SAVAGE_CMD_DMA_IDX:
++                              j = (cmd_header.idx.count + 3) / 4;
++                      /* j was checked in savage_bci_cmdbuf */
++                              ret = savage_dispatch_dma_idx(dev_priv,
++                                      &cmd_header, (const uint16_t *)cmdbuf,
++                                      dmabuf);
++                              cmdbuf += j;
++                              break;
++                      case SAVAGE_CMD_VB_IDX:
++                              j = (cmd_header.idx.count + 3) / 4;
++                      /* j was checked in savage_bci_cmdbuf */
++                              ret = savage_dispatch_vb_idx(dev_priv,
++                                      &cmd_header, (const uint16_t *)cmdbuf,
++                                      (const uint32_t *)vtxbuf, vb_size,
++                                      vb_stride);
++                              cmdbuf += j;
++                              break;
++                      default:
++                              /* What's the best return code? EFAULT? */
++                              DRM_ERROR("IMPLEMENTATION ERROR: "
++                                        "non-drawing-command %d\n",
++                                        cmd_header.cmd.cmd);
++                              return -EINVAL;
++                      }
++
++                      if (ret != 0)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++
++int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *dmabuf;
++      drm_savage_cmdbuf_t *cmdbuf = data;
++      drm_savage_cmd_header_t *kcmd_addr = NULL;
++      drm_savage_cmd_header_t *first_draw_cmd;
++      unsigned int *kvb_addr = NULL;
++      struct drm_clip_rect *kbox_addr = NULL;
++      unsigned int i, j;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dma && dma->buflist) {
++              if (cmdbuf->dma_idx >= dma->buf_count) { /* '>' let dma_idx == buf_count index past buflist */
++                      DRM_ERROR
++                          ("vertex buffer index %u out of range (0-%u)\n",
++                           cmdbuf->dma_idx, dma->buf_count - 1);
++                      return -EINVAL;
++              }
++              dmabuf = dma->buflist[cmdbuf->dma_idx];
++      } else {
++              dmabuf = NULL;
++      }
++
++      /* Copy the user buffers into kernel temporary areas.  This hasn't been
++       * a performance loss compared to VERIFYAREA_READ/
++       * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
++       * for locking on FreeBSD.
++       */
++      if (cmdbuf->size) {
++              kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
++              if (kcmd_addr == NULL)
++                      return -ENOMEM;
++
++              if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
++                                     cmdbuf->size * 8))
++              {
++                      drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
++                      return -EFAULT;
++              }
++              cmdbuf->cmd_addr = kcmd_addr;
++      }
++      if (cmdbuf->vb_size) {
++              kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
++              if (kvb_addr == NULL) {
++                      ret = -ENOMEM;
++                      goto done;
++              }
++
++              if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
++                                     cmdbuf->vb_size)) {
++                      ret = -EFAULT;
++                      goto done;
++              }
++              cmdbuf->vb_addr = kvb_addr;
++      }
++      if (cmdbuf->nbox) {
++              kbox_addr = drm_alloc(cmdbuf->nbox *
++                                    sizeof(struct drm_clip_rect),
++                                    DRM_MEM_DRIVER);
++              if (kbox_addr == NULL) {
++                      ret = -ENOMEM;
++                      goto done;
++              }
++
++              if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
++                                     cmdbuf->nbox *
++                                     sizeof(struct drm_clip_rect))) {
++                      ret = -EFAULT;
++                      goto done;
++              }
++              cmdbuf->box_addr = kbox_addr;
++      }
++
++      /* Make sure writes to DMA buffers are finished before sending
++       * DMA commands to the graphics hardware. */
++      DRM_MEMORYBARRIER();
++
++      /* Coming from user space. Don't know if the Xserver has
++       * emitted wait commands. Assuming the worst. */
++      dev_priv->waiting = 1;
++
++      i = 0;
++      first_draw_cmd = NULL;
++      while (i < cmdbuf->size) {
++              drm_savage_cmd_header_t cmd_header;
++              cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
++              cmdbuf->cmd_addr++;
++              i++;
++
++              /* Group drawing commands with same state to minimize
++               * iterations over clip rects. */
++              j = 0;
++              switch (cmd_header.cmd.cmd) {
++              case SAVAGE_CMD_DMA_IDX:
++              case SAVAGE_CMD_VB_IDX:
++                      j = (cmd_header.idx.count + 3) / 4;
++                      if (i + j > cmdbuf->size) {
++                              DRM_ERROR("indexed drawing command extends "
++                                        "beyond end of command buffer\n");
++                              DMA_FLUSH();
++                              ret = -EINVAL; goto done; /* was 'return -EINVAL;': leaked kcmd/kvb/kbox copies */
++                      }
++                      /* fall through */
++              case SAVAGE_CMD_DMA_PRIM:
++              case SAVAGE_CMD_VB_PRIM:
++                      if (!first_draw_cmd)
++                              first_draw_cmd = cmdbuf->cmd_addr - 1;
++                      cmdbuf->cmd_addr += j;
++                      i += j;
++                      break;
++              default:
++                      if (first_draw_cmd) {
++                              ret = savage_dispatch_draw(
++                                      dev_priv, first_draw_cmd,
++                                      cmdbuf->cmd_addr - 1,
++                                      dmabuf, cmdbuf->vb_addr,
++                                      cmdbuf->vb_size,
++                                      cmdbuf->vb_stride,
++                                      cmdbuf->nbox, cmdbuf->box_addr);
++                              if (ret != 0)
++                                      { DMA_FLUSH(); goto done; } /* was 'return ret;': leaked kernel copies, skipped flush */
++                              first_draw_cmd = NULL;
++                      }
++              }
++              if (first_draw_cmd)
++                      continue;
++
++              switch (cmd_header.cmd.cmd) {
++              case SAVAGE_CMD_STATE:
++                      j = (cmd_header.state.count + 1) / 2;
++                      if (i + j > cmdbuf->size) {
++                              DRM_ERROR("command SAVAGE_CMD_STATE extends "
++                                        "beyond end of command buffer\n");
++                              DMA_FLUSH();
++                              ret = -EINVAL;
++                              goto done;
++                      }
++                      ret = savage_dispatch_state(dev_priv, &cmd_header,
++                              (const uint32_t *)cmdbuf->cmd_addr);
++                      cmdbuf->cmd_addr += j;
++                      i += j;
++                      break;
++              case SAVAGE_CMD_CLEAR:
++                      if (i + 1 > cmdbuf->size) {
++                              DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
++                                        "beyond end of command buffer\n");
++                              DMA_FLUSH();
++                              ret = -EINVAL;
++                              goto done;
++                      }
++                      ret = savage_dispatch_clear(dev_priv, &cmd_header,
++                                                  cmdbuf->cmd_addr,
++                                                  cmdbuf->nbox,
++                                                  cmdbuf->box_addr);
++                      cmdbuf->cmd_addr++;
++                      i++;
++                      break;
++              case SAVAGE_CMD_SWAP:
++                      ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
++                                                 cmdbuf->box_addr);
++                      break;
++              default:
++                      DRM_ERROR("invalid command 0x%x\n",
++                                cmd_header.cmd.cmd);
++                      DMA_FLUSH();
++                      ret = -EINVAL;
++                      goto done;
++              }
++
++              if (ret != 0) {
++                      DMA_FLUSH();
++                      goto done;
++              }
++      }
++
++      if (first_draw_cmd) {
++              ret = savage_dispatch_draw(
++                      dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
++                      cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
++                      cmdbuf->nbox, cmdbuf->box_addr);
++              if (ret != 0) {
++                      DMA_FLUSH();
++                      goto done;
++              }
++      }
++
++      DMA_FLUSH();
++
++      if (dmabuf && cmdbuf->discard) {
++              drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
++              uint16_t event;
++              event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
++              SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
++              savage_freelist_put(dev, dmabuf);
++      }
++
++done:
++      /* If we didn't need to allocate them, these'll be NULL */
++      drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
++      drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
++      drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
++               DRM_MEM_DRIVER);
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_drm.h git-nokia/drivers/gpu/drm-tungsten/sis_drm.h
+--- git/drivers/gpu/drm-tungsten/sis_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/* sis_drm.h -- Public header for the sis driver -*- linux-c -*- */
++/*
++ * Copyright 2005 Eric Anholt
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#ifndef __SIS_DRM_H__
++#define __SIS_DRM_H__
++
++/* SiS specific ioctls */
++#define NOT_USED_0_3
++#define DRM_SIS_FB_ALLOC      0x04
++#define DRM_SIS_FB_FREE               0x05
++#define NOT_USED_6_12
++#define DRM_SIS_AGP_INIT      0x13
++#define DRM_SIS_AGP_ALLOC     0x14
++#define DRM_SIS_AGP_FREE      0x15
++#define DRM_SIS_FB_INIT               0x16
++
++#define DRM_IOCTL_SIS_FB_ALLOC                DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_FB_FREE         DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_AGP_INIT                DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
++#define DRM_IOCTL_SIS_AGP_ALLOC               DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_AGP_FREE                DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_FB_INIT         DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
++/*
++#define DRM_IOCTL_SIS_FLIP            DRM_IOW( 0x48, drm_sis_flip_t)
++#define DRM_IOCTL_SIS_FLIP_INIT               DRM_IO(  0x49)
++#define DRM_IOCTL_SIS_FLIP_FINAL      DRM_IO(  0x50)
++*/
++
++typedef struct {
++      int context;
++      unsigned int offset;
++      unsigned int size;
++      unsigned long free;
++} drm_sis_mem_t;
++
++typedef struct {
++      unsigned int offset, size;
++} drm_sis_agp_t;
++
++typedef struct {
++      unsigned int offset, size;
++} drm_sis_fb_t;
++
++#endif                                /* __SIS_DRM_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_drv.c git-nokia/drivers/gpu/drm-tungsten/sis_drv.c
+--- git/drivers/gpu/drm-tungsten/sis_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,127 @@
++/* sis.c -- sis driver -*- linux-c -*-
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "sis_drm.h"
++#include "sis_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      sis_PCI_IDS
++};
++
++
++static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++      drm_sis_private_t *dev_priv;
++      int ret;
++
++      dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      dev->dev_private = (void *)dev_priv;
++      dev_priv->chipset = chipset;
++      ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
++      if (ret) {
++              drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); /* was sizeof(dev_priv): pointer size, not struct size; unload uses sizeof(*dev_priv) */
++      }
++
++      return ret;
++}
++
++static int sis_driver_unload(struct drm_device *dev)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++
++      drm_sman_takedown(&dev_priv->sman);
++      drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++
++      return 0;
++}
++
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
++      .load = sis_driver_load,
++      .unload = sis_driver_unload,
++      .context_dtor = NULL,
++      .dma_quiescent = sis_idle,
++      .reclaim_buffers = NULL,
++      .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
++      .lastclose = sis_lastclose,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = sis_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init sis_init(void)
++{
++      driver.num_ioctls = sis_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit sis_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(sis_init);
++module_exit(sis_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_drv.h git-nokia/drivers/gpu/drm-tungsten/sis_drv.h
+--- git/drivers/gpu/drm-tungsten/sis_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,90 @@
++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _SIS_DRV_H_
++#define _SIS_DRV_H_
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "SIS, Tungsten Graphics"
++#define DRIVER_NAME           "sis"
++#define DRIVER_DESC           "SIS 300/630/540 and XGI V3XE/V5/V8"
++#define DRIVER_DATE           "20070626"
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          3
++#define DRIVER_PATCHLEVEL     0
++
++enum sis_family {
++      SIS_OTHER = 0,
++      SIS_CHIP_315 = 1,
++};
++
++#if defined(__linux__)
++#define SIS_HAVE_CORE_MM
++#endif
++
++#ifdef SIS_HAVE_CORE_MM
++#include "drm_sman.h"
++
++#define SIS_BASE (dev_priv->mmio)
++#define SIS_READ(reg)  DRM_READ32(SIS_BASE, reg);
++#define SIS_WRITE(reg, val)   DRM_WRITE32(SIS_BASE, reg, val);
++
++typedef struct drm_sis_private {
++      drm_local_map_t *mmio;
++      unsigned int idle_fault;
++      struct drm_sman sman;
++      unsigned int chipset;
++      int vram_initialized;
++      int agp_initialized;
++      unsigned long vram_offset;
++      unsigned long agp_offset;
++} drm_sis_private_t;
++
++extern int sis_idle(struct drm_device *dev);
++extern void sis_reclaim_buffers_locked(struct drm_device *dev,
++                                     struct drm_file *file_priv);
++extern void sis_lastclose(struct drm_device *dev);
++
++#else
++#include "sis_ds.h"
++
++typedef struct drm_sis_private {
++      memHeap_t *AGPHeap;
++      memHeap_t *FBHeap;
++} drm_sis_private_t;
++
++extern int sis_init_context(struct drm_device * dev, int context);
++extern int sis_final_context(struct drm_device * dev, int context);
++
++#endif
++
++extern struct drm_ioctl_desc sis_ioctls[];
++extern int sis_max_ioctl;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_mm.c git-nokia/drivers/gpu/drm-tungsten/sis_mm.c
+--- git/drivers/gpu/drm-tungsten/sis_mm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_mm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,332 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++
++/*
++ * Authors:
++ *    Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "sis_drm.h"
++#include "sis_drv.h"
++
++#if defined(__linux__)
++#include <video/sisfb.h>
++#endif
++
++#define VIDEO_TYPE 0
++#define AGP_TYPE 1
++
++#define SIS_MM_ALIGN_SHIFT 4
++#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
++
++#if defined(__linux__) && defined(CONFIG_FB_SIS)
++/* fb management via fb device */
++
++#define SIS_MM_ALIGN_SHIFT 0
++#define SIS_MM_ALIGN_MASK 0
++
++static void *sis_sman_mm_allocate(void *private, unsigned long size,
++                                unsigned alignment)
++{
++      struct sis_memreq req;
++
++      req.size = size;
++      sis_malloc(&req);
++      if (req.size == 0)
++              return NULL;
++      else
++              return (void *)~req.offset;
++}
++
++static void sis_sman_mm_free(void *private, void *ref)
++{
++      sis_free(~((unsigned long)ref));
++}
++
++static void sis_sman_mm_destroy(void *private)
++{
++      ;
++}
++
++static unsigned long sis_sman_mm_offset(void *private, void *ref)
++{
++      return ~((unsigned long)ref);
++}
++
++#endif
++
++static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_fb_t *fb = data;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++#if defined(__linux__) && defined(CONFIG_FB_SIS)
++      {
++              struct drm_sman_mm sman_mm;
++              sman_mm.private = (void *)0xFFFFFFFF;
++              sman_mm.allocate = sis_sman_mm_allocate;
++              sman_mm.free = sis_sman_mm_free;
++              sman_mm.destroy = sis_sman_mm_destroy;
++              sman_mm.offset = sis_sman_mm_offset;
++              ret =
++                  drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
++      }
++#else
++      ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
++                               fb->size >> SIS_MM_ALIGN_SHIFT);
++#endif
++
++      if (ret) {
++              DRM_ERROR("VRAM memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->vram_initialized = 1;
++      dev_priv->vram_offset = fb->offset;
++
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
++
++      return 0;
++}
++
++static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
++                       void *data, int pool)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_mem_t *mem = data;
++      int retval = 0;
++      struct drm_memblock_item *item;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (0 == ((pool == 0) ? dev_priv->vram_initialized :
++                    dev_priv->agp_initialized)) {
++              DRM_ERROR
++                  ("Attempt to allocate from uninitialized memory manager.\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
++      item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
++                            (unsigned long)file_priv);
++
++      mutex_unlock(&dev->struct_mutex);
++      if (item) {
++              mem->offset = ((pool == 0) ?
++                            dev_priv->vram_offset : dev_priv->agp_offset) +
++                  (item->mm->
++                   offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
++              mem->free = item->user_hash.key;
++              mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
++      } else {
++              mem->offset = 0;
++              mem->size = 0;
++              mem->free = 0;
++              retval = -ENOMEM;
++      }
++
++      DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
++                mem->offset);
++
++      return retval;
++}
++
++static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_mem_t *mem = data;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_free_key(&dev_priv->sman, mem->free);
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("free = 0x%lx\n", mem->free);
++
++      return ret;
++}
++
++static int sis_fb_alloc(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
++}
++
++static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_agp_t *agp = data;
++      int ret;
++      dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
++                               agp->size >> SIS_MM_ALIGN_SHIFT);
++
++      if (ret) {
++              DRM_ERROR("AGP memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->agp_initialized = 1;
++      dev_priv->agp_offset = agp->offset;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
++      return 0;
++}
++
++static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
++                             struct drm_file *file_priv)
++{
++
++      return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
++}
++
++static drm_local_map_t *sis_reg_init(struct drm_device *dev)
++{
++      struct drm_map_list *entry;
++      drm_local_map_t *map;
++
++      list_for_each_entry(entry, &dev->maplist, head) {
++              map = entry->map;
++              if (!map)
++                      continue;
++              if (map->type == _DRM_REGISTERS) {
++                      return map;
++              }
++      }
++      return NULL;
++}
++
++int sis_idle(struct drm_device *dev)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      uint32_t idle_reg;
++      unsigned long end;
++      int i;
++
++      if (dev_priv->idle_fault)
++              return 0;
++
++      if (dev_priv->mmio == NULL) {
++              dev_priv->mmio = sis_reg_init(dev);
++              if (dev_priv->mmio == NULL) {
++                      DRM_ERROR("Could not find register map.\n");
++                      return 0;
++              }
++      }
++
++      /*
++       * Implement a device switch here if needed
++       */
++
++      if (dev_priv->chipset != SIS_CHIP_315)
++              return 0;
++
++      /*
++       * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
++       * because its polling frequency is too low.
++       */
++
++      end = jiffies + (DRM_HZ * 3);
++
++      for (i=0; i<4; ++i) {
++              do {
++                      idle_reg = SIS_READ(0x85cc);
++              } while ( !time_after_eq(jiffies, end) &&
++                        ((idle_reg & 0x80000000) != 0x80000000));
++      }
++
++      if (time_after_eq(jiffies, end)) {
++              DRM_ERROR("Graphics engine idle timeout. "
++                        "Disabling idle check\n");
++              dev_priv->idle_fault = 1;
++      }
++
++      /*
++       * The caller never sees an error code. It gets trapped
++       * in libdrm.
++       */
++
++      return 0;
++}
++
++
++void sis_lastclose(struct drm_device *dev)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv)
++              return;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_sman_cleanup(&dev_priv->sman);
++      dev_priv->vram_initialized = 0;
++      dev_priv->agp_initialized = 0;
++      dev_priv->mmio = NULL;
++      mutex_unlock(&dev->struct_mutex);
++}
++
++void sis_reclaim_buffers_locked(struct drm_device * dev,
++                              struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++      if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
++              mutex_unlock(&dev->struct_mutex);
++              return;
++      }
++
++      if (dev->driver->dma_quiescent) {
++              dev->driver->dma_quiescent(dev);
++      }
++
++      drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
++      mutex_unlock(&dev->struct_mutex);
++      return;
++}
++
++struct drm_ioctl_desc sis_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
++};
++
++int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/tdfx_drv.c git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.c
+--- git/drivers/gpu/drm-tungsten/tdfx_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,93 @@
++/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
++ * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Daryll Strauss <daryll@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "tdfx_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      tdfx_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_MTRR,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init tdfx_init(void)
++{
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit tdfx_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(tdfx_init);
++module_exit(tdfx_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/tdfx_drv.h git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.h
+--- git/drivers/gpu/drm-tungsten/tdfx_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,47 @@
++/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __TDFX_H__
++#define __TDFX_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "VA Linux Systems Inc."
++
++#define DRIVER_NAME           "tdfx"
++#define DRIVER_DESC           "3dfx Banshee/Voodoo3+"
++#define DRIVER_DATE           "20010216"
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     0
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_3d_reg.h git-nokia/drivers/gpu/drm-tungsten/via_3d_reg.h
+--- git/drivers/gpu/drm-tungsten/via_3d_reg.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_3d_reg.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1650 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef VIA_3D_REG_H
++#define VIA_3D_REG_H
++#define HC_REG_BASE             0x0400
++
++#define HC_REG_TRANS_SPACE      0x0040
++
++#define HC_ParaN_MASK           0xffffffff
++#define HC_Para_MASK            0x00ffffff
++#define HC_SubA_MASK            0xff000000
++#define HC_SubA_SHIFT           24
++/* Transmission Setting
++ */
++#define HC_REG_TRANS_SET        0x003c
++#define HC_ParaSubType_MASK     0xff000000
++#define HC_ParaType_MASK        0x00ff0000
++#define HC_ParaOS_MASK          0x0000ff00
++#define HC_ParaAdr_MASK         0x000000ff
++#define HC_ParaSubType_SHIFT    24
++#define HC_ParaType_SHIFT       16
++#define HC_ParaOS_SHIFT         8
++#define HC_ParaAdr_SHIFT        0
++
++#define HC_ParaType_CmdVdata    0x0000
++#define HC_ParaType_NotTex      0x0001
++#define HC_ParaType_Tex         0x0002
++#define HC_ParaType_Palette     0x0003
++#define HC_ParaType_PreCR       0x0010
++#define HC_ParaType_Auto        0x00fe
++
++/* Transmission Space
++ */
++#define HC_REG_Hpara0           0x0040
++#define HC_REG_HpataAF          0x02fc
++
++/* Read
++ */
++#define HC_REG_HREngSt          0x0000
++#define HC_REG_HRFIFOempty      0x0004
++#define HC_REG_HRFIFOfull       0x0008
++#define HC_REG_HRErr            0x000c
++#define HC_REG_FIFOstatus       0x0010
++/* HC_REG_HREngSt          0x0000
++ */
++#define HC_HDASZC_MASK          0x00010000
++#define HC_HSGEMI_MASK          0x0000f000
++#define HC_HLGEMISt_MASK        0x00000f00
++#define HC_HCRSt_MASK           0x00000080
++#define HC_HSE0St_MASK          0x00000040
++#define HC_HSE1St_MASK          0x00000020
++#define HC_HPESt_MASK           0x00000010
++#define HC_HXESt_MASK           0x00000008
++#define HC_HBESt_MASK           0x00000004
++#define HC_HE2St_MASK           0x00000002
++#define HC_HE3St_MASK           0x00000001
++/* HC_REG_HRFIFOempty      0x0004
++ */
++#define HC_HRZDempty_MASK       0x00000010
++#define HC_HRTXAempty_MASK      0x00000008
++#define HC_HRTXDempty_MASK      0x00000004
++#define HC_HWZDempty_MASK       0x00000002
++#define HC_HWCDempty_MASK       0x00000001
++/* HC_REG_HRFIFOfull       0x0008
++ */
++#define HC_HRZDfull_MASK        0x00000010
++#define HC_HRTXAfull_MASK       0x00000008
++#define HC_HRTXDfull_MASK       0x00000004
++#define HC_HWZDfull_MASK        0x00000002
++#define HC_HWCDfull_MASK        0x00000001
++/* HC_REG_HRErr            0x000c
++ */
++#define HC_HAGPCMErr_MASK       0x80000000
++#define HC_HAGPCMErrC_MASK      0x70000000
++/* HC_REG_FIFOstatus       0x0010
++ */
++#define HC_HRFIFOATall_MASK     0x80000000
++#define HC_HRFIFOATbusy_MASK    0x40000000
++#define HC_HRATFGMDo_MASK       0x00000100
++#define HC_HRATFGMDi_MASK       0x00000080
++#define HC_HRATFRZD_MASK        0x00000040
++#define HC_HRATFRTXA_MASK       0x00000020
++#define HC_HRATFRTXD_MASK       0x00000010
++#define HC_HRATFWZD_MASK        0x00000008
++#define HC_HRATFWCD_MASK        0x00000004
++#define HC_HRATTXTAG_MASK       0x00000002
++#define HC_HRATTXCH_MASK        0x00000001
++
++/* AGP Command Setting
++ */
++#define HC_SubA_HAGPBstL        0x0060
++#define HC_SubA_HAGPBendL       0x0061
++#define HC_SubA_HAGPCMNT        0x0062
++#define HC_SubA_HAGPBpL         0x0063
++#define HC_SubA_HAGPBpH         0x0064
++/* HC_SubA_HAGPCMNT        0x0062
++ */
++#define HC_HAGPCMNT_MASK        0x00800000
++#define HC_HCmdErrClr_MASK      0x00400000
++#define HC_HAGPBendH_MASK       0x0000ff00
++#define HC_HAGPBstH_MASK        0x000000ff
++#define HC_HAGPBendH_SHIFT      8
++#define HC_HAGPBstH_SHIFT       0
++/* HC_SubA_HAGPBpL         0x0063
++ */
++#define HC_HAGPBpL_MASK         0x00fffffc
++#define HC_HAGPBpID_MASK        0x00000003
++#define HC_HAGPBpID_PAUSE       0x00000000
++#define HC_HAGPBpID_JUMP        0x00000001
++#define HC_HAGPBpID_STOP        0x00000002
++/* HC_SubA_HAGPBpH         0x0064
++ */
++#define HC_HAGPBpH_MASK         0x00ffffff
++
++/* Miscellaneous Settings
++ */
++#define HC_SubA_HClipTB         0x0070
++#define HC_SubA_HClipLR         0x0071
++#define HC_SubA_HFPClipTL       0x0072
++#define HC_SubA_HFPClipBL       0x0073
++#define HC_SubA_HFPClipLL       0x0074
++#define HC_SubA_HFPClipRL       0x0075
++#define HC_SubA_HFPClipTBH      0x0076
++#define HC_SubA_HFPClipLRH      0x0077
++#define HC_SubA_HLP             0x0078
++#define HC_SubA_HLPRF           0x0079
++#define HC_SubA_HSolidCL        0x007a
++#define HC_SubA_HPixGC          0x007b
++#define HC_SubA_HSPXYOS         0x007c
++#define HC_SubA_HVertexCNT      0x007d
++
++#define HC_HClipT_MASK          0x00fff000
++#define HC_HClipT_SHIFT         12
++#define HC_HClipB_MASK          0x00000fff
++#define HC_HClipB_SHIFT         0
++#define HC_HClipL_MASK          0x00fff000
++#define HC_HClipL_SHIFT         12
++#define HC_HClipR_MASK          0x00000fff
++#define HC_HClipR_SHIFT         0
++#define HC_HFPClipBH_MASK       0x0000ff00
++#define HC_HFPClipBH_SHIFT      8
++#define HC_HFPClipTH_MASK       0x000000ff
++#define HC_HFPClipTH_SHIFT      0
++#define HC_HFPClipRH_MASK       0x0000ff00
++#define HC_HFPClipRH_SHIFT      8
++#define HC_HFPClipLH_MASK       0x000000ff
++#define HC_HFPClipLH_SHIFT      0
++#define HC_HSolidCH_MASK        0x000000ff
++#define HC_HPixGC_MASK          0x00800000
++#define HC_HSPXOS_MASK          0x00fff000
++#define HC_HSPXOS_SHIFT         12
++#define HC_HSPYOS_MASK          0x00000fff
++
++/* Command
++ * Command A
++ */
++#define HC_HCmdHeader_MASK      0xfe000000    /*0xffe00000 */
++#define HC_HE3Fire_MASK         0x00100000
++#define HC_HPMType_MASK         0x000f0000
++#define HC_HEFlag_MASK          0x0000e000
++#define HC_HShading_MASK        0x00001c00
++#define HC_HPMValidN_MASK       0x00000200
++#define HC_HPLEND_MASK          0x00000100
++#define HC_HVCycle_MASK         0x000000ff
++#define HC_HVCycle_Style_MASK   0x000000c0
++#define HC_HVCycle_ChgA_MASK    0x00000030
++#define HC_HVCycle_ChgB_MASK    0x0000000c
++#define HC_HVCycle_ChgC_MASK    0x00000003
++#define HC_HPMType_Point        0x00000000
++#define HC_HPMType_Line         0x00010000
++#define HC_HPMType_Tri          0x00020000
++#define HC_HPMType_TriWF        0x00040000
++#define HC_HEFlag_NoAA          0x00000000
++#define HC_HEFlag_ab            0x00008000
++#define HC_HEFlag_bc            0x00004000
++#define HC_HEFlag_ca            0x00002000
++#define HC_HShading_Solid       0x00000000
++#define HC_HShading_FlatA       0x00000400
++#define HC_HShading_FlatB       0x00000800
++#define HC_HShading_FlatC       0x00000c00
++#define HC_HShading_Gouraud     0x00001000
++#define HC_HVCycle_Full         0x00000000
++#define HC_HVCycle_AFP          0x00000040
++#define HC_HVCycle_One          0x000000c0
++#define HC_HVCycle_NewA         0x00000000
++#define HC_HVCycle_AA           0x00000010
++#define HC_HVCycle_AB           0x00000020
++#define HC_HVCycle_AC           0x00000030
++#define HC_HVCycle_NewB         0x00000000
++#define HC_HVCycle_BA           0x00000004
++#define HC_HVCycle_BB           0x00000008
++#define HC_HVCycle_BC           0x0000000c
++#define HC_HVCycle_NewC         0x00000000
++#define HC_HVCycle_CA           0x00000001
++#define HC_HVCycle_CB           0x00000002
++#define HC_HVCycle_CC           0x00000003
++
++/* Command B
++ */
++#define HC_HLPrst_MASK          0x00010000
++#define HC_HLLastP_MASK         0x00008000
++#define HC_HVPMSK_MASK          0x00007f80
++#define HC_HBFace_MASK          0x00000040
++#define HC_H2nd1VT_MASK         0x0000003f
++#define HC_HVPMSK_X             0x00004000
++#define HC_HVPMSK_Y             0x00002000
++#define HC_HVPMSK_Z             0x00001000
++#define HC_HVPMSK_W             0x00000800
++#define HC_HVPMSK_Cd            0x00000400
++#define HC_HVPMSK_Cs            0x00000200
++#define HC_HVPMSK_S             0x00000100
++#define HC_HVPMSK_T             0x00000080
++
++/* Enable Setting
++ */
++#define HC_SubA_HEnable         0x0000
++#define HC_HenTXEnvMap_MASK     0x00200000
++#define HC_HenVertexCNT_MASK    0x00100000
++#define HC_HenCPUDAZ_MASK       0x00080000
++#define HC_HenDASZWC_MASK       0x00040000
++#define HC_HenFBCull_MASK       0x00020000
++#define HC_HenCW_MASK           0x00010000
++#define HC_HenAA_MASK           0x00008000
++#define HC_HenST_MASK           0x00004000
++#define HC_HenZT_MASK           0x00002000
++#define HC_HenZW_MASK           0x00001000
++#define HC_HenAT_MASK           0x00000800
++#define HC_HenAW_MASK           0x00000400
++#define HC_HenSP_MASK           0x00000200
++#define HC_HenLP_MASK           0x00000100
++#define HC_HenTXCH_MASK         0x00000080
++#define HC_HenTXMP_MASK         0x00000040
++#define HC_HenTXPP_MASK         0x00000020
++#define HC_HenTXTR_MASK         0x00000010
++#define HC_HenCS_MASK           0x00000008
++#define HC_HenFOG_MASK          0x00000004
++#define HC_HenABL_MASK          0x00000002
++#define HC_HenDT_MASK           0x00000001
++
++/* Z Setting
++ */
++#define HC_SubA_HZWBBasL        0x0010
++#define HC_SubA_HZWBBasH        0x0011
++#define HC_SubA_HZWBType        0x0012
++#define HC_SubA_HZBiasL         0x0013
++#define HC_SubA_HZWBend         0x0014
++#define HC_SubA_HZWTMD          0x0015
++#define HC_SubA_HZWCDL          0x0016
++#define HC_SubA_HZWCTAGnum      0x0017
++#define HC_SubA_HZCYNum         0x0018
++#define HC_SubA_HZWCFire        0x0019
++/* HC_SubA_HZWBType
++ */
++#define HC_HZWBType_MASK        0x00800000
++#define HC_HZBiasedWB_MASK      0x00400000
++#define HC_HZONEasFF_MASK       0x00200000
++#define HC_HZOONEasFF_MASK      0x00100000
++#define HC_HZWBFM_MASK          0x00030000
++#define HC_HZWBLoc_MASK         0x0000c000
++#define HC_HZWBPit_MASK         0x00003fff
++#define HC_HZWBFM_16            0x00000000
++#define HC_HZWBFM_32            0x00020000
++#define HC_HZWBFM_24            0x00030000
++#define HC_HZWBLoc_Local        0x00000000
++#define HC_HZWBLoc_SyS          0x00004000
++/* HC_SubA_HZWBend
++ */
++#define HC_HZWBend_MASK         0x00ffe000
++#define HC_HZBiasH_MASK         0x000000ff
++#define HC_HZWBend_SHIFT        10
++/* HC_SubA_HZWTMD
++ */
++#define HC_HZWTMD_MASK          0x00070000
++#define HC_HEBEBias_MASK        0x00007f00
++#define HC_HZNF_MASK            0x000000ff
++#define HC_HZWTMD_NeverPass     0x00000000
++#define HC_HZWTMD_LT            0x00010000
++#define HC_HZWTMD_EQ            0x00020000
++#define HC_HZWTMD_LE            0x00030000
++#define HC_HZWTMD_GT            0x00040000
++#define HC_HZWTMD_NE            0x00050000
++#define HC_HZWTMD_GE            0x00060000
++#define HC_HZWTMD_AllPass       0x00070000
++#define HC_HEBEBias_SHIFT       8
++/* HC_SubA_HZWCDL          0x0016
++ */
++#define HC_HZWCDL_MASK          0x00ffffff
++/* HC_SubA_HZWCTAGnum      0x0017
++ */
++#define HC_HZWCTAGnum_MASK      0x00ff0000
++#define HC_HZWCTAGnum_SHIFT     16
++#define HC_HZWCDH_MASK          0x000000ff
++#define HC_HZWCDH_SHIFT         0
++/* HC_SubA_HZCYNum         0x0018
++ */
++#define HC_HZCYNum_MASK         0x00030000
++#define HC_HZCYNum_SHIFT        16
++#define HC_HZWCQWnum_MASK       0x00003fff
++#define HC_HZWCQWnum_SHIFT      0
++/* HC_SubA_HZWCFire        0x0019
++ */
++#define HC_ZWCFire_MASK         0x00010000
++#define HC_HZWCQWnumLast_MASK   0x00003fff
++#define HC_HZWCQWnumLast_SHIFT  0
++
++/* Stencil Setting
++ */
++#define HC_SubA_HSTREF          0x0023
++#define HC_SubA_HSTMD           0x0024
++/* HC_SubA_HSBFM
++ */
++#define HC_HSBFM_MASK           0x00030000
++#define HC_HSBLoc_MASK          0x0000c000
++#define HC_HSBPit_MASK          0x00003fff
++/* HC_SubA_HSTREF
++ */
++#define HC_HSTREF_MASK          0x00ff0000
++#define HC_HSTOPMSK_MASK        0x0000ff00
++#define HC_HSTBMSK_MASK         0x000000ff
++#define HC_HSTREF_SHIFT         16
++#define HC_HSTOPMSK_SHIFT       8
++/* HC_SubA_HSTMD
++ */
++#define HC_HSTMD_MASK           0x00070000
++#define HC_HSTOPSF_MASK         0x000001c0
++#define HC_HSTOPSPZF_MASK       0x00000038
++#define HC_HSTOPSPZP_MASK       0x00000007
++#define HC_HSTMD_NeverPass      0x00000000
++#define HC_HSTMD_LT             0x00010000
++#define HC_HSTMD_EQ             0x00020000
++#define HC_HSTMD_LE             0x00030000
++#define HC_HSTMD_GT             0x00040000
++#define HC_HSTMD_NE             0x00050000
++#define HC_HSTMD_GE             0x00060000
++#define HC_HSTMD_AllPass        0x00070000
++#define HC_HSTOPSF_KEEP         0x00000000
++#define HC_HSTOPSF_ZERO         0x00000040
++#define HC_HSTOPSF_REPLACE      0x00000080
++#define HC_HSTOPSF_INCRSAT      0x000000c0
++#define HC_HSTOPSF_DECRSAT      0x00000100
++#define HC_HSTOPSF_INVERT       0x00000140
++#define HC_HSTOPSF_INCR         0x00000180
++#define HC_HSTOPSF_DECR         0x000001c0
++#define HC_HSTOPSPZF_KEEP       0x00000000
++#define HC_HSTOPSPZF_ZERO       0x00000008
++#define HC_HSTOPSPZF_REPLACE    0x00000010
++#define HC_HSTOPSPZF_INCRSAT    0x00000018
++#define HC_HSTOPSPZF_DECRSAT    0x00000020
++#define HC_HSTOPSPZF_INVERT     0x00000028
++#define HC_HSTOPSPZF_INCR       0x00000030
++#define HC_HSTOPSPZF_DECR       0x00000038
++#define HC_HSTOPSPZP_KEEP       0x00000000
++#define HC_HSTOPSPZP_ZERO       0x00000001
++#define HC_HSTOPSPZP_REPLACE    0x00000002
++#define HC_HSTOPSPZP_INCRSAT    0x00000003
++#define HC_HSTOPSPZP_DECRSAT    0x00000004
++#define HC_HSTOPSPZP_INVERT     0x00000005
++#define HC_HSTOPSPZP_INCR       0x00000006
++#define HC_HSTOPSPZP_DECR       0x00000007
++
++/* Alpha Setting
++ */
++#define HC_SubA_HABBasL         0x0030
++#define HC_SubA_HABBasH         0x0031
++#define HC_SubA_HABFM           0x0032
++#define HC_SubA_HATMD           0x0033
++#define HC_SubA_HABLCsat        0x0034
++#define HC_SubA_HABLCop         0x0035
++#define HC_SubA_HABLAsat        0x0036
++#define HC_SubA_HABLAop         0x0037
++#define HC_SubA_HABLRCa         0x0038
++#define HC_SubA_HABLRFCa        0x0039
++#define HC_SubA_HABLRCbias      0x003a
++#define HC_SubA_HABLRCb         0x003b
++#define HC_SubA_HABLRFCb        0x003c
++#define HC_SubA_HABLRAa         0x003d
++#define HC_SubA_HABLRAb         0x003e
++/* HC_SubA_HABFM
++ */
++#define HC_HABFM_MASK           0x00030000
++#define HC_HABLoc_MASK          0x0000c000
++#define HC_HABPit_MASK          0x000007ff
++/* HC_SubA_HATMD
++ */
++#define HC_HATMD_MASK           0x00000700
++#define HC_HATREF_MASK          0x000000ff
++#define HC_HATMD_NeverPass      0x00000000
++#define HC_HATMD_LT             0x00000100
++#define HC_HATMD_EQ             0x00000200
++#define HC_HATMD_LE             0x00000300
++#define HC_HATMD_GT             0x00000400
++#define HC_HATMD_NE             0x00000500
++#define HC_HATMD_GE             0x00000600
++#define HC_HATMD_AllPass        0x00000700
++/* HC_SubA_HABLCsat
++ */
++#define HC_HABLCsat_MASK        0x00010000
++#define HC_HABLCa_MASK          0x0000fc00
++#define HC_HABLCa_C_MASK        0x0000c000
++#define HC_HABLCa_OPC_MASK      0x00003c00
++#define HC_HABLFCa_MASK         0x000003f0
++#define HC_HABLFCa_C_MASK       0x00000300
++#define HC_HABLFCa_OPC_MASK     0x000000f0
++#define HC_HABLCbias_MASK       0x0000000f
++#define HC_HABLCbias_C_MASK     0x00000008
++#define HC_HABLCbias_OPC_MASK   0x00000007
++/*-- Define the input color.
++ */
++#define HC_XC_Csrc              0x00000000
++#define HC_XC_Cdst              0x00000001
++#define HC_XC_Asrc              0x00000002
++#define HC_XC_Adst              0x00000003
++#define HC_XC_Fog               0x00000004
++#define HC_XC_HABLRC            0x00000005
++#define HC_XC_minSrcDst         0x00000006
++#define HC_XC_maxSrcDst         0x00000007
++#define HC_XC_mimAsrcInvAdst    0x00000008
++#define HC_XC_OPC               0x00000000
++#define HC_XC_InvOPC            0x00000010
++#define HC_XC_OPCp5             0x00000020
++/*-- Define the input Alpha
++ */
++#define HC_XA_OPA               0x00000000
++#define HC_XA_InvOPA            0x00000010
++#define HC_XA_OPAp5             0x00000020
++#define HC_XA_0                 0x00000000
++#define HC_XA_Asrc              0x00000001
++#define HC_XA_Adst              0x00000002
++#define HC_XA_Fog               0x00000003
++#define HC_XA_minAsrcFog        0x00000004
++#define HC_XA_minAsrcAdst       0x00000005
++#define HC_XA_maxAsrcFog        0x00000006
++#define HC_XA_maxAsrcAdst       0x00000007
++#define HC_XA_HABLRA            0x00000008
++#define HC_XA_minAsrcInvAdst    0x00000008
++#define HC_XA_HABLFRA           0x00000009
++/*--
++ */
++#define HC_HABLCa_OPC           (HC_XC_OPC << 10)
++#define HC_HABLCa_InvOPC        (HC_XC_InvOPC << 10)
++#define HC_HABLCa_OPCp5         (HC_XC_OPCp5 << 10)
++#define HC_HABLCa_Csrc          (HC_XC_Csrc << 10)
++#define HC_HABLCa_Cdst          (HC_XC_Cdst << 10)
++#define HC_HABLCa_Asrc          (HC_XC_Asrc << 10)
++#define HC_HABLCa_Adst          (HC_XC_Adst << 10)
++#define HC_HABLCa_Fog           (HC_XC_Fog << 10)
++#define HC_HABLCa_HABLRCa       (HC_XC_HABLRC << 10)
++#define HC_HABLCa_minSrcDst     (HC_XC_minSrcDst << 10)
++#define HC_HABLCa_maxSrcDst     (HC_XC_maxSrcDst << 10)
++#define HC_HABLFCa_OPC              (HC_XC_OPC << 4)
++#define HC_HABLFCa_InvOPC           (HC_XC_InvOPC << 4)
++#define HC_HABLFCa_OPCp5            (HC_XC_OPCp5 << 4)
++#define HC_HABLFCa_Csrc             (HC_XC_Csrc << 4)
++#define HC_HABLFCa_Cdst             (HC_XC_Cdst << 4)
++#define HC_HABLFCa_Asrc             (HC_XC_Asrc << 4)
++#define HC_HABLFCa_Adst             (HC_XC_Adst << 4)
++#define HC_HABLFCa_Fog              (HC_XC_Fog << 4)
++#define HC_HABLFCa_HABLRCa          (HC_XC_HABLRC << 4)
++#define HC_HABLFCa_minSrcDst        (HC_XC_minSrcDst << 4)
++#define HC_HABLFCa_maxSrcDst        (HC_XC_maxSrcDst << 4)
++#define HC_HABLFCa_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 4)
++#define HC_HABLCbias_HABLRCbias 0x00000000
++#define HC_HABLCbias_Asrc       0x00000001
++#define HC_HABLCbias_Adst       0x00000002
++#define HC_HABLCbias_Fog        0x00000003
++#define HC_HABLCbias_Cin        0x00000004
++/* HC_SubA_HABLCop         0x0035
++ */
++#define HC_HABLdot_MASK         0x00010000
++#define HC_HABLCop_MASK         0x00004000
++#define HC_HABLCb_MASK          0x00003f00
++#define HC_HABLCb_C_MASK        0x00003000
++#define HC_HABLCb_OPC_MASK      0x00000f00
++#define HC_HABLFCb_MASK         0x000000fc
++#define HC_HABLFCb_C_MASK       0x000000c0
++#define HC_HABLFCb_OPC_MASK     0x0000003c
++#define HC_HABLCshift_MASK      0x00000003
++#define HC_HABLCb_OPC           (HC_XC_OPC << 8)
++#define HC_HABLCb_InvOPC        (HC_XC_InvOPC << 8)
++#define HC_HABLCb_OPCp5         (HC_XC_OPCp5 << 8)
++#define HC_HABLCb_Csrc          (HC_XC_Csrc << 8)
++#define HC_HABLCb_Cdst          (HC_XC_Cdst << 8)
++#define HC_HABLCb_Asrc          (HC_XC_Asrc << 8)
++#define HC_HABLCb_Adst          (HC_XC_Adst << 8)
++#define HC_HABLCb_Fog           (HC_XC_Fog << 8)
++#define HC_HABLCb_HABLRCa       (HC_XC_HABLRC << 8)
++#define HC_HABLCb_minSrcDst     (HC_XC_minSrcDst << 8)
++#define HC_HABLCb_maxSrcDst     (HC_XC_maxSrcDst << 8)
++#define HC_HABLFCb_OPC              (HC_XC_OPC << 2)
++#define HC_HABLFCb_InvOPC           (HC_XC_InvOPC << 2)
++#define HC_HABLFCb_OPCp5            (HC_XC_OPCp5 << 2)
++#define HC_HABLFCb_Csrc             (HC_XC_Csrc << 2)
++#define HC_HABLFCb_Cdst             (HC_XC_Cdst << 2)
++#define HC_HABLFCb_Asrc             (HC_XC_Asrc << 2)
++#define HC_HABLFCb_Adst             (HC_XC_Adst << 2)
++#define HC_HABLFCb_Fog              (HC_XC_Fog << 2)
++#define HC_HABLFCb_HABLRCb          (HC_XC_HABLRC << 2)
++#define HC_HABLFCb_minSrcDst        (HC_XC_minSrcDst << 2)
++#define HC_HABLFCb_maxSrcDst        (HC_XC_maxSrcDst << 2)
++#define HC_HABLFCb_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 2)
++/* HC_SubA_HABLAsat        0x0036
++ */
++#define HC_HABLAsat_MASK        0x00010000
++#define HC_HABLAa_MASK          0x0000fc00
++#define HC_HABLAa_A_MASK        0x0000c000
++#define HC_HABLAa_OPA_MASK      0x00003c00
++#define HC_HABLFAa_MASK         0x000003f0
++#define HC_HABLFAa_A_MASK       0x00000300
++#define HC_HABLFAa_OPA_MASK     0x000000f0
++#define HC_HABLAbias_MASK       0x0000000f
++#define HC_HABLAbias_A_MASK     0x00000008
++#define HC_HABLAbias_OPA_MASK   0x00000007
++#define HC_HABLAa_OPA           (HC_XA_OPA << 10)
++#define HC_HABLAa_InvOPA        (HC_XA_InvOPA << 10)
++#define HC_HABLAa_OPAp5         (HC_XA_OPAp5 << 10)
++#define HC_HABLAa_0             (HC_XA_0 << 10)
++#define HC_HABLAa_Asrc          (HC_XA_Asrc << 10)
++#define HC_HABLAa_Adst          (HC_XA_Adst << 10)
++#define HC_HABLAa_Fog           (HC_XA_Fog << 10)
++#define HC_HABLAa_minAsrcFog    (HC_XA_minAsrcFog << 10)
++#define HC_HABLAa_minAsrcAdst   (HC_XA_minAsrcAdst << 10)
++#define HC_HABLAa_maxAsrcFog    (HC_XA_maxAsrcFog << 10)
++#define HC_HABLAa_maxAsrcAdst   (HC_XA_maxAsrcAdst << 10)
++#define HC_HABLAa_HABLRA        (HC_XA_HABLRA << 10)
++#define HC_HABLFAa_OPA          (HC_XA_OPA << 4)
++#define HC_HABLFAa_InvOPA       (HC_XA_InvOPA << 4)
++#define HC_HABLFAa_OPAp5        (HC_XA_OPAp5 << 4)
++#define HC_HABLFAa_0            (HC_XA_0 << 4)
++#define HC_HABLFAa_Asrc         (HC_XA_Asrc << 4)
++#define HC_HABLFAa_Adst         (HC_XA_Adst << 4)
++#define HC_HABLFAa_Fog          (HC_XA_Fog << 4)
++#define HC_HABLFAa_minAsrcFog   (HC_XA_minAsrcFog << 4)
++#define HC_HABLFAa_minAsrcAdst  (HC_XA_minAsrcAdst << 4)
++#define HC_HABLFAa_maxAsrcFog   (HC_XA_maxAsrcFog << 4)
++#define HC_HABLFAa_maxAsrcAdst  (HC_XA_maxAsrcAdst << 4)
++#define HC_HABLFAa_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 4)
++#define HC_HABLFAa_HABLFRA          (HC_XA_HABLFRA << 4)
++#define HC_HABLAbias_HABLRAbias 0x00000000
++#define HC_HABLAbias_Asrc       0x00000001
++#define HC_HABLAbias_Adst       0x00000002
++#define HC_HABLAbias_Fog        0x00000003
++#define HC_HABLAbias_Aaa        0x00000004
++/* HC_SubA_HABLAop         0x0037
++ */
++#define HC_HABLAop_MASK         0x00004000
++#define HC_HABLAb_MASK          0x00003f00
++#define HC_HABLAb_OPA_MASK      0x00000f00
++#define HC_HABLFAb_MASK         0x000000fc
++#define HC_HABLFAb_OPA_MASK     0x0000003c
++#define HC_HABLAshift_MASK      0x00000003
++#define HC_HABLAb_OPA           (HC_XA_OPA << 8)
++#define HC_HABLAb_InvOPA        (HC_XA_InvOPA << 8)
++#define HC_HABLAb_OPAp5         (HC_XA_OPAp5 << 8)
++#define HC_HABLAb_0             (HC_XA_0 << 8)
++#define HC_HABLAb_Asrc          (HC_XA_Asrc << 8)
++#define HC_HABLAb_Adst          (HC_XA_Adst << 8)
++#define HC_HABLAb_Fog           (HC_XA_Fog << 8)
++#define HC_HABLAb_minAsrcFog    (HC_XA_minAsrcFog << 8)
++#define HC_HABLAb_minAsrcAdst   (HC_XA_minAsrcAdst << 8)
++#define HC_HABLAb_maxAsrcFog    (HC_XA_maxAsrcFog << 8)
++#define HC_HABLAb_maxAsrcAdst   (HC_XA_maxAsrcAdst << 8)
++#define HC_HABLAb_HABLRA        (HC_XA_HABLRA << 8)
++#define HC_HABLFAb_OPA          (HC_XA_OPA << 2)
++#define HC_HABLFAb_InvOPA       (HC_XA_InvOPA << 2)
++#define HC_HABLFAb_OPAp5        (HC_XA_OPAp5 << 2)
++#define HC_HABLFAb_0            (HC_XA_0 << 2)
++#define HC_HABLFAb_Asrc         (HC_XA_Asrc << 2)
++#define HC_HABLFAb_Adst         (HC_XA_Adst << 2)
++#define HC_HABLFAb_Fog          (HC_XA_Fog << 2)
++#define HC_HABLFAb_minAsrcFog   (HC_XA_minAsrcFog << 2)
++#define HC_HABLFAb_minAsrcAdst  (HC_XA_minAsrcAdst << 2)
++#define HC_HABLFAb_maxAsrcFog   (HC_XA_maxAsrcFog << 2)
++#define HC_HABLFAb_maxAsrcAdst  (HC_XA_maxAsrcAdst << 2)
++#define HC_HABLFAb_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 2)
++#define HC_HABLFAb_HABLFRA          (HC_XA_HABLFRA << 2)
++/* HC_SubA_HABLRAa         0x003d
++ */
++#define HC_HABLRAa_MASK         0x00ff0000
++#define HC_HABLRFAa_MASK        0x0000ff00
++#define HC_HABLRAbias_MASK      0x000000ff
++#define HC_HABLRAa_SHIFT        16
++#define HC_HABLRFAa_SHIFT       8
++/* HC_SubA_HABLRAb         0x003e
++ */
++#define HC_HABLRAb_MASK         0x0000ff00
++#define HC_HABLRFAb_MASK        0x000000ff
++#define HC_HABLRAb_SHIFT        8
++
++/* Destination Setting
++ */
++#define HC_SubA_HDBBasL         0x0040
++#define HC_SubA_HDBBasH         0x0041
++#define HC_SubA_HDBFM           0x0042
++#define HC_SubA_HFBBMSKL        0x0043
++#define HC_SubA_HROP            0x0044
++/* HC_SubA_HDBFM           0x0042
++ */
++#define HC_HDBFM_MASK           0x001f0000
++#define HC_HDBLoc_MASK          0x0000c000
++#define HC_HDBPit_MASK          0x00003fff
++#define HC_HDBFM_RGB555         0x00000000
++#define HC_HDBFM_RGB565         0x00010000
++#define HC_HDBFM_ARGB4444       0x00020000
++#define HC_HDBFM_ARGB1555       0x00030000
++#define HC_HDBFM_BGR555         0x00040000
++#define HC_HDBFM_BGR565         0x00050000
++#define HC_HDBFM_ABGR4444       0x00060000
++#define HC_HDBFM_ABGR1555       0x00070000
++#define HC_HDBFM_ARGB0888       0x00080000
++#define HC_HDBFM_ARGB8888       0x00090000
++#define HC_HDBFM_ABGR0888       0x000a0000
++#define HC_HDBFM_ABGR8888       0x000b0000
++#define HC_HDBLoc_Local         0x00000000
++#define HC_HDBLoc_Sys           0x00004000
++/* HC_SubA_HROP            0x0044
++ */
++#define HC_HROP_MASK            0x00000f00
++#define HC_HFBBMSKH_MASK        0x000000ff
++#define HC_HROP_BLACK           0x00000000
++#define HC_HROP_DPon            0x00000100
++#define HC_HROP_DPna            0x00000200
++#define HC_HROP_Pn              0x00000300
++#define HC_HROP_PDna            0x00000400
++#define HC_HROP_Dn              0x00000500
++#define HC_HROP_DPx             0x00000600
++#define HC_HROP_DPan            0x00000700
++#define HC_HROP_DPa             0x00000800
++#define HC_HROP_DPxn            0x00000900
++#define HC_HROP_D               0x00000a00
++#define HC_HROP_DPno            0x00000b00
++#define HC_HROP_P               0x00000c00
++#define HC_HROP_PDno            0x00000d00
++#define HC_HROP_DPo             0x00000e00
++#define HC_HROP_WHITE           0x00000f00
++
++/* Fog Setting
++ */
++#define HC_SubA_HFogLF          0x0050
++#define HC_SubA_HFogCL          0x0051
++#define HC_SubA_HFogCH          0x0052
++#define HC_SubA_HFogStL         0x0053
++#define HC_SubA_HFogStH         0x0054
++#define HC_SubA_HFogOOdMF       0x0055
++#define HC_SubA_HFogOOdEF       0x0056
++#define HC_SubA_HFogEndL        0x0057
++#define HC_SubA_HFogDenst       0x0058
++/* HC_SubA_HFogLF          0x0050
++ */
++#define HC_FogLF_MASK           0x00000010
++#define HC_FogEq_MASK           0x00000008
++#define HC_FogMD_MASK           0x00000007
++#define HC_FogMD_LocalFog        0x00000000
++#define HC_FogMD_LinearFog       0x00000002
++#define HC_FogMD_ExponentialFog  0x00000004
++#define HC_FogMD_Exponential2Fog 0x00000005
++/* #define HC_FogMD_FogTable       0x00000003 */
++
++/* HC_SubA_HFogDenst        0x0058
++ */
++#define HC_FogDenst_MASK        0x001fff00
++#define HC_FogEndL_MASK         0x000000ff
++
++/* Texture subtype definitions
++ */
++#define HC_SubType_Tex0         0x00000000
++#define HC_SubType_Tex1         0x00000001
++#define HC_SubType_TexGeneral   0x000000fe
++
++/* Attribute of texture n
++ */
++#define HC_SubA_HTXnL0BasL      0x0000
++#define HC_SubA_HTXnL1BasL      0x0001
++#define HC_SubA_HTXnL2BasL      0x0002
++#define HC_SubA_HTXnL3BasL      0x0003
++#define HC_SubA_HTXnL4BasL      0x0004
++#define HC_SubA_HTXnL5BasL      0x0005
++#define HC_SubA_HTXnL6BasL      0x0006
++#define HC_SubA_HTXnL7BasL      0x0007
++#define HC_SubA_HTXnL8BasL      0x0008
++#define HC_SubA_HTXnL9BasL      0x0009
++#define HC_SubA_HTXnLaBasL      0x000a
++#define HC_SubA_HTXnLbBasL      0x000b
++#define HC_SubA_HTXnLcBasL      0x000c
++#define HC_SubA_HTXnLdBasL      0x000d
++#define HC_SubA_HTXnLeBasL      0x000e
++#define HC_SubA_HTXnLfBasL      0x000f
++#define HC_SubA_HTXnL10BasL     0x0010
++#define HC_SubA_HTXnL11BasL     0x0011
++#define HC_SubA_HTXnL012BasH    0x0020
++#define HC_SubA_HTXnL345BasH    0x0021
++#define HC_SubA_HTXnL678BasH    0x0022
++#define HC_SubA_HTXnL9abBasH    0x0023
++#define HC_SubA_HTXnLcdeBasH    0x0024
++#define HC_SubA_HTXnLf1011BasH  0x0025
++#define HC_SubA_HTXnL0Pit       0x002b
++#define HC_SubA_HTXnL1Pit       0x002c
++#define HC_SubA_HTXnL2Pit       0x002d
++#define HC_SubA_HTXnL3Pit       0x002e
++#define HC_SubA_HTXnL4Pit       0x002f
++#define HC_SubA_HTXnL5Pit       0x0030
++#define HC_SubA_HTXnL6Pit       0x0031
++#define HC_SubA_HTXnL7Pit       0x0032
++#define HC_SubA_HTXnL8Pit       0x0033
++#define HC_SubA_HTXnL9Pit       0x0034
++#define HC_SubA_HTXnLaPit       0x0035
++#define HC_SubA_HTXnLbPit       0x0036
++#define HC_SubA_HTXnLcPit       0x0037
++#define HC_SubA_HTXnLdPit       0x0038
++#define HC_SubA_HTXnLePit       0x0039
++#define HC_SubA_HTXnLfPit       0x003a
++#define HC_SubA_HTXnL10Pit      0x003b
++#define HC_SubA_HTXnL11Pit      0x003c
++#define HC_SubA_HTXnL0_5WE      0x004b
++#define HC_SubA_HTXnL6_bWE      0x004c
++#define HC_SubA_HTXnLc_11WE     0x004d
++#define HC_SubA_HTXnL0_5HE      0x0051
++#define HC_SubA_HTXnL6_bHE      0x0052
++#define HC_SubA_HTXnLc_11HE     0x0053
++#define HC_SubA_HTXnL0OS        0x0077
++#define HC_SubA_HTXnTB          0x0078
++#define HC_SubA_HTXnMPMD        0x0079
++#define HC_SubA_HTXnCLODu       0x007a
++#define HC_SubA_HTXnFM          0x007b
++#define HC_SubA_HTXnTRCH        0x007c
++#define HC_SubA_HTXnTRCL        0x007d
++#define HC_SubA_HTXnTBC         0x007e
++#define HC_SubA_HTXnTRAH        0x007f
++#define HC_SubA_HTXnTBLCsat     0x0080
++#define HC_SubA_HTXnTBLCop      0x0081
++#define HC_SubA_HTXnTBLMPfog    0x0082
++#define HC_SubA_HTXnTBLAsat     0x0083
++#define HC_SubA_HTXnTBLRCa      0x0085
++#define HC_SubA_HTXnTBLRCb      0x0086
++#define HC_SubA_HTXnTBLRCc      0x0087
++#define HC_SubA_HTXnTBLRCbias   0x0088
++#define HC_SubA_HTXnTBLRAa      0x0089
++#define HC_SubA_HTXnTBLRFog     0x008a
++#define HC_SubA_HTXnBumpM00     0x0090
++#define HC_SubA_HTXnBumpM01     0x0091
++#define HC_SubA_HTXnBumpM10     0x0092
++#define HC_SubA_HTXnBumpM11     0x0093
++#define HC_SubA_HTXnLScale      0x0094
++#define HC_SubA_HTXSMD          0x0000
++/* HC_SubA_HTXnL012BasH    0x0020
++ */
++#define HC_HTXnL0BasH_MASK      0x000000ff
++#define HC_HTXnL1BasH_MASK      0x0000ff00
++#define HC_HTXnL2BasH_MASK      0x00ff0000
++#define HC_HTXnL1BasH_SHIFT     8
++#define HC_HTXnL2BasH_SHIFT     16
++/* HC_SubA_HTXnL345BasH    0x0021
++ */
++#define HC_HTXnL3BasH_MASK      0x000000ff
++#define HC_HTXnL4BasH_MASK      0x0000ff00
++#define HC_HTXnL5BasH_MASK      0x00ff0000
++#define HC_HTXnL4BasH_SHIFT     8
++#define HC_HTXnL5BasH_SHIFT     16
++/* HC_SubA_HTXnL678BasH    0x0022
++ */
++#define HC_HTXnL6BasH_MASK      0x000000ff
++#define HC_HTXnL7BasH_MASK      0x0000ff00
++#define HC_HTXnL8BasH_MASK      0x00ff0000
++#define HC_HTXnL7BasH_SHIFT     8
++#define HC_HTXnL8BasH_SHIFT     16
++/* HC_SubA_HTXnL9abBasH    0x0023
++ */
++#define HC_HTXnL9BasH_MASK      0x000000ff
++#define HC_HTXnLaBasH_MASK      0x0000ff00
++#define HC_HTXnLbBasH_MASK      0x00ff0000
++#define HC_HTXnLaBasH_SHIFT     8
++#define HC_HTXnLbBasH_SHIFT     16
++/* HC_SubA_HTXnLcdeBasH    0x0024
++ */
++#define HC_HTXnLcBasH_MASK      0x000000ff
++#define HC_HTXnLdBasH_MASK      0x0000ff00
++#define HC_HTXnLeBasH_MASK      0x00ff0000
++#define HC_HTXnLdBasH_SHIFT     8
++#define HC_HTXnLeBasH_SHIFT     16
++/* HC_SubA_HTXnLf1011BasH  0x0025
++ */
++#define HC_HTXnLfBasH_MASK      0x000000ff
++#define HC_HTXnL10BasH_MASK      0x0000ff00
++#define HC_HTXnL11BasH_MASK      0x00ff0000
++#define HC_HTXnL10BasH_SHIFT     8
++#define HC_HTXnL11BasH_SHIFT     16
++/* HC_SubA_HTXnL0Pit       0x002b
++ */
++#define HC_HTXnLnPit_MASK       0x00003fff
++#define HC_HTXnEnPit_MASK       0x00080000
++#define HC_HTXnLnPitE_MASK      0x00f00000
++#define HC_HTXnLnPitE_SHIFT     20
++/* HC_SubA_HTXnL0_5WE      0x004b
++ */
++#define HC_HTXnL0WE_MASK        0x0000000f
++#define HC_HTXnL1WE_MASK        0x000000f0
++#define HC_HTXnL2WE_MASK        0x00000f00
++#define HC_HTXnL3WE_MASK        0x0000f000
++#define HC_HTXnL4WE_MASK        0x000f0000
++#define HC_HTXnL5WE_MASK        0x00f00000
++#define HC_HTXnL1WE_SHIFT       4
++#define HC_HTXnL2WE_SHIFT       8
++#define HC_HTXnL3WE_SHIFT       12
++#define HC_HTXnL4WE_SHIFT       16
++#define HC_HTXnL5WE_SHIFT       20
++/* HC_SubA_HTXnL6_bWE      0x004c
++ */
++#define HC_HTXnL6WE_MASK        0x0000000f
++#define HC_HTXnL7WE_MASK        0x000000f0
++#define HC_HTXnL8WE_MASK        0x00000f00
++#define HC_HTXnL9WE_MASK        0x0000f000
++#define HC_HTXnLaWE_MASK        0x000f0000
++#define HC_HTXnLbWE_MASK        0x00f00000
++#define HC_HTXnL7WE_SHIFT       4
++#define HC_HTXnL8WE_SHIFT       8
++#define HC_HTXnL9WE_SHIFT       12
++#define HC_HTXnLaWE_SHIFT       16
++#define HC_HTXnLbWE_SHIFT       20
++/* HC_SubA_HTXnLc_11WE      0x004d
++ */
++#define HC_HTXnLcWE_MASK        0x0000000f
++#define HC_HTXnLdWE_MASK        0x000000f0
++#define HC_HTXnLeWE_MASK        0x00000f00
++#define HC_HTXnLfWE_MASK        0x0000f000
++#define HC_HTXnL10WE_MASK       0x000f0000
++#define HC_HTXnL11WE_MASK       0x00f00000
++#define HC_HTXnLdWE_SHIFT       4
++#define HC_HTXnLeWE_SHIFT       8
++#define HC_HTXnLfWE_SHIFT       12
++#define HC_HTXnL10WE_SHIFT      16
++#define HC_HTXnL11WE_SHIFT      20
++/* HC_SubA_HTXnL0_5HE      0x0051
++ */
++#define HC_HTXnL0HE_MASK        0x0000000f
++#define HC_HTXnL1HE_MASK        0x000000f0
++#define HC_HTXnL2HE_MASK        0x00000f00
++#define HC_HTXnL3HE_MASK        0x0000f000
++#define HC_HTXnL4HE_MASK        0x000f0000
++#define HC_HTXnL5HE_MASK        0x00f00000
++#define HC_HTXnL1HE_SHIFT       4
++#define HC_HTXnL2HE_SHIFT       8
++#define HC_HTXnL3HE_SHIFT       12
++#define HC_HTXnL4HE_SHIFT       16
++#define HC_HTXnL5HE_SHIFT       20
++/* HC_SubA_HTXnL6_bHE      0x0052
++ */
++#define HC_HTXnL6HE_MASK        0x0000000f
++#define HC_HTXnL7HE_MASK        0x000000f0
++#define HC_HTXnL8HE_MASK        0x00000f00
++#define HC_HTXnL9HE_MASK        0x0000f000
++#define HC_HTXnLaHE_MASK        0x000f0000
++#define HC_HTXnLbHE_MASK        0x00f00000
++#define HC_HTXnL7HE_SHIFT       4
++#define HC_HTXnL8HE_SHIFT       8
++#define HC_HTXnL9HE_SHIFT       12
++#define HC_HTXnLaHE_SHIFT       16
++#define HC_HTXnLbHE_SHIFT       20
++/* HC_SubA_HTXnLc_11HE      0x0053
++ */
++#define HC_HTXnLcHE_MASK        0x0000000f
++#define HC_HTXnLdHE_MASK        0x000000f0
++#define HC_HTXnLeHE_MASK        0x00000f00
++#define HC_HTXnLfHE_MASK        0x0000f000
++#define HC_HTXnL10HE_MASK       0x000f0000
++#define HC_HTXnL11HE_MASK       0x00f00000
++#define HC_HTXnLdHE_SHIFT       4
++#define HC_HTXnLeHE_SHIFT       8
++#define HC_HTXnLfHE_SHIFT       12
++#define HC_HTXnL10HE_SHIFT      16
++#define HC_HTXnL11HE_SHIFT      20
++/* HC_SubA_HTXnL0OS        0x0077
++ */
++#define HC_HTXnL0OS_MASK        0x003ff000
++#define HC_HTXnLVmax_MASK       0x00000fc0
++#define HC_HTXnLVmin_MASK       0x0000003f
++#define HC_HTXnL0OS_SHIFT       12
++#define HC_HTXnLVmax_SHIFT      6
++/* HC_SubA_HTXnTB          0x0078
++ */
++#define HC_HTXnTB_MASK          0x00f00000
++#define HC_HTXnFLSe_MASK        0x0000e000
++#define HC_HTXnFLSs_MASK        0x00001c00
++#define HC_HTXnFLTe_MASK        0x00000380
++#define HC_HTXnFLTs_MASK        0x00000070
++#define HC_HTXnFLDs_MASK        0x0000000f
++#define HC_HTXnTB_NoTB          0x00000000
++#define HC_HTXnTB_TBC_S         0x00100000
++#define HC_HTXnTB_TBC_T         0x00200000
++#define HC_HTXnTB_TB_S          0x00400000
++#define HC_HTXnTB_TB_T          0x00800000
++#define HC_HTXnFLSe_Nearest     0x00000000
++#define HC_HTXnFLSe_Linear      0x00002000
++#define HC_HTXnFLSe_NonLinear   0x00004000
++#define HC_HTXnFLSe_Sharp       0x00008000
++#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
++#define HC_HTXnFLSs_Nearest     0x00000000
++#define HC_HTXnFLSs_Linear      0x00000400
++#define HC_HTXnFLSs_NonLinear   0x00000800
++#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
++#define HC_HTXnFLTe_Nearest     0x00000000
++#define HC_HTXnFLTe_Linear      0x00000080
++#define HC_HTXnFLTe_NonLinear   0x00000100
++#define HC_HTXnFLTe_Sharp       0x00000180
++#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
++#define HC_HTXnFLTs_Nearest     0x00000000
++#define HC_HTXnFLTs_Linear      0x00000010
++#define HC_HTXnFLTs_NonLinear   0x00000020
++#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
++#define HC_HTXnFLDs_Tex0        0x00000000
++#define HC_HTXnFLDs_Nearest     0x00000001
++#define HC_HTXnFLDs_Linear      0x00000002
++#define HC_HTXnFLDs_NonLinear   0x00000003
++#define HC_HTXnFLDs_Dither      0x00000004
++#define HC_HTXnFLDs_ConstLOD    0x00000005
++#define HC_HTXnFLDs_Ani         0x00000006
++#define HC_HTXnFLDs_AniDither   0x00000007
++/* HC_SubA_HTXnMPMD        0x0079
++ */
++#define HC_HTXnMPMD_SMASK       0x00070000
++#define HC_HTXnMPMD_TMASK       0x00380000
++#define HC_HTXnLODDTf_MASK      0x00000007
++#define HC_HTXnXY2ST_MASK       0x00000008
++#define HC_HTXnMPMD_Tsingle     0x00000000
++#define HC_HTXnMPMD_Tclamp      0x00080000
++#define HC_HTXnMPMD_Trepeat     0x00100000
++#define HC_HTXnMPMD_Tmirror     0x00180000
++#define HC_HTXnMPMD_Twrap       0x00200000
++#define HC_HTXnMPMD_Ssingle     0x00000000
++#define HC_HTXnMPMD_Sclamp      0x00010000
++#define HC_HTXnMPMD_Srepeat     0x00020000
++#define HC_HTXnMPMD_Smirror     0x00030000
++#define HC_HTXnMPMD_Swrap       0x00040000
++/* HC_SubA_HTXnCLODu       0x007a
++ */
++#define HC_HTXnCLODu_MASK       0x000ffc00
++#define HC_HTXnCLODd_MASK       0x000003ff
++#define HC_HTXnCLODu_SHIFT      10
++/* HC_SubA_HTXnFM          0x007b
++ */
++#define HC_HTXnFM_MASK          0x00ff0000
++#define HC_HTXnLoc_MASK         0x00000003
++#define HC_HTXnFM_INDEX         0x00000000
++#define HC_HTXnFM_Intensity     0x00080000
++#define HC_HTXnFM_Lum           0x00100000
++#define HC_HTXnFM_Alpha         0x00180000
++#define HC_HTXnFM_DX            0x00280000
++#define HC_HTXnFM_ARGB16        0x00880000
++#define HC_HTXnFM_ARGB32        0x00980000
++#define HC_HTXnFM_ABGR16        0x00a80000
++#define HC_HTXnFM_ABGR32        0x00b80000
++#define HC_HTXnFM_RGBA16        0x00c80000
++#define HC_HTXnFM_RGBA32        0x00d80000
++#define HC_HTXnFM_BGRA16        0x00e80000
++#define HC_HTXnFM_BGRA32        0x00f80000
++#define HC_HTXnFM_BUMPMAP       0x00380000
++#define HC_HTXnFM_Index1        (HC_HTXnFM_INDEX     | 0x00000000)
++#define HC_HTXnFM_Index2        (HC_HTXnFM_INDEX     | 0x00010000)
++#define HC_HTXnFM_Index4        (HC_HTXnFM_INDEX     | 0x00020000)
++#define HC_HTXnFM_Index8        (HC_HTXnFM_INDEX     | 0x00030000)
++#define HC_HTXnFM_T1            (HC_HTXnFM_Intensity | 0x00000000)
++#define HC_HTXnFM_T2            (HC_HTXnFM_Intensity | 0x00010000)
++#define HC_HTXnFM_T4            (HC_HTXnFM_Intensity | 0x00020000)
++#define HC_HTXnFM_T8            (HC_HTXnFM_Intensity | 0x00030000)
++#define HC_HTXnFM_L1            (HC_HTXnFM_Lum       | 0x00000000)
++#define HC_HTXnFM_L2            (HC_HTXnFM_Lum       | 0x00010000)
++#define HC_HTXnFM_L4            (HC_HTXnFM_Lum       | 0x00020000)
++#define HC_HTXnFM_L8            (HC_HTXnFM_Lum       | 0x00030000)
++#define HC_HTXnFM_AL44          (HC_HTXnFM_Lum       | 0x00040000)
++#define HC_HTXnFM_AL88          (HC_HTXnFM_Lum       | 0x00050000)
++#define HC_HTXnFM_A1            (HC_HTXnFM_Alpha     | 0x00000000)
++#define HC_HTXnFM_A2            (HC_HTXnFM_Alpha     | 0x00010000)
++#define HC_HTXnFM_A4            (HC_HTXnFM_Alpha     | 0x00020000)
++#define HC_HTXnFM_A8            (HC_HTXnFM_Alpha     | 0x00030000)
++#define HC_HTXnFM_DX1           (HC_HTXnFM_DX        | 0x00010000)
++#define HC_HTXnFM_DX23          (HC_HTXnFM_DX        | 0x00020000)
++#define HC_HTXnFM_DX45          (HC_HTXnFM_DX        | 0x00030000)
++#define HC_HTXnFM_RGB555        (HC_HTXnFM_ARGB16    | 0x00000000)
++#define HC_HTXnFM_RGB565        (HC_HTXnFM_ARGB16    | 0x00010000)
++#define HC_HTXnFM_ARGB1555      (HC_HTXnFM_ARGB16    | 0x00020000)
++#define HC_HTXnFM_ARGB4444      (HC_HTXnFM_ARGB16    | 0x00030000)
++#define HC_HTXnFM_ARGB0888      (HC_HTXnFM_ARGB32    | 0x00000000)
++#define HC_HTXnFM_ARGB8888      (HC_HTXnFM_ARGB32    | 0x00010000)
++#define HC_HTXnFM_BGR555        (HC_HTXnFM_ABGR16    | 0x00000000)
++#define HC_HTXnFM_BGR565        (HC_HTXnFM_ABGR16    | 0x00010000)
++#define HC_HTXnFM_ABGR1555      (HC_HTXnFM_ABGR16    | 0x00020000)
++#define HC_HTXnFM_ABGR4444      (HC_HTXnFM_ABGR16    | 0x00030000)
++#define HC_HTXnFM_ABGR0888      (HC_HTXnFM_ABGR32    | 0x00000000)
++#define HC_HTXnFM_ABGR8888      (HC_HTXnFM_ABGR32    | 0x00010000)
++#define HC_HTXnFM_RGBA5550      (HC_HTXnFM_RGBA16    | 0x00000000)
++#define HC_HTXnFM_RGBA5551      (HC_HTXnFM_RGBA16    | 0x00020000)
++#define HC_HTXnFM_RGBA4444      (HC_HTXnFM_RGBA16    | 0x00030000)
++#define HC_HTXnFM_RGBA8880      (HC_HTXnFM_RGBA32    | 0x00000000)
++#define HC_HTXnFM_RGBA8888      (HC_HTXnFM_RGBA32    | 0x00010000)
++#define HC_HTXnFM_BGRA5550      (HC_HTXnFM_BGRA16    | 0x00000000)
++#define HC_HTXnFM_BGRA5551      (HC_HTXnFM_BGRA16    | 0x00020000)
++#define HC_HTXnFM_BGRA4444      (HC_HTXnFM_BGRA16    | 0x00030000)
++#define HC_HTXnFM_BGRA8880      (HC_HTXnFM_BGRA32    | 0x00000000)
++#define HC_HTXnFM_BGRA8888      (HC_HTXnFM_BGRA32    | 0x00010000)
++#define HC_HTXnFM_VU88          (HC_HTXnFM_BUMPMAP   | 0x00000000)
++#define HC_HTXnFM_LVU655        (HC_HTXnFM_BUMPMAP   | 0x00010000)
++#define HC_HTXnFM_LVU888        (HC_HTXnFM_BUMPMAP   | 0x00020000)
++#define HC_HTXnLoc_Local        0x00000000
++#define HC_HTXnLoc_Sys          0x00000002
++#define HC_HTXnLoc_AGP          0x00000003
++/* HC_SubA_HTXnTRAH        0x007f
++ */
++#define HC_HTXnTRAH_MASK        0x00ff0000
++#define HC_HTXnTRAL_MASK        0x0000ff00
++#define HC_HTXnTBA_MASK         0x000000ff
++#define HC_HTXnTRAH_SHIFT       16
++#define HC_HTXnTRAL_SHIFT       8
++/* HC_SubA_HTXnTBLCsat     0x0080
++ *-- Define the input texture.
++ */
++#define HC_XTC_TOPC             0x00000000
++#define HC_XTC_InvTOPC          0x00000010
++#define HC_XTC_TOPCp5           0x00000020
++#define HC_XTC_Cbias            0x00000000
++#define HC_XTC_InvCbias         0x00000010
++#define HC_XTC_0                0x00000000
++#define HC_XTC_Dif              0x00000001
++#define HC_XTC_Spec             0x00000002
++#define HC_XTC_Tex              0x00000003
++#define HC_XTC_Cur              0x00000004
++#define HC_XTC_Adif             0x00000005
++#define HC_XTC_Fog              0x00000006
++#define HC_XTC_Atex             0x00000007
++#define HC_XTC_Acur             0x00000008
++#define HC_XTC_HTXnTBLRC        0x00000009
++#define HC_XTC_Ctexnext         0x0000000a
++/*--
++ */
++#define HC_HTXnTBLCsat_MASK     0x00800000
++#define HC_HTXnTBLCa_MASK       0x000fc000
++#define HC_HTXnTBLCb_MASK       0x00001f80
++#define HC_HTXnTBLCc_MASK       0x0000003f
++#define HC_HTXnTBLCa_TOPC       (HC_XTC_TOPC << 14)
++#define HC_HTXnTBLCa_InvTOPC    (HC_XTC_InvTOPC << 14)
++#define HC_HTXnTBLCa_TOPCp5     (HC_XTC_TOPCp5 << 14)
++#define HC_HTXnTBLCa_0          (HC_XTC_0 << 14)
++#define HC_HTXnTBLCa_Dif        (HC_XTC_Dif << 14)
++#define HC_HTXnTBLCa_Spec       (HC_XTC_Spec << 14)
++#define HC_HTXnTBLCa_Tex        (HC_XTC_Tex << 14)
++#define HC_HTXnTBLCa_Cur        (HC_XTC_Cur << 14)
++#define HC_HTXnTBLCa_Adif       (HC_XTC_Adif << 14)
++#define HC_HTXnTBLCa_Fog        (HC_XTC_Fog << 14)
++#define HC_HTXnTBLCa_Atex       (HC_XTC_Atex << 14)
++#define HC_HTXnTBLCa_Acur       (HC_XTC_Acur << 14)
++#define HC_HTXnTBLCa_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 14)
++#define HC_HTXnTBLCa_Ctexnext   (HC_XTC_Ctexnext << 14)
++#define HC_HTXnTBLCb_TOPC       (HC_XTC_TOPC << 7)
++#define HC_HTXnTBLCb_InvTOPC    (HC_XTC_InvTOPC << 7)
++#define HC_HTXnTBLCb_TOPCp5     (HC_XTC_TOPCp5 << 7)
++#define HC_HTXnTBLCb_0          (HC_XTC_0 << 7)
++#define HC_HTXnTBLCb_Dif        (HC_XTC_Dif << 7)
++#define HC_HTXnTBLCb_Spec       (HC_XTC_Spec << 7)
++#define HC_HTXnTBLCb_Tex        (HC_XTC_Tex << 7)
++#define HC_HTXnTBLCb_Cur        (HC_XTC_Cur << 7)
++#define HC_HTXnTBLCb_Adif       (HC_XTC_Adif << 7)
++#define HC_HTXnTBLCb_Fog        (HC_XTC_Fog << 7)
++#define HC_HTXnTBLCb_Atex       (HC_XTC_Atex << 7)
++#define HC_HTXnTBLCb_Acur       (HC_XTC_Acur << 7)
++#define HC_HTXnTBLCb_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 7)
++#define HC_HTXnTBLCb_Ctexnext   (HC_XTC_Ctexnext << 7)
++#define HC_HTXnTBLCc_TOPC       (HC_XTC_TOPC << 0)
++#define HC_HTXnTBLCc_InvTOPC    (HC_XTC_InvTOPC << 0)
++#define HC_HTXnTBLCc_TOPCp5     (HC_XTC_TOPCp5 << 0)
++#define HC_HTXnTBLCc_0          (HC_XTC_0 << 0)
++#define HC_HTXnTBLCc_Dif        (HC_XTC_Dif << 0)
++#define HC_HTXnTBLCc_Spec       (HC_XTC_Spec << 0)
++#define HC_HTXnTBLCc_Tex        (HC_XTC_Tex << 0)
++#define HC_HTXnTBLCc_Cur        (HC_XTC_Cur << 0)
++#define HC_HTXnTBLCc_Adif       (HC_XTC_Adif << 0)
++#define HC_HTXnTBLCc_Fog        (HC_XTC_Fog << 0)
++#define HC_HTXnTBLCc_Atex       (HC_XTC_Atex << 0)
++#define HC_HTXnTBLCc_Acur       (HC_XTC_Acur << 0)
++#define HC_HTXnTBLCc_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 0)
++#define HC_HTXnTBLCc_Ctexnext   (HC_XTC_Ctexnext << 0)
++/* HC_SubA_HTXnTBLCop      0x0081
++ */
++#define HC_HTXnTBLdot_MASK      0x00c00000
++#define HC_HTXnTBLCop_MASK      0x00380000
++#define HC_HTXnTBLCbias_MASK    0x0007c000
++#define HC_HTXnTBLCshift_MASK   0x00001800
++#define HC_HTXnTBLAop_MASK      0x00000380
++#define HC_HTXnTBLAbias_MASK    0x00000078
++#define HC_HTXnTBLAshift_MASK   0x00000003
++#define HC_HTXnTBLCop_Add       0x00000000
++#define HC_HTXnTBLCop_Sub       0x00080000
++#define HC_HTXnTBLCop_Min       0x00100000
++#define HC_HTXnTBLCop_Max       0x00180000
++#define HC_HTXnTBLCop_Mask      0x00200000
++#define HC_HTXnTBLCbias_Cbias           (HC_XTC_Cbias << 14)
++#define HC_HTXnTBLCbias_InvCbias        (HC_XTC_InvCbias << 14)
++#define HC_HTXnTBLCbias_0               (HC_XTC_0 << 14)
++#define HC_HTXnTBLCbias_Dif             (HC_XTC_Dif << 14)
++#define HC_HTXnTBLCbias_Spec            (HC_XTC_Spec << 14)
++#define HC_HTXnTBLCbias_Tex             (HC_XTC_Tex << 14)
++#define HC_HTXnTBLCbias_Cur             (HC_XTC_Cur << 14)
++#define HC_HTXnTBLCbias_Adif            (HC_XTC_Adif << 14)
++#define HC_HTXnTBLCbias_Fog             (HC_XTC_Fog << 14)
++#define HC_HTXnTBLCbias_Atex            (HC_XTC_Atex << 14)
++#define HC_HTXnTBLCbias_Acur            (HC_XTC_Acur << 14)
++#define HC_HTXnTBLCbias_HTXnTBLRC       (HC_XTC_HTXnTBLRC << 14)
++#define HC_HTXnTBLCshift_1      0x00000000
++#define HC_HTXnTBLCshift_2      0x00000800
++#define HC_HTXnTBLCshift_No     0x00001000
++#define HC_HTXnTBLCshift_DotP   0x00001800
++/*=* John Sheng [2003.7.18] texture combine *=*/
++#define HC_HTXnTBLDOT3   0x00080000
++#define HC_HTXnTBLDOT4   0x000C0000
++
++#define HC_HTXnTBLAop_Add       0x00000000
++#define HC_HTXnTBLAop_Sub       0x00000080
++#define HC_HTXnTBLAop_Min       0x00000100
++#define HC_HTXnTBLAop_Max       0x00000180
++#define HC_HTXnTBLAop_Mask      0x00000200
++#define HC_HTXnTBLAbias_Inv             0x00000040
++#define HC_HTXnTBLAbias_Adif            0x00000000
++#define HC_HTXnTBLAbias_Fog             0x00000008
++#define HC_HTXnTBLAbias_Acur            0x00000010
++#define HC_HTXnTBLAbias_HTXnTBLRAbias   0x00000018
++#define HC_HTXnTBLAbias_Atex            0x00000020
++#define HC_HTXnTBLAshift_1      0x00000000
++#define HC_HTXnTBLAshift_2      0x00000001
++#define HC_HTXnTBLAshift_No     0x00000002
++/* #define HC_HTXnTBLAshift_DotP   0x00000003 */
++/* HC_SubA_HTXnTBLMPfog    0x0082
++ */
++#define HC_HTXnTBLMPfog_MASK    0x00e00000
++#define HC_HTXnTBLMPfog_0       0x00000000
++#define HC_HTXnTBLMPfog_Adif    0x00200000
++#define HC_HTXnTBLMPfog_Fog     0x00400000
++#define HC_HTXnTBLMPfog_Atex    0x00600000
++#define HC_HTXnTBLMPfog_Acur    0x00800000
++#define HC_HTXnTBLMPfog_GHTXnTBLRFog    0x00a00000
++/* HC_SubA_HTXnTBLAsat     0x0083
++ *-- Define the texture alpha input.
++ */
++#define HC_XTA_TOPA             0x00000000
++#define HC_XTA_InvTOPA          0x00000008
++#define HC_XTA_TOPAp5           0x00000010
++#define HC_XTA_Adif             0x00000000
++#define HC_XTA_Fog              0x00000001
++#define HC_XTA_Acur             0x00000002
++#define HC_XTA_HTXnTBLRA        0x00000003
++#define HC_XTA_Atex             0x00000004
++#define HC_XTA_Atexnext         0x00000005
++/*--
++ */
++#define HC_HTXnTBLAsat_MASK     0x00800000
++#define HC_HTXnTBLAMB_MASK      0x00700000
++#define HC_HTXnTBLAa_MASK       0x0007c000
++#define HC_HTXnTBLAb_MASK       0x00000f80
++#define HC_HTXnTBLAc_MASK       0x0000001f
++#define HC_HTXnTBLAMB_SHIFT     20
++#define HC_HTXnTBLAa_TOPA       (HC_XTA_TOPA << 14)
++#define HC_HTXnTBLAa_InvTOPA    (HC_XTA_InvTOPA << 14)
++#define HC_HTXnTBLAa_TOPAp5     (HC_XTA_TOPAp5 << 14)
++#define HC_HTXnTBLAa_Adif       (HC_XTA_Adif << 14)
++#define HC_HTXnTBLAa_Fog        (HC_XTA_Fog << 14)
++#define HC_HTXnTBLAa_Acur       (HC_XTA_Acur << 14)
++#define HC_HTXnTBLAa_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 14)
++#define HC_HTXnTBLAa_Atex       (HC_XTA_Atex << 14)
++#define HC_HTXnTBLAa_Atexnext   (HC_XTA_Atexnext << 14)
++#define HC_HTXnTBLAb_TOPA       (HC_XTA_TOPA << 7)
++#define HC_HTXnTBLAb_InvTOPA    (HC_XTA_InvTOPA << 7)
++#define HC_HTXnTBLAb_TOPAp5     (HC_XTA_TOPAp5 << 7)
++#define HC_HTXnTBLAb_Adif       (HC_XTA_Adif << 7)
++#define HC_HTXnTBLAb_Fog        (HC_XTA_Fog << 7)
++#define HC_HTXnTBLAb_Acur       (HC_XTA_Acur << 7)
++#define HC_HTXnTBLAb_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 7)
++#define HC_HTXnTBLAb_Atex       (HC_XTA_Atex << 7)
++#define HC_HTXnTBLAb_Atexnext   (HC_XTA_Atexnext << 7)
++#define HC_HTXnTBLAc_TOPA       (HC_XTA_TOPA << 0)
++#define HC_HTXnTBLAc_InvTOPA    (HC_XTA_InvTOPA << 0)
++#define HC_HTXnTBLAc_TOPAp5     (HC_XTA_TOPAp5 << 0)
++#define HC_HTXnTBLAc_Adif       (HC_XTA_Adif << 0)
++#define HC_HTXnTBLAc_Fog        (HC_XTA_Fog << 0)
++#define HC_HTXnTBLAc_Acur       (HC_XTA_Acur << 0)
++#define HC_HTXnTBLAc_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 0)
++#define HC_HTXnTBLAc_Atex       (HC_XTA_Atex << 0)
++#define HC_HTXnTBLAc_Atexnext   (HC_XTA_Atexnext << 0)
++/* HC_SubA_HTXnTBLRAa      0x0089
++ */
++#define HC_HTXnTBLRAa_MASK      0x00ff0000
++#define HC_HTXnTBLRAb_MASK      0x0000ff00
++#define HC_HTXnTBLRAc_MASK      0x000000ff
++#define HC_HTXnTBLRAa_SHIFT     16
++#define HC_HTXnTBLRAb_SHIFT     8
++#define HC_HTXnTBLRAc_SHIFT     0
++/* HC_SubA_HTXnTBLRFog     0x008a
++ */
++#define HC_HTXnTBLRFog_MASK     0x0000ff00
++#define HC_HTXnTBLRAbias_MASK   0x000000ff
++#define HC_HTXnTBLRFog_SHIFT    8
++#define HC_HTXnTBLRAbias_SHIFT  0
++/* HC_SubA_HTXnLScale      0x0094
++ */
++#define HC_HTXnLScale_MASK      0x0007fc00
++#define HC_HTXnLOff_MASK        0x000001ff
++#define HC_HTXnLScale_SHIFT     10
++/* HC_SubA_HTXSMD          0x0000
++ */
++#define HC_HTXSMD_MASK          0x00000080
++#define HC_HTXTMD_MASK          0x00000040
++#define HC_HTXNum_MASK          0x00000038
++#define HC_HTXTRMD_MASK         0x00000006
++#define HC_HTXCHCLR_MASK        0x00000001
++#define HC_HTXNum_SHIFT         3
++
++/* Texture Palette n
++ */
++#define HC_SubType_TexPalette0  0x00000000
++#define HC_SubType_TexPalette1  0x00000001
++#define HC_SubType_FogTable     0x00000010
++#define HC_SubType_Stipple      0x00000014
++/* HC_SubA_TexPalette0     0x0000
++ */
++#define HC_HTPnA_MASK           0xff000000
++#define HC_HTPnR_MASK           0x00ff0000
++#define HC_HTPnG_MASK           0x0000ff00
++#define HC_HTPnB_MASK           0x000000ff
++/* HC_SubA_FogTable        0x0010
++ */
++#define HC_HFPn3_MASK           0xff000000
++#define HC_HFPn2_MASK           0x00ff0000
++#define HC_HFPn1_MASK           0x0000ff00
++#define HC_HFPn_MASK            0x000000ff
++#define HC_HFPn3_SHIFT          24
++#define HC_HFPn2_SHIFT          16
++#define HC_HFPn1_SHIFT          8
++
++/* Auto Testing & Security
++ */
++#define HC_SubA_HenFIFOAT       0x0000
++#define HC_SubA_HFBDrawFirst    0x0004
++#define HC_SubA_HFBBasL         0x0005
++#define HC_SubA_HFBDst          0x0006
++/* HC_SubA_HenFIFOAT       0x0000
++ */
++#define HC_HenFIFOAT_MASK       0x00000020
++#define HC_HenGEMILock_MASK     0x00000010
++#define HC_HenFBASwap_MASK      0x00000008
++#define HC_HenOT_MASK           0x00000004
++#define HC_HenCMDQ_MASK         0x00000002
++#define HC_HenTXCTSU_MASK       0x00000001
++/* HC_SubA_HFBDrawFirst    0x0004
++ */
++#define HC_HFBDrawFirst_MASK    0x00000800
++#define HC_HFBQueue_MASK        0x00000400
++#define HC_HFBLock_MASK         0x00000200
++#define HC_HEOF_MASK            0x00000100
++#define HC_HFBBasH_MASK         0x000000ff
++
++/* GEMI Setting
++ */
++#define HC_SubA_HTArbRCM        0x0008
++#define HC_SubA_HTArbRZ         0x000a
++#define HC_SubA_HTArbWZ         0x000b
++#define HC_SubA_HTArbRTX        0x000c
++#define HC_SubA_HTArbRCW        0x000d
++#define HC_SubA_HTArbE2         0x000e
++#define HC_SubA_HArbRQCM        0x0010
++#define HC_SubA_HArbWQCM        0x0011
++#define HC_SubA_HGEMITout       0x0020
++#define HC_SubA_HFthRTXD        0x0040
++#define HC_SubA_HFthRTXA        0x0044
++#define HC_SubA_HCMDQstL        0x0050
++#define HC_SubA_HCMDQendL       0x0051
++#define HC_SubA_HCMDQLen        0x0052
++/* HC_SubA_HTArbRCM        0x0008
++ */
++#define HC_HTArbRCM_MASK        0x0000ffff
++/* HC_SubA_HTArbRZ         0x000a
++ */
++#define HC_HTArbRZ_MASK         0x0000ffff
++/* HC_SubA_HTArbWZ         0x000b
++ */
++#define HC_HTArbWZ_MASK         0x0000ffff
++/* HC_SubA_HTArbRTX        0x000c
++ */
++#define HC_HTArbRTX_MASK        0x0000ffff
++/* HC_SubA_HTArbRCW        0x000d
++ */
++#define HC_HTArbRCW_MASK        0x0000ffff
++/* HC_SubA_HTArbE2         0x000e
++ */
++#define HC_HTArbE2_MASK         0x0000ffff
++/* HC_SubA_HArbRQCM        0x0010
++ */
++#define HC_HTArbRQCM_MASK       0x0000ffff
++/* HC_SubA_HArbWQCM        0x0011
++ */
++#define HC_HArbWQCM_MASK        0x0000ffff
++/* HC_SubA_HGEMITout       0x0020
++ */
++#define HC_HGEMITout_MASK       0x000f0000
++#define HC_HNPArbZC_MASK        0x0000ffff
++#define HC_HGEMITout_SHIFT      16
++/* HC_SubA_HFthRTXD        0x0040
++ */
++#define HC_HFthRTXD_MASK        0x00ff0000
++#define HC_HFthRZD_MASK         0x0000ff00
++#define HC_HFthWZD_MASK         0x000000ff
++#define HC_HFthRTXD_SHIFT       16
++#define HC_HFthRZD_SHIFT        8
++/* HC_SubA_HFthRTXA        0x0044
++ */
++#define HC_HFthRTXA_MASK        0x000000ff
++
++/******************************************************************************
++** Define the Halcyon Internal register access constants. For simulator only.
++******************************************************************************/
++#define HC_SIMA_HAGPBstL        0x0000
++#define HC_SIMA_HAGPBendL       0x0001
++#define HC_SIMA_HAGPCMNT        0x0002
++#define HC_SIMA_HAGPBpL         0x0003
++#define HC_SIMA_HAGPBpH         0x0004
++#define HC_SIMA_HClipTB         0x0005
++#define HC_SIMA_HClipLR         0x0006
++#define HC_SIMA_HFPClipTL       0x0007
++#define HC_SIMA_HFPClipBL       0x0008
++#define HC_SIMA_HFPClipLL       0x0009
++#define HC_SIMA_HFPClipRL       0x000a
++#define HC_SIMA_HFPClipTBH      0x000b
++#define HC_SIMA_HFPClipLRH      0x000c
++#define HC_SIMA_HLP             0x000d
++#define HC_SIMA_HLPRF           0x000e
++#define HC_SIMA_HSolidCL        0x000f
++#define HC_SIMA_HPixGC          0x0010
++#define HC_SIMA_HSPXYOS         0x0011
++#define HC_SIMA_HCmdA           0x0012
++#define HC_SIMA_HCmdB           0x0013
++#define HC_SIMA_HEnable         0x0014
++#define HC_SIMA_HZWBBasL        0x0015
++#define HC_SIMA_HZWBBasH        0x0016
++#define HC_SIMA_HZWBType        0x0017
++#define HC_SIMA_HZBiasL         0x0018
++#define HC_SIMA_HZWBend         0x0019
++#define HC_SIMA_HZWTMD          0x001a
++#define HC_SIMA_HZWCDL          0x001b
++#define HC_SIMA_HZWCTAGnum      0x001c
++#define HC_SIMA_HZCYNum         0x001d
++#define HC_SIMA_HZWCFire        0x001e
++/* #define HC_SIMA_HSBBasL         0x001d */
++/* #define HC_SIMA_HSBBasH         0x001e */
++/* #define HC_SIMA_HSBFM           0x001f */
++#define HC_SIMA_HSTREF          0x0020
++#define HC_SIMA_HSTMD           0x0021
++#define HC_SIMA_HABBasL         0x0022
++#define HC_SIMA_HABBasH         0x0023
++#define HC_SIMA_HABFM           0x0024
++#define HC_SIMA_HATMD           0x0025
++#define HC_SIMA_HABLCsat        0x0026
++#define HC_SIMA_HABLCop         0x0027
++#define HC_SIMA_HABLAsat        0x0028
++#define HC_SIMA_HABLAop         0x0029
++#define HC_SIMA_HABLRCa         0x002a
++#define HC_SIMA_HABLRFCa        0x002b
++#define HC_SIMA_HABLRCbias      0x002c
++#define HC_SIMA_HABLRCb         0x002d
++#define HC_SIMA_HABLRFCb        0x002e
++#define HC_SIMA_HABLRAa         0x002f
++#define HC_SIMA_HABLRAb         0x0030
++#define HC_SIMA_HDBBasL         0x0031
++#define HC_SIMA_HDBBasH         0x0032
++#define HC_SIMA_HDBFM           0x0033
++#define HC_SIMA_HFBBMSKL        0x0034
++#define HC_SIMA_HROP            0x0035
++#define HC_SIMA_HFogLF          0x0036
++#define HC_SIMA_HFogCL          0x0037
++#define HC_SIMA_HFogCH          0x0038
++#define HC_SIMA_HFogStL         0x0039
++#define HC_SIMA_HFogStH         0x003a
++#define HC_SIMA_HFogOOdMF       0x003b
++#define HC_SIMA_HFogOOdEF       0x003c
++#define HC_SIMA_HFogEndL        0x003d
++#define HC_SIMA_HFogDenst       0x003e
++/*---- start of texture 0 setting ----
++ */
++#define HC_SIMA_HTX0L0BasL      0x0040
++#define HC_SIMA_HTX0L1BasL      0x0041
++#define HC_SIMA_HTX0L2BasL      0x0042
++#define HC_SIMA_HTX0L3BasL      0x0043
++#define HC_SIMA_HTX0L4BasL      0x0044
++#define HC_SIMA_HTX0L5BasL      0x0045
++#define HC_SIMA_HTX0L6BasL      0x0046
++#define HC_SIMA_HTX0L7BasL      0x0047
++#define HC_SIMA_HTX0L8BasL      0x0048
++#define HC_SIMA_HTX0L9BasL      0x0049
++#define HC_SIMA_HTX0LaBasL      0x004a
++#define HC_SIMA_HTX0LbBasL      0x004b
++#define HC_SIMA_HTX0LcBasL      0x004c
++#define HC_SIMA_HTX0LdBasL      0x004d
++#define HC_SIMA_HTX0LeBasL      0x004e
++#define HC_SIMA_HTX0LfBasL      0x004f
++#define HC_SIMA_HTX0L10BasL     0x0050
++#define HC_SIMA_HTX0L11BasL     0x0051
++#define HC_SIMA_HTX0L012BasH    0x0052
++#define HC_SIMA_HTX0L345BasH    0x0053
++#define HC_SIMA_HTX0L678BasH    0x0054
++#define HC_SIMA_HTX0L9abBasH    0x0055
++#define HC_SIMA_HTX0LcdeBasH    0x0056
++#define HC_SIMA_HTX0Lf1011BasH  0x0057
++#define HC_SIMA_HTX0L0Pit       0x0058
++#define HC_SIMA_HTX0L1Pit       0x0059
++#define HC_SIMA_HTX0L2Pit       0x005a
++#define HC_SIMA_HTX0L3Pit       0x005b
++#define HC_SIMA_HTX0L4Pit       0x005c
++#define HC_SIMA_HTX0L5Pit       0x005d
++#define HC_SIMA_HTX0L6Pit       0x005e
++#define HC_SIMA_HTX0L7Pit       0x005f
++#define HC_SIMA_HTX0L8Pit       0x0060
++#define HC_SIMA_HTX0L9Pit       0x0061
++#define HC_SIMA_HTX0LaPit       0x0062
++#define HC_SIMA_HTX0LbPit       0x0063
++#define HC_SIMA_HTX0LcPit       0x0064
++#define HC_SIMA_HTX0LdPit       0x0065
++#define HC_SIMA_HTX0LePit       0x0066
++#define HC_SIMA_HTX0LfPit       0x0067
++#define HC_SIMA_HTX0L10Pit      0x0068
++#define HC_SIMA_HTX0L11Pit      0x0069
++#define HC_SIMA_HTX0L0_5WE      0x006a
++#define HC_SIMA_HTX0L6_bWE      0x006b
++#define HC_SIMA_HTX0Lc_11WE     0x006c
++#define HC_SIMA_HTX0L0_5HE      0x006d
++#define HC_SIMA_HTX0L6_bHE      0x006e
++#define HC_SIMA_HTX0Lc_11HE     0x006f
++#define HC_SIMA_HTX0L0OS        0x0070
++#define HC_SIMA_HTX0TB          0x0071
++#define HC_SIMA_HTX0MPMD        0x0072
++#define HC_SIMA_HTX0CLODu       0x0073
++#define HC_SIMA_HTX0FM          0x0074
++#define HC_SIMA_HTX0TRCH        0x0075
++#define HC_SIMA_HTX0TRCL        0x0076
++#define HC_SIMA_HTX0TBC         0x0077
++#define HC_SIMA_HTX0TRAH        0x0078
++#define HC_SIMA_HTX0TBLCsat     0x0079
++#define HC_SIMA_HTX0TBLCop      0x007a
++#define HC_SIMA_HTX0TBLMPfog    0x007b
++#define HC_SIMA_HTX0TBLAsat     0x007c
++#define HC_SIMA_HTX0TBLRCa      0x007d
++#define HC_SIMA_HTX0TBLRCb      0x007e
++#define HC_SIMA_HTX0TBLRCc      0x007f
++#define HC_SIMA_HTX0TBLRCbias   0x0080
++#define HC_SIMA_HTX0TBLRAa      0x0081
++#define HC_SIMA_HTX0TBLRFog     0x0082
++#define HC_SIMA_HTX0BumpM00     0x0083
++#define HC_SIMA_HTX0BumpM01     0x0084
++#define HC_SIMA_HTX0BumpM10     0x0085
++#define HC_SIMA_HTX0BumpM11     0x0086
++#define HC_SIMA_HTX0LScale      0x0087
++/*---- end of texture 0 setting ----      0x008f
++ */
++#define HC_SIMA_TX0TX1_OFF      0x0050
++/*---- start of texture 1 setting ----
++ */
++#define HC_SIMA_HTX1L0BasL      (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L1BasL      (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L2BasL      (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L3BasL      (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L4BasL      (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L5BasL      (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6BasL      (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L7BasL      (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L8BasL      (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L9BasL      (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LaBasL      (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LbBasL      (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LcBasL      (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LdBasL      (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LeBasL      (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LfBasL      (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L10BasL     (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L11BasL     (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L012BasH    (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L345BasH    (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L678BasH    (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L9abBasH    (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LcdeBasH    (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1Lf1011BasH  (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0Pit       (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L1Pit       (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L2Pit       (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L3Pit       (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L4Pit       (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L5Pit       (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6Pit       (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L7Pit       (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L8Pit       (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L9Pit       (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LaPit       (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LbPit       (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LcPit       (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LdPit       (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LePit       (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LfPit       (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L10Pit      (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L11Pit      (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0_5WE      (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6_bWE      (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1Lc_11WE     (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0_5HE      (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6_bHE      (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1Lc_11HE      (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0OS        (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TB          (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1MPMD        (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1CLODu       (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1FM          (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TRCH        (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TRCL        (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBC         (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TRAH        (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LTC         (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LTA         (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLCsat     (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLCop      (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLMPfog    (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLAsat     (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCa      (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCb      (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCc      (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCbias   (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRAa      (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRFog     (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM00     (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM01     (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM10     (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM11     (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LScale      (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
++/*---- end of texture 1 setting ---- 0xaf
++ */
++#define HC_SIMA_HTXSMD          0x00b0
++#define HC_SIMA_HenFIFOAT       0x00b1
++#define HC_SIMA_HFBDrawFirst    0x00b2
++#define HC_SIMA_HFBBasL         0x00b3
++#define HC_SIMA_HTArbRCM        0x00b4
++#define HC_SIMA_HTArbRZ         0x00b5
++#define HC_SIMA_HTArbWZ         0x00b6
++#define HC_SIMA_HTArbRTX        0x00b7
++#define HC_SIMA_HTArbRCW        0x00b8
++#define HC_SIMA_HTArbE2         0x00b9
++#define HC_SIMA_HGEMITout       0x00ba
++#define HC_SIMA_HFthRTXD        0x00bb
++#define HC_SIMA_HFthRTXA        0x00bc
++/* Define the texture palette 0
++ */
++#define HC_SIMA_HTP0            0x0100
++#define HC_SIMA_HTP1            0x0200
++#define HC_SIMA_FOGTABLE        0x0300
++#define HC_SIMA_STIPPLE         0x0400
++#define HC_SIMA_HE3Fire         0x0440
++#define HC_SIMA_TRANS_SET       0x0441
++#define HC_SIMA_HREngSt         0x0442
++#define HC_SIMA_HRFIFOempty     0x0443
++#define HC_SIMA_HRFIFOfull      0x0444
++#define HC_SIMA_HRErr           0x0445
++#define HC_SIMA_FIFOstatus      0x0446
++
++/******************************************************************************
++** Define the AGP command header.
++******************************************************************************/
++#define HC_ACMD_MASK            0xfe000000
++#define HC_ACMD_SUB_MASK        0x0c000000
++#define HC_ACMD_HCmdA           0xee000000
++#define HC_ACMD_HCmdB           0xec000000
++#define HC_ACMD_HCmdC           0xea000000
++#define HC_ACMD_H1              0xf0000000
++#define HC_ACMD_H2              0xf2000000
++#define HC_ACMD_H3              0xf4000000
++#define HC_ACMD_H4              0xf6000000
++
++#define HC_ACMD_H1IO_MASK       0x000001ff
++#define HC_ACMD_H2IO1_MASK      0x001ff000
++#define HC_ACMD_H2IO2_MASK      0x000001ff
++#define HC_ACMD_H2IO1_SHIFT     12
++#define HC_ACMD_H2IO2_SHIFT     0
++#define HC_ACMD_H3IO_MASK       0x000001ff
++#define HC_ACMD_H3COUNT_MASK    0x01fff000
++#define HC_ACMD_H3COUNT_SHIFT   12
++#define HC_ACMD_H4ID_MASK       0x000001ff
++#define HC_ACMD_H4COUNT_MASK    0x01fffe00
++#define HC_ACMD_H4COUNT_SHIFT   9
++
++/********************************************************************************
++** Define Header
++********************************************************************************/
++#define HC_HEADER2            0xF210F110
++
++/********************************************************************************
++** Define Dummy Value
++********************************************************************************/
++#define HC_DUMMY              0xCCCCCCCC
++/********************************************************************************
++** Define for DMA use
++********************************************************************************/
++#define HALCYON_HEADER2     0XF210F110
++#define HALCYON_FIRECMD     0XEE100000
++#define HALCYON_FIREMASK    0XFFF00000
++#define HALCYON_CMDB        0XEC000000
++#define HALCYON_CMDBMASK    0XFFFE0000
++#define HALCYON_SUB_ADDR0   0X00000000
++#define HALCYON_HEADER1MASK 0XFFFFFC00
++#define HALCYON_HEADER1     0XF0000000
++#define HC_SubA_HAGPBstL        0x0060
++#define HC_SubA_HAGPBendL       0x0061
++#define HC_SubA_HAGPCMNT        0x0062
++#define HC_SubA_HAGPBpL         0x0063
++#define HC_SubA_HAGPBpH         0x0064
++#define HC_HAGPCMNT_MASK        0x00800000
++#define HC_HCmdErrClr_MASK      0x00400000
++#define HC_HAGPBendH_MASK       0x0000ff00
++#define HC_HAGPBstH_MASK        0x000000ff
++#define HC_HAGPBendH_SHIFT      8
++#define HC_HAGPBstH_SHIFT       0
++#define HC_HAGPBpL_MASK         0x00fffffc
++#define HC_HAGPBpID_MASK        0x00000003
++#define HC_HAGPBpID_PAUSE       0x00000000
++#define HC_HAGPBpID_JUMP        0x00000001
++#define HC_HAGPBpID_STOP        0x00000002
++#define HC_HAGPBpH_MASK         0x00ffffff
++
++#define VIA_VIDEO_HEADER5       0xFE040000
++#define VIA_VIDEO_HEADER6       0xFE050000
++#define VIA_VIDEO_HEADER7       0xFE060000
++#define VIA_VIDEOMASK           0xFFFF0000
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_buffer.c git-nokia/drivers/gpu/drm-tungsten/via_buffer.c
+--- git/drivers/gpu/drm-tungsten/via_buffer.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_buffer.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,163 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
++{
++      return drm_agp_init_ttm(dev);
++}
++
++int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
++                  uint32_t * type)
++{
++      *type = 3;
++      return 0;
++}
++
++int via_invalidate_caches(struct drm_device * dev, uint64_t flags)
++{
++      /*
++       * FIXME: Invalidate texture caches here.
++       */
++
++      return 0;
++}
++
++
++static int via_vram_info(struct drm_device *dev,
++                       unsigned long *offset,
++                       unsigned long *size)
++{
++      struct pci_dev *pdev = dev->pdev;
++      unsigned long flags;
++
++      int ret = -EINVAL;
++      int i;
++      for (i=0; i<6; ++i) {
++              flags = pci_resource_flags(pdev, i);
++              if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
++                  (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
++                      ret = 0;
++                      break;
++              }
++      }
++
++      if (ret) {
++              DRM_ERROR("Could not find VRAM PCI resource\n");
++              return ret;
++      }
++
++      *offset = pci_resource_start(pdev, i);
++      *size = pci_resource_end(pdev, i) - *offset + 1;
++      return 0;
++}
++
++int via_init_mem_type(struct drm_device * dev, uint32_t type,
++                     struct drm_mem_type_manager * man)
++{
++      switch (type) {
++      case DRM_BO_MEM_LOCAL:
++              /* System memory */
++
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                      _DRM_FLAG_MEMTYPE_CACHED;
++              man->drm_bus_maptype = 0;
++              break;
++
++      case DRM_BO_MEM_TT:
++              /* Dynamic agpgart memory */
++
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
++
++              /* Only to get pte protection right. */
++
++              man->drm_bus_maptype = _DRM_AGP;
++              break;
++
++      case DRM_BO_MEM_VRAM:
++              /* "On-card" video ram */
++
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
++              man->io_addr = NULL;
++              return via_vram_info(dev, &man->io_offset, &man->io_size);
++              break;
++
++      case DRM_BO_MEM_PRIV0:
++              /* Pre-bound agpgart memory */
++
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_AGP;
++              break;
++
++      default:
++              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++uint64_t via_evict_flags(struct drm_buffer_object *bo)
++{
++      switch (bo->mem.mem_type) {
++      case DRM_BO_MEM_LOCAL:
++      case DRM_BO_MEM_TT:
++              return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */
++      case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */
++              return DRM_BO_MEM_TT;
++      case DRM_BO_MEM_VRAM:
++              if (bo->mem.num_pages > 128)
++                      return DRM_BO_MEM_TT;
++              else
++                      return DRM_BO_MEM_LOCAL;
++      default:
++              return DRM_BO_MEM_LOCAL;
++      }
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_dmablit.c git-nokia/drivers/gpu/drm-tungsten/via_dmablit.c
+--- git/drivers/gpu/drm-tungsten/via_dmablit.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_dmablit.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,829 @@
++/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
++ *
++ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Thomas Hellstrom.
++ *    Partially based on code obtained from Digeo Inc.
++ */
++
++
++/*
++ * Unmaps the DMA mappings.
++ * FIXME: Is this a NoOp on x86? Also
++ * FIXME: What happens if this one is called and a pending blit has previously done
++ * the same DMA mappings?
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++#include "via_dmablit.h"
++
++#include <linux/pagemap.h>
++
++#define VIA_PGDN(x)             (((unsigned long)(x)) & PAGE_MASK)
++#define VIA_PGOFF(x)            (((unsigned long)(x)) & ~PAGE_MASK)
++#define VIA_PFN(x)              ((unsigned long)(x) >> PAGE_SHIFT)
++
++typedef struct _drm_via_descriptor {
++      uint32_t mem_addr;
++      uint32_t dev_addr;
++      uint32_t size;
++      uint32_t next;
++} drm_via_descriptor_t;
++
++
++/*
++ * Unmap a DMA mapping.
++ */
++
++
++
++static void
++via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
++{
++      int num_desc = vsg->num_desc;
++      unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
++      unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
++      drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
++              descriptor_this_page;
++      dma_addr_t next = vsg->chain_start;
++
++      while(num_desc--) {
++              if (descriptor_this_page-- == 0) {
++                      cur_descriptor_page--;
++                      descriptor_this_page = vsg->descriptors_per_page - 1;
++                      desc_ptr = vsg->desc_pages[cur_descriptor_page] +
++                              descriptor_this_page;
++              }
++              dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
++              dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
++              next = (dma_addr_t) desc_ptr->next;
++              desc_ptr--;
++      }
++}
++
++/*
++ * If mode = 0, count how many descriptors are needed.
++ * If mode = 1, Map the DMA pages for the device, put together and map also the descriptors.
++ * Descriptors are run in reverse order by the hardware because we are not allowed to update the
++ * 'next' field without syncing calls when the descriptor is already mapped.
++ */
++
++static void
++via_map_blit_for_device(struct pci_dev *pdev,
++                 const drm_via_dmablit_t *xfer,
++                 drm_via_sg_info_t *vsg,
++                 int mode)
++{
++      unsigned cur_descriptor_page = 0;
++      unsigned num_descriptors_this_page = 0;
++      unsigned char *mem_addr = xfer->mem_addr;
++      unsigned char *cur_mem;
++      unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
++      uint32_t fb_addr = xfer->fb_addr;
++      uint32_t cur_fb;
++      unsigned long line_len;
++      unsigned remaining_len;
++      int num_desc = 0;
++      int cur_line;
++      dma_addr_t next = 0 | VIA_DMA_DPR_EC;
++      drm_via_descriptor_t *desc_ptr = NULL;
++
++      if (mode == 1)
++              desc_ptr = vsg->desc_pages[cur_descriptor_page];
++
++      for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
++
++              line_len = xfer->line_length;
++              cur_fb = fb_addr;
++              cur_mem = mem_addr;
++
++              while (line_len > 0) {
++
++                      remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
++                      line_len -= remaining_len;
++
++                      if (mode == 1) {
++                              desc_ptr->mem_addr = dma_map_page(&pdev->dev,
++                                      vsg->pages[VIA_PFN(cur_mem) -
++                                      VIA_PFN(first_addr)],
++                                      VIA_PGOFF(cur_mem), remaining_len,
++                                      vsg->direction);
++                              desc_ptr->dev_addr = cur_fb;
++
++                              desc_ptr->size = remaining_len;
++                              desc_ptr->next = (uint32_t) next;
++                              next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
++                                                    DMA_TO_DEVICE);
++                              desc_ptr++;
++                              if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
++                                      num_descriptors_this_page = 0;
++                                      desc_ptr = vsg->desc_pages[++cur_descriptor_page];
++                              }
++                      }
++
++                      num_desc++;
++                      cur_mem += remaining_len;
++                      cur_fb += remaining_len;
++              }
++
++              mem_addr += xfer->mem_stride;
++              fb_addr += xfer->fb_stride;
++      }
++
++      if (mode == 1) {
++              vsg->chain_start = next;
++              vsg->state = dr_via_device_mapped;
++      }
++      vsg->num_desc = num_desc;
++}
++
++/*
++ * Function that frees up all resources for a blit. It is usable even if the
++ * blit info has only been partially built as long as the status enum is consistent
++ * with the actual status of the used resources.
++ */
++
++
++static void
++via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
++{
++      struct page *page;
++      int i;
++
++      switch(vsg->state) {
++      case dr_via_device_mapped:
++              via_unmap_blit_from_device(pdev, vsg);
++      case dr_via_desc_pages_alloc:
++              for (i=0; i<vsg->num_desc_pages; ++i) {
++                      if (vsg->desc_pages[i] != NULL)
++                        free_page((unsigned long)vsg->desc_pages[i]);
++              }
++              kfree(vsg->desc_pages);
++      case dr_via_pages_locked:
++              for (i=0; i<vsg->num_pages; ++i) {
++                      if ( NULL != (page = vsg->pages[i])) {
++                              if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
++                                      SetPageDirty(page);
++                              page_cache_release(page);
++                      }
++              }
++      case dr_via_pages_alloc:
++              vfree(vsg->pages);
++      default:
++              vsg->state = dr_via_sg_init;
++      }
++      if (vsg->bounce_buffer) {
++              vfree(vsg->bounce_buffer);
++              vsg->bounce_buffer = NULL;
++      }
++      vsg->free_on_sequence = 0;
++}
++
++/*
++ * Fire a blit engine.
++ */
++
++static void
++via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++
++      VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
++      VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
++                VIA_DMA_CSR_DE);
++      VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
++      VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
++      VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
++      DRM_WRITEMEMORYBARRIER();
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
++      VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
++}
++
++/*
++ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
++ * occur here if the calling user does not have access to the submitted address.
++ */
++
++static int
++via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
++{
++      int ret;
++      unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
++      vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
++              first_pfn + 1;
++
++      if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
++              return -ENOMEM;
++      memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
++      down_read(&current->mm->mmap_sem);
++      ret = get_user_pages(current, current->mm,
++                           (unsigned long)xfer->mem_addr,
++                           vsg->num_pages,
++                           (vsg->direction == DMA_FROM_DEVICE),
++                           0, vsg->pages, NULL);
++
++      up_read(&current->mm->mmap_sem);
++      if (ret != vsg->num_pages) {
++              if (ret < 0)
++                      return ret;
++              vsg->state = dr_via_pages_locked;
++              return -EINVAL;
++      }
++      vsg->state = dr_via_pages_locked;
++      DRM_DEBUG("DMA pages locked\n");
++      return 0;
++}
++
++/*
++ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
++ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
++ * quite large for some blits, and pages don't need to be contingous.
++ */
++
++static int
++via_alloc_desc_pages(drm_via_sg_info_t *vsg)
++{
++      int i;
++
++      vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
++      vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
++              vsg->descriptors_per_page;
++
++      if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
++              return -ENOMEM;
++
++      memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
++      vsg->state = dr_via_desc_pages_alloc;
++      for (i=0; i<vsg->num_desc_pages; ++i) {
++              if (NULL == (vsg->desc_pages[i] =
++                           (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
++                      return -ENOMEM;
++      }
++      DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
++                vsg->num_desc);
++      return 0;
++}
++
++static void
++via_abort_dmablit(struct drm_device *dev, int engine)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
++}
++
++static void
++via_dmablit_engine_off(struct drm_device *dev, int engine)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
++}
++
++
++
++/*
++ * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
++ * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
++ * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
++ * the workqueue task takes care of processing associated with the old blit.
++ */
++
++void
++via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
++      int cur;
++      int done_transfer;
++      unsigned long irqsave=0;
++      uint32_t status = 0;
++
++      DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
++                engine, from_irq, (unsigned long) blitq);
++
++      if (from_irq) {
++              spin_lock(&blitq->blit_lock);
++      } else {
++              spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      }
++
++      done_transfer = blitq->is_active &&
++        (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
++      done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
++
++      cur = blitq->cur;
++      if (done_transfer) {
++
++              blitq->blits[cur]->aborted = blitq->aborting;
++              blitq->done_blit_handle++;
++              DRM_WAKEUP(blitq->blit_queue + cur);
++
++              cur++;
++              if (cur >= VIA_NUM_BLIT_SLOTS)
++                      cur = 0;
++              blitq->cur = cur;
++
++              /*
++               * Clear transfer done flag.
++               */
++
++              VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);
++
++              blitq->is_active = 0;
++              blitq->aborting = 0;
++              schedule_work(&blitq->wq);
++
++      } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
++
++              /*
++               * Abort transfer after one second.
++               */
++
++              via_abort_dmablit(dev, engine);
++              blitq->aborting = 1;
++              blitq->end = jiffies + DRM_HZ;
++      }
++
++      if (!blitq->is_active) {
++              if (blitq->num_outstanding) {
++                      via_fire_dmablit(dev, blitq->blits[cur], engine);
++                      blitq->is_active = 1;
++                      blitq->cur = cur;
++                      blitq->num_outstanding--;
++                      blitq->end = jiffies + DRM_HZ;
++                      if (!timer_pending(&blitq->poll_timer)) {
++                              blitq->poll_timer.expires = jiffies+1;
++                              add_timer(&blitq->poll_timer);
++                      }
++              } else {
++                      if (timer_pending(&blitq->poll_timer)) {
++                              del_timer(&blitq->poll_timer);
++                      }
++                      via_dmablit_engine_off(dev, engine);
++              }
++      }
++
++      if (from_irq) {
++              spin_unlock(&blitq->blit_lock);
++      } else {
++              spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++      }
++}
++
++
++
++/*
++ * Check whether this blit is still active, performing necessary locking.
++ */
++
++static int
++via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
++{
++      unsigned long irqsave;
++      uint32_t slot;
++      int active;
++
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++
++      /*
++       * Allow for handle wraparounds.
++       */
++
++      active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
++              ((blitq->cur_blit_handle - handle) <= (1 << 23));
++
++      if (queue && active) {
++              slot = handle - blitq->done_blit_handle + blitq->cur -1;
++              if (slot >= VIA_NUM_BLIT_SLOTS) {
++                      slot -= VIA_NUM_BLIT_SLOTS;
++              }
++              *queue = blitq->blit_queue + slot;
++      }
++
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++      return active;
++}
++
++/*
++ * Sync. Wait for at least three seconds for the blit to be performed.
++ */
++
++static int
++via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
++{
++
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
++      wait_queue_head_t *queue;
++      int ret = 0;
++
++      if (via_dmablit_active(blitq, engine, handle, &queue)) {
++              DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
++                          !via_dmablit_active(blitq, engine, handle, NULL));
++      }
++      DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
++                handle, engine, ret);
++
++      return ret;
++}
++
++
++/*
++ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
++ * a) Broken hardware (typically those that don't have any video capture facility).
++ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
++ * The timer and hardware IRQ's can and do work in parallel. If the hardware has
++ * irqs, it will shorten the latency somewhat.
++ */
++
++
++
++static void
++via_dmablit_timer(unsigned long data)
++{
++      drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
++      struct drm_device *dev = blitq->dev;
++      int engine = (int)
++              (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
++
++      DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
++                (unsigned long) jiffies);
++
++      via_dmablit_handler(dev, engine, 0);
++
++      if (!timer_pending(&blitq->poll_timer)) {
++              blitq->poll_timer.expires = jiffies+1;
++              add_timer(&blitq->poll_timer);
++
++              /*
++               * Rerun handler to delete timer if engines are off, and
++               * to shorten abort latency. This is a little nasty.
++               */
++
++              via_dmablit_handler(dev, engine, 0);
++      }
++}
++
++
++
++
++/*
++ * Workqueue task that frees data and mappings associated with a blit.
++ * Also wakes up waiting processes. Each of these tasks handles one
++ * blit engine only and may not be called on each interrupt.
++ */
++
++
++static void
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++via_dmablit_workqueue(void *data)
++#else
++via_dmablit_workqueue(struct work_struct *work)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
++#else
++      drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
++#endif
++      struct drm_device *dev = blitq->dev;
++      unsigned long irqsave;
++      drm_via_sg_info_t *cur_sg;
++      int cur_released;
++
++
++      DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
++                (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
++
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++
++      while(blitq->serviced != blitq->cur) {
++
++              cur_released = blitq->serviced++;
++
++              DRM_DEBUG("Releasing blit slot %d\n", cur_released);
++
++              if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
++                      blitq->serviced = 0;
++
++              cur_sg = blitq->blits[cur_released];
++              blitq->num_free++;
++
++              spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++              DRM_WAKEUP(&blitq->busy_queue);
++
++              via_free_sg_info(dev->pdev, cur_sg);
++              kfree(cur_sg);
++
++              spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      }
++
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++}
++
++
++/*
++ * Init all blit engines. Currently we use two, but some hardware have 4.
++ */
++
++
++void
++via_init_dmablit(struct drm_device *dev)
++{
++      int i,j;
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_blitq_t *blitq;
++
++      pci_set_master(dev->pdev);
++
++      for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
++              blitq = dev_priv->blit_queues + i;
++              blitq->dev = dev;
++              blitq->cur_blit_handle = 0;
++              blitq->done_blit_handle = 0;
++              blitq->head = 0;
++              blitq->cur = 0;
++              blitq->serviced = 0;
++              blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
++              blitq->num_outstanding = 0;
++              blitq->is_active = 0;
++              blitq->aborting = 0;
++              spin_lock_init(&blitq->blit_lock);
++              for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
++                      DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
++              }
++              DRM_INIT_WAITQUEUE(&blitq->busy_queue);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++              INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
++#else
++              INIT_WORK(&blitq->wq, via_dmablit_workqueue);
++#endif
++              init_timer(&blitq->poll_timer);
++              blitq->poll_timer.function = &via_dmablit_timer;
++              blitq->poll_timer.data = (unsigned long) blitq;
++      }
++}
++
++/*
++ * Build all info and do all mappings required for a blit.
++ */
++
++
++static int
++via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
++{
++      int draw = xfer->to_fb;
++      int ret = 0;
++
++      vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
++      vsg->bounce_buffer = NULL;
++
++      vsg->state = dr_via_sg_init;
++
++      if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
++              DRM_ERROR("Zero size bitblt.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * Below check is a driver limitation, not a hardware one. We
++       * don't want to lock unused pages, and don't want to incorporate the
++       * extra logic of avoiding them. Make sure there are none.
++       * (Not a big limitation anyway.)
++       */
++
++      if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
++              DRM_ERROR("Too large system memory stride. Stride: %d, "
++                        "Length: %d\n", xfer->mem_stride, xfer->line_length);
++              return -EINVAL;
++      }
++
++      if ((xfer->mem_stride == xfer->line_length) &&
++          (xfer->fb_stride == xfer->line_length)) {
++              xfer->mem_stride *= xfer->num_lines;
++              xfer->line_length = xfer->mem_stride;
++              xfer->fb_stride = xfer->mem_stride;
++              xfer->num_lines = 1;
++      }
++
++      /*
++       * Don't lock an arbitrarily large number of pages, since that causes a
++       * DoS security hole.
++       */
++
++      if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
++              DRM_ERROR("Too large PCI DMA bitblt.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * we allow a negative fb stride to allow flipping of images in
++       * transfer.
++       */
++
++      if (xfer->mem_stride < xfer->line_length ||
++          abs(xfer->fb_stride) < xfer->line_length) {
++              DRM_ERROR("Invalid frame-buffer / memory stride.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * A hardware bug seems to be worked around if system memory addresses start on
++       * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
++       * about this. Meanwhile, impose the following restrictions:
++       */
++
++#ifdef VIA_BUGFREE
++      if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
++          ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
++              DRM_ERROR("Invalid DRM bitblt alignment.\n");
++              return -EINVAL;
++      }
++#else
++      if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
++          ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
++              DRM_ERROR("Invalid DRM bitblt alignment.\n");
++              return -EINVAL;
++      }
++#endif
++
++      if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
++              DRM_ERROR("Could not lock DMA pages.\n");
++              via_free_sg_info(dev->pdev, vsg);
++              return ret;
++      }
++
++      via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
++      if (0 != (ret = via_alloc_desc_pages(vsg))) {
++              DRM_ERROR("Could not allocate DMA descriptor pages.\n");
++              via_free_sg_info(dev->pdev, vsg);
++              return ret;
++      }
++      via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
++
++      return 0;
++}
++
++
++/*
++ * Reserve one free slot in the blit queue. Will wait for one second for one
++ * to become available. Otherwise -EBUSY is returned.
++ */
++
++static int
++via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
++{
++      int ret=0;
++      unsigned long irqsave;
++
++      DRM_DEBUG("Num free is %d\n", blitq->num_free);
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      while(blitq->num_free == 0) {
++              spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++              DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
++              if (ret) {
++                      return (-EINTR == ret) ? -EAGAIN : ret;
++              }
++
++              spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      }
++
++      blitq->num_free--;
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++      return 0;
++}
++
++/*
++ * Hand back a free slot if we changed our mind.
++ */
++
++static void
++via_dmablit_release_slot(drm_via_blitq_t *blitq)
++{
++      unsigned long irqsave;
++
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      blitq->num_free++;
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++      DRM_WAKEUP( &blitq->busy_queue );
++}
++
++/*
++ * Grab a free slot. Build blit info and queue a blit.
++ */
++
++
++static int
++via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_sg_info_t *vsg;
++      drm_via_blitq_t *blitq;
++      int ret;
++      int engine;
++      unsigned long irqsave;
++
++      if (dev_priv == NULL) {
++              DRM_ERROR("Called without initialization.\n");
++              return -EINVAL;
++      }
++
++      engine = (xfer->to_fb) ? 0 : 1;
++      blitq = dev_priv->blit_queues + engine;
++      if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
++              return ret;
++      }
++      if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
++              via_dmablit_release_slot(blitq);
++              return -ENOMEM;
++      }
++      if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
++              via_dmablit_release_slot(blitq);
++              kfree(vsg);
++              return ret;
++      }
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++
++      blitq->blits[blitq->head++] = vsg;
++      if (blitq->head >= VIA_NUM_BLIT_SLOTS)
++              blitq->head = 0;
++      blitq->num_outstanding++;
++      xfer->sync.sync_handle = ++blitq->cur_blit_handle;
++
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++      xfer->sync.engine = engine;
++
++      via_dmablit_handler(dev, engine, 0);
++
++      return 0;
++}
++
++/*
++ * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
++ * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
++ * case it returns with -EAGAIN for the signal to be delivered.
++ * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
++ */
++
++int
++via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
++{
++      drm_via_blitsync_t *sync = data;
++      int err;
++
++      if (sync->engine >= VIA_NUM_BLIT_ENGINES)
++              return -EINVAL;
++
++      err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
++
++      if (-EINTR == err)
++              err = -EAGAIN;
++
++      return err;
++}
++
++
++/*
++ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
++ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
++ * be reissued. See the above IOCTL code.
++ */
++
++int
++via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
++{
++      drm_via_dmablit_t *xfer = data;
++      int err;
++
++      err = via_dmablit(dev, xfer);
++
++      return err;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_dmablit.h git-nokia/drivers/gpu/drm-tungsten/via_dmablit.h
+--- git/drivers/gpu/drm-tungsten/via_dmablit.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_dmablit.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,140 @@
++/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
++ *
++ * Copyright 2005 Thomas Hellstrom.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Thomas Hellstrom.
++ *    Register info from Digeo Inc.
++ */
++
++#ifndef _VIA_DMABLIT_H
++#define _VIA_DMABLIT_H
++
++#include <linux/dma-mapping.h>
++
++#define VIA_NUM_BLIT_ENGINES 2
++#define VIA_NUM_BLIT_SLOTS 8
++
++struct _drm_via_descriptor;
++
++typedef struct _drm_via_sg_info {
++      struct page **pages;
++      unsigned long num_pages;
++      struct _drm_via_descriptor **desc_pages;
++      int num_desc_pages;
++      int num_desc;
++      enum dma_data_direction direction;
++      unsigned char *bounce_buffer;
++      dma_addr_t chain_start;
++      uint32_t free_on_sequence;
++      unsigned int descriptors_per_page;
++      int aborted;
++      enum {
++              dr_via_device_mapped,
++              dr_via_desc_pages_alloc,
++              dr_via_pages_locked,
++              dr_via_pages_alloc,
++              dr_via_sg_init
++      } state;
++} drm_via_sg_info_t;
++
++typedef struct _drm_via_blitq {
++      struct drm_device *dev;
++      uint32_t cur_blit_handle;
++      uint32_t done_blit_handle;
++      unsigned serviced;
++      unsigned head;
++      unsigned cur;
++      unsigned num_free;
++      unsigned num_outstanding;
++      unsigned long end;
++      int aborting;
++      int is_active;
++      drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
++      spinlock_t blit_lock;
++      wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
++      wait_queue_head_t busy_queue;
++      struct work_struct wq;
++      struct timer_list poll_timer;
++} drm_via_blitq_t;
++
++
++/*
++ *  PCI DMA Registers
++ *  Channels 2 & 3 don't seem to be implemented in hardware.
++ */
++
++#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */
++#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */
++#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */
++#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */
++
++#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */
++#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */
++#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */
++#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */
++
++#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */
++#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */
++#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */
++#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */
++
++#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */
++#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */
++#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */
++#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */
++
++#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */
++#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */
++#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */
++#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */
++
++#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */
++#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */
++#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */
++#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */
++
++#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */
++
++/* Define for DMA engine */
++/* DPR */
++#define VIA_DMA_DPR_EC                (1<<1)  /* end of chain */
++#define VIA_DMA_DPR_DDIE      (1<<2)  /* descriptor done interrupt enable */
++#define VIA_DMA_DPR_DT                (1<<3)  /* direction of transfer (RO) */
++
++/* MR */
++#define VIA_DMA_MR_CM         (1<<0)  /* chaining mode */
++#define VIA_DMA_MR_TDIE               (1<<1)  /* transfer done interrupt enable */
++#define VIA_DMA_MR_HENDMACMD          (1<<7) /* ? */
++
++/* CSR */
++#define VIA_DMA_CSR_DE                (1<<0)  /* DMA enable */
++#define VIA_DMA_CSR_TS                (1<<1)  /* transfer start */
++#define VIA_DMA_CSR_TA                (1<<2)  /* transfer abort */
++#define VIA_DMA_CSR_TD                (1<<3)  /* transfer done */
++#define VIA_DMA_CSR_DD                (1<<4)  /* descriptor done */
++#define VIA_DMA_DPR_EC          (1<<1)  /* end of chain */
++
++
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_dma.c git-nokia/drivers/gpu/drm-tungsten/via_dma.c
+--- git/drivers/gpu/drm-tungsten/via_dma.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_dma.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,763 @@
++/* via_dma.c -- DMA support for the VIA Unichrome/Pro
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
++ * All Rights Reserved.
++ *
++ * Copyright 2004 The Unichrome project.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Tungsten Graphics,
++ *    Erdi Chen,
++ *    Thomas Hellstrom.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "via_drm.h"
++#include "via_drv.h"
++#include "via_3d_reg.h"
++
++#define SetReg2DAGP(nReg, nData) {                            \
++      *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;  \
++      *((uint32_t *)(vb) + 1) = (nData);                      \
++      vb = ((uint32_t *)vb) + 2;                              \
++      dev_priv->dma_low +=8;                                  \
++}
++
++#define via_flush_write_combine() DRM_MEMORYBARRIER()
++
++#define VIA_OUT_RING_QW(w1,w2)                        \
++      *vb++ = (w1);                           \
++      *vb++ = (w2);                           \
++      dev_priv->dma_low += 8;
++
++static void via_cmdbuf_start(drm_via_private_t *dev_priv);
++static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
++static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
++static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
++static int via_wait_idle(drm_via_private_t *dev_priv);
++static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
++
++
++/*
++ * Free space in command buffer.
++ */
++
++static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
++{
++      uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
++
++      return ((hw_addr <= dev_priv->dma_low) ?
++              (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
++              (hw_addr - dev_priv->dma_low));
++}
++
++/*
++ * How much does the command regulator lag behind?
++ */
++
++static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
++{
++      uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
++
++      return ((hw_addr <= dev_priv->dma_low) ?
++              (dev_priv->dma_low - hw_addr) :
++              (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
++}
++
++/*
++ * Check that the given size fits in the buffer, otherwise wait.
++ */
++
++static inline int
++via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
++{
++      uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      uint32_t cur_addr, hw_addr, next_addr;
++      volatile uint32_t *hw_addr_ptr;
++      uint32_t count;
++      hw_addr_ptr = dev_priv->hw_addr_ptr;
++      cur_addr = dev_priv->dma_low;
++      next_addr = cur_addr + size + 512 * 1024;
++      count = 1000000;
++      do {
++              hw_addr = *hw_addr_ptr - agp_base;
++              if (count-- == 0) {
++                      DRM_ERROR
++                          ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
++                           hw_addr, cur_addr, next_addr);
++                      return -1;
++              }
++              if  ((cur_addr < hw_addr) && (next_addr >= hw_addr))
++                      msleep(1);
++      } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
++      return 0;
++}
++
++
++/*
++ * Checks whether the buffer head has reached the end. Rewinds the ring buffer
++ * when necessary.
++ *
++ * Returns virtual pointer to ring buffer.
++ */
++
++static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
++                                    unsigned int size)
++{
++      if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
++          dev_priv->dma_high) {
++              via_cmdbuf_rewind(dev_priv);
++      }
++      if (via_cmdbuf_wait(dev_priv, size) != 0) {
++              return NULL;
++      }
++
++      return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
++}
++
++int via_dma_cleanup(struct drm_device * dev)
++{
++      if (dev->dev_private) {
++              drm_via_private_t *dev_priv =
++                      (drm_via_private_t *) dev->dev_private;
++
++              if (dev_priv->ring.virtual_start) {
++                      via_cmdbuf_reset(dev_priv);
++
++                      drm_core_ioremapfree(&dev_priv->ring.map, dev);
++                      dev_priv->ring.virtual_start = NULL;
++              }
++
++      }
++
++      return 0;
++}
++
++static int via_initialize(struct drm_device * dev,
++                        drm_via_private_t * dev_priv,
++                        drm_via_dma_init_t * init)
++{
++      if (!dev_priv || !dev_priv->mmio) {
++              DRM_ERROR("via_dma_init called before via_map_init\n");
++              return -EFAULT;
++      }
++
++      if (dev_priv->ring.virtual_start != NULL) {
++              DRM_ERROR("called again without calling cleanup\n");
++              return -EFAULT;
++      }
++
++      if (!dev->agp || !dev->agp->base) {
++              DRM_ERROR("called with no agp memory available\n");
++              return -EFAULT;
++      }
++
++      if (dev_priv->chipset == VIA_DX9_0) {
++              DRM_ERROR("AGP DMA is not supported on this chip\n");
++              return -EINVAL;
++      }
++
++      dev_priv->ring.map.offset = dev->agp->base + init->offset;
++      dev_priv->ring.map.size = init->size;
++      dev_priv->ring.map.type = 0;
++      dev_priv->ring.map.flags = 0;
++      dev_priv->ring.map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->ring.map, dev);
++
++      if (dev_priv->ring.map.handle == NULL) {
++              via_dma_cleanup(dev);
++              DRM_ERROR("can not ioremap virtual address for"
++                        " ring buffer\n");
++              return -ENOMEM;
++      }
++
++      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++      dev_priv->dma_ptr = dev_priv->ring.virtual_start;
++      dev_priv->dma_low = 0;
++      dev_priv->dma_high = init->size;
++      dev_priv->dma_wrap = init->size;
++      dev_priv->dma_offset = init->offset;
++      dev_priv->last_pause_ptr = NULL;
++      dev_priv->hw_addr_ptr =
++              (volatile uint32_t *)((char *)dev_priv->mmio->handle +
++              init->reg_pause_addr);
++
++      via_cmdbuf_start(dev_priv);
++
++      return 0;
++}
++
++static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_dma_init_t *init = data;
++      int retcode = 0;
++
++      switch (init->func) {
++      case VIA_INIT_DMA:
++              if (!DRM_SUSER(DRM_CURPROC))
++                      retcode = -EPERM;
++              else
++                      retcode = via_initialize(dev, dev_priv, init);
++              break;
++      case VIA_CLEANUP_DMA:
++              if (!DRM_SUSER(DRM_CURPROC))
++                      retcode = -EPERM;
++              else
++                      retcode = via_dma_cleanup(dev);
++              break;
++      case VIA_DMA_INITIALIZED:
++              retcode = (dev_priv->ring.virtual_start != NULL) ?
++                      0 : -EFAULT;
++              break;
++      default:
++              retcode = -EINVAL;
++              break;
++      }
++
++      return retcode;
++}
++
++
++
++static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
++{
++      drm_via_private_t *dev_priv;
++      uint32_t *vb;
++      int ret;
++
++      dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      if (dev_priv->ring.virtual_start == NULL) {
++              DRM_ERROR("called without initializing AGP ring buffer.\n");
++              return -EFAULT;
++      }
++
++      if (cmd->size > VIA_PCI_BUF_SIZE) {
++              return -ENOMEM;
++      }
++
++      if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
++              return -EFAULT;
++
++      /*
++       * Running this function on AGP memory is dead slow. Therefore
++       * we run it on a temporary cacheable system memory buffer and
++       * copy it to AGP memory when ready.
++       */
++
++      if ((ret =
++           via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
++                                     cmd->size, dev, 1))) {
++              return ret;
++      }
++
++      vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
++      if (vb == NULL) {
++              return -EAGAIN;
++      }
++
++      memcpy(vb, dev_priv->pci_buf, cmd->size);
++
++      dev_priv->dma_low += cmd->size;
++
++      /*
++       * Small submissions somehow stall the CPU. (AGP cache effects?)
++       * pad to greater size.
++       */
++
++      if (cmd->size < 0x100)
++              via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
++      via_cmdbuf_pause(dev_priv);
++
++      return 0;
++}
++
++int via_driver_dma_quiescent(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++      if (!via_wait_idle(dev_priv)) {
++              return -EBUSY;
++      }
++      return 0;
++}
++
++static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return via_driver_dma_quiescent(dev);
++}
++
++static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_cmdbuffer_t *cmdbuf = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
++
++      ret = via_dispatch_cmdbuffer(dev, cmdbuf);
++      if (ret) {
++              return ret;
++      }
++
++      return 0;
++}
++
++static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
++                                    drm_via_cmdbuffer_t * cmd)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      int ret;
++
++      if (cmd->size > VIA_PCI_BUF_SIZE) {
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
++              return -EFAULT;
++
++      if ((ret =
++           via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
++                                     cmd->size, dev, 0))) {
++              return ret;
++      }
++
++      ret =
++          via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
++                                   cmd->size);
++      return ret;
++}
++
++static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_cmdbuffer_t *cmdbuf = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
++
++      ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
++      if (ret) {
++              return ret;
++      }
++
++      return 0;
++}
++
++static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
++                                       uint32_t * vb, int qw_count)
++{
++      for (; qw_count > 0; --qw_count) {
++              VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
++      }
++      return vb;
++}
++
++/*
++ * This function is used internally by ring buffer management code.
++ *
++ * Returns virtual pointer to ring buffer.
++ */
++static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
++{
++      return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
++}
++
++/*
++ * Hooks a segment of data into the tail of the ring-buffer by
++ * modifying the pause address stored in the buffer itself. If
++ * the regulator has already paused, restart it.
++ */
++static int via_hook_segment(drm_via_private_t * dev_priv,
++                          uint32_t pause_addr_hi, uint32_t pause_addr_lo,
++                          int no_pci_fire)
++{
++      int paused, count;
++      volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
++      uint32_t reader,ptr;
++      uint32_t diff;
++
++      paused = 0;
++      via_flush_write_combine();
++      (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
++
++      *paused_at = pause_addr_lo;
++      via_flush_write_combine();
++      (void) *paused_at;
++
++      reader = *(dev_priv->hw_addr_ptr);
++      ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
++              dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
++
++      dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
++
++      /*
++       * There is a possibility that the command reader will
++       * miss the new pause address and pause on the old one.
++       * In that case we need to program the new start address
++       * using PCI.
++       */
++
++      diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
++      count = 10000000;
++      while(diff == 0 && count--) {
++              paused = (VIA_READ(0x41c) & 0x80000000);
++              if (paused) 
++                      break;
++              reader = *(dev_priv->hw_addr_ptr);
++              diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
++      }
++
++      paused = VIA_READ(0x41c) & 0x80000000;
++
++      if (paused && !no_pci_fire) {
++              reader = *(dev_priv->hw_addr_ptr);
++              diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
++              diff &= (dev_priv->dma_high - 1);
++              if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
++                      DRM_ERROR("Paused at incorrect address. "
++                                "0x%08x, 0x%08x 0x%08x\n",
++                                ptr, reader, dev_priv->dma_diff);
++              } else if (diff == 0) {
++                      /*
++                       * There is a concern that these writes may stall the PCI bus
++                       * if the GPU is not idle. However, idling the GPU first
++                       * doesn't make a difference.
++                       */
++
++                      VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
++                      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
++                      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
++                      VIA_READ(VIA_REG_TRANSPACE);
++              }
++      }
++
++      return paused;
++}
++
++
++
++static int via_wait_idle(drm_via_private_t *dev_priv)
++{
++      int count = 10000000;
++
++      while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
++
++      while (count-- && (VIA_READ(VIA_REG_STATUS) &
++                         (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
++                          VIA_3D_ENG_BUSY))) ;
++      return count;
++}
++
++static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
++                             uint32_t addr, uint32_t *cmd_addr_hi,
++                             uint32_t *cmd_addr_lo, int skip_wait)
++{
++      uint32_t agp_base;
++      uint32_t cmd_addr, addr_lo, addr_hi;
++      uint32_t *vb;
++      uint32_t qw_pad_count;
++
++      if (!skip_wait)
++              via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
++
++      vb = via_get_dma(dev_priv);
++      VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
++                      (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
++
++      agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
++              ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
++
++      cmd_addr = (addr) ? addr :
++              agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
++      addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
++                 (cmd_addr & HC_HAGPBpL_MASK));
++      addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
++
++      vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
++      VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
++      return vb;
++}
++
++static void via_cmdbuf_start(drm_via_private_t * dev_priv)
++{
++      uint32_t pause_addr_lo, pause_addr_hi;
++      uint32_t start_addr, start_addr_lo;
++      uint32_t end_addr, end_addr_lo;
++      uint32_t command;
++      uint32_t agp_base;
++      uint32_t ptr;
++      uint32_t reader;
++      int count;
++
++      dev_priv->dma_low = 0;
++
++      agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      start_addr = agp_base;
++      end_addr = agp_base + dev_priv->dma_high;
++
++      start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
++      end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
++      command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
++                 ((end_addr & 0xff000000) >> 16));
++
++      dev_priv->last_pause_ptr =
++              via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
++                            &pause_addr_hi, & pause_addr_lo, 1) - 1;
++
++      via_flush_write_combine();
++      (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
++
++      VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
++      VIA_WRITE(VIA_REG_TRANSPACE, command);
++      VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
++      VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
++
++      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
++      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
++      DRM_WRITEMEMORYBARRIER();
++      VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
++      VIA_READ(VIA_REG_TRANSPACE);
++
++      dev_priv->dma_diff = 0;
++
++      count = 10000000;
++      while (!(VIA_READ(0x41c) & 0x80000000) && count--);
++
++      reader = *(dev_priv->hw_addr_ptr);
++      ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
++          dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
++
++      /*
++       * This is the difference between where we tell the
++       * command reader to pause and where it actually pauses.
++       * This differs between hw implementation so we need to
++       * detect it.
++       */
++
++      dev_priv->dma_diff = ptr - reader;
++}
++
++static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
++{
++      uint32_t *vb;
++
++      via_cmdbuf_wait(dev_priv, qwords + 2);
++      vb = via_get_dma(dev_priv);
++      VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
++      via_align_buffer(dev_priv, vb, qwords);
++}
++
++static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
++{
++      uint32_t *vb = via_get_dma(dev_priv);
++      SetReg2DAGP(0x0C, (0 | (0 << 16)));
++      SetReg2DAGP(0x10, 0 | (0 << 16));
++      SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
++}
++
++static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
++{
++      uint32_t agp_base;
++      uint32_t pause_addr_lo, pause_addr_hi;
++      uint32_t jump_addr_lo, jump_addr_hi;
++      volatile uint32_t *last_pause_ptr;
++      uint32_t dma_low_save1, dma_low_save2;
++      
++      agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
++                    &jump_addr_lo, 0);
++
++      dev_priv->dma_wrap = dev_priv->dma_low;
++
++      /*
++       * Wrap command buffer to the beginning.
++       */
++
++      dev_priv->dma_low = 0;
++      if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
++              DRM_ERROR("via_cmdbuf_jump failed\n");
++      }
++
++      via_dummy_bitblt(dev_priv);
++      via_dummy_bitblt(dev_priv);
++
++      last_pause_ptr =
++              via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                        &pause_addr_lo, 0) - 1;
++      via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                    &pause_addr_lo, 0);
++
++      *last_pause_ptr = pause_addr_lo;
++      dma_low_save1 = dev_priv->dma_low;
++      
++      /*
++       * Now, set a trap that will pause the regulator if it tries to rerun the old
++       * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
++       * and reissues the jump command over PCI, while the regulator has already taken the jump
++       * and actually paused at the current buffer end).
++       * There appears to be no other way to detect this condition, since the hw_addr_pointer
++       * does not seem to get updated immediately when a jump occurs.
++       */
++
++      last_pause_ptr =
++              via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                            &pause_addr_lo, 0) - 1;
++      via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                    &pause_addr_lo, 0);
++      *last_pause_ptr = pause_addr_lo;
++      
++      dma_low_save2 = dev_priv->dma_low;
++      dev_priv->dma_low = dma_low_save1;
++      via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
++      dev_priv->dma_low = dma_low_save2;
++      via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
++}
++
++
++static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
++{
++      via_cmdbuf_jump(dev_priv);
++}
++
++static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
++{
++      uint32_t pause_addr_lo, pause_addr_hi;
++
++      via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
++      via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
++}
++
++
++static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
++{
++      via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
++}
++
++static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
++{
++      via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
++      via_wait_idle(dev_priv);
++}
++
++/*
++ * User interface to the space and lag functions.
++ */
++
++static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_cmdbuf_size_t *d_siz = data;
++      int ret = 0;
++      uint32_t tmp_size, count;
++      drm_via_private_t *dev_priv;
++
++      DRM_DEBUG("\n");
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      if (dev_priv->ring.virtual_start == NULL) {
++              DRM_ERROR("called without initializing AGP ring buffer.\n");
++              return -EFAULT;
++      }
++
++      count = 1000000;
++      tmp_size = d_siz->size;
++      switch (d_siz->func) {
++      case VIA_CMDBUF_SPACE:
++              while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
++                     && count--) {
++                      if (!d_siz->wait) {
++                              break;
++                      }
++              }
++              if (!count) {
++                      DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
++                      ret = -EAGAIN;
++              }
++              break;
++      case VIA_CMDBUF_LAG:
++              while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
++                     && count--) {
++                      if (!d_siz->wait) {
++                              break;
++                      }
++              }
++              if (!count) {
++                      DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
++                      ret = -EAGAIN;
++              }
++              break;
++      default:
++              ret = -EFAULT;
++      }
++      d_siz->size = tmp_size;
++
++      return ret;
++}
++
++#ifndef VIA_HAVE_DMABLIT
++int
++via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
++      DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
++      return -EINVAL;
++}
++int
++via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
++      DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
++      return -EINVAL;
++}
++#endif
++
++struct drm_ioctl_desc via_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
++};
++
++int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/via_drm.h git-nokia/drivers/gpu/drm-tungsten/via_drm.h
+--- git/drivers/gpu/drm-tungsten/via_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,282 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _VIA_DRM_H_
++#define _VIA_DRM_H_
++
++/* WARNING: These defines must be the same as what the Xserver uses.
++ * if you change them, you must change the defines in the Xserver.
++ */
++
++#ifndef _VIA_DEFINES_
++#define _VIA_DEFINES_
++
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include "via_drmclient.h"
++#endif
++
++/*
++ * With the arrival of libdrm there is a need to version this file.
++ * As usual, bump MINOR for new features, MAJOR for changes that create
++ * backwards incompatibilities, (which should be avoided whenever possible).
++ */
++
++#define VIA_DRM_DRIVER_DATE           "20070202"
++
++#define VIA_DRM_DRIVER_MAJOR          2
++#define VIA_DRM_DRIVER_MINOR          11
++#define VIA_DRM_DRIVER_PATCHLEVEL     1
++#define VIA_DRM_DRIVER_VERSION          (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR))
++
++#define VIA_NR_SAREA_CLIPRECTS                8
++#define VIA_NR_XVMC_PORTS            10
++#define VIA_NR_XVMC_LOCKS            5
++#define VIA_MAX_CACHELINE_SIZE          64
++#define XVMCLOCKPTR(saPriv,lockNo)                                    \
++      ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
++                                    (VIA_MAX_CACHELINE_SIZE - 1)) &   \
++                                   ~(VIA_MAX_CACHELINE_SIZE - 1)) +   \
++                                  VIA_MAX_CACHELINE_SIZE*(lockNo)))
++#define VIA_NR_TEX_REGIONS 64
++
++#endif
++
++#define DRM_VIA_FENCE_TYPE_ACCEL 0x00000002
++
++/* VIA specific ioctls */
++#define DRM_VIA_ALLOCMEM      0x00
++#define DRM_VIA_FREEMEM               0x01
++#define DRM_VIA_AGP_INIT      0x02
++#define DRM_VIA_FB_INIT               0x03
++#define DRM_VIA_MAP_INIT      0x04
++#define DRM_VIA_DEC_FUTEX       0x05
++#define NOT_USED
++#define DRM_VIA_DMA_INIT      0x07
++#define DRM_VIA_CMDBUFFER     0x08
++#define DRM_VIA_FLUSH         0x09
++#define DRM_VIA_PCICMD                0x0a
++#define DRM_VIA_CMDBUF_SIZE   0x0b
++#define NOT_USED
++#define DRM_VIA_WAIT_IRQ      0x0d
++#define DRM_VIA_DMA_BLIT      0x0e
++#define DRM_VIA_BLIT_SYNC       0x0f
++
++#define DRM_IOCTL_VIA_ALLOCMEM          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
++#define DRM_IOCTL_VIA_FREEMEM   DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
++#define DRM_IOCTL_VIA_AGP_INIT          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
++#define DRM_IOCTL_VIA_FB_INIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
++#define DRM_IOCTL_VIA_MAP_INIT          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
++#define DRM_IOCTL_VIA_DEC_FUTEX   DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
++#define DRM_IOCTL_VIA_DMA_INIT          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
++#define DRM_IOCTL_VIA_CMDBUFFER         DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
++#define DRM_IOCTL_VIA_FLUSH     DRM_IO(  DRM_COMMAND_BASE + DRM_VIA_FLUSH)
++#define DRM_IOCTL_VIA_PCICMD    DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
++#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
++                                          drm_via_cmdbuf_size_t)
++#define DRM_IOCTL_VIA_WAIT_IRQ    DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
++#define DRM_IOCTL_VIA_DMA_BLIT    DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
++#define DRM_IOCTL_VIA_BLIT_SYNC   DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
++
++/* Indices into buf.Setup where various bits of state are mirrored per
++ * context and per buffer.  These can be fired at the card as a unit,
++ * or in a piecewise fashion as required.
++ */
++
++#define VIA_TEX_SETUP_SIZE 8
++
++/* Flags for clear ioctl
++ */
++#define VIA_FRONT   0x1
++#define VIA_BACK    0x2
++#define VIA_DEPTH   0x4
++#define VIA_STENCIL 0x8
++
++#define VIA_MEM_VIDEO   0     /* matches drm constant */
++#define VIA_MEM_AGP     1     /* matches drm constant */
++#define VIA_MEM_SYSTEM  2
++#define VIA_MEM_MIXED   3
++#define VIA_MEM_UNKNOWN 4
++
++typedef struct {
++      uint32_t offset;
++      uint32_t size;
++} drm_via_agp_t;
++
++typedef struct {
++      uint32_t offset;
++      uint32_t size;
++} drm_via_fb_t;
++
++typedef struct {
++      uint32_t context;
++      uint32_t type;
++      uint32_t size;
++      unsigned long index;
++      unsigned long offset;
++} drm_via_mem_t;
++
++typedef struct _drm_via_init {
++      enum {
++              VIA_INIT_MAP = 0x01,
++              VIA_CLEANUP_MAP = 0x02
++      } func;
++
++      unsigned long sarea_priv_offset;
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long agpAddr;
++} drm_via_init_t;
++
++typedef struct _drm_via_futex {
++      enum {
++              VIA_FUTEX_WAIT = 0x00,
++              VIA_FUTEX_WAKE = 0X01
++      } func;
++      uint32_t ms;
++      uint32_t lock;
++      uint32_t val;
++} drm_via_futex_t;
++
++typedef struct _drm_via_dma_init {
++      enum {
++              VIA_INIT_DMA = 0x01,
++              VIA_CLEANUP_DMA = 0x02,
++              VIA_DMA_INITIALIZED = 0x03
++      } func;
++
++      unsigned long offset;
++      unsigned long size;
++      unsigned long reg_pause_addr;
++} drm_via_dma_init_t;
++
++typedef struct _drm_via_cmdbuffer {
++      char __user *buf;
++      unsigned long size;
++} drm_via_cmdbuffer_t;
++
++/* Warning: If you change the SAREA structure you must change the Xserver
++ * structure as well */
++
++typedef struct _drm_via_tex_region {
++      unsigned char next, prev;       /* indices to form a circular LRU  */
++      unsigned char inUse;    /* owned by a client, or free? */
++      int age;                /* tracked by clients to update local LRU's */
++} drm_via_tex_region_t;
++
++typedef struct _drm_via_sarea {
++      unsigned int dirty;
++      unsigned int nbox;
++      struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
++      drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
++      int texAge;             /* last time texture was uploaded */
++      int ctxOwner;           /* last context to upload state */
++      int vertexPrim;
++
++      /*
++       * Below is for XvMC.
++       * We want the lock integers alone on, and aligned to, a cache line.
++       * Therefore this somewhat strange construct.
++       */
++
++      char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
++
++      unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
++      unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
++      unsigned int XvMCCtxNoGrabbed;  /* Last context to hold decoder */
++
++      /* Used by the 3d driver only at this point, for pageflipping:
++       */
++      unsigned int pfCurrentOffset;
++} drm_via_sarea_t;
++
++typedef struct _drm_via_cmdbuf_size {
++      enum {
++              VIA_CMDBUF_SPACE = 0x01,
++              VIA_CMDBUF_LAG = 0x02
++      } func;
++      int wait;
++      uint32_t size;
++} drm_via_cmdbuf_size_t;
++
++typedef enum {
++      VIA_IRQ_ABSOLUTE = 0x0,
++      VIA_IRQ_RELATIVE = 0x1,
++      VIA_IRQ_SIGNAL = 0x10000000,
++      VIA_IRQ_FORCE_SEQUENCE = 0x20000000
++} via_irq_seq_type_t;
++
++#define VIA_IRQ_FLAGS_MASK 0xF0000000
++
++enum drm_via_irqs {
++      drm_via_irq_hqv0 = 0,
++      drm_via_irq_hqv1,
++      drm_via_irq_dma0_dd,
++      drm_via_irq_dma0_td,
++      drm_via_irq_dma1_dd,
++      drm_via_irq_dma1_td,
++      drm_via_irq_num
++};
++
++struct drm_via_wait_irq_request {
++      unsigned irq;
++      via_irq_seq_type_t type;
++      uint32_t sequence;
++      uint32_t signal;
++};
++
++typedef union drm_via_irqwait {
++      struct drm_via_wait_irq_request request;
++      struct drm_wait_vblank_reply reply;
++} drm_via_irqwait_t;
++
++typedef struct drm_via_blitsync {
++      uint32_t sync_handle;
++      unsigned engine;
++} drm_via_blitsync_t;
++
++/*
++ * Below, "flags" is currently unused but will be used for possible future
++ * extensions like kernel space bounce buffers for bad alignments and
++ * blit engine busy-wait polling for better latency in the absence of
++ * interrupts.
++ */
++
++typedef struct drm_via_dmablit {
++      uint32_t num_lines;
++      uint32_t line_length;
++
++      uint32_t fb_addr;
++      uint32_t fb_stride;
++
++      unsigned char *mem_addr;
++      uint32_t mem_stride;
++
++      uint32_t flags;
++      int to_fb;
++
++      drm_via_blitsync_t sync;
++} drm_via_dmablit_t;
++
++
++#endif                                /* _VIA_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/via_drv.c git-nokia/drivers/gpu/drm-tungsten/via_drv.c
+--- git/drivers/gpu/drm-tungsten/via_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,157 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++#include "drm_pciids.h"
++
++
++static int dri_library_name(struct drm_device * dev, char * buf)
++{
++      return snprintf(buf, PAGE_SIZE, "unichrome\n");
++}
++
++static struct pci_device_id pciidlist[] = {
++      viadrv_PCI_IDS
++};
++
++
++#ifdef VIA_HAVE_FENCE
++extern struct drm_fence_driver via_fence_driver;
++#endif
++
++#ifdef VIA_HAVE_BUFFER
++
++/**
++ * If there's no thrashing. This is the preferred memory type order.
++ */
++static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
++
++/**
++ * If we have thrashing, most memory will be evicted to TT anyway, so we might as well
++ * just move the new buffer into TT from the start.
++ */
++static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
++
++
++static struct drm_bo_driver via_bo_driver = {
++      .mem_type_prio = via_mem_prios,
++      .mem_busy_prio = via_busy_prios,
++      .num_mem_type_prio = ARRAY_SIZE(via_mem_prios),
++      .num_mem_busy_prio = ARRAY_SIZE(via_busy_prios),
++      .create_ttm_backend_entry = via_create_ttm_backend_entry,
++      .fence_type = via_fence_types,
++      .invalidate_caches = via_invalidate_caches,
++      .init_mem_type = via_init_mem_type,
++      .evict_flags = via_evict_flags,
++      .move = NULL,
++      .ttm_cache_flush = NULL,
++      .command_stream_barrier = NULL
++};
++#endif
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
++          DRIVER_IRQ_SHARED,
++      .load = via_driver_load,
++      .unload = via_driver_unload,
++#ifndef VIA_HAVE_CORE_MM
++      .context_ctor = via_init_context,
++#endif
++      .context_dtor = via_final_context,
++      .get_vblank_counter = via_get_vblank_counter,
++      .enable_vblank = via_enable_vblank,
++      .disable_vblank = via_disable_vblank,
++      .irq_preinstall = via_driver_irq_preinstall,
++      .irq_postinstall = via_driver_irq_postinstall,
++      .irq_uninstall = via_driver_irq_uninstall,
++      .irq_handler = via_driver_irq_handler,
++      .dma_quiescent = via_driver_dma_quiescent,
++      .dri_library_name = dri_library_name,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .reclaim_buffers_locked = NULL,
++#ifdef VIA_HAVE_CORE_MM
++      .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
++      .lastclose = via_lastclose,
++#endif
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = via_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++#ifdef VIA_HAVE_FENCE
++      .fence_driver = &via_fence_driver,
++#endif
++#ifdef VIA_HAVE_BUFFER
++      .bo_driver = &via_bo_driver,
++#endif
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = VIA_DRM_DRIVER_DATE,
++      .major = VIA_DRM_DRIVER_MAJOR,
++      .minor = VIA_DRM_DRIVER_MINOR,
++      .patchlevel = VIA_DRM_DRIVER_PATCHLEVEL
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init via_init(void)
++{
++      driver.num_ioctls = via_max_ioctl;
++
++      via_init_command_verifier();
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit via_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(via_init);
++module_exit(via_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/via_drv.h git-nokia/drivers/gpu/drm-tungsten/via_drv.h
+--- git/drivers/gpu/drm-tungsten/via_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,211 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _VIA_DRV_H_
++#define _VIA_DRV_H_
++
++#include "drm_sman.h"
++#define DRIVER_AUTHOR "Various"
++
++#define DRIVER_NAME           "via"
++#define DRIVER_DESC           "VIA Unichrome / Pro"
++
++#include "via_verifier.h"
++
++/*
++ * Registers go here.
++ */
++
++
++#define CMDBUF_ALIGNMENT_SIZE   (0x100)
++#define CMDBUF_ALIGNMENT_MASK   (0x0ff)
++
++/* defines for VIA 3D registers */
++#define VIA_REG_STATUS                0x400
++#define VIA_REG_TRANSET               0x43C
++#define VIA_REG_TRANSPACE       0x440
++
++/* VIA_REG_STATUS(0x400): Engine Status */
++#define VIA_CMD_RGTR_BUSY       0x00000080    /* Command Regulator is busy */
++#define VIA_2D_ENG_BUSY               0x00000001      /* 2D Engine is busy */
++#define VIA_3D_ENG_BUSY               0x00000002      /* 3D Engine is busy */
++#define VIA_VR_QUEUE_BUSY       0x00020000    /* Virtual Queue is busy */
++
++
++
++#if defined(__linux__)
++#include "via_dmablit.h"
++
++/*
++ * This define and all its references can be removed when
++ * the DMA blit code has been implemented for FreeBSD.
++ */
++#define VIA_HAVE_DMABLIT 1
++#define VIA_HAVE_CORE_MM 1
++#define VIA_HAVE_FENCE   1
++#define VIA_HAVE_BUFFER  1
++#endif
++
++#define VIA_PCI_BUF_SIZE 60000
++#define VIA_FIRE_BUF_SIZE  1024
++#define VIA_NUM_IRQS 4
++
++typedef struct drm_via_ring_buffer {
++      drm_local_map_t map;
++      char *virtual_start;
++} drm_via_ring_buffer_t;
++
++typedef uint32_t maskarray_t[5];
++
++typedef struct drm_via_irq {
++      atomic_t irq_received;
++      uint32_t pending_mask;
++      uint32_t enable_mask;
++      wait_queue_head_t irq_queue;
++} drm_via_irq_t;
++
++typedef struct drm_via_private {
++      drm_via_sarea_t *sarea_priv;
++      drm_local_map_t *sarea;
++      drm_local_map_t *fb;
++      drm_local_map_t *mmio;
++      unsigned long agpAddr;
++      wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
++      char *dma_ptr;
++      unsigned int dma_low;
++      unsigned int dma_high;
++      unsigned int dma_offset;
++      uint32_t dma_wrap;
++      volatile uint32_t *last_pause_ptr;
++      volatile uint32_t *hw_addr_ptr;
++      drm_via_ring_buffer_t ring;
++      struct timeval last_vblank;
++      int last_vblank_valid;
++      unsigned usec_per_vblank;
++      atomic_t vbl_received;
++      drm_via_state_t hc_state;
++      char pci_buf[VIA_PCI_BUF_SIZE];
++      const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
++      uint32_t num_fire_offsets;
++      int chipset;
++      drm_via_irq_t via_irqs[VIA_NUM_IRQS];
++      unsigned num_irqs;
++      maskarray_t *irq_masks;
++      uint32_t irq_enable_mask;
++      uint32_t irq_pending_mask;
++      int *irq_map;
++      /* Memory manager stuff */
++#ifdef VIA_HAVE_CORE_MM
++      unsigned int idle_fault;
++      struct drm_sman sman;
++      int vram_initialized;
++      int agp_initialized;
++      unsigned long vram_offset;
++      unsigned long agp_offset;
++#endif
++#ifdef VIA_HAVE_DMABLIT
++      drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
++#endif
++        uint32_t dma_diff;
++#ifdef VIA_HAVE_FENCE
++      spinlock_t fence_lock;
++      uint32_t emit_0_sequence;
++      int have_idlelock;
++      struct timer_list fence_timer;
++#endif
++} drm_via_private_t;
++
++enum via_family {
++  VIA_OTHER = 0,     /* Baseline */
++  VIA_PRO_GROUP_A,   /* Another video engine and DMA commands */
++  VIA_DX9_0          /* Same video as pro_group_a, but 3D is unsupported */
++};
++
++/* VIA MMIO register access */
++#define VIA_BASE ((dev_priv->mmio))
++
++#define VIA_READ(reg)         DRM_READ32(VIA_BASE, reg)
++#define VIA_WRITE(reg,val)    DRM_WRITE32(VIA_BASE, reg, val)
++#define VIA_READ8(reg)                DRM_READ8(VIA_BASE, reg)
++#define VIA_WRITE8(reg,val)   DRM_WRITE8(VIA_BASE, reg, val)
++
++extern struct drm_ioctl_desc via_ioctls[];
++extern int via_max_ioctl;
++
++extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
++extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
++
++extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
++extern int via_driver_unload(struct drm_device *dev);
++extern int via_final_context(struct drm_device * dev, int context);
++
++extern int via_do_cleanup_map(struct drm_device * dev);
++extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int via_enable_vblank(struct drm_device *dev, int crtc);
++extern void via_disable_vblank(struct drm_device *dev, int crtc);
++
++extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
++extern void via_driver_irq_preinstall(struct drm_device * dev);
++extern int via_driver_irq_postinstall(struct drm_device * dev);
++extern void via_driver_irq_uninstall(struct drm_device * dev);
++
++extern int via_dma_cleanup(struct drm_device * dev);
++extern void via_init_command_verifier(void);
++extern int via_driver_dma_quiescent(struct drm_device * dev);
++extern void via_init_futex(drm_via_private_t *dev_priv);
++extern void via_cleanup_futex(drm_via_private_t *dev_priv);
++extern void via_release_futex(drm_via_private_t *dev_priv, int context);
++
++#ifdef VIA_HAVE_CORE_MM
++extern void via_reclaim_buffers_locked(struct drm_device *dev,
++                                     struct drm_file *file_priv);
++extern void via_lastclose(struct drm_device *dev);
++#else
++extern int via_init_context(struct drm_device * dev, int context);
++#endif
++
++#ifdef VIA_HAVE_DMABLIT
++extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
++extern void via_init_dmablit(struct drm_device *dev);
++#endif
++
++#ifdef VIA_HAVE_BUFFER
++extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
++extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
++                         uint32_t *type);
++extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
++extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
++                             struct drm_mem_type_manager *man);
++extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
++extern int via_move(struct drm_buffer_object *bo, int evict,
++              int no_wait, struct drm_bo_mem_reg *new_mem);
++#endif
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_fence.c git-nokia/drivers/gpu/drm-tungsten/via_fence.c
+--- git/drivers/gpu/drm-tungsten/via_fence.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_fence.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,169 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++/*
++ * DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted.
++ * DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
++ */
++
++static void via_fence_poll(struct drm_device *dev, uint32_t class,
++                         uint32_t waiting_types)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      uint32_t signaled_flush_types = 0;
++      uint32_t status;
++
++      if (class != 0)
++              return;
++
++      if (unlikely(!dev_priv))
++              return;
++
++      spin_lock(&dev_priv->fence_lock);
++      if (waiting_types) {
++
++              /*
++               * Take the idlelock. This guarantees that the next time a client tries
++               * to grab the lock, it will stall until the idlelock is released. This
++               * guarantees that eventually, the GPU engines will be idle, but nothing
++               * else. It cannot be used to protect the hardware.
++               */
++
++
++              if (!dev_priv->have_idlelock) {
++                      drm_idlelock_take(&dev->lock);
++                      dev_priv->have_idlelock = 1;
++              }
++
++              /*
++               * Check if AGP command reader is idle.
++               */
++
++              if (waiting_types & DRM_FENCE_TYPE_EXE)
++                      if (VIA_READ(0x41C) & 0x80000000)
++                              signaled_flush_types |= DRM_FENCE_TYPE_EXE;
++
++              /*
++               * Check VRAM command queue empty and 2D + 3D engines idle.
++               */
++
++              if (waiting_types & DRM_VIA_FENCE_TYPE_ACCEL) {
++                      status = VIA_READ(VIA_REG_STATUS);
++                      if ((status & VIA_VR_QUEUE_BUSY) &&
++                          !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
++                              signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL;
++              }
++
++              if (signaled_flush_types) {
++                      waiting_types &= ~signaled_flush_types;
++                      if (!waiting_types && dev_priv->have_idlelock) {
++                              drm_idlelock_release(&dev->lock);
++                              dev_priv->have_idlelock = 0;
++                      }
++                      drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
++                                        signaled_flush_types, 0);
++              }
++      }
++
++      spin_unlock(&dev_priv->fence_lock);
++
++      return;
++}
++
++
++/**
++ * Emit a fence sequence.
++ */
++
++static int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
++                                 uint32_t * sequence, uint32_t * native_type)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      int ret = 0;
++
++      if (!dev_priv)
++              return -EINVAL;
++
++      switch(class) {
++      case 0: /* AGP command stream */
++
++              /*
++               * The sequence number isn't really used by the hardware yet.
++               */
++
++              spin_lock(&dev_priv->fence_lock);
++              *sequence = ++dev_priv->emit_0_sequence;
++              spin_unlock(&dev_priv->fence_lock);
++
++              /*
++               * When drm_fence_handler() is called with flush type 0x01, and a
++               * sequence number, that means that the EXE flag is expired.
++               * Nothing else. No implicit flushing or other engines idle.
++               */
++
++              *native_type = DRM_FENCE_TYPE_EXE;
++              break;
++      default:
++              ret = -EINVAL;
++              break;
++      }
++      return ret;
++}
++
++/**
++ * No irq fence expirations implemented yet.
++ * Although both the HQV engines and PCI dmablit engines signal
++ * idle with an IRQ, we haven't implemented this yet.
++ * This means that the drm fence manager will always poll for engine idle,
++ * unless the caller wanting to wait for a fence object has indicated a lazy wait.
++ */
++
++static int via_fence_has_irq(struct drm_device * dev, uint32_t class,
++                           uint32_t flags)
++{
++      return 0;
++}
++
++struct drm_fence_driver via_fence_driver = {
++      .num_classes = 1,
++      .wrap_diff = (1 << 30),
++      .flush_diff = (1 << 20),
++      .sequence_mask = 0xffffffffU,
++      .has_irq = via_fence_has_irq,
++      .emit = via_fence_emit_sequence,
++      .poll = via_fence_poll,
++      .needed_flush = NULL,
++      .wait = NULL
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/via_irq.c git-nokia/drivers/gpu/drm-tungsten/via_irq.c
+--- git/drivers/gpu/drm-tungsten/via_irq.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_irq.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,403 @@
++/* via_irq.c
++ *
++ * Copyright 2004 BEAM Ltd.
++ * Copyright 2002 Tungsten Graphics, Inc.
++ * Copyright 2005 Thomas Hellstrom.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * BEAM LTD, TUNGSTEN GRAPHICS  AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Terry Barnaby <terry1@beam.ltd.uk>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Thomas Hellstrom <unichrome@shipmail.org>
++ *
++ * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
++ * interrupt, as well as an infrastructure to handle other interrupts of the chip.
++ * The refresh rate is also calculated for video playback sync purposes.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++#define VIA_REG_INTERRUPT       0x200
++
++/* VIA_REG_INTERRUPT */
++#define VIA_IRQ_GLOBAL          (1 << 31)
++#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
++#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
++#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
++#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
++#define VIA_IRQ_HQV0_PENDING    (1 << 9)
++#define VIA_IRQ_HQV1_PENDING    (1 << 10)
++#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
++#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
++#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
++#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
++#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
++#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
++#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
++#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
++
++
++/*
++ * Device-specific IRQs go here. This type might need to be extended with
++ * the register if there are multiple IRQ control registers.
++ * Currently we activate the HQV interrupts of  Unichrome Pro group A.
++ */
++
++static maskarray_t via_pro_group_a_irqs[] = {
++      {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
++       0x00000000 },
++      {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
++       0x00000000 },
++      {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
++      {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
++};
++static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
++static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
++
++static maskarray_t via_unichrome_irqs[] = {
++      {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
++      {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
++};
++static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
++static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
++
++
++static unsigned time_diff(struct timeval *now,struct timeval *then)
++{
++      return (now->tv_usec >= then->tv_usec) ?
++              now->tv_usec - then->tv_usec :
++              1000000 - (then->tv_usec - now->tv_usec);
++}
++
++u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      if (crtc != 0)
++              return 0;
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++      int handled = 0;
++      struct timeval cur_vblank;
++      drm_via_irq_t *cur_irq = dev_priv->via_irqs;
++      int i;
++
++      status = VIA_READ(VIA_REG_INTERRUPT);
++      if (status & VIA_IRQ_VBLANK_PENDING) {
++              atomic_inc(&dev_priv->vbl_received);
++              if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++#ifdef __linux__
++                      do_gettimeofday(&cur_vblank);
++#else
++                      microtime(&cur_vblank);
++#endif
++                      if (dev_priv->last_vblank_valid) {
++                              dev_priv->usec_per_vblank =
++                                      time_diff(&cur_vblank,
++                                                &dev_priv->last_vblank) >> 4;
++                      }
++                      dev_priv->last_vblank = cur_vblank;
++                      dev_priv->last_vblank_valid = 1;
++              }
++              if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++                      DRM_DEBUG("US per vblank is: %u\n",
++                                dev_priv->usec_per_vblank);
++              }
++              drm_handle_vblank(dev, 0);
++              handled = 1;
++      }
++
++      for (i = 0; i < dev_priv->num_irqs; ++i) {
++              if (status & cur_irq->pending_mask) {
++                      atomic_inc(&cur_irq->irq_received);
++                      DRM_WAKEUP(&cur_irq->irq_queue);
++                      handled = 1;
++#ifdef VIA_HAVE_DMABLIT
++                      if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
++                              via_dmablit_handler(dev, 0, 1);
++                      } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
++                              via_dmablit_handler(dev, 1, 1);
++                      }
++#endif
++              }
++              cur_irq++;
++      }
++
++      /* Acknowledge interrupts */
++      VIA_WRITE(VIA_REG_INTERRUPT, status);
++
++
++      if (handled)
++              return IRQ_HANDLED;
++      else
++              return IRQ_NONE;
++}
++
++static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
++{
++      u32 status;
++
++      if (dev_priv) {
++              /* Acknowledge interrupts */
++              status = VIA_READ(VIA_REG_INTERRUPT);
++              VIA_WRITE(VIA_REG_INTERRUPT, status |
++                        dev_priv->irq_pending_mask);
++      }
++}
++
++int via_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      u32 status;
++
++      if (crtc != 0) {
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++              return -EINVAL;
++      }
++
++      status = VIA_READ(VIA_REG_INTERRUPT);
++      VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
++
++      VIA_WRITE8(0x83d4, 0x11);
++      VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
++
++      return 0;
++}
++
++void via_disable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++      VIA_WRITE8(0x83d4, 0x11);
++      VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
++
++      if (crtc != 0)
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++}
++
++static int
++via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
++                  unsigned int *sequence)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      unsigned int cur_irq_sequence;
++      drm_via_irq_t *cur_irq;
++      int ret = 0;
++      maskarray_t *masks;
++      int real_irq;
++
++      DRM_DEBUG("\n");
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (irq >= drm_via_irq_num) {
++              DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
++              return -EINVAL;
++      }
++
++      real_irq = dev_priv->irq_map[irq];
++
++      if (real_irq < 0) {
++              DRM_ERROR("Video IRQ %d not available on this hardware.\n",
++                        irq);
++              return -EINVAL;
++      }
++
++      masks = dev_priv->irq_masks;
++      cur_irq = dev_priv->via_irqs + real_irq;
++
++      if (masks[real_irq][2] && !force_sequence) {
++              DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
++                          ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
++                           masks[irq][4]));
++              cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++      } else {
++              DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
++                          (((cur_irq_sequence =
++                             atomic_read(&cur_irq->irq_received)) -
++                            *sequence) <= (1 << 23)));
++      }
++      *sequence = cur_irq_sequence;
++      return ret;
++}
++
++
++/*
++ * drm_dma.h hooks
++ */
++
++void via_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++      drm_via_irq_t *cur_irq;
++      int i;
++
++      DRM_DEBUG("dev_priv: %p\n", dev_priv);
++      if (dev_priv) {
++              cur_irq = dev_priv->via_irqs;
++
++              dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
++              dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
++
++              if (dev_priv->chipset == VIA_PRO_GROUP_A ||
++                  dev_priv->chipset == VIA_DX9_0) {
++                      dev_priv->irq_masks = via_pro_group_a_irqs;
++                      dev_priv->num_irqs = via_num_pro_group_a;
++                      dev_priv->irq_map = via_irqmap_pro_group_a;
++              } else {
++                      dev_priv->irq_masks = via_unichrome_irqs;
++                      dev_priv->num_irqs = via_num_unichrome;
++                      dev_priv->irq_map = via_irqmap_unichrome;
++              }
++
++              for (i = 0; i < dev_priv->num_irqs; ++i) {
++                      atomic_set(&cur_irq->irq_received, 0);
++                      cur_irq->enable_mask = dev_priv->irq_masks[i][0];
++                      cur_irq->pending_mask = dev_priv->irq_masks[i][1];
++                      DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
++                      dev_priv->irq_enable_mask |= cur_irq->enable_mask;
++                      dev_priv->irq_pending_mask |= cur_irq->pending_mask;
++                      cur_irq++;
++
++                      DRM_DEBUG("Initializing IRQ %d\n", i);
++              }
++
++              dev_priv->last_vblank_valid = 0;
++
++              /* Clear VSync interrupt regs */
++              status = VIA_READ(VIA_REG_INTERRUPT);
++              VIA_WRITE(VIA_REG_INTERRUPT, status &
++                        ~(dev_priv->irq_enable_mask));
++
++              /* Clear bits if they're already high */
++              viadrv_acknowledge_irqs(dev_priv);
++      }
++}
++
++int via_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++
++      DRM_DEBUG("via_driver_irq_postinstall\n");
++      if (!dev_priv)
++              return -EINVAL;
++
++      drm_vblank_init(dev, 1);
++      status = VIA_READ(VIA_REG_INTERRUPT);
++      VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
++                | dev_priv->irq_enable_mask);
++
++      /* Some magic, oh for some data sheets ! */
++      VIA_WRITE8(0x83d4, 0x11);
++      VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
++
++      return 0;
++}
++
++void via_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++
++      DRM_DEBUG("\n");
++      if (dev_priv) {
++
++              /* Some more magic, oh for some data sheets ! */
++
++              VIA_WRITE8(0x83d4, 0x11);
++              VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
++
++              status = VIA_READ(VIA_REG_INTERRUPT);
++              VIA_WRITE(VIA_REG_INTERRUPT, status &
++                        ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
++      }
++}
++
++int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_irqwait_t *irqwait = data;
++      struct timeval now;
++      int ret = 0;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_irq_t *cur_irq = dev_priv->via_irqs;
++      int force_sequence;
++
++      if (!dev->irq)
++              return -EINVAL;
++
++      if (irqwait->request.irq >= dev_priv->num_irqs) {
++              DRM_ERROR("Trying to wait on unknown irq %d\n",
++                        irqwait->request.irq);
++              return -EINVAL;
++      }
++
++      cur_irq += irqwait->request.irq;
++
++      switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
++      case VIA_IRQ_RELATIVE:
++              irqwait->request.sequence +=
++                      atomic_read(&cur_irq->irq_received);
++              irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
++      case VIA_IRQ_ABSOLUTE:
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (irqwait->request.type & VIA_IRQ_SIGNAL) {
++              DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
++              return -EINVAL;
++      }
++
++      force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
++
++      ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
++                                &irqwait->request.sequence);
++#ifdef __linux__
++      do_gettimeofday(&now);
++#else
++      microtime(&now);
++#endif
++      irqwait->reply.tval_sec = now.tv_sec;
++      irqwait->reply.tval_usec = now.tv_usec;
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_map.c git-nokia/drivers/gpu/drm-tungsten/via_map.c
+--- git/drivers/gpu/drm-tungsten/via_map.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_map.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,139 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              dev->dev_private = (void *)dev_priv;
++              via_do_cleanup_map(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
++      if (!dev_priv->fb) {
++              DRM_ERROR("could not find framebuffer!\n");
++              dev->dev_private = (void *)dev_priv;
++              via_do_cleanup_map(dev);
++              return -EINVAL;
++      }
++      dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio) {
++              DRM_ERROR("could not find mmio region!\n");
++              dev->dev_private = (void *)dev_priv;
++              via_do_cleanup_map(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->sarea_priv =
++          (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                               init->sarea_priv_offset);
++
++      dev_priv->agpAddr = init->agpAddr;
++
++      via_init_futex( dev_priv );
++#ifdef VIA_HAVE_DMABLIT
++      via_init_dmablit( dev );
++#endif
++#ifdef VIA_HAVE_FENCE
++      dev_priv->emit_0_sequence = 0;
++      dev_priv->have_idlelock = 0;
++      spin_lock_init(&dev_priv->fence_lock);
++#endif /* VIA_HAVE_FENCE */
++      dev->dev_private = (void *)dev_priv;
++#ifdef VIA_HAVE_BUFFER
++      ret = drm_bo_driver_init(dev);
++      if (ret)
++              DRM_ERROR("Could not initialize buffer object driver.\n");
++#endif
++      return ret;
++
++}
++
++int via_do_cleanup_map(struct drm_device * dev)
++{
++      via_dma_cleanup(dev);
++
++      return 0;
++}
++
++
++int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_init_t *init = data;
++
++      DRM_DEBUG("\n");
++
++      switch (init->func) {
++      case VIA_INIT_MAP:
++              return via_do_init_map(dev, init);
++      case VIA_CLEANUP_MAP:
++              return via_do_cleanup_map(dev);
++      }
++
++      return -EINVAL;
++}
++
++int via_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++      drm_via_private_t *dev_priv;
++      int ret = 0;
++
++      dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      dev->dev_private = (void *)dev_priv;
++
++      dev_priv->chipset = chipset;
++
++#ifdef VIA_HAVE_CORE_MM
++      ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
++      if (ret) {
++              drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++      }
++#endif
++      return ret;
++}
++
++int via_driver_unload(struct drm_device *dev)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++#ifdef VIA_HAVE_CORE_MM
++      drm_sman_takedown(&dev_priv->sman);
++#endif
++      drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_mm.c git-nokia/drivers/gpu/drm-tungsten/via_mm.c
+--- git/drivers/gpu/drm-tungsten/via_mm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_mm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,196 @@
++/*
++ * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++#include "drm_sman.h"
++
++#define VIA_MM_ALIGN_SHIFT 4
++#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
++
++int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_agp_t *agp = data;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
++                               agp->size >> VIA_MM_ALIGN_SHIFT);
++
++      if (ret) {
++              DRM_ERROR("AGP memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->agp_initialized = 1;
++      dev_priv->agp_offset = agp->offset;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
++      return 0;
++}
++
++int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_fb_t *fb = data;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
++                               fb->size >> VIA_MM_ALIGN_SHIFT);
++
++      if (ret) {
++              DRM_ERROR("VRAM memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->vram_initialized = 1;
++      dev_priv->vram_offset = fb->offset;
++
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
++
++      return 0;
++
++}
++
++int via_final_context(struct drm_device *dev, int context)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      via_release_futex(dev_priv, context);
++
++#if defined(__linux__)
++      /* Linux specific until context tracking code gets ported to BSD */
++      /* Last context, perform cleanup */
++      if (dev->ctx_count == 1 && dev->dev_private) {
++              DRM_DEBUG("Last Context\n");
++              if (dev->irq)
++                      drm_irq_uninstall(dev);
++              via_cleanup_futex(dev_priv);
++              via_do_cleanup_map(dev);
++      }
++#endif
++      return 1;
++}
++
++void via_lastclose(struct drm_device *dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      if (!dev_priv)
++              return;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_sman_cleanup(&dev_priv->sman);
++      dev_priv->vram_initialized = 0;
++      dev_priv->agp_initialized = 0;
++      mutex_unlock(&dev->struct_mutex);
++}
++
++int via_mem_alloc(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_via_mem_t *mem = data;
++      int retval = 0;
++      struct drm_memblock_item *item;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      unsigned long tmpSize;
++
++      if (mem->type > VIA_MEM_AGP) {
++              DRM_ERROR("Unknown memory type allocation\n");
++              return -EINVAL;
++      }
++      mutex_lock(&dev->struct_mutex);
++      if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
++                    dev_priv->agp_initialized)) {
++              DRM_ERROR
++                  ("Attempt to allocate from uninitialized memory manager.\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
++      item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
++                            (unsigned long)file_priv);
++      mutex_unlock(&dev->struct_mutex);
++      if (item) {
++              mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
++                            dev_priv->vram_offset : dev_priv->agp_offset) +
++                  (item->mm->
++                   offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
++              mem->index = item->user_hash.key;
++      } else {
++              mem->offset = 0;
++              mem->size = 0;
++              mem->index = 0;
++              DRM_DEBUG("Video memory allocation failed\n");
++              retval = -ENOMEM;
++      }
++
++      return retval;
++}
++
++int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      drm_via_mem_t *mem = data;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_free_key(&dev_priv->sman, mem->index);
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("free = 0x%lx\n", mem->index);
++
++      return ret;
++}
++
++
++void via_reclaim_buffers_locked(struct drm_device * dev,
++                              struct drm_file *file_priv)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++      if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
++              mutex_unlock(&dev->struct_mutex);
++              return;
++      }
++
++      if (dev->driver->dma_quiescent) {
++              dev->driver->dma_quiescent(dev);
++      }
++
++      drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
++      mutex_unlock(&dev->struct_mutex);
++      return;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_verifier.c git-nokia/drivers/gpu/drm-tungsten/via_verifier.c
+--- git/drivers/gpu/drm-tungsten/via_verifier.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_verifier.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1121 @@
++/*
++ * Copyright 2004 The Unichrome Project. All Rights Reserved.
++ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Thomas Hellstrom 2004, 2005.
++ * This code was written using docs obtained under NDA from VIA Inc.
++ *
++ * Don't run this code directly on an AGP buffer. Due to cache problems it will
++ * be very slow.
++ */
++
++#include "via_3d_reg.h"
++#include "drmP.h"
++#include "drm.h"
++#include "via_drm.h"
++#include "via_verifier.h"
++#include "via_drv.h"
++
++typedef enum {
++      state_command,
++      state_header2,
++      state_header1,
++      state_vheader5,
++      state_vheader6,
++      state_error
++} verifier_state_t;
++
++typedef enum {
++      no_check = 0,
++      check_for_header2,
++      check_for_header1,
++      check_for_header2_err,
++      check_for_header1_err,
++      check_for_fire,
++      check_z_buffer_addr0,
++      check_z_buffer_addr1,
++      check_z_buffer_addr_mode,
++      check_destination_addr0,
++      check_destination_addr1,
++      check_destination_addr_mode,
++      check_for_dummy,
++      check_for_dd,
++      check_texture_addr0,
++      check_texture_addr1,
++      check_texture_addr2,
++      check_texture_addr3,
++      check_texture_addr4,
++      check_texture_addr5,
++      check_texture_addr6,
++      check_texture_addr7,
++      check_texture_addr8,
++      check_texture_addr_mode,
++      check_for_vertex_count,
++      check_number_texunits,
++      forbidden_command
++} hazard_t;
++
++/*
++ * Associates each hazard above with a possible multi-command
++ * sequence. For example an address that is split over multiple
++ * commands and that needs to be checked at the first command
++ * that does not include any part of the address.
++ */
++
++static drm_via_sequence_t seqs[] = {
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      z_address,
++      z_address,
++      z_address,
++      dest_address,
++      dest_address,
++      dest_address,
++      no_sequence,
++      no_sequence,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      no_sequence
++};
++
++typedef struct {
++      unsigned int code;
++      hazard_t hz;
++} hz_init_t;
++
++static hz_init_t init_table1[] = {
++      {0xf2, check_for_header2_err},
++      {0xf0, check_for_header1_err},
++      {0xee, check_for_fire},
++      {0xcc, check_for_dummy},
++      {0xdd, check_for_dd},
++      {0x00, no_check},
++      {0x10, check_z_buffer_addr0},
++      {0x11, check_z_buffer_addr1},
++      {0x12, check_z_buffer_addr_mode},
++      {0x13, no_check},
++      {0x14, no_check},
++      {0x15, no_check},
++      {0x23, no_check},
++      {0x24, no_check},
++      {0x33, no_check},
++      {0x34, no_check},
++      {0x35, no_check},
++      {0x36, no_check},
++      {0x37, no_check},
++      {0x38, no_check},
++      {0x39, no_check},
++      {0x3A, no_check},
++      {0x3B, no_check},
++      {0x3C, no_check},
++      {0x3D, no_check},
++      {0x3E, no_check},
++      {0x40, check_destination_addr0},
++      {0x41, check_destination_addr1},
++      {0x42, check_destination_addr_mode},
++      {0x43, no_check},
++      {0x44, no_check},
++      {0x50, no_check},
++      {0x51, no_check},
++      {0x52, no_check},
++      {0x53, no_check},
++      {0x54, no_check},
++      {0x55, no_check},
++      {0x56, no_check},
++      {0x57, no_check},
++      {0x58, no_check},
++      {0x70, no_check},
++      {0x71, no_check},
++      {0x78, no_check},
++      {0x79, no_check},
++      {0x7A, no_check},
++      {0x7B, no_check},
++      {0x7C, no_check},
++      {0x7D, check_for_vertex_count}
++};
++
++static hz_init_t init_table2[] = {
++      {0xf2, check_for_header2_err},
++      {0xf0, check_for_header1_err},
++      {0xee, check_for_fire},
++      {0xcc, check_for_dummy},
++      {0x00, check_texture_addr0},
++      {0x01, check_texture_addr0},
++      {0x02, check_texture_addr0},
++      {0x03, check_texture_addr0},
++      {0x04, check_texture_addr0},
++      {0x05, check_texture_addr0},
++      {0x06, check_texture_addr0},
++      {0x07, check_texture_addr0},
++      {0x08, check_texture_addr0},
++      {0x09, check_texture_addr0},
++      {0x20, check_texture_addr1},
++      {0x21, check_texture_addr1},
++      {0x22, check_texture_addr1},
++      {0x23, check_texture_addr4},
++      {0x2B, check_texture_addr3},
++      {0x2C, check_texture_addr3},
++      {0x2D, check_texture_addr3},
++      {0x2E, check_texture_addr3},
++      {0x2F, check_texture_addr3},
++      {0x30, check_texture_addr3},
++      {0x31, check_texture_addr3},
++      {0x32, check_texture_addr3},
++      {0x33, check_texture_addr3},
++      {0x34, check_texture_addr3},
++      {0x4B, check_texture_addr5},
++      {0x4C, check_texture_addr6},
++      {0x51, check_texture_addr7},
++      {0x52, check_texture_addr8},
++      {0x77, check_texture_addr2},
++      {0x78, no_check},
++      {0x79, no_check},
++      {0x7A, no_check},
++      {0x7B, check_texture_addr_mode},
++      {0x7C, no_check},
++      {0x7D, no_check},
++      {0x7E, no_check},
++      {0x7F, no_check},
++      {0x80, no_check},
++      {0x81, no_check},
++      {0x82, no_check},
++      {0x83, no_check},
++      {0x85, no_check},
++      {0x86, no_check},
++      {0x87, no_check},
++      {0x88, no_check},
++      {0x89, no_check},
++      {0x8A, no_check},
++      {0x90, no_check},
++      {0x91, no_check},
++      {0x92, no_check},
++      {0x93, no_check}
++};
++
++static hz_init_t init_table3[] = {
++      {0xf2, check_for_header2_err},
++      {0xf0, check_for_header1_err},
++      {0xcc, check_for_dummy},
++      {0x00, check_number_texunits}
++};
++
++static hazard_t table1[256];
++static hazard_t table2[256];
++static hazard_t table3[256];
++
++static __inline__ int
++eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
++{
++      if ((buf_end - *buf) >= num_words) {
++              *buf += num_words;
++              return 0;
++      }
++      DRM_ERROR("Illegal termination of DMA command buffer\n");
++      return 1;
++}
++
++/*
++ * Partially stolen from drm_memory.h
++ */
++
++static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
++                                                        unsigned long offset,
++                                                        unsigned long size,
++                                                        struct drm_device *dev)
++{
++#ifdef __linux__
++      struct drm_map_list *r_list;
++#endif
++      drm_local_map_t *map = seq->map_cache;
++
++      if (map && map->offset <= offset
++          && (offset + size) <= (map->offset + map->size)) {
++              return map;
++      }
++#ifdef __linux__
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              map = r_list->map;
++              if (!map)
++                      continue;
++#else
++      TAILQ_FOREACH(map, &dev->maplist, link) {
++#endif
++              if (map->offset <= offset
++                  && (offset + size) <= (map->offset + map->size)
++                  && !(map->flags & _DRM_RESTRICTED)
++                  && (map->type == _DRM_AGP)) {
++                      seq->map_cache = map;
++                      return map;
++              }
++      }
++      return NULL;
++}
++
++/*
++ * Require that all AGP texture levels reside in the same AGP map which should
++ * be mappable by the client. This is not a big restriction.
++ * FIXME: To actually enforce this security policy strictly, drm_rmmap
++ * would have to wait for dma quiescent before removing an AGP map.
++ * The via_drm_lookup_agp_map call in reality seems to take
++ * very little CPU time.
++ */
++
++static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
++{
++      switch (cur_seq->unfinished) {
++      case z_address:
++              DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
++              break;
++      case dest_address:
++              DRM_DEBUG("Destination start address is 0x%x\n",
++                        cur_seq->d_addr);
++              break;
++      case tex_address:
++              if (cur_seq->agp_texture) {
++                      unsigned start =
++                          cur_seq->tex_level_lo[cur_seq->texture];
++                      unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
++                      unsigned long lo = ~0, hi = 0, tmp;
++                      uint32_t *addr, *pitch, *height, tex;
++                      unsigned i;
++                      int npot;
++
++                      if (end > 9)
++                              end = 9;
++                      if (start > 9)
++                              start = 9;
++
++                      addr =
++                          &(cur_seq->t_addr[tex = cur_seq->texture][start]);
++                      pitch = &(cur_seq->pitch[tex][start]);
++                      height = &(cur_seq->height[tex][start]);
++                      npot = cur_seq->tex_npot[tex];
++                      for (i = start; i <= end; ++i) {
++                              tmp = *addr++;
++                              if (tmp < lo)
++                                      lo = tmp;
++                              if (i == 0 && npot)
++                                      tmp += (*height++ * *pitch++);
++                              else
++                                      tmp += (*height++ << *pitch++);
++                              if (tmp > hi)
++                                      hi = tmp;
++                      }
++
++                      if (!via_drm_lookup_agp_map
++                          (cur_seq, lo, hi - lo, cur_seq->dev)) {
++                              DRM_ERROR
++                                  ("AGP texture is not in allowed map\n");
++                              return 2;
++                      }
++              }
++              break;
++      default:
++              break;
++      }
++      cur_seq->unfinished = no_sequence;
++      return 0;
++}
++
++static __inline__ int
++investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
++{
++      register uint32_t tmp, *tmp_addr;
++
++      if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
++              int ret;
++              if ((ret = finish_current_sequence(cur_seq)))
++                      return ret;
++      }
++
++      switch (hz) {
++      case check_for_header2:
++              if (cmd == HALCYON_HEADER2)
++                      return 1;
++              return 0;
++      case check_for_header1:
++              if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                      return 1;
++              return 0;
++      case check_for_header2_err:
++              if (cmd == HALCYON_HEADER2)
++                      return 1;
++              DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
++              break;
++      case check_for_header1_err:
++              if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                      return 1;
++              DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
++              break;
++      case check_for_fire:
++              if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
++                      return 1;
++              DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
++              break;
++      case check_for_dummy:
++              if (HC_DUMMY == cmd)
++                      return 0;
++              DRM_ERROR("Illegal DMA HC_DUMMY command\n");
++              break;
++      case check_for_dd:
++              if (0xdddddddd == cmd)
++                      return 0;
++              DRM_ERROR("Illegal DMA 0xdddddddd command\n");
++              break;
++      case check_z_buffer_addr0:
++              cur_seq->unfinished = z_address;
++              cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
++                  (cmd & 0x00FFFFFF);
++              return 0;
++      case check_z_buffer_addr1:
++              cur_seq->unfinished = z_address;
++              cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
++                  ((cmd & 0xFF) << 24);
++              return 0;
++      case check_z_buffer_addr_mode:
++              cur_seq->unfinished = z_address;
++              if ((cmd & 0x0000C000) == 0)
++                      return 0;
++              DRM_ERROR("Attempt to place Z buffer in system memory\n");
++              return 2;
++      case check_destination_addr0:
++              cur_seq->unfinished = dest_address;
++              cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
++                  (cmd & 0x00FFFFFF);
++              return 0;
++      case check_destination_addr1:
++              cur_seq->unfinished = dest_address;
++              cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
++                  ((cmd & 0xFF) << 24);
++              return 0;
++      case check_destination_addr_mode:
++              cur_seq->unfinished = dest_address;
++              if ((cmd & 0x0000C000) == 0)
++                      return 0;
++              DRM_ERROR
++                  ("Attempt to place 3D drawing buffer in system memory\n");
++              return 2;
++      case check_texture_addr0:
++              cur_seq->unfinished = tex_address;
++              tmp = (cmd >> 24);
++              tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
++              *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
++              return 0;
++      case check_texture_addr1:
++              cur_seq->unfinished = tex_address;
++              tmp = ((cmd >> 24) - 0x20);
++              tmp += tmp << 1;
++              tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
++              tmp_addr++;
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
++              tmp_addr++;
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
++              return 0;
++      case check_texture_addr2:
++              cur_seq->unfinished = tex_address;
++              cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
++              cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
++              return 0;
++      case check_texture_addr3:
++              cur_seq->unfinished = tex_address;
++              tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
++              if (tmp == 0 &&
++                  (cmd & HC_HTXnEnPit_MASK)) {
++                      cur_seq->pitch[cur_seq->texture][tmp] =
++                              (cmd & HC_HTXnLnPit_MASK);
++                      cur_seq->tex_npot[cur_seq->texture] = 1;
++              } else {
++                      cur_seq->pitch[cur_seq->texture][tmp] =
++                              (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
++                      cur_seq->tex_npot[cur_seq->texture] = 0;
++                      if (cmd & 0x000FFFFF) {
++                              DRM_ERROR
++                                      ("Unimplemented texture level 0 pitch mode.\n");
++                              return 2;
++                      }
++              }
++              return 0;
++      case check_texture_addr4:
++              cur_seq->unfinished = tex_address;
++              tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
++              return 0;
++      case check_texture_addr5:
++      case check_texture_addr6:
++              cur_seq->unfinished = tex_address;
++              /*
++               * Texture width. We don't care since we have the pitch.
++               */
++              return 0;
++      case check_texture_addr7:
++              cur_seq->unfinished = tex_address;
++              tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
++              tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
++              tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
++              tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
++              tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
++              tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
++              tmp_addr[0] = 1 << (cmd & 0x0000000F);
++              return 0;
++      case check_texture_addr8:
++              cur_seq->unfinished = tex_address;
++              tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
++              tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
++              tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
++              tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
++              tmp_addr[6] = 1 << (cmd & 0x0000000F);
++              return 0;
++      case check_texture_addr_mode:
++              cur_seq->unfinished = tex_address;
++              if (2 == (tmp = cmd & 0x00000003)) {
++                      DRM_ERROR
++                          ("Attempt to fetch texture from system memory.\n");
++                      return 2;
++              }
++              cur_seq->agp_texture = (tmp == 3);
++              cur_seq->tex_palette_size[cur_seq->texture] =
++                  (cmd >> 16) & 0x000000007;
++              return 0;
++      case check_for_vertex_count:
++              cur_seq->vertex_count = cmd & 0x0000FFFF;
++              return 0;
++      case check_number_texunits:
++              cur_seq->multitex = (cmd >> 3) & 1;
++              return 0;
++      default:
++              DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
++              return 2;
++      }
++      return 2;
++}
++
++static __inline__ int
++via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
++                  drm_via_state_t * cur_seq)
++{
++      drm_via_private_t *dev_priv =
++          (drm_via_private_t *) cur_seq->dev->dev_private;
++      uint32_t a_fire, bcmd, dw_count;
++      int ret = 0;
++      int have_fire;
++      const uint32_t *buf = *buffer;
++
++      while (buf < buf_end) {
++              have_fire = 0;
++              if ((buf_end - buf) < 2) {
++                      DRM_ERROR
++                          ("Unexpected termination of primitive list.\n");
++                      ret = 1;
++                      break;
++              }
++              if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
++                      break;
++              bcmd = *buf++;
++              if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
++                      DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
++                                *buf);
++                      ret = 1;
++                      break;
++              }
++              a_fire =
++                  *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
++                  HC_HE3Fire_MASK;
++
++              /*
++               * How many dwords per vertex ?
++               */
++
++              if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
++                      DRM_ERROR("Illegal B command vertex data for AGP.\n");
++                      ret = 1;
++                      break;
++              }
++
++              dw_count = 0;
++              if (bcmd & (1 << 7))
++                      dw_count += (cur_seq->multitex) ? 2 : 1;
++              if (bcmd & (1 << 8))
++                      dw_count += (cur_seq->multitex) ? 2 : 1;
++              if (bcmd & (1 << 9))
++                      dw_count++;
++              if (bcmd & (1 << 10))
++                      dw_count++;
++              if (bcmd & (1 << 11))
++                      dw_count++;
++              if (bcmd & (1 << 12))
++                      dw_count++;
++              if (bcmd & (1 << 13))
++                      dw_count++;
++              if (bcmd & (1 << 14))
++                      dw_count++;
++
++              while (buf < buf_end) {
++                      if (*buf == a_fire) {
++                              if (dev_priv->num_fire_offsets >=
++                                  VIA_FIRE_BUF_SIZE) {
++                                      DRM_ERROR("Fire offset buffer full.\n");
++                                      ret = 1;
++                                      break;
++                              }
++                              dev_priv->fire_offsets[dev_priv->
++                                                     num_fire_offsets++] =
++                                  buf;
++                              have_fire = 1;
++                              buf++;
++                              if (buf < buf_end && *buf == a_fire)
++                                      buf++;
++                              break;
++                      }
++                      if ((*buf == HALCYON_HEADER2) ||
++                          ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
++                              DRM_ERROR("Missing Vertex Fire command, "
++                                        "Stray Vertex Fire command  or verifier "
++                                        "lost sync.\n");
++                              ret = 1;
++                              break;
++                      }
++                      if ((ret = eat_words(&buf, buf_end, dw_count)))
++                              break;
++              }
++              if (buf >= buf_end && !have_fire) {
++                      DRM_ERROR("Missing Vertex Fire command or verifier "
++                                "lost sync.\n");
++                      ret = 1;
++                      break;
++              }
++              if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
++                      DRM_ERROR("AGP Primitive list end misaligned.\n");
++                      ret = 1;
++                      break;
++              }
++      }
++      *buffer = buf;
++      return ret;
++}
++
++static __inline__ verifier_state_t
++via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
++                drm_via_state_t * hc_state)
++{
++      uint32_t cmd;
++      int hz_mode;
++      hazard_t hz;
++      const uint32_t *buf = *buffer;
++      const hazard_t *hz_table;
++
++      if ((buf_end - buf) < 2) {
++              DRM_ERROR
++                  ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
++              return state_error;
++      }
++      buf++;
++      cmd = (*buf++ & 0xFFFF0000) >> 16;
++
++      switch (cmd) {
++      case HC_ParaType_CmdVdata:
++              if (via_check_prim_list(&buf, buf_end, hc_state))
++                      return state_error;
++              *buffer = buf;
++              return state_command;
++      case HC_ParaType_NotTex:
++              hz_table = table1;
++              break;
++      case HC_ParaType_Tex:
++              hc_state->texture = 0;
++              hz_table = table2;
++              break;
++      case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
++              hc_state->texture = 1;
++              hz_table = table2;
++              break;
++      case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
++              hz_table = table3;
++              break;
++      case HC_ParaType_Auto:
++              if (eat_words(&buf, buf_end, 2))
++                      return state_error;
++              *buffer = buf;
++              return state_command;
++      case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
++              if (eat_words(&buf, buf_end, 32))
++                      return state_error;
++              *buffer = buf;
++              return state_command;
++      case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
++      case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
++              DRM_ERROR("Texture palettes are rejected because of "
++                        "lack of info how to determine their size.\n");
++              return state_error;
++      case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
++              DRM_ERROR("Fog factor palettes are rejected because of "
++                        "lack of info how to determine their size.\n");
++              return state_error;
++      default:
++
++              /*
++               * There are some unimplemented HC_ParaTypes here, that
++               * need to be implemented if the Mesa driver is extended.
++               */
++
++              DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
++                        "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
++                        cmd, *(buf - 2));
++              *buffer = buf;
++              return state_error;
++      }
++
++      while (buf < buf_end) {
++              cmd = *buf++;
++              if ((hz = hz_table[cmd >> 24])) {
++                      if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
++                              if (hz_mode == 1) {
++                                      buf--;
++                                      break;
++                              }
++                              return state_error;
++                      }
++              } else if (hc_state->unfinished &&
++                         finish_current_sequence(hc_state)) {
++                      return state_error;
++              }
++      }
++      if (hc_state->unfinished && finish_current_sequence(hc_state)) {
++              return state_error;
++      }
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                const uint32_t * buf_end, int *fire_count)
++{
++      uint32_t cmd;
++      const uint32_t *buf = *buffer;
++      const uint32_t *next_fire;
++      int burst = 0;
++
++      next_fire = dev_priv->fire_offsets[*fire_count];
++      buf++;
++      cmd = (*buf & 0xFFFF0000) >> 16;
++      VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
++      switch (cmd) {
++      case HC_ParaType_CmdVdata:
++              while ((buf < buf_end) &&
++                     (*fire_count < dev_priv->num_fire_offsets) &&
++                     (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
++                      while (buf <= next_fire) {
++                              VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
++                                        (burst & 63), *buf++);
++                              burst += 4;
++                      }
++                      if ((buf < buf_end)
++                          && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
++                              buf++;
++
++                      if (++(*fire_count) < dev_priv->num_fire_offsets)
++                              next_fire = dev_priv->fire_offsets[*fire_count];
++              }
++              break;
++      default:
++              while (buf < buf_end) {
++
++                      if (*buf == HC_HEADER2 ||
++                          (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
++                          (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
++                          (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
++                              break;
++
++                      VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
++                                (burst & 63), *buf++);
++                      burst += 4;
++              }
++      }
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ int verify_mmio_address(uint32_t address)
++{
++      if ((address > 0x3FF) && (address < 0xC00)) {
++              DRM_ERROR("Invalid VIDEO DMA command. "
++                        "Attempt to access 3D- or command burst area.\n");
++              return 1;
++      } else if ((address > 0xCFF) && (address < 0x1300)) {
++              DRM_ERROR("Invalid VIDEO DMA command. "
++                        "Attempt to access PCI DMA area.\n");
++              return 1;
++      } else if (address > 0x13FF) {
++              DRM_ERROR("Invalid VIDEO DMA command. "
++                        "Attempt to access VGA registers.\n");
++              return 1;
++      }
++      return 0;
++}
++
++static __inline__ int
++verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
++                uint32_t dwords)
++{
++      const uint32_t *buf = *buffer;
++
++      if (buf_end - buf < dwords) {
++              DRM_ERROR("Illegal termination of video command.\n");
++              return 1;
++      }
++      while (dwords--) {
++              if (*buf++) {
++                      DRM_ERROR("Illegal video command tail.\n");
++                      return 1;
++              }
++      }
++      *buffer = buf;
++      return 0;
++}
++
++static __inline__ verifier_state_t
++via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
++{
++      uint32_t cmd;
++      const uint32_t *buf = *buffer;
++      verifier_state_t ret = state_command;
++
++      while (buf < buf_end) {
++              cmd = *buf;
++              if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
++                  (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
++                      if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
++                              break;
++                      DRM_ERROR("Invalid HALCYON_HEADER1 command. "
++                                "Attempt to access 3D- or command burst area.\n");
++                      ret = state_error;
++                      break;
++              } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
++                      if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
++                              break;
++                      DRM_ERROR("Invalid HALCYON_HEADER1 command. "
++                                "Attempt to access VGA registers.\n");
++                      ret = state_error;
++                      break;
++              } else {
++                      buf += 2;
++              }
++      }
++      *buffer = buf;
++      return ret;
++}
++
++static __inline__ verifier_state_t
++via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                const uint32_t * buf_end)
++{
++      register uint32_t cmd;
++      const uint32_t *buf = *buffer;
++
++      while (buf < buf_end) {
++              cmd = *buf;
++              if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
++                      break;
++              VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
++              buf++;
++      }
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
++{
++      uint32_t data;
++      const uint32_t *buf = *buffer;
++
++      if (buf_end - buf < 4) {
++              DRM_ERROR("Illegal termination of video header5 command\n");
++              return state_error;
++      }
++
++      data = *buf++ & ~VIA_VIDEOMASK;
++      if (verify_mmio_address(data))
++              return state_error;
++
++      data = *buf++;
++      if (*buf++ != 0x00F50000) {
++              DRM_ERROR("Illegal header5 header data\n");
++              return state_error;
++      }
++      if (*buf++ != 0x00000000) {
++              DRM_ERROR("Illegal header5 header data\n");
++              return state_error;
++      }
++      if (eat_words(&buf, buf_end, data))
++              return state_error;
++      if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
++              return state_error;
++      *buffer = buf;
++      return state_command;
++
++}
++
++static __inline__ verifier_state_t
++via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                 const uint32_t * buf_end)
++{
++      uint32_t addr, count, i;
++      const uint32_t *buf = *buffer;
++
++      addr = *buf++ & ~VIA_VIDEOMASK;
++      i = count = *buf;
++      buf += 3;
++      while (i--) {
++              VIA_WRITE(addr, *buf++);
++      }
++      if (count & 3)
++              buf += 4 - (count & 3);
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
++{
++      uint32_t data;
++      const uint32_t *buf = *buffer;
++      uint32_t i;
++
++      if (buf_end - buf < 4) {
++              DRM_ERROR("Illegal termination of video header6 command\n");
++              return state_error;
++      }
++      buf++;
++      data = *buf++;
++      if (*buf++ != 0x00F60000) {
++              DRM_ERROR("Illegal header6 header data\n");
++              return state_error;
++      }
++      if (*buf++ != 0x00000000) {
++              DRM_ERROR("Illegal header6 header data\n");
++              return state_error;
++      }
++      if ((buf_end - buf) < (data << 1)) {
++              DRM_ERROR("Illegal termination of video header6 command\n");
++              return state_error;
++      }
++      for (i = 0; i < data; ++i) {
++              if (verify_mmio_address(*buf++))
++                      return state_error;
++              buf++;
++      }
++      data <<= 1;
++      if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
++              return state_error;
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                 const uint32_t * buf_end)
++{
++
++      uint32_t addr, count, i;
++      const uint32_t *buf = *buffer;
++
++      i = count = *++buf;
++      buf += 3;
++      while (i--) {
++              addr = *buf++;
++              VIA_WRITE(addr, *buf++);
++      }
++      count <<= 1;
++      if (count & 3)
++              buf += 4 - (count & 3);
++      *buffer = buf;
++      return state_command;
++}
++
++int
++via_verify_command_stream(const uint32_t * buf, unsigned int size,
++                        struct drm_device * dev, int agp)
++{
++
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_state_t *hc_state = &dev_priv->hc_state;
++      drm_via_state_t saved_state = *hc_state;
++      uint32_t cmd;
++      const uint32_t *buf_end = buf + (size >> 2);
++      verifier_state_t state = state_command;
++      int cme_video;
++      int supported_3d;
++
++      cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
++                   dev_priv->chipset == VIA_DX9_0);
++
++      supported_3d = dev_priv->chipset != VIA_DX9_0;
++
++      hc_state->dev = dev;
++      hc_state->unfinished = no_sequence;
++      hc_state->map_cache = NULL;
++      hc_state->agp = agp;
++      hc_state->buf_start = buf;
++      dev_priv->num_fire_offsets = 0;
++
++      while (buf < buf_end) {
++
++              switch (state) {
++              case state_header2:
++                      state = via_check_header2(&buf, buf_end, hc_state);
++                      break;
++              case state_header1:
++                      state = via_check_header1(&buf, buf_end);
++                      break;
++              case state_vheader5:
++                      state = via_check_vheader5(&buf, buf_end);
++                      break;
++              case state_vheader6:
++                      state = via_check_vheader6(&buf, buf_end);
++                      break;
++              case state_command:
++                      if ((HALCYON_HEADER2 == (cmd = *buf)) &&
++                          supported_3d)
++                              state = state_header2;
++                      else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                              state = state_header1;
++                      else if (cme_video
++                               && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
++                              state = state_vheader5;
++                      else if (cme_video
++                               && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
++                              state = state_vheader6;
++                      else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
++                              DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
++                              state = state_error;
++                      } else {
++                              DRM_ERROR
++                                  ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
++                                   cmd);
++                              state = state_error;
++                      }
++                      break;
++              case state_error:
++              default:
++                      *hc_state = saved_state;
++                      return -EINVAL;
++              }
++      }
++      if (state == state_error) {
++              *hc_state = saved_state;
++              return -EINVAL;
++      }
++      return 0;
++}
++
++int
++via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
++                       unsigned int size)
++{
++
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      uint32_t cmd;
++      const uint32_t *buf_end = buf + (size >> 2);
++      verifier_state_t state = state_command;
++      int fire_count = 0;
++
++      while (buf < buf_end) {
++
++              switch (state) {
++              case state_header2:
++                      state =
++                          via_parse_header2(dev_priv, &buf, buf_end,
++                                            &fire_count);
++                      break;
++              case state_header1:
++                      state = via_parse_header1(dev_priv, &buf, buf_end);
++                      break;
++              case state_vheader5:
++                      state = via_parse_vheader5(dev_priv, &buf, buf_end);
++                      break;
++              case state_vheader6:
++                      state = via_parse_vheader6(dev_priv, &buf, buf_end);
++                      break;
++              case state_command:
++                      if (HALCYON_HEADER2 == (cmd = *buf))
++                              state = state_header2;
++                      else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                              state = state_header1;
++                      else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
++                              state = state_vheader5;
++                      else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
++                              state = state_vheader6;
++                      else {
++                              DRM_ERROR
++                                  ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
++                                   cmd);
++                              state = state_error;
++                      }
++                      break;
++              case state_error:
++              default:
++                      return -EINVAL;
++              }
++      }
++      if (state == state_error) {
++              return -EINVAL;
++      }
++      return 0;
++}
++
++static void
++setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
++{
++      int i;
++
++      for (i = 0; i < 256; ++i) {
++              table[i] = forbidden_command;
++      }
++
++      for (i = 0; i < size; ++i) {
++              table[init_table[i].code] = init_table[i].hz;
++      }
++}
++
++void via_init_command_verifier(void)
++{
++      setup_hazard_table(init_table1, table1,
++                         sizeof(init_table1) / sizeof(hz_init_t));
++      setup_hazard_table(init_table2, table2,
++                         sizeof(init_table2) / sizeof(hz_init_t));
++      setup_hazard_table(init_table3, table3,
++                         sizeof(init_table3) / sizeof(hz_init_t));
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_verifier.h git-nokia/drivers/gpu/drm-tungsten/via_verifier.h
+--- git/drivers/gpu/drm-tungsten/via_verifier.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_verifier.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,62 @@
++/*
++ * Copyright 2004 The Unichrome Project. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Thomas Hellström 2004.
++ */
++
++#ifndef _VIA_VERIFIER_H_
++#define _VIA_VERIFIER_H_
++
++typedef enum {
++      no_sequence = 0,
++      z_address,
++      dest_address,
++      tex_address
++} drm_via_sequence_t;
++
++typedef struct {
++      unsigned texture;
++      uint32_t z_addr;
++      uint32_t d_addr;
++      uint32_t t_addr[2][10];
++      uint32_t pitch[2][10];
++      uint32_t height[2][10];
++      uint32_t tex_level_lo[2];
++      uint32_t tex_level_hi[2];
++      uint32_t tex_palette_size[2];
++      uint32_t tex_npot[2];
++      drm_via_sequence_t unfinished;
++      int agp_texture;
++      int multitex;
++      struct drm_device *dev;
++      drm_local_map_t *map_cache;
++      uint32_t vertex_count;
++      int agp;
++      const uint32_t *buf_start;
++} drm_via_state_t;
++
++extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
++                                  struct drm_device *dev, int agp);
++extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
++                                   unsigned int size);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_video.c git-nokia/drivers/gpu/drm-tungsten/via_video.c
+--- git/drivers/gpu/drm-tungsten/via_video.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_video.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,93 @@
++/*
++ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Thomas Hellstrom 2005.
++ *
++ * Video and XvMC related functions.
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++void via_init_futex(drm_via_private_t * dev_priv)
++{
++      unsigned int i;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
++              DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
++              XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
++      }
++}
++
++void via_cleanup_futex(drm_via_private_t * dev_priv)
++{
++}
++
++void via_release_futex(drm_via_private_t * dev_priv, int context)
++{
++      unsigned int i;
++      volatile int *lock;
++
++      if (!dev_priv->sarea_priv)
++              return;
++
++      for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
++              lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
++              if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
++                      if (_DRM_LOCK_IS_HELD(*lock)
++                          && (*lock & _DRM_LOCK_CONT)) {
++                              DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
++                      }
++                      *lock = 0;
++              }
++      }
++}
++
++int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_futex_t *fx = data;
++      volatile int *lock;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      if (fx->lock > VIA_NR_XVMC_LOCKS)
++              return -EFAULT;
++
++      lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
++
++      switch (fx->func) {
++      case VIA_FUTEX_WAIT:
++              DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
++                          (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
++              return ret;
++      case VIA_FUTEX_WAKE:
++              DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
++              return 0;
++      }
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_cmdlist.c git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.c
+--- git/drivers/gpu/drm-tungsten/xgi_cmdlist.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,328 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++#include "xgi_cmdlist.h"
++
++static void xgi_emit_flush(struct xgi_info * info, bool stop);
++static void xgi_emit_nop(struct xgi_info * info);
++static unsigned int get_batch_command(enum xgi_batch_type type);
++static void triggerHWCommandList(struct xgi_info * info);
++static void xgi_cmdlist_reset(struct xgi_info * info);
++
++
++/**
++ * Graphic engine register (2d/3d) acessing interface
++ */
++static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
++{
++#ifdef XGI_MMIO_DEBUG
++      DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
++               map->handle, addr, data);
++#endif
++      DRM_WRITE32(map, addr, data);
++}
++
++
++int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
++                         struct drm_file * filp)
++{
++      struct xgi_mem_alloc mem_alloc = {
++              .location = XGI_MEMLOC_NON_LOCAL,
++              .size = size,
++      };
++      int err;
++
++      err = xgi_alloc(info, &mem_alloc, filp);
++      if (err) {
++              return err;
++      }
++
++      info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
++      info->cmdring.size = mem_alloc.size;
++      info->cmdring.ring_hw_base = mem_alloc.hw_addr;
++      info->cmdring.last_ptr = NULL;
++      info->cmdring.ring_offset = 0;
++
++      return 0;
++}
++
++
++/**
++ * get_batch_command - Get the command ID for the current begin type.
++ * @type: Type of the current batch
++ *
++ * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
++ *
++ * This function assumes that @type is on the range [0,3].
++ */
++unsigned int get_batch_command(enum xgi_batch_type type)
++{
++      static const unsigned int ports[4] = {
++              0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
++      };
++
++      return ports[type];
++}
++
++
++int xgi_submit_cmdlist(struct drm_device * dev, void * data,
++                     struct drm_file * filp)
++{
++      struct xgi_info *const info = dev->dev_private;
++      const struct xgi_cmd_info *const pCmdInfo =
++              (struct xgi_cmd_info *) data;
++      const unsigned int cmd = get_batch_command(pCmdInfo->type);
++      u32 begin[4];
++
++
++      begin[0] = (cmd << 24) | BEGIN_VALID_MASK
++              | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
++      begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
++      begin[2] = pCmdInfo->hw_addr >> 4;
++      begin[3] = 0;
++
++      if (info->cmdring.last_ptr == NULL) {
++              const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
++
++
++              /* Enable PCI Trigger Mode
++               */
++              dwWriteReg(info->mmio_map,
++                         BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
++                         (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
++                         M2REG_CLEAR_COUNTERS_MASK | 0x08 |
++                         M2REG_PCI_TRIGGER_MODE_MASK);
++
++              dwWriteReg(info->mmio_map,
++                         BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
++                         (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
++                         M2REG_PCI_TRIGGER_MODE_MASK);
++
++
++              /* Send PCI begin command
++               */
++              dwWriteReg(info->mmio_map, portOffset,      begin[0]);
++              dwWriteReg(info->mmio_map, portOffset +  4, begin[1]);
++              dwWriteReg(info->mmio_map, portOffset +  8, begin[2]);
++              dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
++      } else {
++              DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
++
++              if (pCmdInfo->type == BTYPE_3D) {
++                      xgi_emit_flush(info, false);
++              }
++
++              info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
++              info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]);
++              info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]);
++              DRM_WRITEMEMORYBARRIER();
++              info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]);
++
++              triggerHWCommandList(info);
++      }
++
++      info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
++#ifdef XGI_HAVE_FENCE
++      drm_fence_flush_old(info->dev, 0, info->next_sequence);
++#endif /* XGI_HAVE_FENCE */
++      return 0;
++}
++
++
++/*
++    state:      0 - console
++                1 - graphic
++                2 - fb
++                3 - logout
++*/
++int xgi_state_change(struct xgi_info * info, unsigned int to,
++                   unsigned int from)
++{
++#define STATE_CONSOLE   0
++#define STATE_GRAPHIC   1
++#define STATE_FBTERM    2
++#define STATE_LOGOUT    3
++#define STATE_REBOOT    4
++#define STATE_SHUTDOWN  5
++
++      if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
++              DRM_INFO("Leaving graphical mode (probably VT switch)\n");
++      } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
++              DRM_INFO("Entering graphical mode (probably VT switch)\n");
++              xgi_cmdlist_reset(info);
++      } else if ((from == STATE_GRAPHIC)
++                 && ((to == STATE_LOGOUT)
++                     || (to == STATE_REBOOT)
++                     || (to == STATE_SHUTDOWN))) {
++              DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
++      } else {
++              DRM_ERROR("Invalid state change.\n");
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++
++int xgi_state_change_ioctl(struct drm_device * dev, void * data,
++                         struct drm_file * filp)
++{
++      struct xgi_state_info *const state =
++              (struct xgi_state_info *) data;
++      struct xgi_info *info = dev->dev_private;
++
++
++      return xgi_state_change(info, state->_toState, state->_fromState);
++}
++
++
++void xgi_cmdlist_reset(struct xgi_info * info)
++{
++      info->cmdring.last_ptr = NULL;
++      info->cmdring.ring_offset = 0;
++}
++
++
++void xgi_cmdlist_cleanup(struct xgi_info * info)
++{
++      if (info->cmdring.ring_hw_base != 0) {
++              /* If command lists have been issued, terminate the command
++               * list chain with a flush command.
++               */
++              if (info->cmdring.last_ptr != NULL) {
++                      xgi_emit_flush(info, false);
++                      xgi_emit_nop(info);
++              }
++
++              xgi_waitfor_pci_idle(info);
++
++              (void) memset(&info->cmdring, 0, sizeof(info->cmdring));
++      }
++}
++
++static void triggerHWCommandList(struct xgi_info * info)
++{
++      static unsigned int s_triggerID = 1;
++
++      dwWriteReg(info->mmio_map,
++                 BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
++                 0x05000000 + (0x0ffff & s_triggerID++));
++}
++
++
++/**
++ * Emit a flush to the CRTL command stream.
++ * @info XGI info structure
++ *
++ * This function assumes info->cmdring.ptr is non-NULL.
++ */
++void xgi_emit_flush(struct xgi_info * info, bool stop)
++{
++      const u32 flush_command[8] = {
++              ((0x10 << 24)
++               | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
++              BEGIN_LINK_ENABLE_MASK | (0x00004),
++              0x00000000, 0x00000000,
++
++              /* Flush the 2D engine with the default 32 clock delay.
++               */
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++      };
++      const unsigned int flush_size = sizeof(flush_command);
++      u32 *batch_addr;
++      u32 hw_addr;
++      unsigned int i;
++
++
++      /* check buf is large enough to contain a new flush batch */
++      if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
++              info->cmdring.ring_offset = 0;
++      }
++
++      hw_addr = info->cmdring.ring_hw_base
++              + info->cmdring.ring_offset;
++      batch_addr = info->cmdring.ptr
++              + (info->cmdring.ring_offset / 4);
++
++      for (i = 0; i < (flush_size / 4); i++) {
++              batch_addr[i] = cpu_to_le32(flush_command[i]);
++      }
++
++      if (stop) {
++              *batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK);
++      }
++
++      info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4));
++      info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4);
++      info->cmdring.last_ptr[3] = 0;
++      DRM_WRITEMEMORYBARRIER();
++      info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
++              | (BEGIN_VALID_MASK));
++
++      triggerHWCommandList(info);
++
++      info->cmdring.ring_offset += flush_size;
++      info->cmdring.last_ptr = batch_addr;
++}
++
++
++/**
++ * Emit an empty command to the CRTL command stream.
++ * @info XGI info structure
++ *
++ * This function assumes info->cmdring.ptr is non-NULL.  In addition, since
++ * this function emits a command that does not have linkage information,
++ * it sets info->cmdring.ptr to NULL.
++ */
++void xgi_emit_nop(struct xgi_info * info)
++{
++      info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK
++              | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence));
++      info->cmdring.last_ptr[2] = 0;
++      info->cmdring.last_ptr[3] = 0;
++      DRM_WRITEMEMORYBARRIER();
++      info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
++              | (BEGIN_VALID_MASK));
++
++      triggerHWCommandList(info);
++
++      info->cmdring.last_ptr = NULL;
++}
++
++
++void xgi_emit_irq(struct xgi_info * info)
++{
++      if (info->cmdring.last_ptr == NULL)
++              return;
++
++      xgi_emit_flush(info, true);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_cmdlist.h git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.h
+--- git/drivers/gpu/drm-tungsten/xgi_cmdlist.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,66 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_CMDLIST_H_
++#define _XGI_CMDLIST_H_
++
++struct xgi_cmdring_info {
++      /**
++       * Kernel space pointer to the base of the command ring.
++       */
++      u32 * ptr;
++
++      /**
++       * Size, in bytes, of the command ring.
++       */
++      unsigned int size;
++
++      /**
++       * Base address of the command ring from the hardware's PoV.
++       */
++      unsigned int ring_hw_base;
++
++      u32 * last_ptr;
++
++      /**
++       * Offset, in bytes, from the start of the ring to the next available
++       * location to store a command.
++       */
++      unsigned int ring_offset;
++};
++
++struct xgi_info;
++extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
++      struct drm_file * filp);
++
++extern int xgi_state_change(struct xgi_info * info, unsigned int to,
++      unsigned int from);
++
++extern void xgi_cmdlist_cleanup(struct xgi_info * info);
++
++extern void xgi_emit_irq(struct xgi_info * info);
++
++#endif                                /* _XGI_CMDLIST_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_drm.h git-nokia/drivers/gpu/drm-tungsten/xgi_drm.h
+--- git/drivers/gpu/drm-tungsten/xgi_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,137 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR
++ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_DRM_H_
++#define _XGI_DRM_H_
++
++#include <linux/types.h>
++#include <asm/ioctl.h>
++
++struct drm_xgi_sarea {
++      __u16 device_id;
++      __u16 vendor_id;
++
++      char device_name[32];
++
++      unsigned int scrn_start;
++      unsigned int scrn_xres;
++      unsigned int scrn_yres;
++      unsigned int scrn_bpp;
++      unsigned int scrn_pitch;
++};
++
++
++struct xgi_bootstrap {
++      /**
++       * Size of PCI-e GART range in megabytes.
++       */
++      struct drm_map gart;
++};
++
++
++enum xgi_mem_location {
++      XGI_MEMLOC_NON_LOCAL = 0,
++      XGI_MEMLOC_LOCAL = 1,
++      XGI_MEMLOC_INVALID = 0x7fffffff
++};
++
++struct xgi_mem_alloc {
++      /**
++       * Memory region to be used for allocation.
++       *
++       * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL.
++       */
++      unsigned int location;
++
++      /**
++       * Number of bytes request.
++       *
++       * On successful allocation, set to the actual number of bytes
++       * allocated.
++       */
++      unsigned int size;
++
++      /**
++       * Address of the memory from the graphics hardware's point of view.
++       */
++      __u32 hw_addr;
++
++      /**
++       * Offset of the allocation in the mapping.
++       */
++      __u32 offset;
++
++      /**
++       * Magic handle used to release memory.
++       *
++       * See also DRM_XGI_FREE ioctl.
++       */
++      __u32 index;
++};
++
++enum xgi_batch_type {
++      BTYPE_2D = 0,
++      BTYPE_3D = 1,
++      BTYPE_FLIP = 2,
++      BTYPE_CTRL = 3,
++      BTYPE_NONE = 0x7fffffff
++};
++
++struct xgi_cmd_info {
++      __u32 type;
++      __u32 hw_addr;
++      __u32 size;
++      __u32 id;
++};
++
++struct xgi_state_info {
++      unsigned int _fromState;
++      unsigned int _toState;
++};
++
++
++/*
++ * Ioctl definitions
++ */
++
++#define DRM_XGI_BOOTSTRAP           0
++#define DRM_XGI_ALLOC               1
++#define DRM_XGI_FREE                2
++#define DRM_XGI_SUBMIT_CMDLIST      3
++#define DRM_XGI_STATE_CHANGE        4
++#define DRM_XGI_SET_FENCE           5
++#define DRM_XGI_WAIT_FENCE          6
++
++#define XGI_IOCTL_BOOTSTRAP         DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
++#define XGI_IOCTL_ALLOC             DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
++#define XGI_IOCTL_FREE              DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
++#define XGI_IOCTL_SUBMIT_CMDLIST    DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
++#define XGI_IOCTL_STATE_CHANGE      DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
++#define XGI_IOCTL_SET_FENCE         DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_SET_FENCE, u32)
++#define XGI_IOCTL_WAIT_FENCE        DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_WAIT_FENCE, u32)
++
++#endif /* _XGI_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_drv.c git-nokia/drivers/gpu/drm-tungsten/xgi_drv.c
+--- git/drivers/gpu/drm-tungsten/xgi_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,441 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "drmP.h"
++#include "drm.h"
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++#include "xgi_cmdlist.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      xgi_PCI_IDS
++};
++
++#ifdef XGI_HAVE_FENCE
++extern struct drm_fence_driver xgi_fence_driver;
++#endif /* XGI_HAVE_FENCE */
++
++int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
++
++static struct drm_ioctl_desc xgi_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH),
++};
++
++static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
++static int xgi_driver_unload(struct drm_device *dev);
++static void xgi_driver_lastclose(struct drm_device * dev);
++static void xgi_reclaim_buffers_locked(struct drm_device * dev,
++      struct drm_file * filp);
++static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
++static int xgi_kern_isr_postinstall(struct drm_device * dev);
++
++
++static struct drm_driver driver = {
++      .driver_features =
++              DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
++              DRIVER_IRQ_SHARED | DRIVER_SG,
++      .dev_priv_size = sizeof(struct xgi_info),
++      .load = xgi_driver_load,
++      .unload = xgi_driver_unload,
++      .lastclose = xgi_driver_lastclose,
++      .dma_quiescent = NULL,
++      .irq_preinstall = NULL,
++      .irq_postinstall = xgi_kern_isr_postinstall,
++      .irq_uninstall = NULL,
++      .irq_handler = xgi_kern_isr,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = xgi_ioctls,
++      .dma_ioctl = NULL,
++
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = xgi_compat_ioctl,
++#endif
++      },
++
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++#ifdef XGI_HAVE_FENCE
++      .fence_driver = &xgi_fence_driver,
++#endif /* XGI_HAVE_FENCE */
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init xgi_init(void)
++{
++      driver.num_ioctls = xgi_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit xgi_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(xgi_init);
++module_exit(xgi_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
++
++
++void xgi_engine_init(struct xgi_info * info)
++{
++      u8 temp;
++
++
++      OUT3C5B(info->mmio_map, 0x11, 0x92);
++
++      /* -------> copy from OT2D
++       * PCI Retry Control Register.
++       * disable PCI read retry & enable write retry in mem. (10xx xxxx)b
++       */
++      temp = IN3X5B(info->mmio_map, 0x55);
++      OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);
++
++      xgi_enable_ge(info);
++
++      /* Enable linear addressing of the card. */
++      temp = IN3X5B(info->mmio_map, 0x21);
++      OUT3X5B(info->mmio_map, 0x21, temp | 0x20);
++
++      /* Enable 32-bit internal data path */
++      temp = IN3X5B(info->mmio_map, 0x2A);
++      OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);
++
++      /* Enable PCI burst write ,disable burst read and enable MMIO. */
++      /*
++       * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO.
++       * 7 ---- Pixel Data Format 1:  big endian 0:  little endian
++       * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]#  with Big Endian Format
++       * 2 ---- PCI Burst Write Enable
++       * 1 ---- PCI Burst Read Enable
++       * 0 ---- MMIO Control
++       */
++      temp = IN3X5B(info->mmio_map, 0x39);
++      OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);
++
++      /* enable GEIO decode */
++      /* temp = IN3X5B(info->mmio_map, 0x29);
++       * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
++       */
++
++      /* Enable graphic engine I/O PCI retry function*/
++      /* temp = IN3X5B(info->mmio_map, 0x62);
++       * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
++       */
++
++      /* protect all register except which protected by 3c5.0e.7 */
++        /* OUT3C5B(info->mmio_map, 0x11, 0x87); */
++}
++
++
++int xgi_bootstrap(struct drm_device * dev, void * data,
++                struct drm_file * filp)
++{
++      struct xgi_info *info = dev->dev_private;
++      struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data;
++      struct drm_map_list *maplist;
++      int err;
++
++
++      DRM_SPININIT(&info->fence_lock, "fence lock");
++      info->next_sequence = 0;
++      info->complete_sequence = 0;
++
++      if (info->mmio_map == NULL) {
++              err = drm_addmap(dev, info->mmio.base, info->mmio.size,
++                               _DRM_REGISTERS, _DRM_KERNEL,
++                               &info->mmio_map);
++              if (err) {
++                      DRM_ERROR("Unable to map MMIO region: %d\n", err);
++                      return err;
++              }
++
++              xgi_enable_mmio(info);
++              xgi_engine_init(info);
++      }
++
++
++      info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;
++
++      DRM_INFO("fb   base: 0x%lx, size: 0x%x (probed)\n",
++               (unsigned long) info->fb.base, info->fb.size);
++
++
++      if ((info->fb.base == 0) || (info->fb.size == 0)) {
++              DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
++                        (unsigned long) info->fb.base, info->fb.size);
++              return -EINVAL;
++      }
++
++
++      /* Init the resource manager */
++      if (!info->fb_heap_initialized) {
++              err = xgi_fb_heap_init(info);
++              if (err) {
++                      DRM_ERROR("Unable to initialize FB heap.\n");
++                      return err;
++              }
++      }
++
++
++      info->pcie.size = bs->gart.size;
++
++      /* Init the resource manager */
++      if (!info->pcie_heap_initialized) {
++              err = xgi_pcie_heap_init(info);
++              if (err) {
++                      DRM_ERROR("Unable to initialize GART heap.\n");
++                      return err;
++              }
++
++              /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
++              err = xgi_cmdlist_initialize(info, 0x100000, filp);
++              if (err) {
++                      DRM_ERROR("xgi_cmdlist_initialize() failed\n");
++                      return err;
++              }
++      }
++
++
++      if (info->pcie_map == NULL) {
++              err = drm_addmap(info->dev, 0, info->pcie.size,
++                               _DRM_SCATTER_GATHER, _DRM_LOCKED,
++                               & info->pcie_map);
++              if (err) {
++                      DRM_ERROR("Could not add map for GART backing "
++                                "store.\n");
++                      return err;
++              }
++      }
++
++
++      maplist = drm_find_matching_map(dev, info->pcie_map);
++      if (maplist == NULL) {
++              DRM_ERROR("Could not find GART backing store map.\n");
++              return -EINVAL;
++      }
++
++      bs->gart = *info->pcie_map;
++      bs->gart.handle = (void *)(unsigned long) maplist->user_token;
++      return 0;
++}
++
++
++void xgi_driver_lastclose(struct drm_device * dev)
++{
++      struct xgi_info * info = dev->dev_private;
++
++      if (info != NULL) {
++              if (info->mmio_map != NULL) {
++                      xgi_cmdlist_cleanup(info);
++                      xgi_disable_ge(info);
++                      xgi_disable_mmio(info);
++              }
++
++              /* The core DRM lastclose routine will destroy all of our
++               * mappings for us.  NULL out the pointers here so that
++               * xgi_bootstrap can do the right thing.
++               */
++              info->pcie_map = NULL;
++              info->mmio_map = NULL;
++              info->fb_map = NULL;
++
++              if (info->pcie_heap_initialized) {
++                      drm_ati_pcigart_cleanup(dev, &info->gart_info);
++              }
++
++              if (info->fb_heap_initialized
++                  || info->pcie_heap_initialized) {
++                      drm_sman_cleanup(&info->sman);
++
++                      info->fb_heap_initialized = false;
++                      info->pcie_heap_initialized = false;
++              }
++      }
++}
++
++
++void xgi_reclaim_buffers_locked(struct drm_device * dev,
++                              struct drm_file * filp)
++{
++      struct xgi_info * info = dev->dev_private;
++
++      mutex_lock(&info->dev->struct_mutex);
++      if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) {
++              mutex_unlock(&info->dev->struct_mutex);
++              return;
++      }
++
++      if (dev->driver->dma_quiescent) {
++              dev->driver->dma_quiescent(dev);
++      }
++
++      drm_sman_owner_cleanup(&info->sman, (unsigned long) filp);
++      mutex_unlock(&info->dev->struct_mutex);
++      return;
++}
++
++
++/*
++ * driver receives an interrupt if someone waiting, then hand it off.
++ */
++irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      struct xgi_info *info = dev->dev_private;
++      const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
++                                      (0x2800
++                                       + M2REG_AUTO_LINK_STATUS_ADDRESS)))
++              & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
++                 | M2REG_ACTIVE_INTERRUPT_0_MASK
++                 | M2REG_ACTIVE_INTERRUPT_2_MASK
++                 | M2REG_ACTIVE_INTERRUPT_3_MASK);
++
++
++      if (irq_bits != 0) {
++              DRM_WRITE32(info->mmio_map,
++                          0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
++                          cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
++#ifdef XGI_HAVE_FENCE
++              xgi_fence_handler(dev);
++#endif /* XGI_HAVE_FENCE */
++              DRM_WAKEUP(&info->fence_queue);
++              return IRQ_HANDLED;
++      } else {
++              return IRQ_NONE;
++      }
++}
++
++
++int xgi_kern_isr_postinstall(struct drm_device * dev)
++{
++      struct xgi_info *info = dev->dev_private;
++
++      DRM_INIT_WAITQUEUE(&info->fence_queue);
++      return 0;
++}
++
++
++int xgi_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
++      int err;
++
++      if (!info)
++              return -ENOMEM;
++
++      (void) memset(info, 0, sizeof(*info));
++      dev->dev_private = info;
++      info->dev = dev;
++
++      info->mmio.base = drm_get_resource_start(dev, 1);
++      info->mmio.size = drm_get_resource_len(dev, 1);
++
++      DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
++               (unsigned long) info->mmio.base, info->mmio.size);
++
++
++      if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
++              DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
++                        (unsigned long) info->mmio.base, info->mmio.size);
++              err = -EINVAL;
++              goto fail;
++      }
++
++
++      info->fb.base = drm_get_resource_start(dev, 0);
++      info->fb.size = drm_get_resource_len(dev, 0);
++
++      DRM_INFO("fb   base: 0x%lx, size: 0x%x\n",
++               (unsigned long) info->fb.base, info->fb.size);
++
++
++      err = drm_sman_init(&info->sman, 2, 12, 8);
++      if (err) {
++              goto fail;
++      }
++
++
++      return 0;
++
++fail:
++      drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
++      return err;
++}
++
++int xgi_driver_unload(struct drm_device *dev)
++{
++      struct xgi_info * info = dev->dev_private;
++
++      drm_sman_takedown(&info->sman);
++      drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_drv.h git-nokia/drivers/gpu/drm-tungsten/xgi_drv.h
+--- git/drivers/gpu/drm-tungsten/xgi_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_DRV_H_
++#define _XGI_DRV_H_
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sman.h"
++
++#define DRIVER_AUTHOR         "Andrea Zhang <andrea_zhang@macrosynergy.com>"
++
++#define DRIVER_NAME           "xgi"
++#define DRIVER_DESC           "XGI XP5 / XP10 / XG47"
++#define DRIVER_DATE           "20080612"
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          2
++#define DRIVER_PATCHLEVEL     0
++
++#include "xgi_cmdlist.h"
++#include "xgi_drm.h"
++
++struct xgi_aperture {
++      dma_addr_t base;
++      unsigned int size;
++};
++
++struct xgi_info {
++      struct drm_device *dev;
++
++      bool bootstrap_done;
++
++      /* physical characteristics */
++      struct xgi_aperture mmio;
++      struct xgi_aperture fb;
++      struct xgi_aperture pcie;
++
++      struct drm_map *mmio_map;
++      struct drm_map *pcie_map;
++      struct drm_map *fb_map;
++
++      /* look up table parameters */
++      struct drm_ati_pcigart_info gart_info;
++      unsigned int lutPageSize;
++
++      struct drm_sman sman;
++      bool fb_heap_initialized;
++      bool pcie_heap_initialized;
++
++      struct xgi_cmdring_info cmdring;
++
++      DRM_SPINTYPE fence_lock;
++      wait_queue_head_t fence_queue;
++      unsigned complete_sequence;
++      unsigned next_sequence;
++};
++
++extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
++      unsigned long arg);
++
++extern int xgi_fb_heap_init(struct xgi_info * info);
++
++extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
++      struct drm_file * filp);
++
++extern int xgi_free(struct xgi_info * info, unsigned int index,
++      struct drm_file * filp);
++
++extern int xgi_pcie_heap_init(struct xgi_info * info);
++
++extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
++
++extern void xgi_enable_mmio(struct xgi_info * info);
++extern void xgi_disable_mmio(struct xgi_info * info);
++extern void xgi_enable_ge(struct xgi_info * info);
++extern void xgi_disable_ge(struct xgi_info * info);
++
++/* TTM-style fences.
++ */
++#ifdef XGI_HAVE_FENCE
++extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
++extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
++      uint32_t flags, uint32_t * sequence, uint32_t * native_type);
++extern void xgi_fence_handler(struct drm_device * dev);
++extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
++      uint32_t flags);
++#endif /* XGI_HAVE_FENCE */
++
++
++/* Non-TTM-style fences.
++ */
++extern int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++
++extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_free_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_submit_cmdlist(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_state_change_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_fb.c git-nokia/drivers/gpu/drm-tungsten/xgi_fb.c
+--- git/drivers/gpu/drm-tungsten/xgi_fb.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_fb.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++
++#define XGI_FB_HEAP_START 0x1000000
++
++int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
++            struct drm_file * filp)
++{
++      struct drm_memblock_item *block;
++      const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
++              ? "on-card" : "GART";
++
++
++      if ((alloc->location != XGI_MEMLOC_LOCAL)
++          && (alloc->location != XGI_MEMLOC_NON_LOCAL)) {
++              DRM_ERROR("Invalid memory pool (0x%08x) specified.\n",
++                        alloc->location);
++              return -EINVAL;
++      }
++
++      if ((alloc->location == XGI_MEMLOC_LOCAL)
++          ? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
++              DRM_ERROR("Attempt to allocate from uninitialized memory "
++                        "pool (0x%08x).\n", alloc->location);
++              return -EINVAL;
++      }
++
++      mutex_lock(&info->dev->struct_mutex);
++      block = drm_sman_alloc(&info->sman, alloc->location, alloc->size,
++                             0, (unsigned long) filp);
++      mutex_unlock(&info->dev->struct_mutex);
++
++      if (block == NULL) {
++              alloc->size = 0;
++              DRM_ERROR("%s memory allocation failed\n", mem_name);
++              return -ENOMEM;
++      } else {
++              alloc->offset = (*block->mm->offset)(block->mm,
++                                                   block->mm_info);
++              alloc->hw_addr = alloc->offset;
++              alloc->index = block->user_hash.key;
++
++              if (block->user_hash.key != (unsigned long) alloc->index) {
++                      DRM_ERROR("%s truncated handle %lx for pool %d "
++                                "offset %x\n",
++                                __func__, block->user_hash.key,
++                                alloc->location, alloc->offset);
++              }
++
++              if (alloc->location == XGI_MEMLOC_NON_LOCAL) {
++                      alloc->hw_addr += info->pcie.base;
++              }
++
++              DRM_DEBUG("%s memory allocation succeeded: 0x%x\n",
++                        mem_name, alloc->offset);
++      }
++
++      return 0;
++}
++
++
++int xgi_alloc_ioctl(struct drm_device * dev, void * data,
++                  struct drm_file * filp)
++{
++      struct xgi_info *info = dev->dev_private;
++
++      return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp);
++}
++
++
++int xgi_free(struct xgi_info * info, unsigned int index,
++           struct drm_file * filp)
++{
++      int err;
++
++      mutex_lock(&info->dev->struct_mutex);
++      err = drm_sman_free_key(&info->sman, index);
++      mutex_unlock(&info->dev->struct_mutex);
++
++      return err;
++}
++
++
++int xgi_free_ioctl(struct drm_device * dev, void * data,
++                 struct drm_file * filp)
++{
++      struct xgi_info *info = dev->dev_private;
++
++      return xgi_free(info, *(unsigned int *) data, filp);
++}
++
++
++int xgi_fb_heap_init(struct xgi_info * info)
++{
++      int err;
++
++      mutex_lock(&info->dev->struct_mutex);
++      err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
++                               XGI_FB_HEAP_START,
++                               info->fb.size - XGI_FB_HEAP_START);
++      mutex_unlock(&info->dev->struct_mutex);
++
++      info->fb_heap_initialized = (err == 0);
++      return err;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_fence.c git-nokia/drivers/gpu/drm-tungsten/xgi_fence.c
+--- git/drivers/gpu/drm-tungsten/xgi_fence.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_fence.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,186 @@
++/*
++ * (C) Copyright IBM Corporation 2007
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Ian Romanick <idr@us.ibm.com>
++ */
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++#include "xgi_cmdlist.h"
++
++static int xgi_low_level_fence_emit(struct drm_device *dev, u32 *sequence)
++{
++      struct xgi_info *const info = dev->dev_private;
++
++      if (info == NULL) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_SPINLOCK(&info->fence_lock);
++      info->next_sequence++;
++      if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
++              info->next_sequence = 1;
++      }
++
++      *sequence = (u32) info->next_sequence;
++      DRM_SPINUNLOCK(&info->fence_lock);
++
++
++      xgi_emit_irq(info);
++      return 0;
++}
++
++#define GET_BEGIN_ID(i) (le32_to_cpu(DRM_READ32((i)->mmio_map, 0x2820)) \
++                               & BEGIN_BEGIN_IDENTIFICATION_MASK)
++
++static int xgi_low_level_fence_wait(struct drm_device *dev, unsigned *sequence)
++{
++      struct xgi_info *const info = dev->dev_private;
++      unsigned int cur_fence;
++      int ret = 0;
++
++      if (info == NULL) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      /* Assume that the user has missed the current sequence number
++       * by about a day rather than she wants to wait for years
++       * using fences.
++       */
++      DRM_WAIT_ON(ret, info->fence_queue, 3 * DRM_HZ,
++                  ((((cur_fence = GET_BEGIN_ID(info))
++                    - *sequence) & BEGIN_BEGIN_IDENTIFICATION_MASK)
++                   <= (1 << 18)));
++
++      info->complete_sequence = cur_fence;
++      *sequence = cur_fence;
++
++      return ret;
++}
++
++
++int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
++                      struct drm_file * filp)
++{
++      (void) filp;
++      return xgi_low_level_fence_emit(dev, (u32 *) data);
++}
++
++
++int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
++                       struct drm_file * filp)
++{
++      (void) filp;
++      return xgi_low_level_fence_wait(dev, (u32 *) data);
++}
++
++
++#ifdef XGI_HAVE_FENCE
++static void xgi_fence_poll(struct drm_device * dev, uint32_t class, 
++                         uint32_t waiting_types)
++{
++      struct xgi_info * info = dev->dev_private;
++      uint32_t signaled_types = 0;
++
++
++      if ((info == NULL) || (class != 0))
++              return;
++
++      DRM_SPINLOCK(&info->fence_lock);
++
++      if (waiting_types) {
++              if (waiting_types & DRM_FENCE_TYPE_EXE) {
++                      const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
++                                                      0x2820))
++                              & BEGIN_BEGIN_IDENTIFICATION_MASK;
++
++                      if (begin_id != info->complete_sequence) {
++                              info->complete_sequence = begin_id;
++                              signaled_types |= DRM_FENCE_TYPE_EXE;
++                      }
++              }
++
++              if (signaled_types) {
++                      drm_fence_handler(dev, 0, info->complete_sequence,
++                                        signaled_types, 0);
++              }
++      }
++
++      DRM_SPINUNLOCK(&info->fence_lock);
++}
++
++
++int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
++                          uint32_t flags, uint32_t * sequence,
++                          uint32_t * native_type)
++{
++      int err;
++
++      (void) flags;
++
++      if (class != 0)
++              return -EINVAL;
++
++      err = xgi_low_level_fence_emit(dev, sequence);
++      if (err)
++              return err;
++
++      *native_type = DRM_FENCE_TYPE_EXE;
++      return 0;
++}
++
++
++void xgi_fence_handler(struct drm_device * dev)
++{
++      struct drm_fence_manager * fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++
++      write_lock(&fm->lock);
++      xgi_fence_poll(dev, 0, fc->waiting_types);
++      write_unlock(&fm->lock);
++}
++
++
++int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
++{
++      return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0;
++}
++
++struct drm_fence_driver xgi_fence_driver = {
++      .num_classes = 1,
++      .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
++      .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
++      .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
++      .has_irq = xgi_fence_has_irq,
++      .emit = xgi_fence_emit_sequence,
++      .flush = NULL,
++      .poll = xgi_fence_poll,
++      .needed_flush = NULL,
++      .wait = NULL
++};
++
++#endif /* XGI_HAVE_FENCE */
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_ioc32.c git-nokia/drivers/gpu/drm-tungsten/xgi_ioc32.c
+--- git/drivers/gpu/drm-tungsten/xgi_ioc32.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_ioc32.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,140 @@
++/*
++ * (C) Copyright IBM Corporation 2007
++ * Copyright (C) Paul Mackerras 2005.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Ian Romanick <idr@us.ibm.com>
++ */
++
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++
++#include "xgi_drm.h"
++
++/* This is copied from drm_ioc32.c.
++ */
++struct drm_map32 {
++      u32 offset;             /**< Requested physical address (0 for SAREA)*/
++      u32 size;               /**< Requested physical size (bytes) */
++      enum drm_map_type type; /**< Type of memory to map */
++      enum drm_map_flags flags;       /**< Flags */
++      u32 handle;             /**< User-space: "Handle" to pass to mmap() */
++      int mtrr;               /**< MTRR slot used */
++};
++
++struct drm32_xgi_bootstrap {
++      struct drm_map32 gart;
++};
++
++
++extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
++
++static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd,
++                              unsigned long arg)
++{
++      struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg;
++      struct drm32_xgi_bootstrap bs32;
++      struct xgi_bootstrap __user *bs;
++      int err;
++      void *handle;
++
++
++      if (copy_from_user(&bs32, argp, sizeof(bs32))) {
++              return -EFAULT;
++      }
++
++      bs = compat_alloc_user_space(sizeof(*bs));
++      if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) {
++              return -EFAULT;
++      }
++
++      if (__put_user(bs32.gart.offset, &bs->gart.offset)
++          || __put_user(bs32.gart.size, &bs->gart.size)
++          || __put_user(bs32.gart.type, &bs->gart.type)
++          || __put_user(bs32.gart.flags, &bs->gart.flags)) {
++              return -EFAULT;
++      }
++
++      err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP,
++                      (unsigned long)bs);
++      if (err) {
++              return err;
++      }
++
++      if (__get_user(bs32.gart.offset, &bs->gart.offset)
++          || __get_user(bs32.gart.mtrr, &bs->gart.mtrr)
++          || __get_user(handle, &bs->gart.handle)) {
++              return -EFAULT;
++      }
++
++      bs32.gart.handle = (unsigned long)handle;
++      if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) {
++              printk(KERN_ERR "%s truncated handle %p for type %d "
++                     "offset %x\n",
++                     __func__, handle, bs32.gart.type, bs32.gart.offset);
++      }
++
++      if (copy_to_user(argp, &bs32, sizeof(bs32))) {
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++
++drm_ioctl_compat_t *xgi_compat_ioctls[] = {
++      [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
++                    unsigned long arg)
++{
++      const unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls))
++              fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();
++      ret = (fn != NULL)
++              ? (*fn)(filp, cmd, arg)
++              : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_misc.c git-nokia/drivers/gpu/drm-tungsten/xgi_misc.c
+--- git/drivers/gpu/drm-tungsten/xgi_misc.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_misc.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,477 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++
++#include <linux/delay.h>
++
++/*
++ * irq functions
++ */
++#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
++
++static unsigned int s_invalid_begin = 0;
++
++static bool xgi_validate_signal(struct drm_map * map)
++{
++      if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) {
++              u16 check;
++
++              /* Check Read back status */
++              DRM_WRITE8(map, 0x235c, 0x80);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++
++              if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
++                      return false;
++              }
++
++              /* Check RO channel */
++              DRM_WRITE8(map, 0x235c, 0x83);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
++                      return false;
++              }
++
++              /* Check RW channel */
++              DRM_WRITE8(map, 0x235c, 0x88);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
++                      return false;
++              }
++
++              /* Check RO channel outstanding */
++              DRM_WRITE8(map, 0x235c, 0x8f);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if (0 != (check & 0x3ff)) {
++                      return false;
++              }
++
++              /* Check RW channel outstanding */
++              DRM_WRITE8(map, 0x235c, 0x90);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if (0 != (check & 0x3ff)) {
++                      return false;
++              }
++
++              /* No pending PCIE request. GE stall. */
++      }
++
++      return true;
++}
++
++
++static void xgi_ge_hang_reset(struct drm_map * map)
++{
++      int time_out = 0xffff;
++
++      DRM_WRITE8(map, 0xb057, 8);
++      while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
++              while (0 != ((--time_out) & 0xfff))
++                      /* empty */ ;
++
++              if (0 == time_out) {
++                      u8 old_3ce;
++                      u8 old_3cf;
++                      u8 old_index;
++                      u8 old_36;
++
++                      DRM_INFO("Can not reset back 0x%x!\n",
++                               le32_to_cpu(DRM_READ32(map, 0x2800)));
++
++                      DRM_WRITE8(map, 0xb057, 0);
++
++                      /* Have to use 3x5.36 to reset. */
++                      /* Save and close dynamic gating */
++
++                      old_3ce = DRM_READ8(map, 0x3ce);
++                      DRM_WRITE8(map, 0x3ce, 0x2a);
++                      old_3cf = DRM_READ8(map, 0x3cf);
++                      DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe);
++
++                      /* Reset GE */
++                      old_index = DRM_READ8(map, 0x3d4);
++                      DRM_WRITE8(map, 0x3d4, 0x36);
++                      old_36 = DRM_READ8(map, 0x3d5);
++                      DRM_WRITE8(map, 0x3d5, old_36 | 0x10);
++
++                      while (0 != ((--time_out) & 0xfff))
++                              /* empty */ ;
++
++                      DRM_WRITE8(map, 0x3d5, old_36);
++                      DRM_WRITE8(map, 0x3d4, old_index);
++
++                      /* Restore dynamic gating */
++                      DRM_WRITE8(map, 0x3cf, old_3cf);
++                      DRM_WRITE8(map, 0x3ce, old_3ce);
++                      break;
++              }
++      }
++
++      DRM_WRITE8(map, 0xb057, 0);
++}
++
++
++bool xgi_ge_irq_handler(struct xgi_info * info)
++{
++      const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
++      bool is_support_auto_reset = false;
++
++      /* Check GE on/off */
++      if (0 == (0xffffc0f0 & int_status)) {
++              if (0 != (0x1000 & int_status)) {
++                      /* We got GE stall interrupt.
++                       */
++                      DRM_WRITE32(info->mmio_map, 0x2810,
++                                  cpu_to_le32(int_status | 0x04000000));
++
++                      if (is_support_auto_reset) {
++                              static cycles_t last_tick;
++                              static unsigned continue_int_count = 0;
++
++                              /* OE II is busy. */
++
++                              if (!xgi_validate_signal(info->mmio_map)) {
++                                      /* Nothing but skip. */
++                              } else if (0 == continue_int_count++) {
++                                      last_tick = get_cycles();
++                              } else {
++                                      const cycles_t new_tick = get_cycles();
++                                      if ((new_tick - last_tick) >
++                                          STALL_INTERRUPT_RESET_THRESHOLD) {
++                                              continue_int_count = 0;
++                                      } else if (continue_int_count >= 3) {
++                                              continue_int_count = 0;
++
++                                              /* GE Hung up, need reset. */
++                                              DRM_INFO("Reset GE!\n");
++
++                                              xgi_ge_hang_reset(info->mmio_map);
++                                      }
++                              }
++                      }
++              } else if (0 != (0x1 & int_status)) {
++                      s_invalid_begin++;
++                      DRM_WRITE32(info->mmio_map, 0x2810,
++                                  cpu_to_le32((int_status & ~0x01) | 0x04000000));
++              }
++
++              return true;
++      }
++
++      return false;
++}
++
++bool xgi_crt_irq_handler(struct xgi_info * info)
++{
++      bool ret = false;
++      u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
++
++      /* CRT1 interrupt just happened
++       */
++      if (IN3CFB(info->mmio_map, 0x37) & 0x01) {
++              u8 op3cf_3d;
++              u8 op3cf_37;
++
++              /* What happened?
++               */
++              op3cf_37 = IN3CFB(info->mmio_map, 0x37);
++
++              /* Clear CRT interrupt
++               */
++              op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
++              OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
++              OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
++              ret = true;
++      }
++      DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
++
++      return (ret);
++}
++
++bool xgi_dvi_irq_handler(struct xgi_info * info)
++{
++      bool ret = false;
++      const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
++
++      /* DVI interrupt just happened
++       */
++      if (IN3CFB(info->mmio_map, 0x38) & 0x20) {
++              const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
++              u8 op3cf_39;
++              u8 op3cf_37;
++              u8 op3x5_5a;
++
++              /* What happened?
++               */
++              op3cf_37 = IN3CFB(info->mmio_map, 0x37);
++
++              /* Notify BIOS that DVI plug/unplug happened
++               */
++              op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
++              OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);
++
++              DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);
++
++              /* Clear DVI interrupt
++               */
++              op3cf_39 = IN3CFB(info->mmio_map, 0x39);
++              OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
++              OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));
++
++              ret = true;
++      }
++      DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
++
++      return (ret);
++}
++
++
++static void dump_reg_header(unsigned regbase)
++{
++      printk("\n=====xgi_dump_register========0x%x===============\n",
++             regbase);
++      printk("    0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");
++}
++
++
++static void dump_indexed_reg(struct xgi_info * info, unsigned regbase)
++{
++      unsigned i, j;
++      u8 temp;
++
++
++      dump_reg_header(regbase);
++      for (i = 0; i < 0x10; i++) {
++              printk("%1x ", i);
++
++              for (j = 0; j < 0x10; j++) {
++                      DRM_WRITE8(info->mmio_map, regbase - 1,
++                                 (i * 0x10) + j);
++                      temp = DRM_READ8(info->mmio_map, regbase);
++                      printk("%3x", temp);
++              }
++              printk("\n");
++      }
++}
++
++
++static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
++{
++      unsigned i, j;
++
++
++      dump_reg_header(regbase);
++      for (i = 0; i < range; i++) {
++              printk("%1x ", i);
++
++              for (j = 0; j < 0x10; j++) {
++                      u8 temp = DRM_READ8(info->mmio_map,
++                                          regbase + (i * 0x10) + j);
++                      printk("%3x", temp);
++              }
++              printk("\n");
++      }
++}
++
++
++void xgi_dump_register(struct xgi_info * info)
++{
++      dump_indexed_reg(info, 0x3c5);
++      dump_indexed_reg(info, 0x3d5);
++      dump_indexed_reg(info, 0x3cf);
++
++      dump_reg(info, 0xB000, 0x05);
++      dump_reg(info, 0x2200, 0x0B);
++      dump_reg(info, 0x2300, 0x07);
++      dump_reg(info, 0x2400, 0x10);
++      dump_reg(info, 0x2800, 0x10);
++}
++
++
++#define WHOLD_GE_STATUS             0x2800
++
++/* Test everything except the "whole GE busy" bit, the "master engine busy"
++ * bit, and the reserved bits [26:21].
++ */
++#define IDLE_MASK                   ~((1U<<31) | (1U<<28) | (0x3f<<21))
++
++void xgi_waitfor_pci_idle(struct xgi_info * info)
++{
++      unsigned int idleCount = 0;
++      u32 old_status = 0;
++      unsigned int same_count = 0;
++
++      while (idleCount < 5) {
++              const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
++                      & IDLE_MASK;
++
++              if (status == old_status) {
++                      same_count++;
++
++                      if ((same_count % 100) == 0) {
++                              DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n",
++                                        old_status, same_count);
++                      }
++              } else {
++                      old_status = status;
++                      same_count = 0;
++              }
++
++              if (status != 0) {
++                      msleep(1);
++                      idleCount = 0;
++              } else {
++                      idleCount++;
++              }
++      }
++}
++
++
++void xgi_enable_mmio(struct xgi_info * info)
++{
++      u8 protect = 0;
++      u8 temp;
++
++      /* Unprotect registers */
++      DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
++      protect = DRM_READ8(info->mmio_map, 0x3C5);
++      DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
++
++      DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
++      temp = DRM_READ8(info->mmio_map, 0x3D5);
++      DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);
++
++      /* Enable MMIO */
++      DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
++      temp = DRM_READ8(info->mmio_map, 0x3D5);
++      DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);
++
++      /* Protect registers */
++      OUT3C5B(info->mmio_map, 0x11, protect);
++}
++
++
++void xgi_disable_mmio(struct xgi_info * info)
++{
++      u8 protect = 0;
++      u8 temp;
++
++      /* Unprotect registers */
++      DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
++      protect = DRM_READ8(info->mmio_map, 0x3C5);
++      DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
++
++      /* Disable MMIO access */
++      DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
++      temp = DRM_READ8(info->mmio_map, 0x3D5);
++      DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);
++
++      /* Protect registers */
++      OUT3C5B(info->mmio_map, 0x11, protect);
++}
++
++
++void xgi_enable_ge(struct xgi_info * info)
++{
++      u8 bOld3cf2a;
++      int wait = 0;
++
++      OUT3C5B(info->mmio_map, 0x11, 0x92);
++
++      /* Save and close dynamic gating
++       */
++      bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL);
++      OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM);
++
++      /* Enable 2D and 3D GE
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Reset both 3D and 2D engine
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL,
++              (GE_ENABLE | GE_RESET | GE_ENABLE_3D));
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Enable 2D engine only
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE);
++
++      /* Enable 2D+3D engine
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++
++      /* Restore dynamic gating
++       */
++      OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a);
++}
++
++
++void xgi_disable_ge(struct xgi_info * info)
++{
++      int wait = 0;
++
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Reset both 3D and 2D engine
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL,
++              (GE_ENABLE | GE_RESET | GE_ENABLE_3D));
++
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Disable 2D engine and 3D engine.
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_misc.h git-nokia/drivers/gpu/drm-tungsten/xgi_misc.h
+--- git/drivers/gpu/drm-tungsten/xgi_misc.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_misc.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,37 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_MISC_H_
++#define _XGI_MISC_H_
++
++extern void xgi_dump_register(struct xgi_info * info);
++
++extern bool xgi_ge_irq_handler(struct xgi_info * info);
++extern bool xgi_crt_irq_handler(struct xgi_info * info);
++extern bool xgi_dvi_irq_handler(struct xgi_info * info);
++extern void xgi_waitfor_pci_idle(struct xgi_info * info);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_pcie.c git-nokia/drivers/gpu/drm-tungsten/xgi_pcie.c
+--- git/drivers/gpu/drm-tungsten/xgi_pcie.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_pcie.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,127 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++
++void xgi_gart_flush(struct drm_device *dev)
++{
++      struct xgi_info *const info = dev->dev_private;
++      u8 temp;
++
++      DRM_MEMORYBARRIER();
++
++      /* Set GART in SFB */
++      temp = DRM_READ8(info->mmio_map, 0xB00C);
++      DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
++
++      /* Set GART base address to HW */
++      DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr);
++
++      /* Flush GART table. */
++      DRM_WRITE8(info->mmio_map, 0xB03F, 0x40);
++      DRM_WRITE8(info->mmio_map, 0xB03F, 0x00);
++}
++
++
++int xgi_pcie_heap_init(struct xgi_info * info)
++{
++      u8 temp = 0;
++      int err;
++      struct drm_scatter_gather request;
++
++      /* Get current FB aperture size */
++      temp = IN3X5B(info->mmio_map, 0x27);
++      DRM_INFO("In3x5(0x27): 0x%x \n", temp);
++
++      if (temp & 0x01) {      /* 256MB; Jong 06/05/2006; 0x10000000 */
++              info->pcie.base = 256 * 1024 * 1024;
++      } else {                /* 128MB; Jong 06/05/2006; 0x08000000 */
++              info->pcie.base = 128 * 1024 * 1024;
++      }
++
++
++      DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);
++
++      /* Get current lookup table page size */
++      temp = DRM_READ8(info->mmio_map, 0xB00C);
++      if (temp & 0x04) {      /* 8KB */
++              info->lutPageSize = 8 * 1024;
++      } else {                /* 4KB */
++              info->lutPageSize = 4 * 1024;
++      }
++
++      DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);
++
++
++      request.size = info->pcie.size;
++      err = drm_sg_alloc(info->dev, & request);
++      if (err) {
++              DRM_ERROR("cannot allocate PCIE GART backing store!  "
++                        "size = %d\n", info->pcie.size);
++              return err;
++      }
++
++      info->gart_info.table_mask = DMA_BIT_MASK(32);
++      info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
++      info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++      info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);
++
++      if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) {
++              DRM_ERROR("failed to init PCI GART!\n");
++              return -ENOMEM;
++      }
++
++
++      xgi_gart_flush(info->dev);
++
++      mutex_lock(&info->dev->struct_mutex);
++      err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL,
++                               0, info->pcie.size);
++      mutex_unlock(&info->dev->struct_mutex);
++      if (err) {
++              drm_ati_pcigart_cleanup(info->dev, &info->gart_info);
++      }
++
++      info->pcie_heap_initialized = (err == 0);
++      return err;
++}
++
++
++/**
++ * xgi_find_pcie_virt
++ * @address: GE HW address
++ *
++ * Returns the CPU virtual address.  Assumes the CPU virtual mapping of the
++ * GART backing store is contiguous, even across different allocation blocks
++ */
++void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
++{
++      const unsigned long offset = address - info->pcie.base;
++
++      return ((u8 *) info->dev->sg->virtual) + offset;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_regs.h git-nokia/drivers/gpu/drm-tungsten/xgi_regs.h
+--- git/drivers/gpu/drm-tungsten/xgi_regs.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_regs.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,169 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_REGS_H_
++#define _XGI_REGS_H_
++
++#include "drmP.h"
++#include "drm.h"
++
++#define MAKE_MASK(bits)  ((1U << (bits)) - 1)
++
++#define ONE_BIT_MASK        MAKE_MASK(1)
++#define TWENTY_BIT_MASK     MAKE_MASK(20)
++#define TWENTYONE_BIT_MASK  MAKE_MASK(21)
++#define TWENTYTWO_BIT_MASK  MAKE_MASK(22)
++
++
++/* Port 0x3d4/0x3d5, index 0x2a */
++#define XGI_INTERFACE_SEL 0x2a
++#define DUAL_64BIT        (1U<<7)
++#define INTERNAL_32BIT    (1U<<6)
++#define EN_SEP_WR         (1U<<5)
++#define POWER_DOWN_SEL    (1U<<4)
++/*#define RESERVED_3      (1U<<3) */
++#define SUBS_MCLK_PCICLK  (1U<<2)
++#define MEM_SIZE_MASK     (3<<0)
++#define MEM_SIZE_32MB     (0<<0)
++#define MEM_SIZE_64MB     (1<<0)
++#define MEM_SIZE_128MB    (2<<0)
++#define MEM_SIZE_256MB    (3<<0)
++
++/* Port 0x3d4/0x3d5, index 0x36 */
++#define XGI_GE_CNTL 0x36
++#define GE_ENABLE        (1U<<7)
++/*#define RESERVED_6     (1U<<6) */
++/*#define RESERVED_5     (1U<<5) */
++#define GE_RESET         (1U<<4)
++/*#define RESERVED_3     (1U<<3) */
++#define GE_ENABLE_3D     (1U<<2)
++/*#define RESERVED_1     (1U<<1) */
++/*#define RESERVED_0     (1U<<0) */
++
++/* Port 0x3ce/0x3cf, index 0x2a */
++#define XGI_MISC_CTRL 0x2a
++#define MOTION_VID_SUSPEND   (1U<<7)
++#define DVI_CRTC_TIMING_SEL  (1U<<6)
++#define LCD_SEL_CTL_NEW      (1U<<5)
++#define LCD_SEL_EXT_DELYCTRL (1U<<4)
++#define REG_LCDDPARST        (1U<<3)
++#define LCD2DPAOFF           (1U<<2)
++/*#define RESERVED_1         (1U<<1) */
++#define EN_GEPWM             (1U<<0)  /* Enable GE power management */
++
++
++#define BASE_3D_ENG 0x2800
++
++#define M2REG_FLUSH_ENGINE_ADDRESS 0x000
++#define M2REG_FLUSH_ENGINE_COMMAND 0x00
++#define M2REG_FLUSH_FLIP_ENGINE_MASK              (ONE_BIT_MASK<<21)
++#define M2REG_FLUSH_2D_ENGINE_MASK                (ONE_BIT_MASK<<20)
++#define M2REG_FLUSH_3D_ENGINE_MASK                TWENTY_BIT_MASK
++
++#define M2REG_RESET_ADDRESS 0x004
++#define M2REG_RESET_COMMAND 0x01
++#define M2REG_RESET_STATUS2_MASK                  (ONE_BIT_MASK<<10)
++#define M2REG_RESET_STATUS1_MASK                  (ONE_BIT_MASK<<9)
++#define M2REG_RESET_STATUS0_MASK                  (ONE_BIT_MASK<<8)
++#define M2REG_RESET_3DENG_MASK                    (ONE_BIT_MASK<<4)
++#define M2REG_RESET_2DENG_MASK                    (ONE_BIT_MASK<<2)
++
++/* Write register */
++#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010
++#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04
++#define M2REG_CLEAR_TIMER_INTERRUPT_MASK          (ONE_BIT_MASK<<11)
++#define M2REG_CLEAR_INTERRUPT_3_MASK              (ONE_BIT_MASK<<10)
++#define M2REG_CLEAR_INTERRUPT_2_MASK              (ONE_BIT_MASK<<9)
++#define M2REG_CLEAR_INTERRUPT_0_MASK              (ONE_BIT_MASK<<8)
++#define M2REG_CLEAR_COUNTERS_MASK                 (ONE_BIT_MASK<<4)
++#define M2REG_PCI_TRIGGER_MODE_MASK               (ONE_BIT_MASK<<1)
++#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK    (ONE_BIT_MASK<<0)
++
++/* Read register */
++#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010
++#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04
++#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK          (ONE_BIT_MASK<<11)
++#define M2REG_ACTIVE_INTERRUPT_3_MASK              (ONE_BIT_MASK<<10)
++#define M2REG_ACTIVE_INTERRUPT_2_MASK              (ONE_BIT_MASK<<9)
++#define M2REG_ACTIVE_INTERRUPT_0_MASK              (ONE_BIT_MASK<<8)
++#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK    (ONE_BIT_MASK<<0)
++
++#define     M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014
++#define     M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05
++
++
++/**
++ * Begin instruction, double-word 0
++ */
++#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK   (ONE_BIT_MASK<<22)
++#define BEGIN_VALID_MASK                        (ONE_BIT_MASK<<20)
++#define BEGIN_BEGIN_IDENTIFICATION_MASK         TWENTY_BIT_MASK
++
++/**
++ * Begin instruction, double-word 1
++ */
++#define BEGIN_LINK_ENABLE_MASK                  (ONE_BIT_MASK<<31)
++#define BEGIN_COMMAND_LIST_LENGTH_MASK          TWENTYTWO_BIT_MASK
++
++
++/* Hardware access functions */
++static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
++{
++      DRM_WRITE8(map, 0x3C4, index);
++      DRM_WRITE8(map, 0x3C5, data);
++}
++
++static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
++{
++      DRM_WRITE8(map, 0x3D4, index);
++      DRM_WRITE8(map, 0x3D5, data);
++}
++
++static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
++{
++      DRM_WRITE8(map, 0x3CE, index);
++      DRM_WRITE8(map, 0x3CF, data);
++}
++
++static inline u8 IN3C5B(struct drm_map * map, u8 index)
++{
++      DRM_WRITE8(map, 0x3C4, index);
++      return DRM_READ8(map, 0x3C5);
++}
++
++static inline u8 IN3X5B(struct drm_map * map, u8 index)
++{
++      DRM_WRITE8(map, 0x3D4, index);
++      return DRM_READ8(map, 0x3D5);
++}
++
++static inline u8 IN3CFB(struct drm_map * map, u8 index)
++{
++      DRM_WRITE8(map, 0x3CE, index);
++      return DRM_READ8(map, 0x3CF);
++}
++
++#endif
+diff -Nurd git/drivers/gpu/Kconfig git-nokia/drivers/gpu/Kconfig
+--- git/drivers/gpu/Kconfig    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/Kconfig      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,20 @@
++source drivers/gpu/pvr/Kconfig
++
++choice DRM_VERSION
++      prompt "Direct Rendering Manager"
++      optional
++
++menuconfig DRM_VER_ORIG
++      bool "Original version"
++      select DRM
++
++menuconfig DRM_VER_TUNGSTEN
++      bool "Tungsten version"
++      select DRM_TUNGSTEN
++
++endchoice
++
++source drivers/gpu/drm/Kconfig
++
++source drivers/gpu/drm-tungsten/Kconfig
++
+diff -Nurd git/drivers/gpu/pvr/include4/dbgdrvif.h git-nokia/drivers/gpu/pvr/include4/dbgdrvif.h
+--- git/drivers/gpu/pvr/include4/dbgdrvif.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/dbgdrvif.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,259 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED                  0x00000001
++#define DEBUG_CAPMODE_CONTINUOUS              0x00000002
++#define DEBUG_CAPMODE_HOTKEY                  0x00000004
++
++#define DEBUG_OUTMODE_STANDARDDBG             0x00000001
++#define DEBUG_OUTMODE_MONO                            0x00000002
++#define DEBUG_OUTMODE_STREAMENABLE            0x00000004
++#define DEBUG_OUTMODE_ASYNC                           0x00000008
++#define DEBUG_OUTMODE_SGXVGA            0x00000010
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM  0x00000001
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002
++#define DEBUG_FLAGS_ENABLESAMPLE              0x00000004
++
++#define DEBUG_FLAGS_TEXTSTREAM                        0x80000000
++
++#define DEBUG_LEVEL_0                                 0x00000001
++#define DEBUG_LEVEL_1                                 0x00000003
++#define DEBUG_LEVEL_2                                 0x00000007
++#define DEBUG_LEVEL_3                                 0x0000000F
++#define DEBUG_LEVEL_4                                 0x0000001F
++#define DEBUG_LEVEL_5                                 0x0000003F
++#define DEBUG_LEVEL_6                                 0x0000007F
++#define DEBUG_LEVEL_7                                 0x000000FF
++#define DEBUG_LEVEL_8                                 0x000001FF
++#define DEBUG_LEVEL_9                                 0x000003FF
++#define DEBUG_LEVEL_10                                        0x000007FF
++#define DEBUG_LEVEL_11                                        0x00000FFF
++
++#define DEBUG_LEVEL_SEL0                              0x00000001
++#define DEBUG_LEVEL_SEL1                              0x00000002
++#define DEBUG_LEVEL_SEL2                              0x00000004
++#define DEBUG_LEVEL_SEL3                              0x00000008
++#define DEBUG_LEVEL_SEL4                              0x00000010
++#define DEBUG_LEVEL_SEL5                              0x00000020
++#define DEBUG_LEVEL_SEL6                              0x00000040
++#define DEBUG_LEVEL_SEL7                              0x00000080
++#define DEBUG_LEVEL_SEL8                              0x00000100
++#define DEBUG_LEVEL_SEL9                              0x00000200
++#define DEBUG_LEVEL_SEL10                             0x00000400
++#define DEBUG_LEVEL_SEL11                             0x00000800
++
++#define DEBUG_SERVICE_IOCTL_BASE              0x800
++#define DEBUG_SERVICE_CREATESTREAM            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM                       CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING             CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING              CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE                           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ                            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME                        CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME                        CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE             CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2                  CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM                 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER                       CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER                       CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME  CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF                 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF                  CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++
++typedef struct _DBG_IN_CREATESTREAM_
++{
++      IMG_UINT32 ui32Pages;
++      IMG_UINT32 ui32CapMode;
++      IMG_UINT32 ui32OutMode;
++      IMG_CHAR *pszName;
++}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
++
++typedef struct _DBG_IN_FINDSTREAM_
++{
++      IMG_BOOL bResetStream;
++      IMG_CHAR *pszName;
++}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
++
++typedef struct _DBG_IN_WRITESTRING_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++      IMG_CHAR *pszString;
++}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
++
++typedef struct _DBG_IN_READSTRING_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32StringLen;
++      IMG_CHAR *pszString;
++} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
++
++typedef struct _DBG_IN_SETDEBUGMODE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Mode;
++      IMG_UINT32 ui32Start;
++      IMG_UINT32 ui32End;
++      IMG_UINT32 ui32SampleRate;
++} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
++
++typedef struct _DBG_IN_SETDEBUGOUTMODE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Mode;
++} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
++
++typedef struct _DBG_IN_SETDEBUGLEVEL_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
++
++typedef struct _DBG_IN_SETFRAME_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Frame;
++} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
++
++typedef struct _DBG_IN_WRITE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++      IMG_UINT32 ui32TransferSize;
++      IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE, *PDBG_IN_WRITE;
++
++typedef struct _DBG_IN_READ_
++{
++      IMG_VOID *pvStream;
++      IMG_BOOL bReadInitBuffer;
++      IMG_UINT32 ui32OutBufferSize;
++      IMG_UINT8 *pui8OutBuffer;
++} DBG_IN_READ, *PDBG_IN_READ;
++
++typedef struct _DBG_IN_OVERRIDEMODE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Mode;
++} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
++
++typedef struct _DBG_IN_ISCAPTUREFRAME_
++{
++      IMG_VOID *pvStream;
++      IMG_BOOL bCheckPreviousFrame;
++} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
++
++typedef struct _DBG_IN_SETMARKER_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Marker;
++} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
++
++typedef struct _DBG_IN_WRITE_LF_
++{
++      IMG_UINT32 ui32Flags;
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++      IMG_UINT32 ui32BufferSize;
++      IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
++
++#define WRITELF_FLAGS_RESETBUF                0x00000001
++
++typedef struct _DBG_STREAM_
++{
++      struct _DBG_STREAM_ *psNext;
++      struct _DBG_STREAM_ *psInitStream;
++      IMG_BOOL   bInitPhaseComplete;
++      IMG_UINT32 ui32Flags;
++      IMG_UINT32 ui32Base;
++      IMG_UINT32 ui32Size;
++      IMG_UINT32 ui32RPtr;
++      IMG_UINT32 ui32WPtr;
++      IMG_UINT32 ui32DataWritten;
++      IMG_UINT32 ui32CapMode;
++      IMG_UINT32 ui32OutMode;
++      IMG_UINT32 ui32DebugLevel;
++      IMG_UINT32 ui32DefaultMode;
++      IMG_UINT32 ui32Start;
++      IMG_UINT32 ui32End;
++      IMG_UINT32 ui32Current;
++      IMG_UINT32 ui32Access;
++      IMG_UINT32 ui32SampleRate;
++      IMG_UINT32 ui32Reserved;
++      IMG_UINT32 ui32Timeout;
++      IMG_UINT32 ui32Marker;
++      IMG_CHAR szName[30];            
++} DBG_STREAM,*PDBG_STREAM;
++
++typedef struct _DBGKM_SERVICE_TABLE_
++{
++      IMG_UINT32 ui32Size;
++      IMG_VOID *      (IMG_CALLCONV *pfnCreateStream)                 (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages);
++      IMG_VOID        (IMG_CALLCONV *pfnDestroyStream)                (PDBG_STREAM psStream);
++      IMG_VOID *      (IMG_CALLCONV *pfnFindStream)                   (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteString)                  (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnReadString)                   (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteBIN)                             (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnReadBIN)                              (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
++      IMG_VOID        (IMG_CALLCONV *pfnSetCaptureMode)               (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
++      IMG_VOID        (IMG_CALLCONV *pfnSetOutputMode)                (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
++      IMG_VOID        (IMG_CALLCONV *pfnSetDebugLevel)                (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
++      IMG_VOID        (IMG_CALLCONV *pfnSetFrame)                             (PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
++      IMG_UINT32      (IMG_CALLCONV *pfnGetFrame)                             (PDBG_STREAM psStream);
++      IMG_VOID        (IMG_CALLCONV *pfnOverrideMode)                 (PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
++      IMG_VOID        (IMG_CALLCONV *pfnDefaultMode)                  (PDBG_STREAM psStream);
++      IMG_UINT32      (IMG_CALLCONV *pfnDBGDrivWrite2)                (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteStringCM)                (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteBINCM)                   (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++      IMG_VOID        (IMG_CALLCONV *pfnSetMarker)                    (PDBG_STREAM psStream,IMG_UINT32 ui32Marker);
++      IMG_UINT32      (IMG_CALLCONV *pfnGetMarker)                    (PDBG_STREAM psStream);
++      IMG_VOID        (IMG_CALLCONV *pfnEndInitPhase)                 (PDBG_STREAM psStream);
++      IMG_UINT32      (IMG_CALLCONV *pfnIsCaptureFrame)               (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteLF)                              (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++      IMG_UINT32      (IMG_CALLCONV *pfnReadLF)                               (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++      IMG_UINT32      (IMG_CALLCONV *pfnGetStreamOffset)              (PDBG_STREAM psStream);
++      IMG_VOID        (IMG_CALLCONV *pfnSetStreamOffset)              (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++      IMG_UINT32      (IMG_CALLCONV *pfnIsLastCaptureFrame)   (PDBG_STREAM psStream);
++} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
++
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/include4/img_defs.h git-nokia/drivers/gpu/pvr/include4/img_defs.h
+--- git/drivers/gpu/pvr/include4/img_defs.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/img_defs.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,100 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__IMG_DEFS_H__)
++#define __IMG_DEFS_H__
++
++#include "img_types.h"
++
++typedef               enum    img_tag_TriStateSwitch
++{
++      IMG_ON          =       0x00,
++      IMG_OFF,
++      IMG_IGNORE
++
++} img_TriStateSwitch, * img_pTriStateSwitch;
++
++#define               IMG_SUCCESS                             0
++
++
++#define               IMG_NULL                                0
++#define               IMG_NO_REG                              1
++
++#if defined (NO_INLINE_FUNCS)
++      #define INLINE
++      #define FORCE_INLINE
++#else
++#if defined (__cplusplus)
++      #define INLINE                                  inline
++      #define FORCE_INLINE                    inline
++#else
++      #define INLINE                                  __inline
++      #define FORCE_INLINE                    static __inline
++#endif
++#endif
++
++
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define       PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++#ifdef __GNUC__
++#define unref__ __attribute__ ((unused))
++#else
++#define unref__
++#endif
++
++#if defined(UNICODE)
++typedef unsigned short                TCHAR, *PTCHAR, *PTSTR;
++#else 
++typedef char                          TCHAR, *PTCHAR, *PTSTR;
++#endif        
++
++                      #if defined(__linux__)
++
++                              #define IMG_CALLCONV
++                              #define IMG_INTERNAL    __attribute__ ((visibility ("hidden")))
++                              #define IMG_EXPORT
++                              #define IMG_IMPORT
++                              #define IMG_RESTRICT    __restrict__
++
++                      #else
++                                      #error("define an OS")
++                      #endif
++
++#ifndef IMG_ABORT
++      #define IMG_ABORT()     abort()
++#endif
++
++#ifndef IMG_MALLOC
++      #define IMG_MALLOC(A)           malloc  (A)
++#endif
++
++#ifndef IMG_FREE
++      #define IMG_FREE(A)                     free    (A)
++#endif
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/include4/img_types.h git-nokia/drivers/gpu/pvr/include4/img_types.h
+--- git/drivers/gpu/pvr/include4/img_types.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/img_types.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,111 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#include "img_defs.h"
++
++typedef unsigned int  IMG_UINT,       *IMG_PUINT;
++typedef signed int            IMG_INT,        *IMG_PINT;
++
++typedef unsigned char IMG_UINT8,      *IMG_PUINT8;
++typedef unsigned char IMG_BYTE,       *IMG_PBYTE;
++typedef signed char           IMG_INT8,       *IMG_PINT8;
++typedef char                  IMG_CHAR,       *IMG_PCHAR;
++
++typedef unsigned short        IMG_UINT16,     *IMG_PUINT16;
++typedef signed short  IMG_INT16,      *IMG_PINT16;
++typedef unsigned long IMG_UINT32,     *IMG_PUINT32;
++typedef signed long           IMG_INT32,      *IMG_PINT32;
++
++      #if defined(LINUX)
++
++      #else
++
++              #error("define an OS")
++
++      #endif
++
++#if !(defined(LINUX) && defined (__KERNEL__))
++typedef float                 IMG_FLOAT,      *IMG_PFLOAT;
++typedef double                        IMG_DOUBLE, *IMG_PDOUBLE;
++#endif
++
++typedef       enum tag_img_bool
++{
++      IMG_FALSE               = 0,
++      IMG_TRUE                = 1,
++      IMG_FORCE_ALIGN = 0x7FFFFFFF
++} IMG_BOOL, *IMG_PBOOL;
++
++typedef void                  IMG_VOID,       *IMG_PVOID;
++
++typedef IMG_INT32             IMG_RESULT;
++
++typedef IMG_UINT32      IMG_UINTPTR_T;
++
++typedef IMG_PVOID       IMG_HANDLE;
++
++typedef void**                        IMG_HVOID,      * IMG_PHVOID;
++
++typedef IMG_UINT32      IMG_SIZE_T;
++
++#define IMG_NULL              0
++
++
++typedef IMG_PVOID IMG_CPU_VIRTADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_CPU_PHYADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_DEV_VIRTADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_DEV_PHYADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_SYS_PHYADDR;
++
++typedef struct _SYSTEM_ADDR_
++{
++      
++      IMG_UINT32      ui32PageCount;
++      union
++      {
++              
++
++
++              IMG_SYS_PHYADDR sContig;                
++
++              
++
++
++
++
++
++              IMG_SYS_PHYADDR asNonContig[1];
++      } u;
++} SYSTEM_ADDR;
++
++#endif        
+diff -Nurd git/drivers/gpu/pvr/include4/ioctldef.h git-nokia/drivers/gpu/pvr/include4/ioctldef.h
+--- git/drivers/gpu/pvr/include4/ioctldef.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/ioctldef.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i)     (((i) >> 2) & 0xFFF)
++
++#ifndef CTL_CODE
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP                0x00000001
++#define FILE_DEVICE_CD_ROM              0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
++#define FILE_DEVICE_CONTROLLER          0x00000004
++#define FILE_DEVICE_DATALINK            0x00000005
++#define FILE_DEVICE_DFS                 0x00000006
++#define FILE_DEVICE_DISK                0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
++#define FILE_DEVICE_FILE_SYSTEM         0x00000009
++#define FILE_DEVICE_INPORT_PORT         0x0000000a
++#define FILE_DEVICE_KEYBOARD            0x0000000b
++#define FILE_DEVICE_MAILSLOT            0x0000000c
++#define FILE_DEVICE_MIDI_IN             0x0000000d
++#define FILE_DEVICE_MIDI_OUT            0x0000000e
++#define FILE_DEVICE_MOUSE               0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER  0x00000010
++#define FILE_DEVICE_NAMED_PIPE          0x00000011
++#define FILE_DEVICE_NETWORK             0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER     0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL                0x00000015
++#define FILE_DEVICE_PARALLEL_PORT       0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD    0x00000017
++#define FILE_DEVICE_PRINTER             0x00000018
++#define FILE_DEVICE_SCANNER             0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT   0x0000001a
++#define FILE_DEVICE_SERIAL_PORT         0x0000001b
++#define FILE_DEVICE_SCREEN              0x0000001c
++#define FILE_DEVICE_SOUND               0x0000001d
++#define FILE_DEVICE_STREAMS             0x0000001e
++#define FILE_DEVICE_TAPE                0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
++#define FILE_DEVICE_TRANSPORT           0x00000021
++#define FILE_DEVICE_UNKNOWN             0x00000022
++#define FILE_DEVICE_VIDEO               0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
++#define FILE_DEVICE_WAVE_IN             0x00000025
++#define FILE_DEVICE_WAVE_OUT            0x00000026
++#define FILE_DEVICE_8042_PORT           0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
++#define FILE_DEVICE_BATTERY             0x00000029
++#define FILE_DEVICE_BUS_EXTENDER        0x0000002a
++#define FILE_DEVICE_MODEM               0x0000002b
++#define FILE_DEVICE_VDM                 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE        0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access ) (                 \
++    ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED                 0
++#define METHOD_IN_DIRECT                1
++#define METHOD_OUT_DIRECT               2
++#define METHOD_NEITHER                  3
++
++#define FILE_ANY_ACCESS                 0
++#define FILE_READ_ACCESS          ( 0x0001 )    
++#define FILE_WRITE_ACCESS         ( 0x0002 )    
++
++#endif 
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/pdumpdefs.h git-nokia/drivers/gpu/pvr/include4/pdumpdefs.h
+--- git/drivers/gpu/pvr/include4/pdumpdefs.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/pdumpdefs.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,92 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++typedef enum _PDUMP_PIXEL_FORMAT_
++{
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++      PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++      PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++      PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++      PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++      PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++      PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++      PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++      PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++      PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++      PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++      PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++      PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++      PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++      PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++      PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++      PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 =30,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 =31,
++      PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 =32,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 =33,
++      
++      PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++
++} PDUMP_PIXEL_FORMAT;
++
++typedef enum _PDUMP_MEM_FORMAT_
++{
++      PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++      PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++      PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++      PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++      
++      PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++} PDUMP_MEM_FORMAT;
++
++typedef enum _PDUMP_POLL_OPERATOR
++{
++      PDUMP_POLL_OPERATOR_EQUAL = 0,
++      PDUMP_POLL_OPERATOR_LESS = 1,
++      PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++      PDUMP_POLL_OPERATOR_GREATER = 3,
++      PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++      PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/pvr_debug.h git-nokia/drivers/gpu/pvr/include4/pvr_debug.h
+--- git/drivers/gpu/pvr/include4/pvr_debug.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/pvr_debug.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++
++#include "img_types.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN     (512)
++
++#define DBGPRIV_FATAL         0x01
++#define DBGPRIV_ERROR         0x02
++#define DBGPRIV_WARNING               0x04
++#define DBGPRIV_MESSAGE               0x08
++#define DBGPRIV_VERBOSE               0x10
++#define DBGPRIV_CALLTRACE     0x20
++#define DBGPRIV_ALLOC         0x40
++#define DBGPRIV_ALLLEVELS     (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE)
++
++
++
++#define PVR_DBG_FATAL         DBGPRIV_FATAL,__FILE__, __LINE__
++#define PVR_DBG_ERROR         DBGPRIV_ERROR,__FILE__, __LINE__
++#define PVR_DBG_WARNING               DBGPRIV_WARNING,__FILE__, __LINE__
++#define PVR_DBG_MESSAGE               DBGPRIV_MESSAGE,__FILE__, __LINE__
++#define PVR_DBG_VERBOSE               DBGPRIV_VERBOSE,__FILE__, __LINE__
++#define PVR_DBG_CALLTRACE     DBGPRIV_CALLTRACE,__FILE__, __LINE__
++#define PVR_DBG_ALLOC         DBGPRIV_ALLOC,__FILE__, __LINE__
++
++#if defined(DEBUG)
++
++      #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
++
++      #define PVR_DPF(X)              PVRSRVDebugPrintf X
++      #define PVR_TRACE(X)    PVRSRVTrace X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
++                                                                      IMG_UINT32 ui32Line);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
++                                                                      const IMG_CHAR *pszFileName,
++                                                                      IMG_UINT32 ui32Line,
++                                                                      const IMG_CHAR *pszFormat,
++                                                                      ...);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++IMG_VOID PVRSRVDebugSetLevel (IMG_UINT32 uDebugLevel);
++
++              #define PVR_DBG_BREAK
++
++#else
++
++#if defined(TIMING)
++
++      #define PVR_ASSERT(EXPR)
++      #define PVR_DPF(X)
++      #define PVR_TRACE(X)    PVRSRVTrace X
++      #define PVR_DBG_BREAK
++
++IMG_EXPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++#else
++
++      #define PVR_ASSERT(EXPR)
++      #define PVR_DPF(X)
++      #define PVR_TRACE(X)
++      #define PVR_DBG_BREAK
++
++#endif 
++#endif 
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/include4/pvrversion.h git-nokia/drivers/gpu/pvr/include4/pvrversion.h
+--- git/drivers/gpu/pvr/include4/pvrversion.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/pvrversion.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,37 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 1
++#define PVRVERSION_BRANCH 11
++#define PVRVERSION_BUILD 970
++#define PVRVERSION_STRING "1.1.11.970"
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/regpaths.h git-nokia/drivers/gpu/pvr/include4/regpaths.h
+--- git/drivers/gpu/pvr/include4/regpaths.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/regpaths.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __REGPATHS_H__
++#define __REGPATHS_H__
++
++#define POWERVR_REG_ROOT                              "Drivers\\Display\\PowerVR"
++#define POWERVR_CHIP_KEY                              "\\SGX1\\"
++
++#define POWERVR_EURASIA_KEY                           "PowerVREurasia\\"
++
++#define POWERVR_SERVICES_KEY                  "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
++
++#define PVRSRV_REGISTRY_ROOT                  POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
++
++
++#define MAX_REG_STRING_SIZE 128
++
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/include4/servicesext.h git-nokia/drivers/gpu/pvr/include4/servicesext.h
+--- git/drivers/gpu/pvr/include4/servicesext.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/servicesext.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,415 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#define PVRSRV_LOCKFLG_READONLY       (1)             
++
++typedef enum _PVRSRV_ERROR_
++{
++      PVRSRV_OK                                                               =  0,
++      PVRSRV_ERROR_GENERIC                                    =  1,
++      PVRSRV_ERROR_OUT_OF_MEMORY                              =  2,
++      PVRSRV_ERROR_TOO_MANY_BUFFERS                   =  3,
++      PVRSRV_ERROR_SYMBOL_NOT_FOUND                   =  4,
++      PVRSRV_ERROR_OUT_OF_HSPACE                              =  5,
++      PVRSRV_ERROR_INVALID_PARAMS                             =  6,
++      PVRSRV_ERROR_TILE_MAP_FAILED                    =  7,
++      PVRSRV_ERROR_INIT_FAILURE                               =  8,
++      PVRSRV_ERROR_CANT_REGISTER_CALLBACK     =  9,
++      PVRSRV_ERROR_INVALID_DEVICE                             = 10,
++      PVRSRV_ERROR_NOT_OWNER                                  = 11,
++      PVRSRV_ERROR_BAD_MAPPING                                = 12,
++      PVRSRV_ERROR_TIMEOUT                                    = 13,
++      PVRSRV_ERROR_NO_PRIMARY                                 = 14,
++      PVRSRV_ERROR_FLIP_CHAIN_EXISTS                  = 15,
++      PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA     = 16,
++      PVRSRV_ERROR_SCENE_INVALID                              = 17,
++      PVRSRV_ERROR_STREAM_ERROR                               = 18,
++      PVRSRV_ERROR_INVALID_INTERRUPT          = 19,
++      PVRSRV_ERROR_FAILED_DEPENDENCIES                = 20,
++      PVRSRV_ERROR_CMD_NOT_PROCESSED                  = 21,
++      PVRSRV_ERROR_CMD_TOO_BIG                                = 22,
++      PVRSRV_ERROR_DEVICE_REGISTER_FAILED     = 23,
++      PVRSRV_ERROR_FIFO_SPACE                                 = 24,
++      PVRSRV_ERROR_TA_RECOVERY                                = 25,
++      PVRSRV_ERROR_INDOSORLOWPOWER                    = 26,
++      PVRSRV_ERROR_TOOMANYBUFFERS                             = 27,
++      PVRSRV_ERROR_NOT_SUPPORTED                              = 28,
++      PVRSRV_ERROR_PROCESSING_BLOCKED                 = 29,
++
++
++      PVRSRV_ERROR_CANNOT_FLUSH_QUEUE                 = 31,
++      PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE             = 32,
++      PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS   = 33,
++      PVRSRV_ERROR_RETRY                                              = 34,
++
++      PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_ERROR;
++
++
++typedef enum _PVRSRV_DEVICE_CLASS_
++{
++      PVRSRV_DEVICE_CLASS_3D                          = 0 ,
++      PVRSRV_DEVICE_CLASS_DISPLAY                     = 1 ,
++      PVRSRV_DEVICE_CLASS_BUFFER                      = 2 ,
++      PVRSRV_DEVICE_CLASS_VIDEO                       = 3 ,
++
++      PVRSRV_DEVICE_CLASS_FORCE_I32           = 0x7fffffff
++
++} PVRSRV_DEVICE_CLASS;
++
++
++ 
++typedef enum _PVRSRV_POWER_STATE_
++{
++      PVRSRV_POWER_Unspecified                        = -1,   
++      PVRSRV_POWER_STATE_D0                           = 0,    
++      PVRSRV_POWER_STATE_D1                           = 1,    
++      PVRSRV_POWER_STATE_D2                           = 2,    
++      PVRSRV_POWER_STATE_D3                           = 3,    
++      PVRSRV_POWER_STATE_D4                           = 4,    
++
++      PVRSRV_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVR_POWER_STATE, *PPVR_POWER_STATE;
++
++
++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE, PVR_POWER_STATE, PVR_POWER_STATE);
++typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE, PVR_POWER_STATE, PVR_POWER_STATE);
++
++
++typedef enum _PVRSRV_PIXEL_FORMAT_ {
++      PVRSRV_PIXEL_FORMAT_UNKNOWN                     =  0,
++      PVRSRV_PIXEL_FORMAT_RGB565                      =  1,
++      PVRSRV_PIXEL_FORMAT_RGB555                      =  2,
++      PVRSRV_PIXEL_FORMAT_RGB888                      =  3,
++      PVRSRV_PIXEL_FORMAT_BGR888                      =  4,
++      PVRSRV_PIXEL_FORMAT_YUV420                      =  5,
++      PVRSRV_PIXEL_FORMAT_YUV444                      =  6,
++      PVRSRV_PIXEL_FORMAT_VUY444                      =  7,
++      PVRSRV_PIXEL_FORMAT_GREY_SCALE          =  8,
++      PVRSRV_PIXEL_FORMAT_YUYV                        =  9,
++      PVRSRV_PIXEL_FORMAT_YVYU                        = 10,
++      PVRSRV_PIXEL_FORMAT_UYVY                        = 11, 
++      PVRSRV_PIXEL_FORMAT_VYUY                        = 12,
++      PVRSRV_PIXEL_FORMAT_PAL12                       = 13,
++      PVRSRV_PIXEL_FORMAT_PAL8                        = 14,
++      PVRSRV_PIXEL_FORMAT_PAL4                        = 15,
++      PVRSRV_PIXEL_FORMAT_PAL2                        = 16,
++      PVRSRV_PIXEL_FORMAT_PAL1                        = 17,
++      PVRSRV_PIXEL_FORMAT_ARGB1555            = 18,
++      PVRSRV_PIXEL_FORMAT_ARGB4444            = 19, 
++      PVRSRV_PIXEL_FORMAT_ARGB8888            = 20,
++      PVRSRV_PIXEL_FORMAT_ABGR8888            = 21,
++      PVRSRV_PIXEL_FORMAT_YV12                        = 22,
++      PVRSRV_PIXEL_FORMAT_I420                        = 23,
++      PVRSRV_PIXEL_FORMAT_DXT1                        = 24,
++    PVRSRV_PIXEL_FORMAT_IMC2            = 25,
++
++      PVRSRV_PIXEL_FORMAT_G16R16,
++      PVRSRV_PIXEL_FORMAT_G16R16F,
++      PVRSRV_PIXEL_FORMAT_ARGB8332,
++      PVRSRV_PIXEL_FORMAT_A2RGB10,
++      PVRSRV_PIXEL_FORMAT_A2BGR10,
++      PVRSRV_PIXEL_FORMAT_ABGR16,
++      PVRSRV_PIXEL_FORMAT_ABGR16F,
++      PVRSRV_PIXEL_FORMAT_ABGR32F,
++      PVRSRV_PIXEL_FORMAT_R32F,
++      PVRSRV_PIXEL_FORMAT_A8,
++      PVRSRV_PIXEL_FORMAT_L8,
++      PVRSRV_PIXEL_FORMAT_A8L8,
++      PVRSRV_PIXEL_FORMAT_L16,
++      PVRSRV_PIXEL_FORMAT_R16F,
++      PVRSRV_PIXEL_FORMAT_L6V5U5,
++      PVRSRV_PIXEL_FORMAT_V8U8,
++      PVRSRV_PIXEL_FORMAT_V16U16,
++      PVRSRV_PIXEL_FORMAT_QWVU8888,
++      PVRSRV_PIXEL_FORMAT_D16,
++      PVRSRV_PIXEL_FORMAT_D24S8,
++      PVRSRV_PIXEL_FORMAT_D24X8,
++      PVRSRV_PIXEL_FORMAT_D32F,
++      PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++      PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++      PVRSRV_PIXEL_FORMAT_YUY2,
++      PVRSRV_PIXEL_FORMAT_DXT23,
++      PVRSRV_PIXEL_FORMAT_DXT45,      
++      PVRSRV_PIXEL_FORMAT_G32R32F,    
++
++      PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++} PVRSRV_PIXEL_FORMAT;
++
++typedef enum _PVRSRV_ALPHA_FORMAT_ {
++      PVRSRV_ALPHA_FORMAT_UNKNOWN             =  0x00000000,
++      PVRSRV_ALPHA_FORMAT_PRE                 =  0x00000001,
++      PVRSRV_ALPHA_FORMAT_NONPRE              =  0x00000002,
++      PVRSRV_ALPHA_FORMAT_MASK                =  0x0000000F,
++} PVRSRV_ALPHA_FORMAT;
++
++typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
++      PVRSRV_COLOURSPACE_FORMAT_UNKNOWN               =  0x00000000,
++      PVRSRV_COLOURSPACE_FORMAT_LINEAR                =  0x00010000,
++      PVRSRV_COLOURSPACE_FORMAT_NONLINEAR             =  0x00020000,
++      PVRSRV_COLOURSPACE_FORMAT_MASK                  =  0x000F0000,
++} PVRSRV_COLOURSPACE_FORMAT;
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED                (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY         (1<<1)
++
++typedef struct _PVRSRV_SYNC_DATA_
++{
++      
++      IMG_UINT32                                      ui32WriteOpsPending;
++      volatile IMG_UINT32                     ui32WriteOpsComplete;
++
++      
++      IMG_UINT32                                      ui32ReadOpsPending;
++      volatile IMG_UINT32                     ui32ReadOpsComplete;
++      
++      
++      IMG_UINT32                                      ui32LastOpDumpVal;
++      IMG_UINT32                                      ui32LastReadOpDumpVal;
++
++} PVRSRV_SYNC_DATA;
++
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++      
++      PVRSRV_SYNC_DATA                        *psSyncData;
++
++      
++
++
++      
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++
++      
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      IMG_HANDLE                                      hMappingInfo;
++
++      
++      IMG_HANDLE                                      hKernelSyncInfo;
++      
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++
++
++typedef struct PVRSRV_RESOURCE_TAG 
++{
++      volatile IMG_UINT32 ui32Lock;
++      IMG_UINT32                      ui32ID;
++}PVRSRV_RESOURCE;
++typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
++
++
++typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE);
++typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE);
++
++typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); 
++typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); 
++
++
++typedef struct _IMG_RECT_
++{
++      IMG_INT32       x0;
++      IMG_INT32       y0;     
++      IMG_INT32       x1;     
++      IMG_INT32       y1;     
++}IMG_RECT;
++
++typedef struct _IMG_RECT_16_
++{
++      IMG_INT16       x0;
++      IMG_INT16       y0;     
++      IMG_INT16       x1;     
++      IMG_INT16       y1;     
++}IMG_RECT_16;
++
++
++typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE, 
++                                                                                      IMG_HANDLE, 
++                                                                                      IMG_SYS_PHYADDR**, 
++                                                                                      IMG_UINT32*, 
++                                                                                      IMG_VOID**, 
++                                                                                      IMG_HANDLE*, 
++                                                                                      IMG_BOOL*);
++
++
++typedef struct DISPLAY_DIMS_TAG
++{
++      IMG_UINT32      ui32ByteStride;
++      IMG_UINT32      ui32Width;
++      IMG_UINT32      ui32Height;
++} DISPLAY_DIMS;
++
++
++typedef struct DISPLAY_FORMAT_TAG
++{
++      
++      PVRSRV_PIXEL_FORMAT             pixelformat;
++} DISPLAY_FORMAT;
++
++typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
++{
++      
++      PVRSRV_PIXEL_FORMAT             pixelformat;
++      
++      DISPLAY_DIMS                    sDims;
++} DISPLAY_SURF_ATTRIBUTES;
++
++
++typedef struct DISPLAY_MODE_INFO_TAG
++{
++      
++      PVRSRV_PIXEL_FORMAT             pixelformat;
++      
++      DISPLAY_DIMS                    sDims;
++      
++      IMG_UINT32                              ui32RefreshHZ;
++      
++      IMG_UINT32                              ui32OEMFlags;
++} DISPLAY_MODE_INFO;
++
++
++
++#define MAX_DISPLAY_NAME_SIZE (50) 
++
++typedef struct DISPLAY_INFO_TAG
++{
++      IMG_UINT32 ui32MaxSwapChains;
++      
++      IMG_UINT32 ui32MaxSwapChainBuffers;
++
++      IMG_UINT32 ui32MinSwapInterval;
++
++      IMG_UINT32 ui32MaxSwapInterval;
++
++      IMG_CHAR        szDisplayName[MAX_DISPLAY_NAME_SIZE];
++
++#if defined(SUPPORT_HW_CURSOR)
++      IMG_UINT16      ui32CursorWidth;
++      IMG_UINT16      ui32CursorHeight;
++#endif
++      
++} DISPLAY_INFO;
++
++typedef struct ACCESS_INFO_TAG
++{
++      IMG_UINT32              ui32Size;
++      IMG_UINT32      ui32FBPhysBaseAddress;
++      IMG_UINT32              ui32FBMemAvailable;                     
++      IMG_UINT32      ui32SysPhysBaseAddress;
++      IMG_UINT32              ui32SysSize;
++      IMG_UINT32              ui32DevIRQ;
++}ACCESS_INFO; 
++
++
++typedef struct PVRSRV_CURSOR_SHAPE_TAG
++{
++      IMG_UINT16                      ui16Width;
++      IMG_UINT16                      ui16Height;
++      IMG_INT16                       i16XHot;
++      IMG_INT16                       i16YHot;
++      
++      
++      IMG_VOID*               pvMask;
++      IMG_INT16                       i16MaskByteStride;
++      
++      
++      IMG_VOID*                       pvColour;
++      IMG_INT16                       i16ColourByteStride;
++      PVRSRV_PIXEL_FORMAT     eColourPixelFormat; 
++} PVRSRV_CURSOR_SHAPE;
++
++#define PVRSRV_SET_CURSOR_VISIBILITY  (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION            (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE                       (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION            (1<<3)
++
++typedef struct PVRSRV_CURSOR_INFO_TAG
++{
++      
++      IMG_UINT32 ui32Flags;
++      
++      
++      IMG_BOOL bVisible;
++      
++      
++      IMG_INT16 i16XPos;
++      IMG_INT16 i16YPos;
++      
++      
++      PVRSRV_CURSOR_SHAPE sCursorShape;
++      
++      
++      IMG_UINT32 ui32Rotation;
++ 
++} PVRSRV_CURSOR_INFO;
++
++
++typedef struct _PVRSRV_REGISTRY_INFO_
++{
++    IMG_UINT32                ui32DevCookie;
++    IMG_PCHAR         pszKey;
++    IMG_PCHAR         pszValue;
++    IMG_PCHAR         pszBuf;
++    IMG_UINT32                ui32BufSize;
++} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE       (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE             (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601                  (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709                  (1 << 1)
++
++typedef struct BUFFER_INFO_TAG
++{
++      IMG_UINT32                      ui32BufferCount;
++      IMG_UINT32                      ui32BufferDeviceID;
++      PVRSRV_PIXEL_FORMAT     pixelformat;
++      IMG_UINT32                      ui32ByteStride;
++      IMG_UINT32                      ui32Width;
++      IMG_UINT32                      ui32Height;
++      IMG_UINT32                      ui32Flags;
++} BUFFER_INFO;
++
++typedef enum _OVERLAY_DEINTERLACE_MODE_
++{
++      WEAVE=0x0,
++      BOB_ODD,
++      BOB_EVEN,
++      BOB_EVEN_NONINTERLEAVED
++} OVERLAY_DEINTERLACE_MODE;
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/include4/services.h git-nokia/drivers/gpu/pvr/include4/services.h
+--- git/drivers/gpu/pvr/include4/services.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/services.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,790 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++
++#if defined(SERVICES4)
++#define IMG_CONST const
++#else
++#define IMG_CONST
++#endif
++
++#define PVRSRV_MAX_CMD_SIZE           1024
++
++#define PVRSRV_MAX_DEVICES            16      
++
++#define PVRSRV_MEM_READ                                               (1<<0)
++#define PVRSRV_MEM_WRITE                                      (1<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT                   (1<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ                         (1<<3)
++#define PVRSRV_MEM_INTERLEAVED                                (1<<4)
++#define PVRSRV_MEM_DUMMY                                      (1<<5)
++#define PVRSRV_MEM_EDM_PROTECT                                (1<<6)
++#define PVRSRV_MEM_ZERO                     (1<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR     (1<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION      (1<<9)
++#define PVRSRV_MEM_NO_RESMAN                          (1<<10)
++
++#define PVRSRV_HAP_CACHED                                     (1<<12)
++#define PVRSRV_HAP_UNCACHED                                   (1<<13)
++#define PVRSRV_HAP_WRITECOMBINE                               (1<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK                     (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY                                (1<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS                     (1<<16)
++#define PVRSRV_HAP_MULTI_PROCESS                      (1<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS      (1<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL                     (1<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK                               (PVRSRV_HAP_KERNEL_ONLY \
++                                            |PVRSRV_HAP_SINGLE_PROCESS \
++                                            |PVRSRV_HAP_MULTI_PROCESS \
++                                            |PVRSRV_HAP_FROM_EXISTING_PROCESS \
++                                            |PVRSRV_HAP_NO_CPU_VIRTUAL)
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT   (24)
++
++#define PVRSRV_MAP_NOUSERVIRTUAL            (1<<27)
++
++#define PVRSRV_NO_CONTEXT_LOSS                                        0               
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT                 1               
++#define PVRSRV_PRE_STATE_CHANGE_MASK                  0x80    
++
++
++#define PVRSRV_DEFAULT_DEV_COOKIE                     (1)      
++
++
++#define PVRSRVRESMAN_PROCESSID_FIND                   (0xffffffff) 
++
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT                        (1<<0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT            (1<<1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT             (1<<2)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE                        20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE                 200
++
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT             0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA                 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG                  0x00000002
++
++typedef enum _PVRSRV_DEVICE_TYPE_
++{
++      PVRSRV_DEVICE_TYPE_UNKNOWN                      = 0 ,
++      PVRSRV_DEVICE_TYPE_MBX1                         = 1 ,
++      PVRSRV_DEVICE_TYPE_MBX1_LITE            = 2 ,
++
++      PVRSRV_DEVICE_TYPE_M24VA                        = 3,
++      PVRSRV_DEVICE_TYPE_MVDA2                        = 4,
++      PVRSRV_DEVICE_TYPE_MVED1                        = 5,
++      PVRSRV_DEVICE_TYPE_MSVDX                        = 6,
++
++      PVRSRV_DEVICE_TYPE_SGX                          = 7,
++
++      
++      PVRSRV_DEVICE_TYPE_EXT                          = 8,
++
++    PVRSRV_DEVICE_TYPE_LAST             = 8,
++
++      PVRSRV_DEVICE_TYPE_FORCE_I32            = 0x7fffffff
++
++} PVRSRV_DEVICE_TYPE;
++
++#define HEAP_ID( _dev_ , _dev_heap_idx_ )     (  ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1))  )
++#define HEAP_IDX( _heap_id_ )                         ( (_heap_id_)&((1<<24) - 1 ) )
++#define HEAP_DEV( _heap_id_ )                         ( (_heap_id_)>>24 )
++
++typedef enum
++{
++      IMG_EGL        = 0x00000001,
++      IMG_OPENGLES1  = 0x00000002,
++      IMG_OPENGLES2  = 0x00000003,
++      IMG_D3DM           = 0x00000004,
++      IMG_SRV_UM         = 0x00000005,
++      IMG_OPENVG         = 0x00000006
++
++} IMG_MODULE_ID;
++
++
++#define APPHINT_MAX_STRING_SIZE       256
++
++typedef enum
++{
++      IMG_STRING_TYPE         = 1,
++      IMG_FLOAT_TYPE          ,
++      IMG_UINT_TYPE           ,
++      IMG_INT_TYPE            ,
++      IMG_FLAG_TYPE
++}IMG_DATA_TYPE;
++
++
++typedef enum _PVR_POWER_CONTROL_
++{
++      PVRSRV_POWER_CONTROL_SET                        = 0,    
++      PVRSRV_POWER_CONTROL_RETRY                      = 1,    
++      PVRSRV_POWER_CONTROL_QUERY                      = 2,    
++
++      PVRSRV_POWER_CONTROL_FORCE_I32 = 0x7fffffff
++
++} PVR_POWER_CONTROL, *PPVR_POWER_CONTROL;
++
++typedef struct _PVRSRV_CONNECTION_
++{
++      IMG_HANDLE hServices;                                   
++      IMG_UINT32 ui32ProcessID;                               
++}PVRSRV_CONNECTION;
++
++
++typedef struct _PVRSRV_DEV_DATA_
++{
++      PVRSRV_CONNECTION       sConnection;            
++      IMG_HANDLE                      hDevCookie;                     
++
++} PVRSRV_DEV_DATA, *PPVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_MEMUPDATE_
++{
++      IMG_UINT32                      ui32UpdateAddr;         
++      IMG_UINT32                      ui32UpdateVal;          
++} PVRSRV_MEMUPDATE;
++
++typedef struct _PVRSRV_HWREG_
++{
++      IMG_UINT32                      ui32RegAddr;    
++      IMG_UINT32                      ui32RegVal;             
++} PVRSRV_HWREG;
++
++typedef struct _PVRSRV_MEMBLK_  
++{
++      IMG_DEV_VIRTADDR        sDevVirtAddr;                   
++    IMG_HANDLE          hOSMemHandle;           
++      IMG_HANDLE                      hBuffer;                                
++      IMG_HANDLE                      hResItem;                               
++
++} PVRSRV_MEMBLK;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
++
++typedef struct _PVRSRV_CLIENT_MEM_INFO_
++{
++      
++      IMG_PVOID                               pvLinAddr;      
++
++#if defined(SERVICES4)
++    
++      IMG_PVOID                               pvLinAddrKM;
++#endif
++      
++      
++      IMG_DEV_VIRTADDR                sDevVAddr;
++
++      
++
++
++
++
++      IMG_CPU_PHYADDR                 sCpuPAddr;
++
++      
++      IMG_UINT32                              ui32Flags;
++
++      
++
++
++      IMG_UINT32                              ui32ClientFlags;
++      
++      
++      IMG_UINT32                              ui32AllocSize;          
++                                                                                              
++
++      
++      struct _PVRSRV_CLIENT_SYNC_INFO_        *psClientSyncInfo;
++
++      
++      IMG_HANDLE                                                      hMappingInfo;
++
++      
++      IMG_HANDLE                                                      hKernelMemInfo;
++      
++      
++      IMG_HANDLE                                                      hResItem;
++      
++      
++
++
++      struct _PVRSRV_CLIENT_MEM_INFO_         *psNext;
++      
++} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
++
++
++#if 0
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++      
++      PVRSRV_SYNC_DATA                        *psSyncData;
++
++      
++
++
++      
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++
++      
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      IMG_HANDLE                                      hMappingInfo;
++
++      
++      IMG_HANDLE                                      hKernelSyncInfo;
++      
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++#endif
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++typedef struct _PVRSRV_HEAP_INFO_
++{
++      IMG_UINT32                      ui32HeapID;
++      IMG_HANDLE                      hDevMemHeap;
++      IMG_DEV_VIRTADDR        sDevVAddrBase;
++      IMG_UINT32                      ui32HeapByteSize;
++      IMG_UINT32                      ui32Attribs;
++}PVRSRV_HEAP_INFO;
++
++
++
++
++typedef struct _PVRSRV_DEVICE_IDENTIFIER_
++{
++      PVRSRV_DEVICE_TYPE              eDeviceType;            
++      PVRSRV_DEVICE_CLASS             eDeviceClass;           
++      IMG_UINT32                              ui32DeviceIndex;        
++
++} PVRSRV_DEVICE_IDENTIFIER;
++
++
++typedef struct _PVRSRV_MISC_INFO_
++{
++      IMG_UINT32      ui32StateRequest;               
++      IMG_UINT32      ui32StatePresent;               
++
++      
++      IMG_VOID        *pvSOCTimerRegisterKM;
++      IMG_VOID        *pvSOCTimerRegisterUM;
++
++      
++      IMG_VOID        *pvSOCClockGateRegs;    
++      IMG_UINT32      ui32SOCClockGateRegsSize;
++      
++      
++      IMG_CHAR        *pszMemoryStr;
++      IMG_UINT32      ui32MemoryStrLen;
++      
++      
++      
++} PVRSRV_MISC_INFO;
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION                  *psConnection,
++                                                                                                      IMG_UINT32                                      *puiNumDevices,
++                                                                                                      PVRSRV_DEVICE_IDENTIFIER        *puiDevIDs);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION         *psConnection,
++                                                                                                      IMG_UINT32                      uiDevIndex,
++                                                                                                      PVRSRV_DEV_DATA         *psDevData,
++                                                                                                      PVRSRV_DEVICE_TYPE      eDeviceType);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (PVRSRV_MISC_INFO *psMiscInfo);
++
++#if 1
++IMG_IMPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++
++IMG_IMPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++
++IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR PollForValue (volatile IMG_UINT32 *pui32LinMemAddr,
++                                                                      IMG_UINT32 ui32Value,
++                                                                      IMG_UINT32 ui32Mask,
++                                                                      IMG_UINT32 ui32Waitus,
++                                                                      IMG_UINT32 ui32Tries);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_HANDLE *phDevMemContext,
++                                                                                      IMG_UINT32 *pui32SharedHeapCount,
++                                                                                      PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_HANDLE                      hDevMemContext);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapMemInfoToUser(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                               PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                               IMG_VOID* ppvUserLinAddr,
++                                                                                               IMG_HANDLE* phUserMappingInfo);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapMemInfoFromUser(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                                       PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                                       IMG_PVOID pvUserLinAddr,
++                                                                                                       IMG_HANDLE hUserMappingInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA      *psDevData,
++                                                                      IMG_HANDLE              hDevMemHeap,
++                                                                      IMG_UINT32              ui32Attribs,
++                                                                      IMG_UINT32              ui32Size,
++                                                                      IMG_UINT32              ui32Alignment,
++                                                                      PVRSRV_CLIENT_MEM_INFO  **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA       *psDevData,
++                                                              PVRSRV_CLIENT_MEM_INFO          *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_HANDLE                      hDevMemHeap,
++                                                                                      IMG_DEV_VIRTADDR        *psDevVAddr,
++                                                                                      IMG_UINT32                      ui32Size,
++                                                                                      IMG_UINT32                      ui32Alignment,
++                                                                                      PVRSRV_CLIENT_MEM_INFO          **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                                      PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                      PVRSRV_CLIENT_MEM_INFO *psSrcMemInfo,
++                                                                      IMG_HANDLE hDstDevMemHeap,
++                                                                      PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA       *psDevData,
++                                                                      PVRSRV_CLIENT_MEM_INFO          *psMemInfo,
++                                                                      IMG_SYS_PHYADDR                         *psSysPAddr,
++                                                                      IMG_UINT32                                      ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                      PVRSRV_CLIENT_MEM_INFO          *psMemInfo,
++                                                                      IMG_UINT32                                      ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                              IMG_UINT32                              ui32ByteSize, 
++                                                                                              IMG_UINT32                              ui32PageOffset,
++                                                                                              IMG_BOOL                                bPhysContig,
++                                                                                              IMG_SYS_PHYADDR                 *psSysPAddr,
++                                                                                              PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA                     *psDevData,
++                                                                                              PVRSRV_CLIENT_MEM_INFO  *psClientMemInfo,
++                                                                                              IMG_UINT32                              ui32Attribs);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              IMG_HANDLE hDeviceClassBuffer,
++                                                                              PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                        IMG_SYS_PHYADDR sSysPhysAddr,
++                                                                        IMG_UINT32 uiSizeInBytes,
++                                                                        IMG_PVOID *ppvUserAddr,
++                                                                        IMG_UINT32 *puiActualSize,
++                                                                        IMG_PVOID *ppvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              IMG_PVOID pvUserAddr,
++                                                                              IMG_PVOID pvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPowerControl(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                       PVR_POWER_CONTROL eControlMode,
++                                                                                       PVR_POWER_STATE *pePVRPowerState);
++
++typedef enum _PVRSRV_SYNCVAL_MODE_
++{
++      PVRSRV_SYNCVAL_READ                             = IMG_TRUE,
++      PVRSRV_SYNCVAL_WRITE                    = IMG_FALSE,
++
++} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
++
++typedef IMG_UINT32 PVRSRV_SYNCVAL;
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++
++IMG_IMPORT 
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection, 
++                                                                                                      PVRSRV_DEVICE_CLASS DeviceClass, 
++                                                                                                      IMG_UINT32 *pui32DevCount,
++                                                                                                      IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION     *psConnection, IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice,
++                                                                                      IMG_UINT32              *pui32Count, 
++                                                                                      DISPLAY_FORMAT  *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice,
++                                                                              IMG_UINT32              *pui32Count, 
++                                                                              DISPLAY_FORMAT  *psFormat,
++                                                                              DISPLAY_DIMS    *psDims);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice,
++                                                                              DISPLAY_INFO* psDisplayInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE                         hDevice,
++                                                                                                      IMG_UINT32                              ui32Flags,
++                                                                                                      DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++                                                                                                      DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++                                                                                                      IMG_UINT32                              ui32BufferCount, 
++                                                                                                      IMG_UINT32                              ui32OEMFlags, 
++                                                                                                      IMG_UINT32                              *pui32SwapChainID, 
++                                                                                                      IMG_HANDLE                              *phSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice,
++                                                                                      IMG_HANDLE              hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE      hSwapChain,
++                                                                              IMG_RECT        *psDstRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE      hSwapChain,
++                                                                              IMG_RECT        *psSrcRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice,
++                                                                                      IMG_HANDLE      hSwapChain,
++                                                                                      IMG_UINT32      ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice,
++                                                                                      IMG_HANDLE      hSwapChain,
++                                                                                      IMG_UINT32      ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice,
++                                                                      IMG_HANDLE hSwapChain,
++                                                                      IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE hBuffer,
++                                                                              IMG_UINT32 ui32ClipRectCount,
++                                                                              IMG_RECT *psClipRect,
++                                                                              IMG_UINT32 ui32SwapInterval,
++                                                                              IMG_HANDLE hPrivateTag);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, 
++                                                                                              IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice,
++                                                                                              BUFFER_INFO     *psBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice,
++                                                                                              IMG_UINT32 ui32BufferIndex,
++                                                                                              IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                IMG_UINT32 ui32Offset,
++                                                                                IMG_UINT32 ui32Value,
++                                                                                IMG_UINT32 ui32Mask,
++                                                                                IMG_BOOL bLastFrame,
++                                                                                IMG_BOOL bOverwrite);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++                                                                                IMG_BOOL bIsRead,
++                                                                                IMG_UINT32 ui32Value,
++                                                                                IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                      IMG_PVOID pvAltLinAddr,
++                                                                      PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                      IMG_UINT32 ui32Offset,
++                                                                      IMG_UINT32 ui32Bytes,
++                                                                      IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                              IMG_PVOID pvAltLinAddr,
++                                                                              PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++                                                                              IMG_UINT32 ui32Offset,
++                                                                              IMG_UINT32 ui32Bytes);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_UINT32 ui32RegAddr,
++                                                                                      IMG_UINT32 ui32RegValue,
++                                                                                      IMG_UINT32 ui32Flags);
++
++#ifdef SERVICES4
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                                       IMG_UINT32 ui32RegAddr,
++                                                                                                       IMG_UINT32 ui32RegValue,
++                                                                                                       IMG_UINT32 ui32Mask,
++                                                                                                       IMG_UINT32 ui32Flags);
++#endif
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_UINT32 ui32RegAddr,
++                                                                                      IMG_UINT32 ui32RegValue,
++                                                                                      IMG_UINT32 ui32Mask);
++
++#ifdef SERVICES4
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_UINT32 ui32RegAddr,
++                                                                                      IMG_UINT32 ui32RegValue);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                              IMG_UINT32 ui32Offset,
++                                                                                              IMG_DEV_PHYADDR sPDDevPAddr);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_HANDLE                      hKernelMemInfo,
++                                                                                              IMG_DEV_PHYADDR         *pPages,
++                                                                                              IMG_UINT32                      ui32NumPages,
++                                                                                              IMG_DEV_VIRTADDR        sDevAddr,
++                                                                                              IMG_UINT32                      ui32Start,
++                                                                                              IMG_UINT32                      ui32Length,
++                                                                                              IMG_BOOL                        bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                        IMG_UINT32 ui32Frame);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                       IMG_CONST IMG_CHAR *pszComment,
++                                                                                       IMG_BOOL bContinuous);
++
++#if defined(SERVICES4)
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                        IMG_BOOL bContinuous,
++                                                                                        IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                                         IMG_UINT32 ui32Flags,
++                                                                                                         IMG_CONST IMG_CHAR *pszFormat, ...);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_CHAR *pszString,
++                                                                                              IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_BOOL *pbIsCapturing);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_CHAR *pszFileName,
++                                                                                      IMG_UINT32 ui32FileOffset,
++                                                                                      IMG_UINT32 ui32Width,
++                                                                                      IMG_UINT32 ui32Height,
++                                                                                      IMG_UINT32 ui32StrideInBytes,
++                                                                                      IMG_DEV_VIRTADDR sDevBaseAddr,
++                                                                                      IMG_UINT32 ui32Size,
++                                                                                      PDUMP_PIXEL_FORMAT ePixelFormat,
++                                                                                      PDUMP_MEM_FORMAT eMemFormat,
++                                                                                      IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_CONST IMG_CHAR *pszFileName,
++                                                                                      IMG_UINT32 ui32FileOffset,
++                                                                                      IMG_UINT32 ui32Address,
++                                                                                      IMG_UINT32 ui32Size,
++                                                                                      IMG_UINT32 ui32PDumpFlags);
++
++#ifdef SERVICES4
++IMG_IMPORT
++IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_UINT32 ui32RegOffset,
++                                                                                              IMG_BOOL bLastFrame);
++#endif
++
++IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(IMG_CHAR *pszLibraryName);
++IMG_IMPORT PVRSRV_ERROR       PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
++IMG_IMPORT PVRSRV_ERROR       PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr);
++
++IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
++IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
++IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
++IMG_IMPORT PVRSRV_ERROR PVRSRVLockResource (PVRSRV_RES_HANDLE *phResource, IMG_UINT32 ui32ID, IMG_BOOL bBlock);
++IMG_IMPORT PVRSRV_ERROR PVRSRVUnlockResource (PVRSRV_RES_HANDLE *phResource, IMG_UINT32 ui32ID);
++
++#ifdef DEBUG
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVIsResourceLocked (PVRSRV_RES_HANDLE *phResource, IMG_UINT32 ui32ID);
++#endif
++
++
++
++
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
++                                                                                                              const IMG_CHAR *pszAppName,
++                                                                                                              IMG_VOID **ppvState);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
++                                                                               IMG_VOID *pvHintState);
++
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID                    *pvHintState,
++                                                                                                const IMG_CHAR        *pszHintName,
++                                                                                                IMG_DATA_TYPE         eDataType,
++                                                                                                const IMG_VOID        *pvDefault,
++                                                                                                IMG_VOID                      *pvReturn);
++
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_UINT32 ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_UINT32 ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
++IMG_IMPORT IMG_VOID  IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
++IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(IMG_PVOID *ppvMutex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(IMG_PVOID pvMutex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVLockMutex(IMG_PVOID pvMutex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnlockMutex(IMG_PVOID pvMutex);
++
++#if (defined(DEBUG) && defined(__linux__))
++IMG_PVOID PVRSRVAllocUserModeMemTracking(IMG_UINT32 ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_PVOID PVRSRVCallocUserModeMemTracking(IMG_UINT32 ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_VOID  PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
++IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_UINT32 ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++#endif 
++
++PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION *psConnection, 
++                                                                      IMG_HANDLE hOSEvent, 
++                                                                      IMG_UINT32 ui32MSTimeout);
++
++#define TIME_NOT_PASSED_UINT32(a,b,c)         ((a - b) < c)
++
++#if defined (__cplusplus)
++}
++#endif
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/sgxapi_km.h git-nokia/drivers/gpu/pvr/include4/sgxapi_km.h
+--- git/drivers/gpu/pvr/include4/sgxapi_km.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/sgxapi_km.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,170 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "sgxdefs.h"
++#if defined(__linux__) && !defined(USE_CODE)
++      #if defined(__KERNEL__)
++              #include <asm/unistd.h>
++      #else
++              #include <unistd.h>
++      #endif
++#endif
++
++#define SGX_GENERAL_HEAP_ID                                   0
++#define SGX_TADATA_HEAP_ID                                    1
++#define SGX_KERNEL_CODE_HEAP_ID                               2
++#define SGX_VIDEO_CODE_HEAP_ID                                3
++#define SGX_KERNEL_VIDEO_DATA_HEAP_ID         4
++#define SGX_PIXELSHADER_HEAP_ID                               5
++#define SGX_VERTEXSHADER_HEAP_ID                      6
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID         7
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID                8
++#define SGX_SYNCINFO_HEAP_ID                          9
++#define SGX_3DPARAMETERS_HEAP_ID                      10
++#define SGX_GENERAL_MAPPING_HEAP_ID                   11
++#define SGX_UNDEFINED_HEAP_ID                         (-1)
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      #define SGX_2D_HEAP_ID                                  12
++      #define SGX_MAX_HEAP_ID                                 13
++#else
++      #define SGX_MAX_HEAP_ID                                 12
++#endif
++
++#define SGX_MAX_TA_STATUS_VALS        32
++#define SGX_MAX_3D_STATUS_VALS        2
++
++#define PFLAGS_POWERDOWN                      0x00000001
++#define PFLAGS_POWERUP                                0x00000002
++ 
++typedef struct _SGX_SLAVE_PORT_
++{
++      IMG_PVOID                               pvData;                                 
++      IMG_UINT32                              ui32DataRange;                  
++      IMG_PUINT32                             pui32Offset;                    
++      IMG_SYS_PHYADDR                 sPhysBase;                              
++}SGX_SLAVE_PORT;
++
++typedef enum _SGX_MISC_INFO_REQUEST_
++{
++      SGX_MISC_INFO_REQUEST_FORCE_I16                                 =  0x7fff
++} SGX_MISC_INFO_REQUEST;
++
++typedef struct _SGX_MISC_INFO_
++{
++      SGX_MISC_INFO_REQUEST   eRequest;       
++
++      union
++      {
++              IMG_UINT32      reserved;       
++      } uData;
++} SGX_MISC_INFO;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_MAX_BLT_SRC_SYNCS              3
++#endif
++
++#ifdef PDUMP
++
++#define PVR3DIF4_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH            256
++
++typedef struct _PVR3DIF4_KICKTA_DUMPBITMAP_
++{
++      IMG_DEV_VIRTADDR        sDevBaseAddr;
++      IMG_UINT32                      ui32Flags;
++      IMG_UINT32                      ui32Width;
++      IMG_UINT32                      ui32Height;
++      IMG_UINT32                      ui32Stride;
++      IMG_UINT32                      ui32PDUMPFormat;
++      IMG_UINT32                      ui32BytesPP;
++      IMG_CHAR                        pszName[PVR3DIF4_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++} PVR3DIF4_KICKTA_DUMPBITMAP, *PPVR3DIF4_KICKTA_DUMPBITMAP;
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE        (16)
++
++typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
++{
++      
++      IMG_UINT32                                              ui32RenderNumForTA;
++
++      
++      IMG_UINT32                                              ui32CacheControl;
++
++} PVRSRV_SGX_PDUMP_CONTEXT;
++
++
++typedef struct _PVR3DIF4_KICKTA_DUMP_ROFF_
++{
++      IMG_HANDLE                      hKernelMemInfo;                                         
++      IMG_UINT32                      uiAllocIndex;                                           
++      IMG_UINT32                      ui32Offset;                                                     
++      IMG_UINT32                      ui32Value;                                                      
++      IMG_PCHAR                       pszName;                                                        
++} PVR3DIF4_KICKTA_DUMP_ROFF, *PPVR3DIF4_KICKTA_DUMP_ROFF;
++#endif        
++
++typedef struct _PVR3DIF4_KICKTA_DUMP_BUFFER_
++{
++      IMG_UINT32                      ui32SpaceUsed;
++      IMG_UINT32                      ui32Start;                                                      
++      IMG_UINT32                      ui32End;                                                        
++      IMG_UINT32                      ui32BufferSize;                                         
++      IMG_UINT32                      ui32BackEndLength;                                      
++      IMG_UINT32                      uiAllocIndex;
++      IMG_HANDLE                      hKernelMemInfo;
++      IMG_PCHAR                       pszName;                                                        
++} PVR3DIF4_KICKTA_DUMP_BUFFER, *PPVR3DIF4_KICKTA_DUMP_BUFFER;
++
++#ifdef PDUMP
++typedef struct _PVR3DIF4_KICKTA_PDUMP_
++{
++      
++      PPVR3DIF4_KICKTA_DUMPBITMAP             psPDumpBitmapArray;
++      IMG_UINT32                                              ui32PDumpBitmapSize;
++
++      
++      PPVR3DIF4_KICKTA_DUMP_BUFFER    psBufferArray;
++      IMG_UINT32                                              ui32BufferArraySize;
++
++      
++      PPVR3DIF4_KICKTA_DUMP_ROFF              psROffArray;
++      IMG_UINT32                                              ui32ROffArraySize;
++} PVR3DIF4_KICKTA_PDUMP, *PPVR3DIF4_KICKTA_PDUMP;
++#endif        
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/sgxscript.h git-nokia/drivers/gpu/pvr/include4/sgxscript.h
+--- git/drivers/gpu/pvr/include4/sgxscript.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/sgxscript.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++#define       SGX_MAX_INIT_COMMANDS   64
++#define       SGX_MAX_DEINIT_COMMANDS 16
++
++typedef       enum _SGX_INIT_OPERATION
++{
++      SGX_INIT_OP_ILLEGAL = 0,
++      SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++      SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++      SGX_INIT_OP_HALT
++} SGX_INIT_OPERATION;
++
++typedef union _SGX_INIT_COMMAND
++{
++      SGX_INIT_OPERATION eOp;
++      struct {
++              SGX_INIT_OPERATION eOp;
++              IMG_UINT32 ui32Offset;
++              IMG_UINT32 ui32Value;
++      } sWriteHWReg;
++#if defined(PDUMP)
++      struct {
++              SGX_INIT_OPERATION eOp;
++              IMG_UINT32 ui32Offset;
++              IMG_UINT32 ui32Value;
++      } sPDumpHWReg;
++#endif
++} SGX_INIT_COMMAND;
++
++typedef struct _SGX_INIT_SCRIPTS_
++{
++      SGX_INIT_COMMAND asInitCommands[SGX_MAX_INIT_COMMANDS];
++      SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++} SGX_INIT_SCRIPTS;
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/Kconfig git-nokia/drivers/gpu/pvr/Kconfig
+--- git/drivers/gpu/pvr/Kconfig        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/Kconfig  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,255 @@
++#
++# PowerVR Services framework from Imagination Technologies
++#
++
++menuconfig PVR
++      tristate "PowerVR Services framework"
++      help
++        Kernel-level support for the Imagination PowerVR Services framework.
++        This module provides support for resource handling for a PVR
++        compatible 2D/3D graphics accelerator like the Imagination MBX and
++        SGX accelerator cores.
++
++if PVR
++
++config PVR_TRANSFER_QUEUE
++      depends on PVR_SERVICES4
++      bool
++
++config PVR_SUPPORT_SRVINIT
++      depends on PVR_SERVICES4
++      bool
++
++config PVR_SUPPORT_SECURE_HANDLES
++      bool
++      depends on PVR_SERVICES4
++      default y
++
++config PVR_NEW_TRANSFER_QUEUE
++      bool
++      depends on !PVR_SERVICES4
++      default y
++
++
++choice PVR_SERVICES
++      prompt "Services version"
++
++config PVR_SERVICES4
++      bool "Version 4"
++      select PVR_TRANSFER_QUEUE
++      select PVR_SUPPORT_SRVINIT
++
++endchoice
++
++
++choice PVR_SGXCORE
++      prompt "SGX core"
++
++config PVR_SGXCORE_530
++      bool "530"
++
++endchoice
++
++
++choice PVR_SGX_CORE_REV
++      prompt "SGX core revision"
++
++config PVR_SGX_CORE_REV_103
++      bool "103"
++
++endchoice
++
++config PVR_PVR2D_ALT_2DHW
++      bool
++      default y
++
++config PVR_NO_HARDWARE
++      bool
++
++choice PVR_SYSTEM
++      prompt "PVR system"
++
++config PVR_SYSTEM_OMAP3430
++      bool "OMAP3430"
++      select PVR_SGXCORE_530
++      select PVR_SGX_CORE_REV_103
++      select PVR_PVR2D_ALT_2DHW
++
++config PVR_SYSTEM_NO_HARDWARE
++      bool "No hardware"
++      select PVR_NO_HARDWARE
++endchoice
++
++config PVR_BUFFERCLASS_EXAMPLE
++      bool "Buffer class example"
++
++config PVR_USE_PTHREADS
++      bool
++      default y
++
++config PVR_OPTIMISE_NON_NPTL_SINGLE_THREAD_TLS_LOOKUP
++      bool
++
++config PVR_DISABLE_THREADS
++      bool
++
++config PVR_SUPPORT_DRI2
++      bool
++
++config PVR_PDUMP
++      bool
++
++config PVR_SUPPORT_XWS
++      bool
++
++config PVR_SUPPORT_POWER_MANAGEMENT
++      bool
++
++config PVR_SUPPORT_BUFFER_CLASS
++      bool
++
++
++config PVR_SUPPORT_DYNAMIC_PBRESIZE
++      bool
++
++config PVR_USE_FBDEV
++      bool
++
++config PVR_FBDEV_NAME
++      string
++      depends on PVR_USE_FBDEV
++
++config PVR_SUPPORT_DYNAMIC_3DCLOCKGATING
++      bool
++
++config PVR_REENTRANCY_PROTECTION
++      bool
++
++config PVR_SCHEDULER_CONTROL_SUPPORT
++      bool
++
++config PVR_USE_IMG_POWER_DOMAIN_FUNCTION
++      bool
++
++config PVR_USE_DMALLOC
++      bool
++
++config PVR_SUPPORT_LINUX_X86_WRITECOMBINE
++      bool
++
++config PVR_SGX_PDS_EVENTS_DISABLED
++      bool
++
++config PVR_USE_SUPPORT_NO_TA3D_OVERLAP
++      bool
++
++config PVR_SUPPORT_SGX_TILING
++      bool
++
++config PVR_TRANSFER_QUEUE
++      bool
++      default y
++
++config PVR_SUPPORT_SGX_EVENT_OBJECT
++      bool
++      default y if PVR_SERVICES4
++
++config PVR_SUPPORT_SGX_MMU_DUMMY_PAGE
++      bool
++
++config PVR_PVRSRV_USSE_EDM_STATUS_DEBUG
++      bool
++
++config PVR_SYS_USING_INTERRUPTS
++      depends on !PVR_NO_HARDWARE
++      default y
++      bool
++
++config PVR_SUPPORT_HW_RECOVERY
++      depends on !PVR_NO_HARDWARE
++      default y
++      bool
++
++config PVR_SUPPORT_ACTIVE_POWER_MANAGEMENT
++      depends on !PVR_NO_HARDWARE
++      default y
++      bool
++
++config PVR_USE_GCC__thread_KEYWORD
++      bool
++
++config PVR_SUPPORT_SGX_EVENT_OBJECT
++      default y
++      bool
++
++config PVR_LDM_PLATFORM
++      bool
++
++config PVR_LDM_PCI
++      bool
++
++config PVR_PVR_MANUAL_POWER_CONTROL
++      bool
++
++choice PVR_BUILD
++      bool "Type of build"
++
++config PVR_BUILD_RELEASE
++      bool "Release"
++      help
++        Release build.
++
++config PVR_BUILD_DEBUG
++      bool "Debugging"
++      help
++        Debug build.
++
++config PVR_BUILD_TIMING
++      bool "Timing"
++      help
++        Timing build.
++
++endchoice
++
++if PVR_BUILD_DEBUG
++
++config PVR_DEBUG_LINUX_MEMORY_ALLOCATIONS
++      bool "Debug memory allocations"
++      default y
++
++config PVR_DEBUG_LINUX_MEM_AREAS
++      bool "Debug memory areas"
++      default y
++
++config PVR_DEBUG_LINUX_MMAP_AREAS
++      bool "Debug mmap areas"
++      default y
++
++config PVR_DEBUG_LINUX_XML_PROC_FILES
++      bool "Debug XML proc files"
++      default n
++
++config PVR_DEBUG_LINUX_SLAB_ALLOCATIONS
++      bool "Debug SLAB allocations"
++      default n
++
++config PVR_DEBUG_BRIDGE_KM
++      bool "Debug bridge module"
++      default y
++
++config PVR_DEBUG_TRACE_BRIDGE_KM
++      bool "Debug trace bridge module"
++      default n
++
++config PVR_DEBUG_BEIDGE_KM_DISPATCH_TABLE
++      bool "Debug bridge module's dispatch table"
++      default n
++
++endif         # PVR_BUILD_DEBUG
++
++config PVR_SUPPORT_SGX1
++      bool
++      default y
++
++endif         # PVR
++
+diff -Nurd git/drivers/gpu/pvr/Makefile git-nokia/drivers/gpu/pvr/Makefile
+--- git/drivers/gpu/pvr/Makefile       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/Makefile 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,77 @@
++pvrobj :=
++pvrobj-$(CONFIG_PVR_SYSTEM_OMAP3430)          += omaplfb.o
++pvrobj-$(CONFIG_PVR_BUFFERCLASS_EXAMPLE)      += bc_example.o
++
++obj-$(CONFIG_PVR) += pvrsrvkm.o $(pvrobj-y)
++
++PVR_SYSTEM-$(CONFIG_PVR_SYSTEM_OMAP3430)=omap3430
++PVR_SYSTEM-$(CONFIG_PVR_SYSTEM_NO_HARDWARE)=no_hardware
++
++pvrsrvkm-objs:= services4/srvkm/env/linux/osfunc.o            \
++              services4/srvkm/env/linux/mmap.o                \
++              services4/srvkm/env/linux/module.o              \
++              services4/srvkm/env/linux/pdump.o               \
++              services4/srvkm/env/linux/proc.o                \
++              services4/srvkm/env/linux/pvr_bridge_k.o        \
++              services4/srvkm/env/linux/pvr_debug.o           \
++              services4/srvkm/env/linux/mm.o                  \
++              services4/srvkm/env/linux/mutex.o               \
++              services4/srvkm/common/buffer_manager.o         \
++              services4/srvkm/common/devicemem.o              \
++              services4/srvkm/common/deviceclass.o            \
++              services4/srvkm/common/handle.o                 \
++              services4/srvkm/common/hash.o                   \
++              services4/srvkm/common/metrics.o                \
++              services4/srvkm/common/pvrsrv.o                 \
++              services4/srvkm/common/queue.o                  \
++              services4/srvkm/common/ra.o                     \
++              services4/srvkm/common/resman.o                 \
++              services4/srvkm/common/power.o                  \
++              services4/srvkm/common/mem.o                    \
++              services4/srvkm/bridged/bridged_pvr_bridge.o    \
++              services4/srvkm/devices/sgx/sgxinit.o           \
++              services4/srvkm/devices/sgx/sgxutils.o          \
++              services4/srvkm/devices/sgx/sgxkick.o           \
++              services4/srvkm/devices/sgx/sgxtransfer.o       \
++              services4/srvkm/devices/sgx/mmu.o               \
++              services4/srvkm/devices/sgx/pb.o                \
++              services4/srvkm/devices/sgx/sgx2dcore.o         \
++              services4/srvkm/common/perproc.o
++
++pvrsrvkm-objs+=       services4/system/$(PVR_SYSTEM-y)/sysconfig.o    \
++              services4/system/$(PVR_SYSTEM-y)/sysutils.o
++
++omaplfb-objs :=       services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.o \
++              services4/3rdparty/dc_omap3430_linux/omaplfb_linux.o
++
++bc_example-objs :=                                                        \
++      services4/3rdparty/bufferclass_example/bufferclass_example.o        \
++      services4/3rdparty/bufferclass_example/bufferclass_example_linux.o  \
++      services4/3rdparty/bufferclass_example/bufferclass_example_private.o
++
++INCLUDES :=   -I$(src)/services4/srvkm/env/linux      \
++              -I$(src)/services4/srvkm/include        \
++              -I$(src)/services4/srvkm/bridged        \
++              -I$(src)/services4/srvkm/devices/sgx    \
++              -I$(src)/services4/srvkm/include        \
++              -I$(src)/services4/srvkm/hwdefs         \
++              -I$(src)/include4                       \
++              -I$(src)/services4/system/include       \
++              -I$(src)/services4/include
++
++INCLUDES +=   -I$(src)/services4/system/$(PVR_SYSTEM-y)
++
++# pvrconf.h translates kbuild options to PVR options.
++# We could do away with it by renaming the options in the source itself,
++# which would also result in finer grained option dependency, that is
++# avoiding the rebuild of files not affected by an option change.
++#
++PVR_OPTS := -include $(src)/pvrconf.h
++
++DATE := $(shell date "+%a %B %d %Z %Y")
++PVR_OPTS += -D"PVR_BUILD_DIR=KBUILD_STR($(src))" -D"PVR_BUILD_DATE=KBUILD_STR($(DATE))"
++
++PVR_OPTS += -DLINUX -D__linux__ -DLDM_PLATFORM
++
++ccflags-y :=  $(PVR_OPTS) $(INCLUDES)
++
+diff -Nurd git/drivers/gpu/pvr/pvrconf.h git-nokia/drivers/gpu/pvr/pvrconf.h
+--- git/drivers/gpu/pvr/pvrconf.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/pvrconf.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,118 @@
++#ifndef _PVRCONF_H
++#define _PVRCONF_H
++
++/* Define PVR equivalents of the kbuild config options */
++
++#ifdef CONFIG_PVR_TRANSFER_QUEUE
++#define TRANSFER_QUEUE
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SRVINIT
++#define SUPPORT_SRVINIT
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SECURE_HANDLES
++#define SUPPORT_SECURE_HANDLES
++#endif
++#ifdef CONFIG_PVR_SERVICES4
++#define SERVICES4
++#endif
++#ifdef CONFIG_PVR_SGXCORE_530
++#define SGXCORE_530
++#endif
++#ifdef CONFIG_PVR_SGX_CORE_REV_103
++#define SGX_CORE_REV_103
++#endif
++#ifdef CONFIG_PVR_PVR2D_ALT_2DHW
++#define PVR2D_ALT_2DHW
++#endif
++#ifdef CONFIG_PVR_SYSTEM_OMAP3430
++#define SYSTEM_OMAP3430
++#endif
++#ifdef CONFIG_PVR_SYSTEM_NO_HARDWARE
++#define SYSTEM_NO_HARDWARE
++#endif
++#ifdef CONFIG_PVR_BUFFERCLASS_EXAMPLE
++#define BUFFERCLASS_EXAMPLE
++#endif
++#ifdef CONFIG_PVR_USE_PTHREADS
++#define USE_PTHREADS
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT
++#define SUPPORT_SGX_EVENT_OBJECT
++#endif
++#ifdef CONFIG_PVR_SYS_USING_INTERRUPTS
++#define SYS_USING_INTERRUPTS
++#endif
++#ifdef CONFIG_PVR_SUPPORT_HW_RECOVERY
++#define SUPPORT_HW_RECOVERY
++#endif
++#ifdef CONFIG_PVR_SUPPORT_ACTIVE_POWER_MANAGEMENT
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT
++#endif
++#ifdef CONFIG_PVR_BUILD_RELEASE
++#define BUILD_RELEASE
++#endif
++#ifdef CONFIG_PVR_BUILD_DEBUG
++#define BUILD_DEBUG
++#endif
++#ifdef CONFIG_PVR_BUILD_TIMING
++#define BUILD_TIMING
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_MEMORY_ALLOCATIONS
++#define DEBUG_LINUX_MEMORY_ALLOCATIONS
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_MEM_AREAS
++#define DEBUG_LINUX_MEM_AREAS
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_MMAP_AREAS
++#define DEBUG_LINUX_MMAP_AREAS
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_XML_PROC_FILES
++#define DEBUG_LINUX_XML_PROC_FILES
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_SLAB_ALLOCATIONS
++#define DEBUG_LINUX_SLAB_ALLOCATIONS
++#endif
++#ifdef CONFIG_PVR_DEBUG_BRIDGE_KM
++#define DEBUG_BRIDGE_KM
++#endif
++#ifdef CONFIG_PVR_DEBUG_TRACE_BRIDGE_KM
++#define DEBUG_TRACE_BRIDGE_KM
++#endif
++#ifdef CONFIG_PVR_DEBUG_BEIDGE_KM_DISPATCH_TABLE
++#define DEBUG_BEIDGE_KM_DISPATCH_TABLE
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SGX1
++#define SUPPORT_SGX1
++#endif
++
++#ifdef CONFIG_PVR_BUILD_DEBUG
++#define PVR_BUILD_TYPE        "debug"
++#define DEBUG
++#endif
++
++#ifdef CONFIG_PVR_BUILD_RELEASE
++#define PVR_BUILD_TYPE        "release"
++#define RELEASE
++#endif
++
++#ifdef CONFIG_PVR_BUILD_TIMING
++#define PVR_BUILD_TYPE        "timing"
++#define TIMING
++#endif
++
++#ifdef CONFIG_PVR_SERVICES4
++#define _XOPEN_SOURCE 600
++#endif
++
++#ifdef CONFIG_PVR_SGX_CORE_REV_103
++#define SGX_CORE_REV  103
++#endif
++
++#ifdef CONFIG_PVR_SGXCORE_530
++#define SGXCORE               530
++#define SGX530
++#define SUPPORT_SGX530
++#endif
++
++#endif                /* _PVRCONF_H */
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,307 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "bufferclass_example.h"
++
++
++static IMG_VOID *gpvAnchor = IMG_NULL;
++static PFN_BC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL;
++
++BC_EXAMPLE_DEVINFO * GetAnchorPtr(IMG_VOID)
++{
++      return (BC_EXAMPLE_DEVINFO *)gpvAnchor;
++}
++
++static IMG_VOID SetAnchorPtr(BC_EXAMPLE_DEVINFO *psDevInfo)
++{
++      gpvAnchor = (IMG_VOID*)psDevInfo;
++}
++
++
++static PVRSRV_ERROR OpenBCDevice(IMG_HANDLE *phDevice)
++{
++      BC_EXAMPLE_DEVINFO *psDevInfo;
++
++      psDevInfo = GetAnchorPtr();
++
++      
++      *phDevice = (IMG_HANDLE)psDevInfo;
++
++      return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR CloseBCDevice(IMG_HANDLE hDevice)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR GetBCBuffer(IMG_HANDLE                    hDevice,
++                                                              IMG_UINT32                      ui32BufferNumber,
++                                                              PVRSRV_SYNC_DATA        *psSyncData,
++                                                              IMG_HANDLE                      *phBuffer)
++{
++      BC_EXAMPLE_DEVINFO      *psDevInfo;
++
++      if(!hDevice || !phBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDevInfo = (BC_EXAMPLE_DEVINFO*)hDevice;
++
++      if( ui32BufferNumber < psDevInfo->sBufferInfo.ui32BufferCount )
++      {
++              psDevInfo->psSystemBuffer[ui32BufferNumber].psSyncData = psSyncData;
++              *phBuffer = (IMG_HANDLE)&psDevInfo->psSystemBuffer[ui32BufferNumber];
++      }
++      else
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR GetBCInfo(IMG_HANDLE hDevice, BUFFER_INFO *psBCInfo)
++{
++      BC_EXAMPLE_DEVINFO      *psDevInfo;
++
++      if(!hDevice || !psBCInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDevInfo = (BC_EXAMPLE_DEVINFO*)hDevice;
++
++      *psBCInfo = psDevInfo->sBufferInfo;
++
++      return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR GetBCBufferAddr(IMG_HANDLE                hDevice,
++                                                                      IMG_HANDLE              hBuffer,
++                                                                      IMG_SYS_PHYADDR **ppsSysAddr,
++                                                                      IMG_UINT32              *pui32ByteSize,
++                                                                      IMG_VOID                **ppvCpuVAddr,
++                                                                      IMG_HANDLE              *phOSMapInfo,
++                                                                      IMG_BOOL                *pbIsContiguous)
++{
++      BC_EXAMPLE_BUFFER *psBuffer;
++
++      if(!hDevice || !hBuffer || !ppsSysAddr || !pui32ByteSize)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psBuffer = (BC_EXAMPLE_BUFFER *) hBuffer;
++
++      *ppsSysAddr = &psBuffer->sPageAlignSysAddr;
++      *ppvCpuVAddr = psBuffer->sCPUVAddr;
++
++      *pui32ByteSize = psBuffer->ui32Size;
++
++      *phOSMapInfo = IMG_NULL;
++      *pbIsContiguous = IMG_TRUE;
++
++      return PVRSRV_OK;
++}
++
++
++
++
++PVRSRV_ERROR  BC_Example_Init(IMG_VOID)
++{
++      BC_EXAMPLE_DEVINFO      *psDevInfo;
++      IMG_CPU_PHYADDR         sSystemBufferCPUPAddr;
++      IMG_UINT32 i;
++      
++
++
++
++      
++
++
++
++      
++
++      psDevInfo = GetAnchorPtr();
++
++      if (psDevInfo == IMG_NULL)
++      {
++              
++              psDevInfo = (BC_EXAMPLE_DEVINFO *)AllocKernelMem(sizeof(BC_EXAMPLE_DEVINFO));
++
++              if(!psDevInfo)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              SetAnchorPtr((IMG_VOID*)psDevInfo);
++
++              
++              psDevInfo->ui32RefCount = 0;
++
++      
++              if(OpenPVRServices(&psDevInfo->hPVRServices) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++              if(GetLibFuncAddr (psDevInfo->hPVRServices, "PVRGetBufferClassJTable", &pfnGetPVRJTable) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              
++              if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              
++
++              psDevInfo->ui32NumBuffers = 0;
++
++              psDevInfo->psSystemBuffer = AllocKernelMem(sizeof(BC_EXAMPLE_BUFFER) * BC_EXAMPLE_NUM_BUFFERS);
++
++              if(!psDevInfo->psSystemBuffer)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              for(i=0; i < BC_EXAMPLE_NUM_BUFFERS; i++)
++              {
++
++                      
++                      if (AllocContigMemory(BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE,
++                                                                &psDevInfo->psSystemBuffer[i].hMemHandle,
++                                                                &psDevInfo->psSystemBuffer[i].sCPUVAddr,
++                                                                &sSystemBufferCPUPAddr) != PVRSRV_OK)
++                      {
++                              break;
++                      }
++
++                      psDevInfo->ui32NumBuffers++;
++
++                      psDevInfo->psSystemBuffer[i].ui32Size = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++                      psDevInfo->psSystemBuffer[i].sSysAddr = CpuPAddrToSysPAddr(sSystemBufferCPUPAddr);
++                      psDevInfo->psSystemBuffer[i].sPageAlignSysAddr.uiAddr = (psDevInfo->psSystemBuffer[i].sSysAddr.uiAddr & 0xFFFFF000);
++                      psDevInfo->psSystemBuffer[i].psSyncData = IMG_NULL;
++              }
++
++              
++
++              psDevInfo->sBCJTable.ui32TableSize = sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE);
++              psDevInfo->sBCJTable.pfnOpenBCDevice = OpenBCDevice;
++              psDevInfo->sBCJTable.pfnCloseBCDevice = CloseBCDevice;
++              psDevInfo->sBCJTable.pfnGetBCBuffer = GetBCBuffer;
++              psDevInfo->sBCJTable.pfnGetBCInfo = GetBCInfo;
++              psDevInfo->sBCJTable.pfnGetBufferAddr = GetBCBufferAddr;
++
++
++              
++              
++              if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterBCDevice (&psDevInfo->sBCJTable,
++                                                                                                                      &psDevInfo->ui32DeviceID ) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++              }
++
++              
++
++              psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
++              psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
++              psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
++              psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;              
++              psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
++              psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
++              psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ui32NumBuffers;
++      }
++
++      
++      psDevInfo->ui32RefCount++;
++
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BC_Example_Deinit(IMG_VOID)
++{
++      BC_EXAMPLE_DEVINFO *psDevInfo;
++      IMG_UINT32 i;
++      psDevInfo = GetAnchorPtr();
++
++      
++      if (psDevInfo == IMG_NULL)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++      
++      psDevInfo->ui32RefCount--;
++
++      if (psDevInfo->ui32RefCount == 0)
++      {
++              
++              PVRSRV_BC_BUFFER2SRV_KMJTABLE   *psJTable = &psDevInfo->sPVRJTable;
++
++
++              
++              if (psJTable->pfnPVRSRVRemoveBCDevice(psDevInfo->ui32DeviceID) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              if (ClosePVRServices(psDevInfo->hPVRServices) != PVRSRV_OK)
++              {
++                      psDevInfo->hPVRServices = IMG_NULL;
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              for(i=0; i < psDevInfo->ui32NumBuffers; i++)
++              {
++                      FreeContigMemory(psDevInfo->psSystemBuffer[i].ui32Size,
++                                                       psDevInfo->psSystemBuffer[i].hMemHandle,
++                                                       psDevInfo->psSystemBuffer[i].sCPUVAddr,
++                                                       SysPAddrToCpuPAddr(psDevInfo->psSystemBuffer[i].sSysAddr));
++              }
++
++              
++              FreeKernelMem(psDevInfo);
++
++              
++              SetAnchorPtr(IMG_NULL);
++      }
++
++      
++      return PVRSRV_OK;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,122 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_H__
++#define __BC_EXAMPLE_H__
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kernelbuffer.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++extern IMG_IMPORT IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++#define BC_EXAMPLE_NUM_BUFFERS        3
++
++#define BC_EXAMPLE_WIDTH              (160)
++#define BC_EXAMPLE_HEIGHT             (160)
++#define BC_EXAMPLE_STRIDE             (160*2)
++#define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_YVYU)
++
++#define BC_EXAMPLE_DEVICEID            0
++
++
++typedef struct BC_EXAMPLE_BUFFER_TAG
++{
++      IMG_UINT32                                      ui32Size;
++      IMG_HANDLE                                      hMemHandle;
++      IMG_SYS_PHYADDR                         sSysAddr;
++      IMG_SYS_PHYADDR                         sPageAlignSysAddr;
++      IMG_CPU_VIRTADDR                        sCPUVAddr;
++      PVRSRV_SYNC_DATA                        *psSyncData;
++      struct BC_EXAMPLE_BUFFER_TAG    *psNext;
++} BC_EXAMPLE_BUFFER;
++
++
++typedef struct BC_EXAMPLE_DEVINFO_TAG
++{
++      IMG_UINT32                              ui32DeviceID;   
++
++      BC_EXAMPLE_BUFFER                       *psSystemBuffer;
++
++      BUFFER_INFO                             sBufferInfo;
++
++      
++      IMG_UINT32                              ui32NumBuffers;
++
++      
++      PVRSRV_BC_BUFFER2SRV_KMJTABLE   sPVRJTable;
++
++      
++      PVRSRV_BC_SRV2BUFFER_KMJTABLE   sBCJTable;
++
++      
++
++
++      IMG_HANDLE                              hPVRServices;
++
++      
++      IMG_UINT32                              ui32RefCount;
++
++}  BC_EXAMPLE_DEVINFO;
++
++
++PVRSRV_ERROR BC_Example_Init(IMG_VOID);
++PVRSRV_ERROR BC_Example_Deinit(IMG_VOID);
++
++PVRSRV_ERROR OpenPVRServices(IMG_HANDLE *phPVRServices);
++PVRSRV_ERROR ClosePVRServices(IMG_HANDLE hPVRServices);
++
++IMG_VOID *AllocKernelMem(IMG_UINT32 ui32Size);
++IMG_VOID FreeKernelMem(IMG_VOID *pvMem);
++
++PVRSRV_ERROR AllocContigMemory(IMG_UINT32 ui32Size,
++                                                         IMG_HANDLE * phMemHandle,
++                                                         IMG_CPU_VIRTADDR *pLinAddr,
++                                                         IMG_CPU_PHYADDR *pPhysAddr);
++IMG_VOID FreeContigMemory(IMG_UINT32 ui32Size, 
++                                                IMG_HANDLE hMemHandle,
++                                                IMG_CPU_VIRTADDR LinAddr, 
++                                                IMG_CPU_PHYADDR PhysAddr);
++
++IMG_SYS_PHYADDR CpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr);
++IMG_CPU_PHYADDR SysPAddrToCpuPAddr(IMG_SYS_PHYADDR sys_paddr);
++
++IMG_VOID *MapPhysAddr(IMG_SYS_PHYADDR sSysAddr, IMG_UINT32 ui32Size);
++IMG_VOID UnMapPhysAddr(IMG_VOID *pvAddr, IMG_UINT32 ui32Size);
++
++PVRSRV_ERROR GetLibFuncAddr (IMG_HANDLE hExtDrv, IMG_CHAR *szFunctionName, PFN_BC_GET_PVRJTABLE *ppfnFuncTable);
++BC_EXAMPLE_DEVINFO * GetAnchorPtr(IMG_VOID);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,372 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++
++#if defined(LMA)
++#include <linux/pci.h>
++#else
++#include <linux/dma-mapping.h>
++#endif
++
++#include "bufferclass_example.h"
++#include "bufferclass_example_linux.h"
++
++#define DEVNAME       "bc_example"
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++
++int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
++int FillBuffer(unsigned int ui32BufferIndex);
++int GetBufferCount(unsigned int *pui32BufferCount);
++
++static int AssignedMajorNumber;
++
++static struct file_operations bufferclass_example_fops = {
++      ioctl:BC_Example_Bridge,
++};
++
++
++#define unref__ __attribute__ ((unused))
++
++#if defined(LMA)
++
++#define PVR_BUFFERCLASS_MEMOFFSET (220 * 1024 * 1024) 
++#define PVR_BUFFERCLASS_MEMSIZE         (4 * 1024 * 1024)       
++
++unsigned int g_ui32MemBase = 0;
++unsigned int g_ui32MemCurrent = 0;
++
++typedef struct  
++{
++      union
++      {
++              IMG_UINT8       aui8PCISpace[256];
++              IMG_UINT16      aui16PCISpace[128];
++              IMG_UINT32      aui32PCISpace[64];
++
++              struct  
++              {
++                      IMG_UINT16      ui16VenID;
++                      IMG_UINT16      ui16DevID;
++                      IMG_UINT16      ui16PCICmd;
++                      IMG_UINT16      ui16PCIStatus;
++              }s;
++      }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#define VENDOR_ID_PVR                                 0x1010
++#define DEVICE_ID_PVR                                 0x1CF1
++
++#define PCI_BASEREG_OFFSET_DWORDS             4
++
++#define PVR_MEM_PCI_BASENUM                   2
++#define PVR_MEM_PCI_OFFSET                (PVR_MEM_PCI_BASENUM + PCI_BASEREG_OFFSET_DWORDS)
++
++
++
++IMG_UINT32 PCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg)
++{
++    struct pci_dev *dev;
++    IMG_UINT32 ui32Value;
++
++    dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++    if (dev)
++    {
++            pci_read_config_dword(dev, (int) ui32Reg, (u32 *) & ui32Value);
++            return (ui32Value);
++    }
++    else
++    {
++            return (0);
++    }
++}
++
++IMG_VOID PCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value)
++{
++    struct pci_dev *dev;
++
++    dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++    if (dev)
++    {
++            pci_write_config_dword(dev, (int) ui32Reg, (u32) ui32Value);
++    }
++}
++
++static IMG_UINT32 FindPCIDevice(IMG_UINT16 ui16VenID, IMG_UINT16 ui16DevID, PCICONFIG_SPACE *psPCISpace)
++{
++      IMG_UINT32 ui32BusNum;
++      IMG_UINT32 ui32DevNum;
++      IMG_UINT32 ui32VenDevID;
++
++      
++      for (ui32BusNum=0; ui32BusNum < 255; ui32BusNum++)
++      {
++              
++              for (ui32DevNum=0; ui32DevNum < 32; ui32DevNum++)
++              {
++                      
++                      ui32VenDevID=PCIReadDword(ui32BusNum, ui32DevNum, 0, 0);                        
++
++                      
++                      if (ui32VenDevID == (IMG_UINT32)((ui16DevID<<16)+ui16VenID))
++                      {
++                              IMG_UINT32 ui32Idx;
++
++                              
++                              PCIWriteDword(ui32BusNum, ui32DevNum, 0, 4, PCIReadDword(ui32BusNum, ui32DevNum, 0, 4) | 0x02);
++
++                              
++                              for (ui32Idx=0; ui32Idx < 64; ui32Idx++)
++                              {
++                                      psPCISpace->u.aui32PCISpace[ui32Idx] = PCIReadDword(ui32BusNum, ui32DevNum, 0, ui32Idx*4);
++                              }
++                              return PVRSRV_OK;
++                      }
++                                                      
++              }
++
++      }
++
++      return PVRSRV_ERROR_GENERIC;
++}
++#endif
++
++
++
++static int __init BC_Example_ModInit(void)
++{
++#if defined(LMA)
++      PCICONFIG_SPACE sPCISpace;
++#endif
++      
++      AssignedMajorNumber = register_chrdev(0, DEVNAME, &bufferclass_example_fops);
++
++      if (AssignedMajorNumber <= 0)
++      {
++              printk("BC_Example_ModInit: unable to get major number\n");
++
++              return -EBUSY;
++      }
++
++      printk("BC_Example_ModInit: major device %d\n", AssignedMajorNumber);
++
++#if defined(LMA)
++      if(FindPCIDevice(VENDOR_ID_PVR, DEVICE_ID_PVR, &sPCISpace) != PVRSRV_OK)
++      {
++              printk ("BC_Example_ModInit: can't init device (can't find PCI) \n");
++              unregister_chrdev(AssignedMajorNumber, DEVNAME);
++              return PVRSRV_ERROR_INVALID_DEVICE;
++      }
++      
++      g_ui32MemBase = sPCISpace.u.aui32PCISpace[PVR_MEM_PCI_OFFSET] + PVR_BUFFERCLASS_MEMOFFSET;
++#endif
++
++      if(BC_Example_Init() != PVRSRV_OK)
++      {
++              printk ("BC_Example_ModInit: can't init device\n");
++              unregister_chrdev(AssignedMajorNumber, DEVNAME);
++              return -ENODEV;
++      }
++
++      return 0;
++} 
++
++static void __exit BC_Example_ModCleanup(void)
++{    
++      unregister_chrdev(AssignedMajorNumber, DEVNAME);
++      
++      if(BC_Example_Deinit() != PVRSRV_OK)
++      {
++              printk ("BC_Example_ModCleanup: can't deinit device\n");
++      }
++
++} 
++
++
++IMG_VOID *AllocKernelMem(IMG_UINT32 ui32Size)
++{
++      return kmalloc(ui32Size, GFP_KERNEL);
++}
++
++IMG_VOID FreeKernelMem(IMG_VOID *pvMem)
++{
++      kfree(pvMem);
++}
++
++PVRSRV_ERROR AllocContigMemory(       IMG_UINT32 ui32Size,
++                                                              IMG_HANDLE unref__ *phMemHandle, 
++                                                              IMG_CPU_VIRTADDR *pLinAddr, 
++                                                              IMG_CPU_PHYADDR *pPhysAddr)
++{
++#if defined(LMA)
++      IMG_VOID *pvLinAddr;
++      
++      
++      if(g_ui32MemCurrent + ui32Size >= PVR_BUFFERCLASS_MEMSIZE)
++      {
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      pvLinAddr = ioremap(g_ui32MemBase + g_ui32MemCurrent, ui32Size);
++
++      if(pvLinAddr)
++      {
++              pPhysAddr->uiAddr = g_ui32MemBase + g_ui32MemCurrent;
++              *pLinAddr = pvLinAddr;  
++
++              
++              g_ui32MemCurrent += ui32Size;
++              return PVRSRV_OK;
++      }
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++      dma_addr_t dma;
++      IMG_VOID *pvLinAddr;
++      
++      pvLinAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL);
++
++      if(pvLinAddr == IMG_NULL)
++      {
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      else
++      {
++              IMG_VOID *pvPage;
++              IMG_VOID *pvEnd = pvLinAddr + ui32Size;
++
++              for(pvPage = pvLinAddr; pvPage < pvEnd;  pvPage += PAGE_SIZE)
++              {
++                      SetPageReserved(virt_to_page(pvPage));
++              }
++
++              pPhysAddr->uiAddr = dma;
++              *pLinAddr = pvLinAddr;
++
++              return PVRSRV_OK;
++      }
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++#endif
++}
++
++void FreeContigMemory(  IMG_UINT32 ui32Size,
++                                          IMG_HANDLE unref__ hMemHandle, 
++                                              IMG_CPU_VIRTADDR LinAddr, 
++                                              IMG_CPU_PHYADDR PhysAddr)
++{
++#if defined(LMA)
++      g_ui32MemCurrent -= ui32Size;
++      iounmap(LinAddr);
++#else
++      dma_free_coherent(NULL, ui32Size, LinAddr, (dma_addr_t)PhysAddr.uiAddr);
++#endif
++}
++
++IMG_SYS_PHYADDR CpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr)
++{
++      IMG_SYS_PHYADDR sys_paddr;
++      
++      
++      sys_paddr.uiAddr = cpu_paddr.uiAddr;
++      return sys_paddr;
++}
++
++IMG_CPU_PHYADDR SysPAddrToCpuPAddr(IMG_SYS_PHYADDR sys_paddr)
++{
++      
++      IMG_CPU_PHYADDR cpu_paddr;
++      
++      cpu_paddr.uiAddr = sys_paddr.uiAddr;
++      return cpu_paddr;
++}
++
++PVRSRV_ERROR OpenPVRServices (IMG_HANDLE *phPVRServices)
++{
++      
++      *phPVRServices = 0;
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR ClosePVRServices (IMG_HANDLE unref__ hPVRServices)
++{
++      
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR GetLibFuncAddr (IMG_HANDLE unref__ hExtDrv, IMG_CHAR *szFunctionName, PFN_BC_GET_PVRJTABLE *ppfnFuncTable)
++{
++      if(strcmp("PVRGetBufferClassJTable", szFunctionName) != 0)
++              return PVRSRV_ERROR_INVALID_PARAMS;
++
++      
++      *ppfnFuncTable = PVRGetBufferClassJTable;
++
++      return PVRSRV_OK;
++}
++
++
++int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
++{
++      int err = -EFAULT;
++      int command = _IOC_NR(cmd);
++      BC_Example_ioctl_package *psBridge = (BC_Example_ioctl_package *)arg;
++
++      if(!access_ok(VERIFY_WRITE, psBridge, sizeof(BC_Example_ioctl_package)))
++              return err;
++
++      switch(command)
++      {
++              case _IOC_NR(BC_Example_ioctl_fill_buffer):
++              {
++                      if(FillBuffer(psBridge->inputparam) == -1)
++                              return err;
++                      break;
++              }
++              case _IOC_NR(BC_Example_ioctl_get_buffer_count):
++              {       
++                      if(GetBufferCount(&psBridge->outputparam) == -1)
++                              return err;
++                      
++                      break;
++              }
++              default:
++                      return err;
++      }
++
++      return 0;
++}
++
++
++module_init(BC_Example_ModInit);
++module_exit(BC_Example_ModCleanup);
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,47 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_LINUX_H__
++#define __BC_EXAMPLE_LINUX_H__
++
++#include <linux/ioctl.h>
++
++typedef struct BC_Example_ioctl_package_TAG
++{
++      int inputparam;
++      int outputparam;
++
++}BC_Example_ioctl_package;
++
++#define BC_EXAMPLE_IOC_GID      'g'
++
++#define BC_EXAMPLE_IOWR(INDEX)  _IOWR(BC_EXAMPLE_IOC_GID, INDEX, BC_Example_ioctl_package)
++
++#define BC_Example_ioctl_fill_buffer          BC_EXAMPLE_IOWR(0)
++#define BC_Example_ioctl_get_buffer_count     BC_EXAMPLE_IOWR(1)
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,158 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "bufferclass_example.h"
++
++
++void FillYUV422Image(void *pvDest, int width, int height, int bytestride)
++{
++      static int iPhase = 0;
++      int x, y;
++      unsigned char u,v,y0,y1;
++      unsigned int *pui32yuv = (unsigned int *)pvDest;
++      unsigned int count = 0;
++
++      for(y=0;y<height;y++)
++      {
++              for(x=0;x<width >> 1;x++)
++              {
++                      u = (y<(height/2))? ((x<(width/4))? 0xFF:0x33) : ((x<(width/4))? 0x33:0xAA);
++                      v = (y<(height/2))? ((x<(width/4))? 0xAA:0x0) : ((x<(width/4))? 0x03:0xEE);
++
++                      y0 = y1 = (((x+iPhase)>>4)%(2)==0)? 0x7f:0x00;
++
++                      
++                      pui32yuv[count++] = (y1 << 24) | (v << 16) | (y0 << 8) | u;
++
++              }
++      }
++
++      iPhase++;
++}
++
++void FillRGB565Image(void *pvDest, int width, int height, int bytestride)
++{
++      int i, Count;
++      unsigned long *pui32Addr = (unsigned long *)pvDest;
++      unsigned short *pui16Addr = (unsigned short *)pvDest;
++      unsigned long Colour32;
++      unsigned short Colour16;
++      static  unsigned char   Colour8 = 0;
++      
++      Colour16 = (Colour8>>3) | ((Colour8>>2)<<5) | ((Colour8>>3)<<11);
++      Colour32 = Colour16 | Colour16 << 16;
++                      
++      Count = (height * bytestride)>>2;
++
++      for(i=0; i<Count; i++)
++      {
++              pui32Addr[i] = Colour32;
++      }
++
++      Count =  height;
++
++      pui16Addr = (unsigned short *)((unsigned char *)pvDest + (2 * Colour8));
++
++      for(i=0; i<Count; i++)
++      {
++              *pui16Addr = 0xF800;
++
++              pui16Addr = (unsigned short *)((unsigned char *)pui16Addr + bytestride);
++      }
++      Count = bytestride >> 2;
++      
++      pui32Addr = (unsigned long *)((unsigned char *)pvDest + (bytestride * (0xFF - Colour8)));
++
++      for(i=0; i<Count; i++)
++      {
++              pui32Addr[i] = 0x001F001F;
++      }
++
++      
++      Colour8 = (Colour8 + 1) % 0xFF;
++}
++
++
++int FillBuffer(unsigned int ui32BufferIndex)
++{
++      BC_EXAMPLE_DEVINFO              *psDevInfo = GetAnchorPtr();
++      BC_EXAMPLE_BUFFER               *psBuffer;
++      BUFFER_INFO                     *psBufferInfo;
++      PVRSRV_SYNC_DATA        *psSyncData;                    
++
++      
++      if(psDevInfo == IMG_NULL)
++      {
++              return -1;
++      }
++
++      psBuffer = &psDevInfo->psSystemBuffer[ui32BufferIndex];
++      psBufferInfo = &psDevInfo->sBufferInfo;
++
++      
++      psSyncData = psBuffer->psSyncData;
++
++      
++      if(psSyncData)
++      {
++              psSyncData->ui32WriteOpsPending++;
++      }
++
++      if(psBufferInfo->pixelformat == PVRSRV_PIXEL_FORMAT_RGB565)
++      {
++              FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++      }
++      else
++      {
++              FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++      }
++
++      
++      if(psSyncData)
++      {
++              psSyncData->ui32WriteOpsComplete++;
++      }
++
++      return 0;
++}
++
++
++int GetBufferCount(unsigned int *pui32BufferCount)
++{
++      BC_EXAMPLE_DEVINFO *psDevInfo = GetAnchorPtr();
++
++      
++      if(psDevInfo == IMG_NULL)
++      {
++              return -1;
++      }
++
++      
++      *pui32BufferCount = psDevInfo->sBufferInfo.ui32BufferCount;
++      
++      return 0;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1312 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/notifier.h>
++#include <linux/workqueue.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "omaplfb.h"
++
++#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
++
++#define       DRIVER_PREFIX   "omaplfb"
++
++static IMG_VOID *gpvAnchor;
++
++static int fb_idx = 0;
++
++#define OMAPLFB_COMMAND_COUNT         1
++
++#if !defined(OMAPLFB_DEVICE_POWER)
++PVRSRV_ERROR OMAPLFBPrePower (IMG_HANDLE              hDevHandle,
++                                                PVR_POWER_STATE       eNewPowerState,
++                                                PVR_POWER_STATE       eCurrentPowerState);
++PVRSRV_ERROR OMAPLFBPostPower (IMG_HANDLE             hDevHandle,
++                                                 PVR_POWER_STATE      eNewPowerState,
++                                                 PVR_POWER_STATE      eCurrentPowerState);
++#endif
++
++extern void omap_dispc_set_plane_base(int plane, IMG_UINT32 phys_addr);
++
++static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL;
++
++static OMAPLFB_DEVINFO * GetAnchorPtr(IMG_VOID)
++{
++      return (OMAPLFB_DEVINFO *)gpvAnchor;
++}
++
++static IMG_VOID SetAnchorPtr(OMAPLFB_DEVINFO *psDevInfo)
++{
++      gpvAnchor = (IMG_VOID*)psDevInfo;
++}
++
++static int FrameBufferEvents(struct notifier_block *psNotif,
++              unsigned long event, void *data)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      struct fb_event *psFBEvent = (struct fb_event *)data;
++
++      
++      if (event != FB_EVENT_BLANK)
++      {
++              return 0;
++      }
++
++      
++      if (*(int *)psFBEvent->data == 0)
++      {
++              return 0;
++      }
++
++      psDevInfo = GetAnchorPtr();
++
++      
++      schedule_work(&psDevInfo->sLINWork);
++
++      return 0;
++}
++
++static void unblank_display(OMAPLFB_DEVINFO *psDevInfo)
++{
++      acquire_console_sem();
++      (void) fb_blank(psDevInfo->psLINFBInfo, 0);
++      release_console_sem();
++}
++
++static void WorkHandler(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++                      void *
++#else
++                      struct work_struct *
++#endif
++                      data)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++
++      PVR_UNREFERENCED_PARAMETER(data);
++
++      psDevInfo = GetAnchorPtr();
++
++      unblank_display(psDevInfo);
++}
++
++static PVRSRV_ERROR Flip(OMAPLFB_SWAPCHAIN *psSwapChain,
++                                                IMG_UINT32 aPhyAddr)
++{
++      if (1 /* omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD */)
++      {
++                omap_dispc_set_plane_base(0, aPhyAddr);
++              return PVRSRV_OK;
++      }
++      else
++      if (0 /*omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV*/)
++      {
++                omap_dispc_set_plane_base(0, aPhyAddr);
++              return PVRSRV_OK;
++      }
++
++      return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++static IMG_VOID EnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++
++}
++
++static IMG_VOID DisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++}
++
++static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
++                                 IMG_HANDLE *phDevice,
++                                 PVRSRV_SYNC_DATA* psSystemBufferSyncData)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      int res;
++
++      PVR_UNREFERENCED_PARAMETER(ui32DeviceID);
++      
++      psDevInfo = GetAnchorPtr();
++      
++      
++      psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
++      
++      
++      INIT_WORK(&psDevInfo->sLINWork, WorkHandler
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++      , NULL
++#endif
++      );
++
++      memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
++
++      psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
++
++      res = fb_register_client(&psDevInfo->sLINNotifBlock);
++      if (res != 0)
++      {
++              printk(KERN_INFO DRIVER_PREFIX
++                      ": Couldn't register for framebuffer events: %d\n",
++                      res);
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      unblank_display(psDevInfo);
++
++      
++      *phDevice = (IMG_HANDLE)psDevInfo;
++      
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
++{
++      OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      BUG_ON(psDevInfo->psSwapChain != IMG_NULL);
++
++      
++      fb_unregister_client(&psDevInfo->sLINNotifBlock);
++
++      
++      flush_scheduled_work();
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
++                                                                      IMG_UINT32 *pui32NumFormats, 
++                                                                      DISPLAY_FORMAT *psFormat)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !pui32NumFormats)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      *pui32NumFormats = 1;
++      
++      if(psFormat)
++      {
++              psFormat[0] = psDevInfo->sDisplayFormat;
++      }
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice, 
++                                                      DISPLAY_FORMAT *psFormat, 
++                                                      IMG_UINT32 *pui32NumDims, 
++                                                      DISPLAY_DIMS *psDim)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !psFormat || !pui32NumDims)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      *pui32NumDims = 1;
++
++              
++      if(psDim)
++      {
++              psDim[0] = psDevInfo->sDisplayDim;
++      }
++      
++      return PVRSRV_OK;       
++}
++
++
++static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !phBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
++
++      return PVRSRV_OK;       
++}
++
++
++static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !psDCInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      *psDCInfo = psDevInfo->sDisplayInfo;
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
++                                                                      IMG_HANDLE        hBuffer, 
++                                                                      IMG_SYS_PHYADDR   **ppsSysAddr,
++                                                                      IMG_UINT32        *pui32ByteSize, 
++                                                                      IMG_VOID          **ppvCpuVAddr,
++                                                                      IMG_HANDLE        *phOSMapInfo,
++                                                                      IMG_BOOL          *pbIsContiguous)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_BUFFER *psSystemBuffer;
++
++      if(!hDevice)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      if(!hBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++      psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
++
++      if (!ppsSysAddr)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      *ppsSysAddr = &psSystemBuffer->sSysAddr;
++
++      if (!pui32ByteSize)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      *pui32ByteSize = psDevInfo->sFBInfo.ui32BufferSize;
++
++      if (ppvCpuVAddr)
++      {
++              *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
++      }
++
++      if (phOSMapInfo)
++      {
++              *phOSMapInfo = (IMG_HANDLE)0;
++      }
++
++      if (pbIsContiguous)
++      {
++              *pbIsContiguous = IMG_TRUE;
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
++                                                                              IMG_UINT32 ui32Flags, 
++                                                                              DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, 
++                                                                              DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++                                                                              IMG_UINT32 ui32BufferCount, 
++                                                                              PVRSRV_SYNC_DATA **ppsSyncData,
++                                                                              IMG_UINT32 ui32OEMFlags,
++                                                                              IMG_HANDLE *phSwapChain, 
++                                                                              IMG_UINT32 *pui32SwapChainID)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      OMAPLFB_BUFFER *psBuffer;
++      OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++      PVR_UNREFERENCED_PARAMETER(ui32OEMFlags);       
++      PVR_UNREFERENCED_PARAMETER(pui32SwapChainID);
++      
++      if(!hDevice 
++      || !psDstSurfAttrib 
++      || !psSrcSurfAttrib 
++      || !ppsSyncData 
++      || !phSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }       
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      
++      if(psDevInfo->psSwapChain != IMG_NULL)
++      {
++              return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;  
++      }
++      
++      
++      if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
++      {
++              return PVRSRV_ERROR_TOOMANYBUFFERS;     
++      }
++      
++      if ((psDevInfo->sFBInfo.ui32RoundedBufferSize * ui32BufferCount) > psDevInfo->sFBInfo.ui32FBSize)
++      {
++              return PVRSRV_ERROR_TOOMANYBUFFERS;
++      }
++
++      
++
++
++      if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
++      || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
++      || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
++      || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }               
++
++      if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
++      || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
++      || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
++      || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }               
++
++      
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      
++      
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN));
++      if(!psSwapChain)
++      {
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      psBuffer = (OMAPLFB_BUFFER*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * ui32BufferCount);
++      if(!psBuffer)
++      {
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorFreeSwapChain;
++      }
++
++      psVSyncFlips = (OMAPLFB_VSYNC_FLIP_ITEM *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
++      if (!psVSyncFlips)
++      {
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorFreeBuffers;
++      }
++
++      psSwapChain->ui32BufferCount = ui32BufferCount;
++      psSwapChain->psBuffer = psBuffer;
++      psSwapChain->psVSyncFlips = psVSyncFlips;
++      psSwapChain->ui32InsertIndex = 0;
++      psSwapChain->ui32RemoveIndex = 0;
++      psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
++
++      
++      for(i=0; i<ui32BufferCount-1; i++)
++      {
++              psBuffer[i].psNext = &psBuffer[i+1];
++      }
++      
++      psBuffer[i].psNext = &psBuffer[0];
++
++      
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              psBuffer[i].psSyncData = ppsSyncData[i];
++
++              psBuffer[i].sSysAddr.uiAddr = psDevInfo->sFBInfo.sSysAddr.uiAddr + (i * psDevInfo->sFBInfo.ui32RoundedBufferSize);
++              psBuffer[i].sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr + (i * psDevInfo->sFBInfo.ui32RoundedBufferSize);
++      }
++
++      
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              psVSyncFlips[i].bValid = IMG_FALSE;
++              psVSyncFlips[i].bFlipped = IMG_FALSE;
++              psVSyncFlips[i].bCmdCompleted = IMG_FALSE;
++      }
++
++      
++      unblank_display(psDevInfo);
++
++      if (OMAPLFBInstallVSyncISR(psSwapChain) != PVRSRV_OK)
++      {
++              printk(KERN_WARNING DRIVER_PREFIX ": ISR handler failed to register\n");
++              goto ErrorFreeVSyncItems;
++      }
++              
++      EnableVSyncInterrupt(psSwapChain);
++              
++      
++      psDevInfo->psSwapChain = psSwapChain;
++
++      
++      *phSwapChain = (IMG_HANDLE)psSwapChain;
++
++      return PVRSRV_OK;
++
++ErrorFreeVSyncItems:
++      OMAPLFBFreeKernelMem(psVSyncFlips);
++ErrorFreeBuffers:
++      OMAPLFBFreeKernelMem(psBuffer);
++ErrorFreeSwapChain:
++      OMAPLFBFreeKernelMem(psSwapChain);
++
++      return eError;
++}
++
++      
++static IMG_VOID FlushInternalVSyncQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++      OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
++      IMG_UINT32               ui32MaxIndex;
++
++      
++      DisableVSyncInterrupt(psSwapChain);
++
++      
++      psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      ui32MaxIndex = psSwapChain->ui32BufferCount - 1;
++
++      while(psFlipItem->bValid)
++      {
++              if(psFlipItem->bFlipped == IMG_FALSE)
++              {
++                      
++                      Flip(psSwapChain, (IMG_UINT32)psFlipItem->sSysAddr);
++              }
++
++              
++              if(psFlipItem->bCmdCompleted == IMG_FALSE)
++              {
++                      psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(psFlipItem->hCmdComplete, IMG_TRUE);
++              }
++
++              
++              psSwapChain->ui32RemoveIndex++;
++              
++              if(psSwapChain->ui32RemoveIndex == ui32MaxIndex)
++              {
++                      psSwapChain->ui32RemoveIndex = 0;
++              }
++
++              
++              psFlipItem->bFlipped = IMG_FALSE;
++              psFlipItem->bCmdCompleted = IMG_FALSE;
++              psFlipItem->bValid = IMG_FALSE;
++              
++              
++              psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      }
++
++      psSwapChain->ui32InsertIndex = 0;
++      psSwapChain->ui32RemoveIndex = 0;
++
++      
++      EnableVSyncInterrupt(psSwapChain);
++}
++
++static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      PVRSRV_ERROR    eError;
++
++      
++      if(!hDevice || !hSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
++      if (psSwapChain != psDevInfo->psSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++
++      
++      psDevInfo->psSwapChain = IMG_NULL;
++
++      
++      FlushInternalVSyncQueue(psSwapChain);
++
++      
++      eError =Flip(psSwapChain, psSwapChain->psBuffer[0].sSysAddr.uiAddr);
++
++      if(eError != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;    
++      }
++
++      DisableVSyncInterrupt(psSwapChain);
++      
++      
++      unblank_display(psDevInfo);
++      if(OMAPLFBUninstallVSyncISR(psSwapChain) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;            
++      }
++
++      
++      OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips);
++      OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
++      OMAPLFBFreeKernelMem(psSwapChain);
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_RECT *psRect)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(psRect);     
++
++      
++      
++      return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_RECT *psRect)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(psRect);     
++
++      
++
++      return PVRSRV_ERROR_NOT_SUPPORTED;      
++}
++
++static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_UINT32 ui32CKColour)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(ui32CKColour);       
++
++      
++
++      return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
++                                                                      IMG_HANDLE hSwapChain,
++                                                                      IMG_UINT32 ui32CKColour)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(ui32CKColour);       
++
++      
++
++      return PVRSRV_ERROR_NOT_SUPPORTED;      
++}
++
++static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_UINT32 *pui32BufferCount,
++      IMG_HANDLE *phBuffer)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      IMG_UINT32 i;
++      
++      
++      if(!hDevice 
++      || !hSwapChain
++      || !pui32BufferCount
++      || !phBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
++      if (psSwapChain != psDevInfo->psSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      
++      *pui32BufferCount = psSwapChain->ui32BufferCount;
++      
++      
++      for(i=0; i<psSwapChain->ui32BufferCount; i++)
++      {
++              phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
++      }
++      
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
++      IMG_HANDLE hBuffer,
++      IMG_UINT32 ui32SwapInterval,
++      IMG_HANDLE hPrivateTag,
++      IMG_UINT32 ui32ClipRectCount,
++      IMG_RECT *psClipRect)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++
++      PVR_UNREFERENCED_PARAMETER(ui32SwapInterval);
++      PVR_UNREFERENCED_PARAMETER(hPrivateTag);        
++      PVR_UNREFERENCED_PARAMETER(psClipRect);
++      
++      if(!hDevice 
++      || !hBuffer
++      || (ui32ClipRectCount != 0))
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      PVR_UNREFERENCED_PARAMETER(hBuffer);
++
++      
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      
++      if(!hDevice || !hSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
++      if (psSwapChain != psDevInfo->psSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      
++      FlushInternalVSyncQueue(psSwapChain);
++      
++      
++      Flip(psSwapChain, psSwapChain->psBuffer[0].sSysAddr.uiAddr);
++
++      return PVRSRV_OK;               
++}
++
++static IMG_VOID SetDCState(IMG_HANDLE hDevice,
++                                       IMG_UINT32 ui32State)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      if (ui32State == DC_STATE_FLUSH_COMMANDS)
++      {
++              OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
++              if (psSwapChain != IMG_NULL)
++              {
++                      FlushInternalVSyncQueue(psSwapChain);
++              }
++              
++              psDevInfo->bFlushCommands = IMG_TRUE;
++      }
++      else if (ui32State == DC_STATE_NO_FLUSH_COMMANDS)
++      {
++              psDevInfo->bFlushCommands = IMG_FALSE;
++      }
++}
++
++IMG_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++      IMG_BOOL                bStatus = IMG_FALSE;
++      OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
++      IMG_UINT32 ui32MaxIndex;
++      
++      psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      ui32MaxIndex = psSwapChain->ui32BufferCount - 1;        
++
++      while(psFlipItem->bValid)
++      {       
++              
++              if(psFlipItem->bFlipped)
++              {
++                      
++                      if(!psFlipItem->bCmdCompleted)
++                      {
++                              
++                              psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(psFlipItem->hCmdComplete, IMG_TRUE);
++
++                              
++                              psFlipItem->bCmdCompleted = IMG_TRUE;                                   
++                      }
++
++                      
++                      psFlipItem->ui32SwapInterval--;                                 
++                                              
++                      
++                      if(psFlipItem->ui32SwapInterval == 0)
++                      {       
++                              
++                              psSwapChain->ui32RemoveIndex++;
++                              
++                              if(psSwapChain->ui32RemoveIndex == ui32MaxIndex)
++                              {
++                                      psSwapChain->ui32RemoveIndex = 0;
++                              }
++                              
++                              
++                              psFlipItem->bCmdCompleted = IMG_FALSE;  
++                              psFlipItem->bFlipped = IMG_FALSE;                       
++      
++                              
++                              psFlipItem->bValid = IMG_FALSE;
++                      }
++                      else
++                      {
++                              
++                              break;                                  
++                      }
++              }
++              else
++              {
++                      
++                      Flip(psSwapChain, (IMG_UINT32)psFlipItem->sSysAddr);
++                      
++                      
++                      psFlipItem->bFlipped = IMG_TRUE;
++                      
++                      
++                      break;
++              }
++              
++              
++              psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      }
++              
++      return bStatus;
++}
++
++static IMG_BOOL ProcessFlip(IMG_HANDLE        hCmdCookie, 
++                                                      IMG_UINT32      ui32DataSize,
++                                                      IMG_VOID        *pvData)
++{
++      PVRSRV_ERROR eError;
++      DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_BUFFER *psBuffer;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      OMAPLFB_VSYNC_FLIP_ITEM* psFlipItem;
++
++      
++      if(!hCmdCookie || !pvData)
++      {
++              return IMG_FALSE;                                               
++      }
++
++      
++      psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
++
++      if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
++      {
++              return IMG_FALSE;                               
++      }
++
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)psFlipCmd->hExtDevice;
++      psBuffer = (OMAPLFB_BUFFER*)psFlipCmd->hExtBuffer; 
++      psSwapChain = (OMAPLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
++
++      if (psDevInfo->bFlushCommands)
++      {
++              
++              psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++              return IMG_TRUE;
++      }
++       
++              
++              
++
++      if(psFlipCmd->ui32SwapInterval == 0)
++      {                       
++              
++              
++              
++
++              
++              eError = Flip(psSwapChain, psBuffer->sSysAddr.uiAddr);
++
++              if(eError != PVRSRV_OK)
++              {
++                      return IMG_FALSE;       
++              }
++
++              
++              psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++
++              return IMG_TRUE;
++      }
++
++      psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32InsertIndex];
++
++      
++      if(!psFlipItem->bValid)
++      {
++              IMG_UINT32 ui32MaxIndex = psSwapChain->ui32BufferCount - 1;
++              
++              if(psSwapChain->ui32InsertIndex == psSwapChain->ui32RemoveIndex)
++              {
++                      
++                      eError = Flip(psSwapChain, psBuffer->sSysAddr.uiAddr);
++                      if(eError != PVRSRV_OK)
++                      {
++                              return IMG_FALSE;       
++                      }
++
++                      psFlipItem->bFlipped = IMG_TRUE;
++              }
++              else
++              {
++                      psFlipItem->bFlipped = IMG_FALSE;
++              }
++
++              psFlipItem->hCmdComplete = hCmdCookie;
++              psFlipItem->ui32SwapInterval = psFlipCmd->ui32SwapInterval;
++              psFlipItem->sSysAddr = &psBuffer->sSysAddr;
++              psFlipItem->bValid = IMG_TRUE;
++
++              psSwapChain->ui32InsertIndex++;
++              if(psSwapChain->ui32InsertIndex >= ui32MaxIndex)
++              {
++                      psSwapChain->ui32InsertIndex = 0;
++              }
++
++              return IMG_TRUE;        
++      }
++      
++      return IMG_FALSE;
++}
++
++
++static PVRSRV_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
++{
++      struct fb_info *psLINFBInfo;
++      struct module *psLINFBOwner;
++      OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++      unsigned long FBSize;
++
++      acquire_console_sem();
++
++      if (fb_idx < 0 || fb_idx >= num_registered_fb)
++      {
++              eError = PVRSRV_ERROR_INVALID_DEVICE;
++              goto errRelSem;
++      }
++
++      psLINFBInfo = registered_fb[fb_idx];
++
++      psLINFBOwner = psLINFBInfo->fbops->owner;
++      if (!try_module_get(psLINFBOwner))
++      {
++              printk(KERN_INFO DRIVER_PREFIX
++                      ": Couldn't get framebuffer module\n");
++
++              goto errRelSem;
++      }
++
++      if (psLINFBInfo->fbops->fb_open != NULL)
++      {
++              int res;
++
++              res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0);
++              if (res != 0)
++              {
++                      printk(KERN_INFO DRIVER_PREFIX
++                              ": Couldn't open framebuffer: %d\n", res);
++
++                      goto errModPut;
++              }
++      }
++
++      psDevInfo->psLINFBInfo = psLINFBInfo;
++
++      FBSize = (psLINFBInfo->screen_size) != 0 ?
++                                      psLINFBInfo->screen_size :
++                                      psLINFBInfo->fix.smem_len;
++#ifdef        DEBUG
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer physical address: 0x%lx\n",
++                      psLINFBInfo->fix.smem_start);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer virtual address: 0x%lx\n",
++                      (unsigned long)psLINFBInfo->screen_base);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer size: %lu\n",
++                      FBSize);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer virtual width: %u\n",
++                      psLINFBInfo->var.xres_virtual);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer virtual height: %u\n",
++                      psLINFBInfo->var.yres_virtual);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer width: %u\n",
++                      psLINFBInfo->var.xres);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer height: %u\n",
++                      psLINFBInfo->var.yres);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer stride: %u\n",
++                      psLINFBInfo->fix.line_length);
++#endif
++
++      
++      psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
++      psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base;
++
++      psPVRFBInfo->ui32Width = psLINFBInfo->var.xres;
++      psPVRFBInfo->ui32Height = psLINFBInfo->var.yres;
++      psPVRFBInfo->ui32ByteStride =  psLINFBInfo->fix.line_length;
++      psPVRFBInfo->ui32FBSize = FBSize;
++      psPVRFBInfo->ui32BufferSize = psPVRFBInfo->ui32Height * psPVRFBInfo->ui32ByteStride;
++      
++      psPVRFBInfo->ui32RoundedBufferSize = OMAPLFB_PAGE_ROUNDUP(psPVRFBInfo->ui32BufferSize);
++
++      if(psLINFBInfo->var.bits_per_pixel == 16)
++      {
++              if((psLINFBInfo->var.red.length == 5) &&
++                      (psLINFBInfo->var.green.length == 6) && 
++                      (psLINFBInfo->var.blue.length == 5) && 
++                      (psLINFBInfo->var.red.offset == 11) &&
++                      (psLINFBInfo->var.green.offset == 5) && 
++                      (psLINFBInfo->var.blue.offset == 0) && 
++                      (psLINFBInfo->var.red.msb_right == 0))
++              {
++                      psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565;
++              }
++              else
++              {
++                      printk("Unknown FB format\n");
++              }
++      }
++      else
++      {
++              printk("Unknown FB format\n");
++      }
++
++      
++      psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
++      psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;
++
++      eError = PVRSRV_OK;
++      goto errRelSem;
++
++errModPut:
++      module_put(psLINFBOwner);
++errRelSem:
++      release_console_sem();
++      return eError;
++}
++
++static IMG_VOID DeInitDev(OMAPLFB_DEVINFO *psDevInfo)
++{
++      struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
++      struct module *psLINFBOwner;
++
++      acquire_console_sem();
++
++      psLINFBOwner = psLINFBInfo->fbops->owner;
++
++      if (psLINFBInfo->fbops->fb_release != NULL) 
++      {
++              (void) psLINFBInfo->fbops->fb_release(psLINFBInfo, 0);
++      }
++
++      module_put(psLINFBOwner);
++
++      release_console_sem();
++}
++
++PVRSRV_ERROR OMAPLFBInit(IMG_VOID)
++{
++      OMAPLFB_DEVINFO         *psDevInfo;
++
++      psDevInfo = GetAnchorPtr();
++      
++      if (psDevInfo == IMG_NULL)
++      {
++              PFN_CMD_PROC                    pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
++              IMG_UINT32                              aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
++              
++              psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
++
++              if(!psDevInfo)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
++
++              
++              SetAnchorPtr((IMG_VOID*)psDevInfo);
++
++              
++              psDevInfo->ui32RefCount = 0;
++
++              
++              if(InitDev(psDevInfo) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;       
++              }
++
++              
++              if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;       
++              }
++
++              
++
++              
++              psDevInfo->bFlushCommands = IMG_FALSE;
++              psDevInfo->psSwapChain = IMG_NULL;              
++              psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
++              psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
++              psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
++              psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = psDevInfo->sFBInfo.ui32FBSize / psDevInfo->sFBInfo.ui32RoundedBufferSize;
++
++              strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
++      
++              psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
++              psDevInfo->sDisplayDim.ui32Width =  psDevInfo->sFBInfo.ui32Width;
++              psDevInfo->sDisplayDim.ui32Height =  psDevInfo->sFBInfo.ui32Height;
++              psDevInfo->sDisplayDim.ui32ByteStride =  psDevInfo->sFBInfo.ui32ByteStride;
++
++              
++              psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
++              psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
++              psDevInfo->sSystemBuffer.ui32BufferSize = (psDevInfo->sFBInfo.ui32RoundedBufferSize);
++
++              
++
++              psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
++              psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
++              psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
++              psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
++              psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
++              psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
++              psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
++              psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
++              psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
++              psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
++              psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
++              psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
++              psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
++              psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
++              psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
++              psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
++              psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
++              psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
++
++              
++              if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
++                      &psDevInfo->sDCJTable,
++                      &psDevInfo->ui32DeviceID ) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++              }
++              
++              
++              pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
++
++              
++              aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
++              aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
++
++              
++
++
++
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->ui32DeviceID, 
++                                                                                                                              &pfnCmdProcList[0], 
++                                                                                                                              aui32SyncCountList,
++                                                                                                                              OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
++              {
++                      printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
++                      return PVRSRV_ERROR_CANT_REGISTER_CALLBACK;
++              }
++
++#if defined(OMAPLFB_DEVICE_POWER)
++              
++
++
++#else
++              
++
++
++
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice(psDevInfo->ui32DeviceID,
++                                                                      OMAPLFBPrePower,
++                                                                      OMAPLFBPostPower,
++                                                                      psDevInfo,
++                                                                      PVRSRV_POWER_Unspecified,
++                                                                      PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++#endif 
++      }
++
++      
++      psDevInfo->ui32RefCount++;
++
++                      
++      return PVRSRV_OK;
++      
++      }
++
++PVRSRV_ERROR OMAPLFBDeinit(IMG_VOID)
++{
++      OMAPLFB_DEVINFO *psDevInfo, *psDevFirst;
++
++      psDevFirst = GetAnchorPtr();
++      psDevInfo = psDevFirst;
++
++      
++      if (psDevInfo == IMG_NULL)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psDevInfo->ui32RefCount--;
++
++      if (psDevInfo->ui32RefCount == 0)
++      {
++              
++              PVRSRV_DC_DISP2SRV_KMJTABLE     *psJTable = &psDevInfo->sPVRJTable;
++
++#if !defined(OMAPLFB_DEVICE_POWER)
++              
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice(psDevInfo->ui32DeviceID,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;            
++              }
++#endif 
++
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->ui32DeviceID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              
++              if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->ui32DeviceID) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              
++              DeInitDev(psDevInfo);
++
++              
++              OMAPLFBFreeKernelMem(psDevInfo);
++      }
++      
++      
++      SetAnchorPtr(IMG_NULL);
++
++      
++      return PVRSRV_OK;
++}
++
++
++#if !defined(OMAPLFB_DEVICE_POWER)
++PVRSRV_ERROR OMAPLFBPrePower (IMG_HANDLE              hDevHandle,
++                                                PVR_POWER_STATE       eNewPowerState,
++                                                PVR_POWER_STATE       eCurrentPowerState)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevHandle);
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++      PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OMAPLFBPostPower (IMG_HANDLE             hDevHandle,
++                                                 PVR_POWER_STATE      eNewPowerState,
++                                                 PVR_POWER_STATE      eCurrentPowerState)
++{
++      OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevHandle;
++
++      if ((eNewPowerState != eCurrentPowerState) &&
++              (eCurrentPowerState == PVRSRV_POWER_STATE_D3))
++      {
++              
++              if (psDevInfo->psSwapChain != IMG_NULL)
++              {
++                      EnableVSyncInterrupt(psDevInfo->psSwapChain);                   
++              }
++      }
++
++      return PVRSRV_OK;
++}
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,206 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __OMAPLFB_H__
++#define __OMAPLFB_H__
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++
++#define OMAPLCD_IRQ                   25
++
++#define OMAPLCD_SYSCONFIG             0x0410
++#define OMAPLCD_CONFIG                        0x0444
++#define OMAPLCD_DEFAULT_COLOR0                0x044C
++#define OMAPLCD_TIMING_H              0x0464
++#define OMAPLCD_TIMING_V              0x0468
++#define OMAPLCD_POL_FREQ              0x046C
++#define OMAPLCD_DIVISOR                       0x0470
++#define OMAPLCD_SIZE_DIG              0x0478
++#define OMAPLCD_SIZE_LCD              0x047C
++#define OMAPLCD_GFX_POSITION          0x0488
++#define OMAPLCD_GFX_SIZE              0x048C
++#define OMAPLCD_GFX_ATTRIBUTES                0x04a0
++#define OMAPLCD_GFX_FIFO_THRESHOLD    0x04a4
++#define OMAPLCD_GFX_WINDOW_SKIP               0x04b4
++
++#define OMAPLCD_IRQSTATUS             0x0418
++#define OMAPLCD_IRQENABLE             0x041c
++#define OMAPLCD_CONTROL                       0x0440
++#define OMAPLCD_GFX_BA0                       0x0480
++#define OMAPLCD_GFX_BA1                       0x0484
++#define OMAPLCD_GFX_ROW_INC           0x04ac
++#define OMAPLCD_GFX_PIX_INC           0x04b0
++#define OMAPLCD_VID1_BA0              0x04bc
++#define OMAPLCD_VID1_BA1              0x04c0
++#define OMAPLCD_VID1_ROW_INC          0x04d8
++#define OMAPLCD_VID1_PIX_INC          0x04dc
++
++#define       OMAP_CONTROL_GODIGITAL          (1 << 6)
++#define       OMAP_CONTROL_GOLCD              (1 << 5)
++#define       OMAP_CONTROL_DIGITALENABLE      (1 << 1)
++#define       OMAP_CONTROL_LCDENABLE          (1 << 0)
++
++#define OMAPLCD_INTMASK_VSYNC         (1 << 1)
++#define OMAPLCD_INTMASK_OFF           0
++
++#define DISPC_IRQSTATUS_EVSYNC_ODD    (1 <<  3)
++#define DISPC_IRQSTATUS_EVSYNC_EVEN   (1 <<  2)
++
++/*
++ * from $(KERNELDIR)/include/asm-arm/arch/display.h
++ */
++#define OMAP2_GRAPHICS                0
++#define OMAP2_VIDEO1          1
++#define OMAP2_VIDEO2          2
++
++#define OMAP2_OUTPUT_LCD      4
++#define OMAP2_OUTPUT_TV               5
++
++typedef struct OMAPLFB_BUFFER_TAG
++{
++      IMG_SYS_PHYADDR                 sSysAddr;
++      IMG_CPU_VIRTADDR                sCPUVAddr;
++      IMG_UINT32                      ui32BufferSize;
++      PVRSRV_SYNC_DATA                *psSyncData;    
++      struct OMAPLFB_BUFFER_TAG       *psNext;
++} OMAPLFB_BUFFER;
++
++typedef struct OMAPLFB_VSYNC_FLIP_ITEM_TAG
++{     
++
++      IMG_HANDLE      hCmdComplete;
++      
++      IMG_SYS_PHYADDR* sSysAddr;
++      
++      IMG_UINT32      ui32SwapInterval;
++      
++      IMG_BOOL        bValid;
++      
++      IMG_BOOL        bFlipped;
++      
++      IMG_BOOL        bCmdCompleted;
++
++} OMAPLFB_VSYNC_FLIP_ITEM;
++
++typedef struct PVRPDP_SWAPCHAIN_TAG
++{
++      
++      IMG_UINT32 ui32BufferCount;
++      
++      OMAPLFB_BUFFER *psBuffer;
++      
++      OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++
++      
++      IMG_UINT32 ui32InsertIndex;
++      
++      
++      IMG_UINT32 ui32RemoveIndex;
++
++      
++      PVRSRV_DC_DISP2SRV_KMJTABLE     *psPVRJTable;
++} OMAPLFB_SWAPCHAIN;
++
++typedef struct OMAPLFB_FBINFO_TAG
++{
++      IMG_SYS_PHYADDR                 sSysAddr;
++      IMG_CPU_VIRTADDR                sCPUVAddr;
++      IMG_UINT32                      ui32FBSize;
++      IMG_UINT32                      ui32BufferSize;
++      IMG_UINT32                      ui32RoundedBufferSize;
++      IMG_UINT32                      ui32Width;
++      IMG_UINT32                      ui32Height;
++      IMG_UINT32                      ui32ByteStride;
++      
++      PVRSRV_PIXEL_FORMAT             ePixelFormat;
++}OMAPLFB_FBINFO;
++
++typedef struct OMAPLFB_DEVINFO_TAG
++{
++      IMG_UINT32                      ui32DeviceID;   
++      DISPLAY_INFO                    sDisplayInfo;
++
++      
++      OMAPLFB_BUFFER                  sSystemBuffer;
++
++      
++      DISPLAY_FORMAT                  sDisplayFormat;
++      
++      
++      DISPLAY_DIMS                    sDisplayDim;    
++      
++      
++      PVRSRV_DC_DISP2SRV_KMJTABLE     sPVRJTable;
++      
++      
++      PVRSRV_DC_SRV2DISP_KMJTABLE     sDCJTable;
++
++      
++      OMAPLFB_FBINFO                  sFBInfo;
++
++      
++      IMG_UINT32                      ui32RefCount;
++
++      
++      OMAPLFB_SWAPCHAIN               *psSwapChain;
++
++      
++      IMG_DEV_VIRTADDR                sDisplayDevVAddr;
++
++      
++      IMG_BOOL                        bFlushCommands;
++
++      
++      struct fb_info                  *psLINFBInfo;
++
++      
++      struct  notifier_block          sLINNotifBlock;
++
++      
++      struct  work_struct             sLINWork;
++}  OMAPLFB_DEVINFO;
++
++#define       OMAPLFB_PAGE_SIZE 4096
++#define       OMAPLFB_PAGE_MASK (OMAPLFB_PAGE_SIZE - 1)
++#define       OMAPLFB_PAGE_TRUNC (~OMAPLFB_PAGE_MASK)
++
++#define       OMAPLFB_PAGE_ROUNDUP(x) (((x) + OMAPLFB_PAGE_MASK) & OMAPLFB_PAGE_TRUNC)
++
++PVRSRV_ERROR OMAPLFBInit(IMG_VOID);
++PVRSRV_ERROR OMAPLFBDeinit(IMG_VOID);
++
++IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size);
++IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID *pvMem);
++IMG_VOID OMAPLFBWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++IMG_UINT32 OMAPLFBReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset);
++PVRSRV_ERROR OMAPLFBGetLibFuncAddr(IMG_CHAR *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
++PVRSRV_ERROR OMAPLFBInstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain);
++PVRSRV_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain);
++IMG_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain);
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/workqueue.h>
++
++#include <linux/pci.h>
++#include <asm/uaccess.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++
++#include <asm/io.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++ #include "kerneldisplay.h"
++#include "omaplfb.h"
++
++#define DRVNAME "omaplfb"
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++
++
++extern int omap_dispc_request_irq(unsigned long, void (*)(void *), void *);
++extern void omap_dispc_free_irq(unsigned long, void (*)(void *), void *);
++
++
++#define unref__ __attribute__ ((unused))
++
++
++
++
++static int __init OMAPLFB_Init(void)
++{
++      if(OMAPLFBInit() != PVRSRV_OK)
++              return -ENODEV;
++
++      return 0;
++}
++
++static void __exit OMAPLFB_Cleanup(void)
++{    
++      if(OMAPLFBDeinit() != PVRSRV_OK)
++      {
++              printk ("OMAPLFB_Cleanup: can't deinit device\n");
++      }
++}
++
++
++IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size)
++{
++      return kmalloc(ui32Size, GFP_KERNEL);
++}
++
++IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID *pvMem)
++{
++      kfree(pvMem);
++}
++
++
++PVRSRV_ERROR OMAPLFBGetLibFuncAddr (IMG_CHAR *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
++{
++      if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
++              return PVRSRV_ERROR_INVALID_PARAMS;
++
++      
++      *ppfnFuncTable = PVRGetDisplayClassJTable;
++
++      return PVRSRV_OK;
++}
++
++static void
++OMAPLFBVSyncISR(void *arg)
++{
++      (void) OMAPLFBVSyncIHandler((OMAPLFB_SWAPCHAIN *)arg);
++}
++
++#define DISPC_IRQ_VSYNC 0x0002
++
++PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++
++        if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain) != 0)
++            return PVRSRV_ERROR_OUT_OF_MEMORY; /* not worth a proper mapping */
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++        omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
++
++      return PVRSRV_OK;
++}
++
++module_init(OMAPLFB_Init);
++module_exit(OMAPLFB_Cleanup);
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/kernelbuffer.h git-nokia/drivers/gpu/pvr/services4/include/kernelbuffer.h
+--- git/drivers/gpu/pvr/services4/include/kernelbuffer.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/kernelbuffer.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*);
++typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*);
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++{
++      IMG_UINT32                                                      ui32TableSize;
++      PFN_OPEN_BC_DEVICE                                      pfnOpenBCDevice;
++      PFN_CLOSE_BC_DEVICE                                     pfnCloseBCDevice;
++      PFN_GET_BC_INFO                                         pfnGetBCInfo;
++      PFN_GET_BC_BUFFER                                       pfnGetBCBuffer;
++      PFN_GET_BUFFER_ADDR                                     pfnGetBufferAddr;
++
++} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++
++typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32); 
++
++typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
++{
++      IMG_UINT32                                                      ui32TableSize;
++      PFN_BC_REGISTER_BUFFER_DEV                      pfnPVRSRVRegisterBCDevice;
++      PFN_BC_REMOVE_BUFFER_DEV                        pfnPVRSRVRemoveBCDevice;
++
++} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE); 
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/include/kerneldisplay.h git-nokia/drivers/gpu/pvr/services4/include/kerneldisplay.h
+--- git/drivers/gpu/pvr/services4/include/kerneldisplay.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/kerneldisplay.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE,
++                                                                               DISPLAY_FORMAT*,
++                                                                               IMG_UINT32*,
++                                                                               DISPLAY_DIMS*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*);
++typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE,
++                                                                                              IMG_UINT32, 
++                                                                                              DISPLAY_SURF_ATTRIBUTES*, 
++                                                                                              DISPLAY_SURF_ATTRIBUTES*,
++                                                                                              IMG_UINT32, 
++                                                                                              PVRSRV_SYNC_DATA**,
++                                                                                              IMG_UINT32,
++                                                                                              IMG_HANDLE*, 
++                                                                                              IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE, 
++                                                                                               IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE,
++                                                                                 IMG_HANDLE,
++                                                                                 IMG_UINT32*,
++                                                                                 IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE,
++                                                                                        IMG_HANDLE,
++                                                                                        IMG_UINT32,
++                                                                                        IMG_HANDLE,
++                                                                                        IMG_UINT32,
++                                                                                        IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(IMG_HANDLE, IMG_HANDLE);
++typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
++{
++      IMG_UINT32                                              ui32TableSize;
++      PFN_OPEN_DC_DEVICE                              pfnOpenDCDevice;
++      PFN_CLOSE_DC_DEVICE                             pfnCloseDCDevice;
++      PFN_ENUM_DC_FORMATS                             pfnEnumDCFormats;
++      PFN_ENUM_DC_DIMS                                pfnEnumDCDims;
++      PFN_GET_DC_SYSTEMBUFFER                 pfnGetDCSystemBuffer;
++      PFN_GET_DC_INFO                                 pfnGetDCInfo;
++      PFN_GET_BUFFER_ADDR                             pfnGetBufferAddr;
++      PFN_CREATE_DC_SWAPCHAIN                 pfnCreateDCSwapChain;
++      PFN_DESTROY_DC_SWAPCHAIN                pfnDestroyDCSwapChain;
++      PFN_SET_DC_DSTRECT                              pfnSetDCDstRect;
++      PFN_SET_DC_SRCRECT                              pfnSetDCSrcRect;
++      PFN_SET_DC_DSTCK                                pfnSetDCDstColourKey;
++      PFN_SET_DC_SRCCK                                pfnSetDCSrcColourKey;
++      PFN_GET_DC_BUFFERS                              pfnGetDCBuffers;
++      PFN_SWAP_TO_DC_BUFFER                   pfnSwapToDCBuffer;
++      PFN_SWAP_TO_DC_SYSTEM                   pfnSwapToDCSystem;
++      PFN_SET_DC_STATE                                pfnSetDCState;
++
++} PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*);
++
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32);
++typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER,
++                                                                                        IMG_HANDLE, PVR_POWER_STATE, PVR_POWER_STATE);
++
++typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
++{
++      IMG_UINT32                                              ui32TableSize;
++      PFN_DC_REGISTER_DISPLAY_DEV             pfnPVRSRVRegisterDCDevice;
++      PFN_DC_REMOVE_DISPLAY_DEV               pfnPVRSRVRemoveDCDevice;
++      PFN_DC_OEM_FUNCTION                             pfnPVRSRVOEMFunction;
++      PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
++      PFN_DC_REMOVE_COMMANDPROCLIST   pfnPVRSRVRemoveCmdProcList;
++      PFN_DC_CMD_COMPLETE                             pfnPVRSRVCmdComplete;
++      PFN_DC_REGISTER_SYS_ISR                 pfnPVRSRVRegisterSystemISRHandler;
++      PFN_DC_REGISTER_POWER                   pfnPVRSRVRegisterPowerDevice;
++} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
++
++
++typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
++{
++      
++      IMG_HANDLE hExtDevice;
++
++      
++      IMG_HANDLE hExtSwapChain;
++
++      
++      IMG_HANDLE hExtBuffer;
++
++      
++      IMG_HANDLE hPrivateTag;
++
++      
++      IMG_UINT32 ui32ClipRectCount;
++
++      
++      IMG_RECT *psClipRect;
++
++      
++      IMG_UINT32      ui32SwapInterval;
++
++} DISPLAYCLASS_FLIP_COMMAND;
++
++#define DC_FLIP_COMMAND               0
++
++#define DC_STATE_NO_FLUSH_COMMANDS            0
++#define DC_STATE_FLUSH_COMMANDS                       1
++
++
++typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
++
++
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvr_bridge.h git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge.h
+--- git/drivers/gpu/pvr/services4/include/pvr_bridge.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1313 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "servicesint.h"
++
++#ifdef __linux__
++
++              #include <linux/ioctl.h>
++    
++    #define PVRSRV_IOC_GID      'g'
++    #define PVRSRV_IO(INDEX)    _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++    #define PVRSRV_IOW(INDEX)   _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++    #define PVRSRV_IOR(INDEX)   _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++    #define PVRSRV_IOWR(INDEX)  _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++
++#else 
++
++                      #error Unknown platform: Cannot define ioctls
++
++      #define PVRSRV_IO(INDEX)    (PVRSRV_IOC_GID + INDEX)
++      #define PVRSRV_IOW(INDEX)   (PVRSRV_IOC_GID + INDEX)
++      #define PVRSRV_IOR(INDEX)   (PVRSRV_IOC_GID + INDEX)
++      #define PVRSRV_IOWR(INDEX)  (PVRSRV_IOC_GID + INDEX)
++
++      #define PVRSRV_BRIDGE_BASE                  PVRSRV_IOC_GID
++#endif 
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST                  0
++#define PVRSRV_BRIDGE_ENUM_DEVICES                            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)     
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO              PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)     
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO              PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)     
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)     
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT           PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)     
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM                 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)     
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM                  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)     
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM                       PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)     
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE             PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)     
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)     
++#define       PVRSRV_BRIDGE_KV_TO_MMAP_DATA           PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)   
++#define PVRSRV_BRIDGE_CONNECT_SERVICES                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)    
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES             PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)    
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM                 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)    
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO                       PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)    
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM             PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY                  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY                  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST                           (PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST                           (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT            PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)      
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS            PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)      
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS  PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)      
++#define PVRSRV_BRIDGE_SIM_CMD_LAST                            (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST                       (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE              PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)  
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE            PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)  
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP           PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)  
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST                        (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_POWER_CMD_FIRST                 (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_POWER_CONTROL                           PVRSRV_IOWR(PVRSRV_BRIDGE_POWER_CMD_FIRST+0)    
++#if defined (SUPPORT_INT_POWER_MAN)
++#define PVRSRV_BRIDGE_INT_POWER_MAN                           PVRSRV_IOWR(PVRSRV_BRIDGE_POWER_CMD_FIRST+1)    
++#endif
++#define PVRSRV_BRIDGE_POWER_CMD_LAST                  (PVRSRV_BRIDGE_POWER_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST                 (PVRSRV_BRIDGE_POWER_CMD_LAST+1)
++#define       PVRSRV_BRIDGE_GET_FB_STATS                              PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)    
++#define PVRSRV_BRIDGE_STATS_CMD_LAST                  (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST                  (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO                           PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)     
++#define PVRSRV_BRIDGE_MISC_CMD_LAST                           (PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST                       (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES             PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)  
++#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES           PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)  
++#endif
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST                        (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST                 (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT                              PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)    
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL                            PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)    
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM                           PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)    
++#define PVRSRV_BRIDGE_PDUMP_REG                                       PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)    
++#define PVRSRV_BRIDGE_PDUMP_REGPOL                            PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)    
++#define PVRSRV_BRIDGE_PDUMP_COMMENT                           PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)    
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME                  PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)    
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING                       PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)    
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP                        PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)    
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG                       PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)    
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL                           PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)   
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC                  PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)   
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES                  PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)   
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO                        PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)   
++#define PVRSRV_BRIDGE_PDUMP_PDREG                             PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)   
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR            PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)   
++#define PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY              PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ              PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST                  (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST                  PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST                           (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE                           PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)      
++#define PVRSRV_BRIDGE_OEM_CMD_LAST                            (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST              (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS                              PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST                       (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST             (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE  PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS  PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS             PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN             PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER        PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM        PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST              (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++ 
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST              (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE        PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO            PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER  PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST                       (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST                  (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY                 PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY                       PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST                           (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST             (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM            PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM             PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM                 PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM                       PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST              (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_POLLFORVALUE                        PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR        PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_DEVMEMHEAPS                 PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST  (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST                       (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT                 PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT              PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST                        (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST  (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)      
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT                       PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CONNECT            PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST           (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++      
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD             (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++
++
++#define PVRSRV_KERNAL_MODE_CLIENT                             1
++
++typedef struct PVRSRV_BRIDGE_RETURN_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_VOID *pvData;
++
++}PVRSRV_BRIDGE_RETURN;
++
++
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++      IMG_UINT32                              ui32BridgeID;                   
++      IMG_UINT32                              ui32Size;                               
++      IMG_VOID                                *pvParamIn;                              
++      IMG_UINT32                              ui32InBufferSize;               
++      IMG_VOID                                *pvParamOut;                    
++      IMG_UINT32                              ui32OutBufferSize;              
++
++      IMG_HANDLE                              hKernelServices;                
++}PVRSRV_BRIDGE_PACKAGE;
++
++
++ 
++
++
++typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      uiDevIndex;
++      PVRSRV_DEVICE_TYPE      eDeviceType;
++
++} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVRSRV_DEVICE_CLASS sDeviceClass;
++} PVRSRV_BRIDGE_IN_ENUMCLASS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
++
++#if defined (SUPPORT_INT_POWER_MAN)
++
++typedef struct PVRSRV_BRIDGE_IN_INT_POWER_MAN_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      ui32OSPowerState;
++} PVRSRV_BRIDGE_IN_INT_POWER_MAN;
++
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++
++} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVRSRV_DEVICE_CLASS DeviceClass;
++      IMG_VOID*                       pvDevInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEMHEAPS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++
++}PVRSRV_BRIDGE_IN_GET_DEVMEMHEAPS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++
++}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_HANDLE                      hDevMemHeap;
++      IMG_UINT32                      ui32Attribs;
++      IMG_UINT32                      ui32Size;
++      IMG_UINT32                      ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_PVOID                                pvLinAddr;
++      IMG_HANDLE                               hMappingInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      ui32Flags;
++
++} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_UINT32                      ui32QueueSize;
++
++}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      PVRSRV_QUEUE_INFO       *psQueueInfo;
++
++}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_POWER_CONTROL_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVR_POWER_CONTROL       eControlMode;
++      PVR_POWER_STATE         ePVRPowerState;
++
++}PVRSRV_BRIDGE_IN_POWER_CONTROL;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_VOID                        *pvKVIndexAddress;
++    IMG_UINT32          ui32Bytes;
++} PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevMemHeap;
++      IMG_DEV_VIRTADDR        *psDevVAddr;
++      IMG_UINT32                      ui32Size;
++      IMG_UINT32                      ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
++{
++      PVRSRV_ERROR                    eError;
++      IMG_HANDLE              hKernelServices;
++}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psSrcKernelMemInfo;
++      IMG_HANDLE                              hDstDevMemHeap;
++
++}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psDstKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;   
++      PVRSRV_CLIENT_MEM_INFO  sDstClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      PVRSRV_CLIENT_MEM_INFO          sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO         sClientSyncInfo;        
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_SYS_PHYADDR                 *psSysPAddr;
++      IMG_UINT32                              ui32Flags;
++
++}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      PVRSRV_CLIENT_MEM_INFO          sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO         sClientSyncInfo;
++      IMG_UINT32                                      ui32Flags;
++
++}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      IMG_HANDLE              hDeviceClassBuffer;
++
++}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;      
++      IMG_HANDLE                              hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32Value;
++      IMG_UINT32 ui32Mask;
++      IMG_BOOL bLastFrame;
++      IMG_BOOL bOverwrite;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      IMG_BOOL bIsRead;
++      IMG_UINT32 ui32Value;
++      IMG_UINT32 ui32Mask;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_PVOID pvAltLinAddr;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32Bytes;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_PVOID pvAltLinAddr;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32Bytes;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_HWREG sHWReg;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_HWREG sHWReg;
++      IMG_UINT32 ui32Mask;
++      IMG_UINT32 ui32Flags;
++}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_HWREG sHWReg;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hKernelMemInfo;
++      IMG_DEV_PHYADDR         *pPages;
++      IMG_UINT32                      ui32NumPages;
++      IMG_DEV_VIRTADDR        sDevAddr;
++      IMG_UINT32                      ui32Start;
++      IMG_UINT32                      ui32Length;
++      IMG_BOOL                        bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_UINT32 ui32Frame;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
++
++
++ 
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++      IMG_UINT32 ui32FileOffset;
++      IMG_UINT32 ui32Width;
++      IMG_UINT32 ui32Height;
++      IMG_UINT32 ui32StrideInBytes;
++      IMG_DEV_VIRTADDR sDevBaseAddr;
++      IMG_UINT32 ui32Size;
++      PDUMP_PIXEL_FORMAT ePixelFormat;
++      PDUMP_MEM_FORMAT eMemFormat;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++      IMG_UINT32 ui32FileOffset;
++      IMG_UINT32 ui32Address;
++      IMG_UINT32 ui32Size;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_READREG;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++      IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE *hKernelMemInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_UINT32 ui32RegOffset;
++      IMG_BOOL bLastFrame;
++}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_UINT32 ui32NumDevices;
++      PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
++{
++
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDevCookie;
++
++} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_UINT32 ui32NumDevices;
++      IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMCLASS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_UINT32              ui32DeviceID;
++      IMG_HANDLE              hDevCookie;
++      
++}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_HANDLE              hDeviceKM;
++
++}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      IMG_UINT32              ui32ByteSize;
++      IMG_UINT32              ui32PageOffset;
++      IMG_BOOL                bPhysContig;
++      IMG_UINT32                              ui32NumPageTableEntries;
++      IMG_SYS_PHYADDR         *psSysPAddr;
++
++}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
++{
++      PVRSRV_ERROR    eError;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
++
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS                 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS              10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS                       4
++#define PVRSRV_MAX_DC_CLIP_RECTS                              32
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32Count;
++      DISPLAY_FORMAT  asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDeviceKM;
++      DISPLAY_FORMAT  sFormat;
++
++}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32Count;
++      DISPLAY_DIMS    asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
++{
++      PVRSRV_ERROR    eError;
++      DISPLAY_INFO    sDisplayInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_HANDLE              hBuffer;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDeviceKM;
++      IMG_UINT32                              ui32Flags;
++      DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++      DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++      IMG_UINT32                              ui32BufferCount;
++      IMG_UINT32                              ui32OEMFlags;
++      IMG_UINT32                              ui32SwapChainID;
++
++} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hSwapChain;
++      IMG_UINT32                      ui32SwapChainID;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++
++} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++      IMG_RECT                        sRect;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++      IMG_UINT32                      ui32CKColour;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_UINT32                      ui32BufferCount;
++      IMG_HANDLE                      ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++
++} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hBuffer;
++      IMG_UINT32                      ui32SwapInterval;
++      IMG_HANDLE                      hPrivateTag;
++      IMG_UINT32                      ui32ClipRectCount;
++      IMG_RECT                        sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      ui32DeviceID;
++      IMG_HANDLE                      hDevCookie;
++      
++} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDeviceKM;
++
++} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
++{
++      PVRSRV_ERROR            eError;
++      BUFFER_INFO                     sBufferInfo;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_UINT32                      ui32BufferIndex;
++
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hBuffer;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEMHEAPS_TAG
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_HEAP_INFO        sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_GET_DEVMEMHEAPS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDevMemContext;
++      IMG_UINT32                      ui32ClientHeapCount;
++      PVRSRV_HEAP_INFO        sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDevMemHeap;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
++{
++      PVRSRV_ERROR                    eError;
++      IMG_PVOID                               pvLinAddr;
++      IMG_HANDLE                              hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_UINT32 ui32Total;
++      IMG_UINT32 ui32Free;
++      IMG_UINT32 ui32LargestBlock;
++
++} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
++
++
++#include "pvrmmap.h"
++typedef struct PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA_TAG
++{
++      PVRSRV_ERROR            eError;
++    
++    
++      IMG_UINT32                      ui32MMapOffset;
++
++    
++      IMG_UINT32                      ui32ByteOffset;
++
++    
++    IMG_UINT32          ui32RealByteSize;
++
++} PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA;
++ 
++typedef struct PVRSRV_BRIDGE_OUT_POWER_CONTROL_TAG
++{
++      PVRSRV_ERROR    eError;
++      PVR_POWER_STATE ePVRPowerState;
++
++}PVRSRV_BRIDGE_OUT_POWER_CONTROL;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVRSRV_MISC_INFO        sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_MISC_INFO        sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
++
++ 
++
++typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_BOOL bIsCapturing;
++
++} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG 
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_UINT32 ui32Total;
++      IMG_UINT32 ui32Available;
++
++} PVRSRV_BRIDGE_IN_GET_FB_STATS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_SYS_PHYADDR         sSysPhysAddr;
++      IMG_UINT32                      uiSizeInBytes;
++
++} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
++{
++      IMG_PVOID                       pvUserAddr;
++      IMG_UINT32                      uiActualSize;
++      IMG_PVOID                       pvProcess;
++
++} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_PVOID                       pvUserAddr;
++      IMG_PVOID                       pvProcess;
++
++} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
++{
++      IMG_PVOID                       *ppvTbl;
++      IMG_UINT32                      uiTblSize;
++
++} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_PVOID                       pvProcess;
++
++} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
++{
++      IMG_SYS_PHYADDR         sRegsPhysBase;                  
++      IMG_VOID                        *pvRegsBase;                    
++      IMG_PVOID                       pvProcess;
++      IMG_UINT32                      ulNoOfEntries;
++      IMG_PVOID                       pvTblLinAddr;
++
++} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_PVOID                       pvProcess;
++      IMG_VOID                        *pvRegsBase;                    
++
++} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
++
++typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_UINT32                      ui32StatusAndMask;
++      PVRSRV_ERROR            eError;
++
++} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
++
++typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_BOOL                        bInitSuccesful;
++} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++    IMG_UINT32 ui32Flags;
++    IMG_UINT32 ui32Size;
++}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hKernelMemInfo;
++}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
++{
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_POLLFORVALUE_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++    IMG_UINT32 *pui32CpuVAddrKM;
++    IMG_UINT32 ui32Value;
++    IMG_UINT32 ui32Mask;
++    IMG_UINT32 ui32Waitus;
++    IMG_UINT32 ui32Tries;
++}PVRSRV_BRIDGE_IN_POLLFORVALUE;
++
++typedef struct PVRSRV_BRIDGE_OUT_POLLFORVALUE_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_POLLFORVALUE;
++
++typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++    IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
++{
++    IMG_DEV_PHYADDR sPDDevPAddr;
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE      hOSEventKM;
++      IMG_UINT32  ui32MSTimeout;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvr_bridge_km.h git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge_km.h
+--- git/drivers/gpu/pvr/services4/include/pvr_bridge_km.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge_km.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,260 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "pvr_bridge.h"
++
++#if defined(__linux__)
++PVRSRV_ERROR LinuxBridgeInit(IMG_VOID);
++IMG_VOID LinuxBridgeDeInit(IMG_VOID);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++                                                                                                 PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32                        uiDevIndex,
++                                                                                                      PVRSRV_DEVICE_TYPE      eDeviceType,
++                                                                                                      IMG_HANDLE                      *phDevCookie);
++                                                      
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 ui32QueueSize,
++                                                                                                       PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++                                                                                                      PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                               IMG_HANDLE *phDevMemContext,
++                                                                                                               IMG_UINT32 *pui32ClientHeapCount,
++                                                                                                               PVRSRV_HEAP_INFO *psHeapInfo,
++                                                                                                               IMG_BOOL *pbCreated
++#if defined(PVR_SECURE_HANDLES)
++                                                                                                         , IMG_BOOL *pbShared
++#endif
++                                      );
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                                IMG_HANDLE hDevMemContext,
++                                                                                                                IMG_BOOL *pbCreated);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemKM(IMG_HANDLE                   hDevCookie,
++                                                                                               IMG_HANDLE                     hDevMemHeap,
++                                                                                               IMG_UINT32                     ui32Flags,
++                                                                                               IMG_UINT32                     ui32Size,
++                                                                                               IMG_UINT32                     ui32Alignment,
++                                                                                               PVRSRV_KERNEL_MEM_INFO         **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE                    hDevCookie,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  *psMemInfo,
++                                                                                              IMG_BOOL                                bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE                      hDevCookie,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE          hDevMemHeap,
++                                                                                                               IMG_DEV_VIRTADDR       *psDevVAddr,
++                                                                                                               IMG_UINT32                     ui32Size,
++                                                                                                               IMG_UINT32                     ui32Alignment,
++                                                                                                               PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++                                                                                                IMG_HANDLE hDstDevMemHeap,
++                                                                                                PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                      IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE                            hDevCookie,
++                                                                                              IMG_UINT32                              ui32ByteSize, 
++                                                                                              IMG_UINT32                              ui32PageOffset,
++                                                                                              IMG_BOOL                                bPhysContig,
++                                                                                              IMG_SYS_PHYADDR                 *psSysAddr,
++                                                                                              PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++                                                               IMG_UINT32 *pui32DevCount,
++                                                               IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(IMG_UINT32 ui32DeviceID,
++                                                                IMG_HANDLE hDevCookie,
++                                                                IMG_HANDLE *phDeviceKM);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM,
++                                                                 IMG_UINT32 *pui32Count,
++                                                                 DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM,
++                                                              DISPLAY_FORMAT *psFormat,
++                                                              IMG_UINT32 *pui32Count,
++                                                              DISPLAY_DIMS *psDim);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM,
++                                                                         IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM,
++                                                         DISPLAY_INFO *psDisplayInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(IMG_HANDLE                             hDeviceKM,
++                                                                         IMG_UINT32                           ui32Flags,
++                                                                         DISPLAY_SURF_ATTRIBUTES      *psDstSurfAttrib,
++                                                                         DISPLAY_SURF_ATTRIBUTES      *psSrcSurfAttrib,
++                                                                         IMG_UINT32                           ui32BufferCount,
++                                                                         IMG_UINT32                           ui32OEMFlags,
++                                                                         IMG_HANDLE                           *phSwapChain,
++                                                                         IMG_UINT32                           *pui32SwapChainID);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE    hSwapChain,
++                                                                              IMG_BOOL bResManCallback);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT      *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT      *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                         IMG_HANDLE   hSwapChain,
++                                                                         IMG_UINT32   ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                      IMG_HANDLE              hSwapChain,
++                                                                      IMG_UINT32              ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_UINT32    *pui32BufferCount,
++                                                                IMG_HANDLE    *phBuffer);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hBuffer,
++                                                                      IMG_UINT32      ui32SwapInterval,
++                                                                      IMG_HANDLE      hPrivateTag,
++                                                                      IMG_UINT32      ui32ClipRectCount,
++                                                                      IMG_RECT        *psClipRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(IMG_UINT32  ui32DeviceID,
++                                                                IMG_HANDLE    hDevCookie,
++                                                                IMG_HANDLE    *phDeviceKM);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE     hDeviceKM,
++                                                         BUFFER_INFO  *psBufferInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE   hDeviceKM,
++                                                               IMG_UINT32     ui32BufferIndex,
++                                                               IMG_HANDLE     *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(IMG_HANDLE hDeviceClassBuffer,
++                                                                                                         PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++                                                                                                         IMG_HANDLE *phOSMapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                                      IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++                                                                                                 IMG_UINT32 *pui32Total,
++                                                                                                 IMG_UINT32 *pui32Free,
++                                                                                                 IMG_UINT32 *pui32LargestBlock);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE                                    hDevCookie,
++                                                                                              IMG_HANDLE                                      hDevMemContext,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO        *psKernelSyncInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32    *pui32Total,
++                                                              IMG_UINT32      *pui32Available);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(IMG_UINT32 ui32Flags,
++                                                       IMG_UINT32 ui32Size,
++                                                       PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvrmmap.h git-nokia/drivers/gpu/pvr/services4/include/pvrmmap.h
+--- git/drivers/gpu/pvr/services4/include/pvrmmap.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/pvrmmap.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++
++IMG_VOID *PVRMMAPMapKernelPtr(IMG_HANDLE hModule, IMG_VOID *pvKVAddress, IMG_UINT32 ui32Bytes);
++
++
++IMG_BOOL PVRMMAPRemoveMapping(IMG_VOID *pvUserAddress, IMG_UINT32 ui32Bytes);
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/servicesint.h git-nokia/drivers/gpu/pvr/services4/include/servicesint.h
+--- git/drivers/gpu/pvr/services4/include/servicesint.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/servicesint.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,252 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT (500)
++
++#define DRIVERNAME_MAXLENGTH  (100)
++
++#define EVENTOBJNAME_MAXLENGTH (50)
++
++
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++      
++      IMG_CHAR        szName[EVENTOBJNAME_MAXLENGTH];
++      
++      IMG_HANDLE      hOSEventKM;
++} PVRSRV_EVENTOBJECT;
++
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_
++{
++      
++      IMG_PVOID                               pvLinAddrKM;
++      
++      
++      IMG_DEV_VIRTADDR                sDevVAddr;
++                                                                              
++              
++      IMG_UINT32                              ui32Flags;
++                                                                               
++      
++      IMG_UINT32                              ui32AllocSize;          
++
++                                                                                                      
++      PVRSRV_MEMBLK                   sMemBlk;
++      
++      
++      IMG_PVOID                               pvSysBackupBuffer;      
++
++
++      
++      struct _PVRSRV_KERNEL_SYNC_INFO_        *psKernelSyncInfo;
++
++} PVRSRV_KERNEL_MEM_INFO;
++
++
++typedef struct _PVRSRV_KERNEL_SYNC_INFO_
++{
++      
++      PVRSRV_SYNC_DATA                *psSyncData;
++      
++      
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++
++      
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      PVRSRV_KERNEL_MEM_INFO  *psSyncDataMemInfoKM;
++
++} PVRSRV_KERNEL_SYNC_INFO;
++
++
++typedef struct _PVRSRV_SYNC_OBJECT
++{
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++      IMG_UINT32                              ui32WriteOpsPending;
++      IMG_UINT32                              ui32ReadOpsPending;
++
++}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
++
++typedef struct _PVRSRV_COMMAND
++{
++      IMG_UINT32                      ui32CmdSize;            
++      IMG_UINT32                      ui32DevIndex;           
++      IMG_UINT32                      CommandType;            
++      IMG_UINT32                      ui32DstSyncCount;       
++      IMG_UINT32                      ui32SrcSyncCount;       
++      PVRSRV_SYNC_OBJECT      *psDstSync;                     
++      PVRSRV_SYNC_OBJECT      *psSrcSync;                     
++      IMG_UINT32                      ui32DataSize;           
++      IMG_UINT32                      ui32ProcessID;          
++      IMG_VOID                        *pvData;                        
++}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
++
++
++typedef struct _PVRSRV_QUEUE_INFO_
++{
++      IMG_VOID                        *pvLinQueueKM;                  
++      IMG_VOID                        *pvLinQueueUM;                  
++      volatile IMG_UINT32     ui32ReadOffset;                 
++      volatile IMG_UINT32     ui32WriteOffset;                
++      IMG_UINT32                      *pui32KickerAddrKM;             
++      IMG_UINT32                      *pui32KickerAddrUM;             
++      IMG_UINT32                      ui32QueueSize;                  
++
++      IMG_UINT32                      ui32ProcessID;                  
++
++      IMG_HANDLE                      hMemBlock[2];
++
++      struct _PVRSRV_QUEUE_INFO_ *psNextKM;            
++}PVRSRV_QUEUE_INFO;
++
++typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*, 
++                                                                              PVRSRV_COMMAND**,
++                                                                              IMG_UINT32,
++                                                                              IMG_UINT16,
++                                                                              IMG_UINT32,
++                                                                              PVRSRV_KERNEL_SYNC_INFO*[],
++                                                                              IMG_UINT32,
++                                                                              PVRSRV_KERNEL_SYNC_INFO*[],
++                                                                              IMG_UINT32); 
++typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL);
++
++
++typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
++{     
++      PFN_GET_BUFFER_ADDR             pfnGetBufferAddr;
++      IMG_HANDLE                              hDevMemContext;
++      IMG_HANDLE                              hExtDevice;
++      IMG_HANDLE                              hExtBuffer;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++} PVRSRV_DEVICECLASS_BUFFER;
++
++              
++typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
++{
++      IMG_HANDLE hDeviceKM;
++      IMG_HANDLE      hServices;
++} PVRSRV_CLIENT_DEVICECLASS_INFO;
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetWriteOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++      IMG_UINT32 ui32WriteOpsPending;                 
++
++      if(bIsReadOp)
++      {
++              ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++      else
++      {
++              
++
++
++              ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++      }
++
++      return ui32WriteOpsPending;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetReadOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++      IMG_UINT32 ui32ReadOpsPending;                  
++
++      if(bIsReadOp)
++      {
++              ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++      }
++      else
++      {
++              ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
++
++      return ui32ReadOpsPending;
++}
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo, 
++                                                              PVRSRV_COMMAND *psCommand);
++
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVPollForValue(const PVRSRV_CONNECTION *psConnection,
++                   IMG_UINT32* pui32LinMemAddr,
++                   IMG_UINT32 ui32Value,
++                   IMG_UINT32 ui32Mask,
++                   IMG_UINT32 ui32Waitus,
++                   IMG_UINT32 ui32Tries);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++                              IMG_HANDLE hDevMemContext,
++                              IMG_DEV_PHYADDR *sPDDevPAddr);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++                                              IMG_UINT32 ui32Flags,
++                                              IMG_UINT32 ui32Size,
++                                              PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++                                         PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++                        PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
++                    IMG_HANDLE hKernelMemInfo,
++                    PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++
++#if defined (__cplusplus)
++}
++#endif
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge.h git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,357 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO                       PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO           PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO  PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK                              PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR             PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD           PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++#define PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND             PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+6)
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_2DQUEUEBLT            PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+7)
++#if defined(SGX2D_DIRECT_BLITS)
++#define PVRSRV_BRIDGE_SGX_2DDIRECTBLT           PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+8)
++#endif
++#endif 
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE   PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++#endif 
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR                PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)  
++
++#if defined(TRANSFER_QUEUE)
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER                      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#endif
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO                         PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT                     PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2                                PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC                    PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC                   PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC                     PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT  PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT        PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevMemHeap;
++      IMG_DEV_VIRTADDR sDevVAddr;
++}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
++{
++      PVRSRV_ERROR            eError;
++      IMG_DEV_PHYADDR         DevPAddr;
++      IMG_CPU_PHYADDR         CpuPAddr;
++}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_HANDLE                              hDevMemContext;
++}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
++{
++      IMG_DEV_PHYADDR                 sPDDevPAddr;
++      PVRSRV_ERROR                    eError;
++}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      IMG_HANDLE                                      hDevCookie;
++}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
++{
++      PVR3DIF4_INTERNAL_DEVINFO       sSGXInternalDevInfo;
++      PVRSRV_ERROR                            eError;
++}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
++{
++      PVR3DIF4_CLIENT_INFO    sClientInfo;
++      PVRSRV_ERROR                    eError;
++}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVR3DIF4_CLIENT_INFO    sClientInfo;
++}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVR3DIF4_CCB_KICK               sCCBKick;
++}PVRSRV_BRIDGE_IN_DOKICK;
++
++#if defined(TRANSFER_QUEUE)
++ 
++typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_DEV_VIRTADDR                sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++#endif
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++    IMG_PCHAR                         pszKey;
++    IMG_PCHAR                         pszValue;
++}PVRSRV_BRIDGE_IN_READREGDWORD;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32Data;
++}PVRSRV_BRIDGE_OUT_READREGDWORD;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SCHEDULECOMMAND_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVRSRV_SGX_COMMAND_TYPE eCommandType;
++      PVRSRV_SGX_COMMAND              *psCommandData;
++
++}PVRSRV_BRIDGE_IN_SCHEDULECOMMAND;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      SGX_MISC_INFO   *psMiscInfo;
++}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
++{
++      PVRSRV_ERROR                    eError;
++      SGX_BRIDGE_INFO_FOR_SRVINIT     sInitInfo;
++}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      SGX_BRIDGE_INIT_INFO    sInitInfo;
++}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ 
++typedef struct PVRSRV_BRIDGE_IN_2DQUEUEBLT_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_HANDLE                              hKernDstSync;
++      IMG_UINT32                              ui32NumSrcSyncs;
++      IMG_HANDLE                              ahKernSrcSync[PVRSRV_MAX_BLT_SRC_SYNCS];
++      IMG_UINT32                              ui32DataByteSize;
++      IMG_UINT32                              *pui32BltData;
++}PVRSRV_BRIDGE_IN_2DQUEUEBLT;
++
++#if defined(SGX2D_DIRECT_BLITS)
++ 
++typedef struct PVRSRV_BRIDGE_IN_2DDIRECTBLT_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_UINT32                              ui32DataByteSize;
++      IMG_UINT32                              *pui32BltData;
++}PVRSRV_BRIDGE_IN_2DDIRECTBLT;
++
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++ 
++typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_HANDLE                              hKernSyncInfo;
++      IMG_BOOL                                bWaitForComplete;
++}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
++#endif 
++
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++    IMG_UINT32 ui32TotalPBSize;
++}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
++{
++      IMG_HANDLE hKernelMemInfo;
++      IMG_HANDLE hSharedPBDesc;
++      IMG_HANDLE hSharedPBDescKernelMemInfoHandle;
++      IMG_HANDLE hHWPBDescKernelMemInfoHandle;
++      IMG_HANDLE hBlockKernelMemInfoHandle;
++      IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++      IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hSharedPBDescKernelMemInfo;
++      IMG_HANDLE hHWPBDescKernelMemInfo;
++      IMG_HANDLE hBlockKernelMemInfo;
++      IMG_UINT32 ui32TotalPBSize;
++      IMG_HANDLE *phKernelMemInfoHandles;
++      IMG_UINT32 ui32KernelMemInfoHandlesCount;
++}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
++
++
++#ifdef        PDUMP
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVR3DIF4_KICKTA_DUMP_BUFFER *psBufferArray;
++      IMG_UINT32 ui32BufferArrayLength;
++      IMG_BOOL bDumpPolls;
++} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define       SGX2D_MAX_BLT_CMD_SIZ           256     
++#endif 
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge_km.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge_km.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
++                                                               IMG_DEV_VIRTADDR sHWRenderContextDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++                                               PVR3DIF4_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++                                                                IMG_DEV_VIRTADDR sDevVAddr,
++                                                                IMG_DEV_PHYADDR *pDevPAddr,
++                                                                IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE                hDevCookie,
++                                                                                      IMG_HANDLE              hDevMemContext,
++                                                                                      IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                            hDevCookie,
++                                                              PVR3DIF4_CLIENT_INFO*   psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        SGX_MISC_INFO                 *psMiscInfo);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
++                                                        IMG_UINT32            ui32NumSrcSyncs,
++                                                        PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                        IMG_UINT32            ui32DataByteSize,
++                                                        IMG_UINT32            *pui32BltData);
++
++#if defined(SGX2D_DIRECT_BLITS)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                         IMG_UINT32                   ui32DataByteSize,
++                                                         IMG_UINT32                   *pui32BltData);
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO             *psDevInfo,
++                                                                         PVRSRV_KERNEL_SYNC_INFO      *psSyncInfo,
++                                                                         IMG_BOOL bWaitForComplete);
++#endif 
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++                                                                      SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                                         IMG_HANDLE hDevHandle,
++                                                         SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                        IMG_UINT32 ui32TotalPBSize,
++                                        IMG_HANDLE *phSharedPBDesc,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++                                       IMG_UINT32 ui32TotalPBSize,
++                                       IMG_HANDLE *phSharedPBDesc,
++                                       PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++                                              PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define       SGX2D_MAX_BLT_CMD_SIZ           256     
++#endif 
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgxinfo.h git-nokia/drivers/gpu/pvr/services4/include/sgxinfo.h
+--- git/drivers/gpu/pvr/services4/include/sgxinfo.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/sgxinfo.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,375 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++
++#include "servicesint.h"
++
++#include "services.h"
++#include "sgxapi_km.h"
++
++#define SGX_MAX_DEV_DATA              24
++#define       SGX_MAX_INIT_MEM_HANDLES        16
++
++typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
++{
++      IMG_DEV_PHYADDR sPDDevPAddr;
++      PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++      
++      IMG_UINT32 ui32uKernelTimerClock;
++#if defined(SUPPORT_HW_RECOVERY)
++      IMG_UINT32 ui32HWRecoverySampleRate;
++#endif 
++      IMG_UINT32 ui32ActivePowManSampleRate;
++} SGX_BRIDGE_INFO_FOR_SRVINIT;
++
++typedef struct _SGX_BRIDGE_INIT_INFO_ {
++      IMG_HANDLE      hKernelCCBMemInfo;
++      IMG_HANDLE      hKernelCCBCtlMemInfo;
++      IMG_HANDLE      hKernelCCBEventKickerMemInfo;
++      IMG_HANDLE      hKernelSGXHostCtlMemInfo;
++      IMG_UINT32      ui32TAKickAddress;
++      IMG_UINT32      ui32VideoHandlerAddress;
++#if defined(SGX_SUPPORT_HWPROFILING)
++      IMG_HANDLE      hKernelHWProfilingMemInfo;
++#endif
++
++      IMG_UINT32 ui32EDMTaskReg0;
++      IMG_UINT32 ui32EDMTaskReg1;
++
++      IMG_UINT32 ui32ClockGateMask;
++
++      IMG_UINT32 ui32CacheControl;
++
++      IMG_UINT32      asInitDevData[SGX_MAX_DEV_DATA];        
++      IMG_HANDLE      asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++      SGX_INIT_SCRIPTS sScripts;
++
++} SGX_BRIDGE_INIT_INFO;
++
++typedef struct _PVRSRV_SGX_COMMAND_
++{
++      IMG_UINT32                              ui32ServiceAddress;             
++      IMG_UINT32                              ui32Data[7];                    
++} PVRSRV_SGX_COMMAND;
++
++
++typedef struct _PVRSRV_SGX_KERNEL_CCB_
++{
++      PVRSRV_SGX_COMMAND              asCommands[256];                
++} PVRSRV_SGX_KERNEL_CCB;
++
++
++typedef struct _PVRSRV_SGX_CCB_CTL_
++{
++      IMG_UINT32                              ui32WriteOffset;                
++      IMG_UINT32                              ui32ReadOffset;                 
++} PVRSRV_SGX_CCB_CTL;
++
++
++#define SGX_AUXCCBFLAGS_SHARED                                        0x00000001
++typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
++
++typedef enum _PVRSRV_SGX_COMMAND_TYPE_
++{
++      PVRSRV_SGX_COMMAND_EDM_KICK             = 0,
++      PVRSRV_SGX_COMMAND_VIDEO_KICK   = 1,
++
++      PVRSRV_SGX_COMMAND_FORCE_I32    = 0xFFFFFFFF,
++
++}PVRSRV_SGX_COMMAND_TYPE;
++
++#define               SGX_HOSTPORT_PRESENT                    0x00000001
++
++#define PVRSRV_CCBFLAGS_RASTERCMD                     0x1
++#define PVRSRV_CCBFLAGS_TRANSFERCMD                   0x2
++#define PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD     0x3
++
++#define PVRSRV_KICKFLAG_RENDER                                0x1
++#define PVRSRV_KICKFLAG_PIXEL                         0x2
++
++
++#define       SGX_BIF_INVALIDATE_PTCACHE      0x1
++#define       SGX_BIF_INVALIDATE_PDCACHE      0x2
++
++typedef struct _PVR3DIF4_CCB_KICK_
++{
++      IMG_BOOL                        bKickRender;
++      PVRSRV_SGX_COMMAND_TYPE         eCommand;
++      PVRSRV_SGX_COMMAND              sCommand;
++      IMG_HANDLE                      hCCBKernelMemInfo;
++      IMG_HANDLE                      hDstKernelSyncInfo;
++      IMG_UINT32                      ui32DstReadOpsPendingOffset;
++      IMG_UINT32                      ui32DstWriteOpsPendingOffset;
++      IMG_UINT32      ui32NumTAStatusVals;
++      IMG_UINT32      aui32TAStatusValueOffset[SGX_MAX_TA_STATUS_VALS];
++      IMG_HANDLE      ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++
++      IMG_UINT32      ui32Num3DStatusVals;
++      IMG_UINT32      aui323DStatusValueOffset[SGX_MAX_3D_STATUS_VALS];
++      IMG_HANDLE      ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++#ifdef        NO_HARDWARE
++      IMG_BOOL        bTerminate;
++      IMG_HANDLE      hUpdateDstKernelSyncInfo;
++      IMG_UINT32      ui32WriteOpsPendingVal;
++#endif
++      IMG_UINT32                                      ui32KickFlags;
++} PVR3DIF4_CCB_KICK;
++
++
++typedef struct _PVRSRV_SGX_HOST_CTL_
++{     
++
++      volatile IMG_UINT32             ui32PowManFlags; 
++#if defined(SUPPORT_HW_RECOVERY)
++      IMG_UINT32                              ui32uKernelDetectedLockups;              
++      IMG_UINT32                              ui32HostDetectedLockups;                
++#endif
++      IMG_UINT32                              ui32InterruptFlags; 
++      IMG_UINT32                              ui32InterruptClearFlags; 
++
++      IMG_UINT32                              ui32ResManFlags;                
++      IMG_DEV_VIRTADDR                sResManCleanupData;             
++
++      IMG_DEV_VIRTADDR                sTAHWPBDesc;            
++      IMG_DEV_VIRTADDR                s3DHWPBDesc;
++
++} PVRSRV_SGX_HOST_CTL;
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++typedef struct _SGX_INIT_SCRIPT_DATA
++{
++      IMG_UINT32 asHWRecoveryData[SGX_MAX_DEV_DATA];
++} SGX_INIT_SCRIPT_DATA;
++#endif
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++      PVRSRV_DEVICE_TYPE              eDeviceType;
++      PVRSRV_DEVICE_CLASS             eDeviceClass;
++
++      IMG_UINT8                               ui8VersionMajor;
++      IMG_UINT8                               ui8VersionMinor;
++      IMG_UINT32                              ui32CoreConfig;
++      IMG_UINT32                              ui32CoreFlags;
++
++      
++      IMG_PVOID                               pvRegsBaseKM;
++      
++
++      
++      IMG_HANDLE                              hRegMapping;
++
++      
++      IMG_SYS_PHYADDR                 sRegsPhysBase;
++      
++      IMG_UINT32                              ui32RegSize;
++
++      
++      IMG_UINT32                              ui32CoreClockSpeed;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      
++      SGX_SLAVE_PORT                  s2DSlavePortKM;
++
++      
++      PVRSRV_RESOURCE                 s2DSlaveportResource;
++
++      
++      IMG_UINT32                      ui322DFifoSize;
++      IMG_UINT32                      ui322DFifoOffset;
++      
++      IMG_HANDLE                      h2DCmdCookie;
++      
++      IMG_HANDLE                      h2DQueue;
++      IMG_BOOL                        b2DHWRecoveryInProgress;
++      IMG_BOOL                        b2DHWRecoveryEndPending;
++      IMG_UINT32                      ui322DCompletedBlits;
++      IMG_BOOL                        b2DLockupSuspected;
++#endif
++      
++    
++      IMG_VOID                        *psStubPBDescListKM;
++
++
++      
++      IMG_DEV_PHYADDR                 sKernelPDDevPAddr;
++
++      IMG_VOID                                *pvDeviceMemoryHeap;
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;                     
++      PVRSRV_SGX_KERNEL_CCB   *psKernelCCB;                   
++      PPVRSRV_SGX_CCB_INFO    psKernelCCBInfo;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;  
++      PVRSRV_SGX_CCB_CTL              *psKernelCCBCtl;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; 
++      IMG_UINT32                              *pui32KernelCCBEventKicker; 
++      IMG_UINT32                              ui32TAKickAddress;              
++      IMG_UINT32                              ui32TexLoadKickAddress; 
++      IMG_UINT32                              ui32VideoHandlerAddress;
++#if defined(SGX_SUPPORT_HWPROFILING)
++      PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++
++      
++      IMG_UINT32                              ui32ClientRefCount;
++
++      
++      IMG_UINT32                              ui32CacheControl;
++
++      
++
++
++      IMG_VOID                                *pvMMUContextList;
++
++      
++      IMG_BOOL                                bForcePTOff;
++
++      IMG_UINT32                              ui32EDMTaskReg0;
++      IMG_UINT32                              ui32EDMTaskReg1;
++
++      IMG_UINT32                              ui32ClockGateMask;
++      SGX_INIT_SCRIPTS                sScripts;
++#if defined(SUPPORT_HW_RECOVERY)
++      SGX_INIT_SCRIPT_DATA    sScriptData;
++#endif
++              
++      IMG_HANDLE                              hBIFResetPDOSMemHandle;
++      IMG_DEV_PHYADDR                 sBIFResetPDDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPTDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPageDevPAddr;
++      IMG_UINT32                              *pui32BIFResetPD;
++      IMG_UINT32                              *pui32BIFResetPT;
++
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++      
++      IMG_HANDLE                              hTimer;
++      
++      IMG_UINT32                              ui32TimeStamp;
++#endif
++
++      
++      IMG_UINT32                              ui32NumResets;
++
++      PVRSRV_KERNEL_MEM_INFO                  *psKernelSGXHostCtlMemInfo;
++      PVRSRV_SGX_HOST_CTL                             *psSGXHostCtl; 
++
++      IMG_UINT32                              ui32Flags;
++
++      
++      IMG_UINT32                              ui32RegFlags;
++
++      #if defined(PDUMP)
++      PVRSRV_SGX_PDUMP_CONTEXT        sPDContext;
++      #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      IMG_VOID                                *pvDummyPTPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyPTDevPAddr;
++      IMG_HANDLE                              hDummyPTPageOSMemHandle;
++      IMG_VOID                                *pvDummyDataPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyDataDevPAddr;
++      IMG_HANDLE                              hDummyDataPageOSMemHandle;
++#endif
++
++      IMG_UINT32                              asSGXDevData[SGX_MAX_DEV_DATA]; 
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      PVRSRV_EVENTOBJECT      *psSGXEventObject;
++#endif
++
++} PVRSRV_SGXDEV_INFO;
++
++typedef struct _PVR3DIF4_CLIENT_INFO_
++{
++      IMG_VOID                                        *pvRegsBase;                    
++      IMG_HANDLE                                      hBlockMapping;                  
++      SGX_SLAVE_PORT                          s2DSlavePort;                   
++      IMG_UINT32                                      ui32ProcessID;                  
++      IMG_VOID                                        *pvProcess;                             
++      PVRSRV_MISC_INFO                        sMiscInfo;                              
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      IMG_HANDLE                                      hOSEventKM;             
++#endif
++
++      IMG_UINT32                                      asDevData[SGX_MAX_DEV_DATA];
++
++} PVR3DIF4_CLIENT_INFO;
++
++typedef struct _PVR3DIF4_INTERNAL_DEVINFO_
++{
++      IMG_UINT32                      ui32Flags;
++      IMG_BOOL                        bTimerEnable;
++      IMG_HANDLE                      hCtlKernelMemInfoHandle;
++      IMG_BOOL                        bForcePTOff;
++      IMG_UINT32                      ui32RegFlags;
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      IMG_HANDLE                      hOSEvent;               
++#endif
++} PVR3DIF4_INTERNAL_DEVINFO;
++
++typedef struct _PVRSRV_SGX_SHARED_CCB_
++{
++      PVRSRV_CLIENT_MEM_INFO  *psCCBClientMemInfo;    
++      PVRSRV_CLIENT_MEM_INFO  *psCCBCtlClientMemInfo; 
++      IMG_UINT32                              *pui32CCBLinAddr;               
++      IMG_DEV_VIRTADDR                sCCBDevAddr;                    
++      IMG_UINT32                              *pui32WriteOffset;      
++      volatile IMG_UINT32             *pui32ReadOffset;               
++      IMG_UINT32                              ui32Size;                               
++      IMG_UINT32                              ui32AllocGran;                  
++
++      #ifdef PDUMP
++      IMG_UINT32                              ui32CCBDumpWOff;                
++      #endif
++}PVRSRV_SGX_SHARED_CCB;
++
++typedef struct _PVRSRV_SGX_CCB_
++{
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo;                  
++      PVRSRV_KERNEL_MEM_INFO  *psCCBCtlMemInfo;               
++      IMG_PUINT32                             pui32CCBLinAddr;                
++      IMG_DEV_VIRTADDR                sCCBDevAddr;                    
++      IMG_UINT32                              *pui32WriteOffset;              
++      volatile IMG_UINT32             *pui32ReadOffset;               
++      IMG_UINT32                              ui32Size;                               
++      IMG_UINT32                              ui32AllocGran;                  
++      
++      #ifdef PDUMP
++      IMG_UINT32                              ui32CCBDumpWOff;                
++      #endif
++}PVRSRV_SGX_CCB;
++
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+--- git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,4173 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "sgx_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++#include "mmu.h"
++
++#include "bridged_pvr_bridge.h"
++#include "env_data.h"
++
++
++#if defined (__linux__)
++#include "mmap.h"
++#else
++#define PVRMMapKVIndexAddressToMMapData(A,B,C,D,E) PVRSRV_OK
++#endif
++
++#ifndef EFAULT
++#define EFAULT        14
++#endif
++#ifndef ENOTTY
++#define ENOTTY        25
++#endif
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
++#endif
++
++static IMG_BOOL gbInitServerRunning = IMG_FALSE;
++static IMG_BOOL gbInitServerRan = IMG_FALSE;
++static IMG_BOOL gbInitServerSuccessful = IMG_FALSE;
++
++PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_UINT32 aui322DBltData[SGX2D_MAX_BLT_CMD_SIZ];
++#endif
++
++#if defined(PVR_SECURE_HANDLES)
++static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++static PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, 
++                                      IMG_UINT32 ui32BridgeID,
++                                      IMG_VOID *pvDest,
++                                      IMG_VOID *pvSrc,
++                                      IMG_UINT32 ui32Size)
++{
++      g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
++      g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
++      return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++static PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, 
++                                IMG_UINT32 ui32BridgeID,
++                                IMG_VOID *pvDest,
++                                IMG_VOID *pvSrc,
++                                IMG_UINT32 ui32Size)
++{
++      g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
++      g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
++      return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++      OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++      OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++
++static int
++PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
++                                               IMG_VOID *psBridgeIn,
++                                               PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
++                                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      psEnumDeviceOUT->eError =
++              PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++                                                               psEnumDeviceOUT->asDeviceIdentifier);
++
++      return 0;
++}
++
++static int
++PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
++                                                PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++      psAcquireDevInfoOUT->eError =
++              PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex, 
++                                                                psAcquireDevInfoIN->eDeviceType, 
++                                                                &hDevCookieInt);
++      if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psAcquireDevInfoOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psAcquireDevInfoOUT->hDevCookie,
++                                                hDevCookieInt,
++                                                PVRSRV_HANDLE_TYPE_DEV_NODE,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper (BW) for PVRSRV_BRIDGE_GET_DEVMEMHEAPS.
++ * Resolves the caller's device-node handle, queries the device memory
++ * heaps via PVRSRVGetDeviceMemHeapsKM, then replaces each valid per-heap
++ * kernel handle with a per-process SHARED handle for the client.
++ * Always returns 0 (bridge convention): the per-call status is carried
++ * in psGetDevMemHeapsOUT->eError.
++ */
++static int
++PVRSRVGetDeviceMemHeapsBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_GET_DEVMEMHEAPS *psGetDevMemHeapsIN,
++                                                PVRSRV_BRIDGE_OUT_GET_DEVMEMHEAPS *psGetDevMemHeapsOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEMHEAPS);
++
++      /* Validate the client-supplied device cookie against the per-process
++         handle base before touching the device. */
++      psGetDevMemHeapsOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psGetDevMemHeapsIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psGetDevMemHeapsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDevMemHeapsOUT->eError =
++              PVRSRVGetDeviceMemHeapsKM(hDevCookieInt,
++                                                                &psGetDevMemHeapsOUT->sHeapInfo[0]);
++
++      if(psGetDevMemHeapsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Wrap each defined heap's kernel handle so the client never sees a
++         raw kernel pointer; slots marked SGX_UNDEFINED_HEAP_ID are skipped. */
++      for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++      {
++              if(psGetDevMemHeapsOUT->sHeapInfo[i].ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++              {
++                      IMG_HANDLE hDevMemHeapExt;
++
++                      if(psGetDevMemHeapsOUT->sHeapInfo[i].hDevMemHeap != IMG_NULL)
++                      {
++                              /* SHARED: the same heap handle may be handed out to
++                                 multiple lookups within this process. */
++                              psGetDevMemHeapsOUT->eError =
++                                      PVRSRVAllocHandle(psPerProc->psHandleBase, &hDevMemHeapExt, 
++                                                                        psGetDevMemHeapsOUT->sHeapInfo[i].hDevMemHeap, 
++                                                                        PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                        PVRSRV_HANDLE_ALLOC_FLAG_SHARED); 
++                              if(psGetDevMemHeapsOUT->eError != PVRSRV_OK)
++                              {
++                                      return 0;
++                              }
++                              psGetDevMemHeapsOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT.
++ * Restricted to the services-init process (psPerProc->bInitProcess);
++ * other callers get PVRSRV_ERROR_GENERIC. Fetches SGX init info via
++ * SGXGetInfoForSrvinitKM and externalizes each defined heap handle as a
++ * per-process SHARED handle. Always returns 0; status is in
++ * psSGXInfoForSrvinitOUT->eError.
++ */
++static int
++SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++                                         PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_UINT32 i;
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++      /* Privileged call: only the init process may request srvinit info. */
++      if(!psPerProc->bInitProcess)
++      {
++              psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXInfoForSrvinitOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psSGXInfoForSrvinitIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXInfoForSrvinitOUT->eError =
++              SGXGetInfoForSrvinitKM(hDevCookieInt,
++                                                         &psSGXInfoForSrvinitOUT->sInitInfo);
++
++      if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Replace kernel heap handles with client-visible SHARED handles,
++         skipping undefined heap slots (same pattern as GetDeviceMemHeapsBW). */
++      for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++      {
++              PVRSRV_HEAP_INFO *psHeapInfo;
++
++              psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++              if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++              {
++                      IMG_HANDLE hDevMemHeapExt;
++
++                      if (psHeapInfo->hDevMemHeap != IMG_NULL)
++                      {
++                              
++                              psSGXInfoForSrvinitOUT->eError =
++                                      PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                                        &hDevMemHeapExt,
++                                                                        psHeapInfo->hDevMemHeap,
++                                                                        PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                        PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++                              if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++                              {
++                                      return 0;
++                              }
++                              psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT.
++ * Creates (or finds, if one already exists for this process — bCreated
++ * is IMG_FALSE) a device memory context, then externalizes the context
++ * handle and every per-heap handle for the client.
++ *
++ * NOTE(review): under PVR_SECURE_HANDLES this body references
++ * abSharedDeviceMemHeap, which is not declared anywhere in this visible
++ * span — presumably declared as a local array in the upstream driver;
++ * verify this compiles with PVR_SECURE_HANDLES defined.
++ *
++ * Always returns 0; status is in psCreateDevMemContextOUT->eError.
++ */
++static int
++PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
++                                                         PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDevMemContextInt;
++      IMG_UINT32 i;
++      IMG_BOOL bCreated;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++      psCreateDevMemContextOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psCreateDevMemContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psCreateDevMemContextOUT->eError = 
++              PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
++                                                                         &hDevMemContextInt,
++                                                                         &psCreateDevMemContextOUT->ui32ClientHeapCount,
++                                                                         &psCreateDevMemContextOUT->sHeapInfo[0],
++                                                                         &bCreated
++#if defined(PVR_SECURE_HANDLES)
++                                                                         , abSharedDeviceMemHeap
++#endif
++                                                                        );
++
++      if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* A freshly-created context gets a new handle; an existing one is
++         looked up so the client receives the same external handle. */
++      if(bCreated)
++      {
++              psCreateDevMemContextOUT->eError =
++                      PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                        &psCreateDevMemContextOUT->hDevMemContext,
++                                                        hDevMemContextInt,
++                                                        PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      }
++      else
++      {
++              psCreateDevMemContextOUT->eError =
++                      PVRSRVFindHandle(psPerProc->psHandleBase,
++                                                       &psCreateDevMemContextOUT->hDevMemContext,
++                                                       hDevMemContextInt,
++                                                       PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++      }
++
++      if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Externalize each heap handle. With secure handles, shared heaps get
++         top-level SHARED handles while per-context heaps become sub-handles
++         of the context (alloc on create, find on reuse). */
++      for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
++      {
++              IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++              if(abSharedDeviceMemHeap[i])
++#endif
++              {
++                      
++                      psCreateDevMemContextOUT->eError =
++                              PVRSRVAllocHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++                                                                psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++                                                                PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                PVRSRV_HANDLE_ALLOC_FLAG_SHARED); 
++              }
++#if defined(PVR_SECURE_HANDLES)
++              else
++              {
++                      
++                      if(bCreated)
++                      {
++                              psCreateDevMemContextOUT->eError =
++                                      PVRSRVAllocSubHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++                                                                               psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++                                                                               PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                               PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                                               psCreateDevMemContextOUT->hDevMemContext);
++                      }
++                      else
++                      {
++                              psCreateDevMemContextOUT->eError =
++                                      PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++                                                                       psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++                                                                       PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++                      }
++              }
++#endif
++              if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++
++              psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++      }
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT.
++ * Looks up both the device cookie and the context handles, asks the KM
++ * layer to destroy the context, and releases the client's context handle
++ * only when the KM layer reports the context was actually destroyed
++ * (bDestroyed — the context may be reference-counted across callers).
++ * Always returns 0; status is in psRetOUT->eError.
++ */
++static int
++PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++                                                              PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
++                                                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDevMemContextInt;
++      IMG_BOOL bDestroyed;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psDestroyDevMemContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++                                                 psDestroyDevMemContextIN->hDevMemContext,
++                                                 PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Only drop the external handle if the underlying context is gone;
++         otherwise other users of the shared context still need it. */
++      if(bDestroyed)
++      {
++              psRetOUT->eError =
++                      PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psDestroyDevMemContextIN->hDevMemContext,
++                                                              PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++      }
++
++      return 0;
++}
++
++
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_ALLOC_DEVICEMEM.
++ * Resolves device and heap handles, allocates device memory via
++ * PVRSRVAllocDeviceMemKM, and builds the client-visible mem-info:
++ * an external handle for the kernel mem-info plus, unless the caller
++ * passed PVRSRV_MEM_NO_SYNCOBJ, a sub-handle for the allocation's sync
++ * object. Always returns 0; status is in psAllocDeviceMemOUT->eError.
++ */
++static int
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++                                         PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDevMemHeapInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++      psAllocDeviceMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psAllocDeviceMemIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psAllocDeviceMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++                                                 psAllocDeviceMemIN->hDevMemHeap,
++                                                 PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++      if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psAllocDeviceMemOUT->eError = 
++              PVRSRVAllocDeviceMemKM(hDevCookieInt,
++                                                         hDevMemHeapInt,
++                                                         psAllocDeviceMemIN->ui32Attribs,
++                                                         psAllocDeviceMemIN->ui32Size,
++                                                         psAllocDeviceMemIN->ui32Alignment,
++                                                         &psMemInfo);
++
++      if(psAllocDeviceMemOUT->eError == PVRSRV_OK)
++      {
++              OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
++                               0,
++                               sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++              /* If there is no kernel linear address, hand back the OS memory
++                 handle in its place (used by the client mmap path). */
++              if(psMemInfo->pvLinAddrKM)
++              {
++                      psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++                              psMemInfo->pvLinAddrKM;
++              }
++              else
++              {
++                      psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++                              psMemInfo->sMemBlk.hOSMemHandle;
++              }
++#if defined (__linux__)
++              /* On Linux the client maps the memory itself; no user-mode
++                 linear address is returned from the kernel. */
++              psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
++#else
++              psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
++#endif
++              psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++              psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++              psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++              psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++
++              psAllocDeviceMemOUT->eError =
++                      PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                        &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                        psMemInfo,
++                                                        PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++              if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++
++              if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
++              {
++                      /* Caller asked for no sync object: return zeroed/NULL sync
++                         info so the client knows none exists. */
++                      OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
++                                       0,
++                                       sizeof (PVRSRV_CLIENT_SYNC_INFO));
++                      psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
++                      psAllocDeviceMemOUT->psKernelSyncInfo = IMG_NULL;
++              }
++              else
++              {
++                      /* Copy out the sync data pointers and wrap the kernel sync
++                         info as a sub-handle of the mem-info handle, so it is
++                         released together with its parent. */
++                      psAllocDeviceMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++                      psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++                              psMemInfo->psKernelSyncInfo->psSyncData;
++                      psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++                      psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++                      psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++                      psAllocDeviceMemOUT->eError =
++                              PVRSRVAllocSubHandle(psPerProc->psHandleBase, 
++                                                                       &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo, 
++                                                                       psMemInfo->psKernelSyncInfo, 
++                                                                       PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                                       PVRSRV_HANDLE_ALLOC_FLAG_NONE, 
++                                                                       psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
++                      if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++                      {
++                              return 0;
++                      }
++
++                      psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = 
++                              &psAllocDeviceMemOUT->sClientSyncInfo;
++
++              }
++      }
++
++      return 0;
++}
++
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_FREE_DEVICEMEM.
++ * Resolves the device cookie and mem-info handles, frees the memory via
++ * PVRSRVFreeDeviceMemKM (IMG_FALSE: not a from-resman/cleanup call),
++ * then releases the client's mem-info handle. Always returns 0; status
++ * is in psRetOUT->eError.
++ */
++static int
++PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
++                                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_VOID *pvKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psFreeDeviceMemIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++                                                 psFreeDeviceMemIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* NOTE(review): the IMG_FALSE argument presumably distinguishes a
++         direct free from resource-manager cleanup — confirm against the KM
++         prototype. */
++      psRetOUT->eError =
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psFreeDeviceMemIN->psKernelMemInfo,
++                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY.
++ * Maps a device-class (display/buffer-class) buffer into the caller's
++ * device memory space. The buffer handle is looked up with "any type"
++ * and then validated against the allowed handle types; anything else is
++ * rejected with PVRSRV_ERROR_GENERIC. On success, builds client mem-info
++ * and (if present) sync-info, wrapped as sub-handles of the class-buffer
++ * handle. Always returns 0; status is in psMapDevClassMemOUT->eError.
++ */
++static int
++PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
++                                                       PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      IMG_HANDLE hOSMapInfo;
++      IMG_HANDLE hDeviceClassBufferInt;
++      PVRSRV_HANDLE_TYPE eHandleType;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++      /* The buffer may be of more than one handle type, so look it up
++         without a type constraint and check the type afterwards. */
++      psMapDevClassMemOUT->eError =
++              PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, &hDeviceClassBufferInt,
++                                                                &eHandleType,
++                                                                psMapDevClassMemIN->hDeviceClassBuffer);
++
++      if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Only display-class/buffer-class buffers (or untyped handles when
++         secure handles are disabled) may be mapped. */
++      switch(eHandleType)
++      {
++#if defined(PVR_SECURE_HANDLES)
++              case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++              case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++#else
++              case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++                      break;
++              default:
++                      psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++                      return 0;
++      }
++
++      psMapDevClassMemOUT->eError = 
++              PVRSRVMapDeviceClassMemoryKM(hDeviceClassBufferInt,
++                                                                       &psMemInfo,
++                                                                       &hOSMapInfo);
++
++      if(psMapDevClassMemOUT->eError == PVRSRV_OK)
++      {
++              OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
++                               0,
++                               sizeof(psMapDevClassMemOUT->sClientMemInfo));
++              OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
++                               0,
++                               sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++              /* No kernel linear address: fall back to the OS memory handle. */
++              if(psMemInfo->pvLinAddrKM)
++              {
++                      psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM = 
++                              psMemInfo->pvLinAddrKM;
++              }
++              else
++              {
++                      psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++                              psMemInfo->sMemBlk.hOSMemHandle;
++              }
++              psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
++              psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++              psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++              psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++              psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++
++              /* Mem-info handle is a sub-handle of the class buffer so it
++                 cannot outlive the buffer it maps. */
++              psMapDevClassMemOUT->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                        &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                        psMemInfo,
++                                                        PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                        psMapDevClassMemIN->hDeviceClassBuffer);
++              if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++
++              psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++              psMapDevClassMemOUT->psKernelSyncInfo = IMG_NULL;
++
++              /* Sync info is optional for device-class buffers (unlike the
++                 alloc path, this is checked before dereferencing). */
++              if(psMemInfo->psKernelSyncInfo)
++              {
++                      psMapDevClassMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++                      psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++                              psMemInfo->psKernelSyncInfo->psSyncData;
++                      psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++                      psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++                      psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++                      psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
++                      
++                      psMapDevClassMemOUT->eError =
++                              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                                &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
++                                                                psMemInfo->psKernelSyncInfo,
++                                                                PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                                psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
++                      if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++                      {
++                              return 0;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY.
++ * Looks up the mem-info handle, unmaps the device-class memory via
++ * PVRSRVUnmapDeviceClassMemoryKM (IMG_FALSE: direct call, not cleanup),
++ * then releases the mem-info handle. Always returns 0; status is in
++ * psRetOUT->eError.
++ */
++static int
++PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++                                                 psUnmapDevClassMemIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psUnmapDevClassMemIN->psKernelMemInfo,
++                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_WRAP_EXT_MEMORY.
++ * Wraps externally-allocated memory (described by a user-supplied page
++ * table of IMG_SYS_PHYADDRs) into a device mem-info. The page table is
++ * copied from user space into a temporary kernel buffer which is freed
++ * again after PVRSRVWrapExtMemoryKM. This is the only wrapper in this
++ * group that can return non-zero: -EFAULT when the temporary allocation
++ * or the user copy fails. Otherwise returns 0 with the status in
++ * psWrapExtMemOUT->eError.
++ *
++ * NOTE(review): unlike the alloc/map paths, psMemInfo->psKernelSyncInfo
++ * is dereferenced below without a NULL check — presumably WrapExtMemoryKM
++ * always creates a sync object; confirm against the KM implementation.
++ */
++static int
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++                                        PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      IMG_UINT32 ui32PageTableSize;
++      IMG_SYS_PHYADDR *psSysPAddr;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++      
++      psWrapExtMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psWrapExtMemIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psWrapExtMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* NOTE(review): the multiplication below uses an untrusted
++         client-supplied count with no overflow check — verify
++         ui32NumPageTableEntries is bounded upstream. */
++      ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++                                              * sizeof(IMG_SYS_PHYADDR);
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                ui32PageTableSize,
++                                (IMG_VOID **)&psSysPAddr, 0)
++         != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      /* Copy the caller's physical-address table into kernel space. */
++      if(CopyFromUserWrapper(psPerProc, 
++                                                 ui32BridgeID,
++                                                 psSysPAddr,
++                                                 psWrapExtMemIN->psSysPAddr,
++                                                 ui32PageTableSize) != PVRSRV_OK)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,      ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
++              return -EFAULT;
++      }
++
++      psWrapExtMemOUT->eError =
++              PVRSRVWrapExtMemoryKM(hDevCookieInt,
++                                                        psWrapExtMemIN->ui32ByteSize,
++                                                        psWrapExtMemIN->ui32PageOffset,
++                                                        psWrapExtMemIN->bPhysContig,
++                                                        psSysPAddr,
++                                                        &psMemInfo);
++
++      /* The KM layer has consumed the table contents; free the temporary
++         buffer regardless of the wrap result. */
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        ui32PageTableSize,
++                        (IMG_VOID *)psSysPAddr, 0);
++
++      if(psWrapExtMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Build the client-visible mem-info, as in the alloc path. */
++      if(psMemInfo->pvLinAddrKM)
++      {
++              psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psMemInfo->pvLinAddrKM;
++      }
++      else
++      {
++              psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = 
++                      psMemInfo->sMemBlk.hOSMemHandle;
++      }
++
++      
++      psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
++      psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++      psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++      psWrapExtMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++      psWrapExtMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++
++      psWrapExtMemOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase, 
++                                                &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo, 
++                                                psMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      if(psWrapExtMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Sync info: dereferenced unconditionally (see NOTE above). */
++      psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++              psMemInfo->psKernelSyncInfo->psSyncData;
++      psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++              psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++      psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++              psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++      psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++      psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
++
++      psWrapExtMemOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase, 
++                                                &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo, 
++                                                (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
++                                                PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++      return 0;
++}
++
++/*
++ * Bridge wrapper for PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY.
++ * Looks up the mem-info handle, unwraps the external memory via
++ * PVRSRVUnwrapExtMemoryKM (IMG_FALSE: direct call, not cleanup), then
++ * releases the mem-info handle. Always returns 0; status is in
++ * psRetOUT->eError.
++ */
++static int
++PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++                                              PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
++                                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvMemInfo,
++                                                 psUnwrapExtMemIN->hKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++                                                              IMG_FALSE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                 psUnwrapExtMemIN->hKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      return 0;
++}
++
++static int
++PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                               PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
++                                               PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
++                                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psGetFreeDeviceMemOUT->eError = 
++              PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++                                                               &psGetFreeDeviceMemOUT->ui32Total,
++                                                               &psGetFreeDeviceMemOUT->ui32Free,
++                                                               &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++      return 0;
++}
++
++static int
++PVRMMapKVIndexAddressToMMapDataBW(IMG_UINT32 ui32BridgeID,
++                                                                PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA *psMMapDataIN,
++                                                                PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA *psMMapDataOUT,
++                                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_KV_TO_MMAP_DATA);
++      PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psMMapDataOUT->eError =
++              PVRMMapKVIndexAddressToMMapData(psMMapDataIN->pvKVIndexAddress,
++                                                                              psMMapDataIN->ui32Bytes,
++                                                                              &psMMapDataOUT->ui32MMapOffset,
++                                                                              &psMMapDataOUT->ui32ByteOffset,
++                                                                              &psMMapDataOUT->ui32RealByteSize);
++
++      return 0;
++}
++
++
++#ifdef PDUMP
++static int
++PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
++                                        IMG_VOID *psBridgeIn,
++                                        PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++      psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PDumpCommentBW(IMG_UINT32 ui32BridgeID,
++                         PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
++                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++                                                                        psPDumpCommentIN->ui32Flags);
++      return 0;
++}
++
++static int
++PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
++                              PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
++                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++      return 0;
++}
++
++static int
++PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
++                                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError =
++              PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++                                                      psPDumpRegDumpIN->sHWReg.ui32RegVal,
++                                                      psPDumpRegDumpIN->ui32Flags);
++
++      return 0;
++}
++
++static int
++PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
++                        PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
++                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError = 
++              PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,     
++                                                         psPDumpRegPolIN->sHWReg.ui32RegVal,
++                                                         psPDumpRegPolIN->ui32Mask,
++                                                         psPDumpRegPolIN->ui32Flags);
++
++      return 0;
++}
++
++static int
++PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
++                        PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
++                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvMemInfo,
++                                                 psPDumpMemPolIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
++                                        psPDumpMemPolIN->ui32Offset,
++                                        psPDumpMemPolIN->ui32Value,
++                                        psPDumpMemPolIN->ui32Mask,
++                                        PDUMP_POLL_OPERATOR_EQUAL,
++                                        psPDumpMemPolIN->bLastFrame,
++                                        psPDumpMemPolIN->bOverwrite,
++                                        MAKEUNIQUETAG(pvMemInfo));
++
++      return 0;
++}
++
++static int
++PDumpMemBW(IMG_UINT32 ui32BridgeID,
++                 PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
++                 PVRSRV_BRIDGE_RETURN *psRetOUT,
++                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++      IMG_VOID *pvAltLinAddrKM = IMG_NULL;
++      IMG_UINT32 ui32Bytes = psPDumpMemDumpIN->ui32Bytes;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvMemInfo,
++                                                 psPDumpMemDumpIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psPDumpMemDumpIN->pvAltLinAddr)
++      {
++              if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                        ui32Bytes, 
++                                        &pvAltLinAddrKM, 0) != PVRSRV_OK)
++              {
++                      return -EFAULT;
++              }
++
++              if(CopyFromUserWrapper(psPerProc, 
++                                             ui32BridgeID,
++                                                         pvAltLinAddrKM,
++                                                         psPDumpMemDumpIN->pvAltLinAddr,
++                                                         ui32Bytes) != PVRSRV_OK)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++                      return -EFAULT;
++              }
++      }
++
++      psRetOUT->eError =
++              PDumpMemKM(pvAltLinAddrKM,
++                                 pvMemInfo,
++                                 psPDumpMemDumpIN->ui32Offset,
++                                 ui32Bytes,
++                                 psPDumpMemDumpIN->ui32Flags,
++                                 MAKEUNIQUETAG(pvMemInfo));
++
++      if(pvAltLinAddrKM)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++      }
++
++      return 0;
++}             
++
++static int
++PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
++                        PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
++                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++      psRetOUT->eError =
++              PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++                                        psPDumpBitmapIN->ui32FileOffset,
++                                        psPDumpBitmapIN->ui32Width,
++                                        psPDumpBitmapIN->ui32Height,
++                                        psPDumpBitmapIN->ui32StrideInBytes,
++                                        psPDumpBitmapIN->sDevBaseAddr,
++                                        psPDumpBitmapIN->ui32Size,
++                                        psPDumpBitmapIN->ePixelFormat,
++                                        psPDumpBitmapIN->eMemFormat,
++                                        psPDumpBitmapIN->ui32Flags);
++
++      return 0;
++}
++
++static int
++PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
++                         PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
++                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError =
++              PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++                                         psPDumpReadRegIN->ui32FileOffset,
++                                         psPDumpReadRegIN->ui32Address,
++                                         psPDumpReadRegIN->ui32Size,
++                                         psPDumpReadRegIN->ui32Flags);
++
++      return 0;
++}
++
++static int
++PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_UINT32 ui32PDumpFlags;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      ui32PDumpFlags = 0;
++      if(psPDumpDriverInfoIN->bContinuous)
++      {
++              ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++      }
++      psRetOUT->eError =
++              PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++                                                ui32PDumpFlags);
++
++      return 0;
++}
++
++static int
++PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
++                              PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
++                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvAltLinAddrKM = IMG_NULL;
++      IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++      IMG_VOID *pvSyncInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++                                                 psPDumpSyncDumpIN->psKernelSyncInfo,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psPDumpSyncDumpIN->pvAltLinAddr)
++      {
++              if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                        ui32Bytes, 
++                                        &pvAltLinAddrKM, 0) != PVRSRV_OK)
++              {
++                      return -EFAULT;
++              }
++
++              if(CopyFromUserWrapper(psPerProc, 
++                                             ui32BridgeID,
++                                                         pvAltLinAddrKM,
++                                                         psPDumpSyncDumpIN->pvAltLinAddr,
++                                                         ui32Bytes) != PVRSRV_OK)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++                      return -EFAULT;
++              }
++      }
++
++      psRetOUT->eError =
++              PDumpMemKM(pvAltLinAddrKM,
++                                 ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++                                 psPDumpSyncDumpIN->ui32Offset,
++                                 ui32Bytes,
++                                 0,
++                                 MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++      if(pvAltLinAddrKM)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++      }
++
++      return 0;
++}
++
++static int
++PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
++                         PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
++                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_UINT32 ui32Offset;
++      IMG_VOID *pvSyncInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++                                                 psPDumpSyncPolIN->psKernelSyncInfo,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psPDumpSyncPolIN->bIsRead)
++      {
++              ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++      }
++      else
++      {
++              ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++      }
++
++      psRetOUT->eError =
++              PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++                                        ui32Offset,
++                                        psPDumpSyncPolIN->ui32Value,
++                                        psPDumpSyncPolIN->ui32Mask,
++                                        PDUMP_POLL_OPERATOR_EQUAL,
++                                        IMG_FALSE,
++                                        IMG_FALSE,
++                                        MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++      return 0;
++}
++
++static int
++PDumpPDRegBW(IMG_UINT32 ui32BridgeID,
++                       PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG *psPDumpPDRegDumpIN,
++                       PVRSRV_BRIDGE_RETURN *psRetOUT,
++                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++                         psPDumpPDRegDumpIN->sHWReg.ui32RegVal,
++                         PDUMP_PD_UNIQUETAG);
++
++      psRetOUT->eError = PVRSRV_OK;
++      return 0;
++}
++
++static int
++PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
++                                               PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
++                                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++                                                 psPDumpCycleCountRegReadIN->bLastFrame);
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++      psRetOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++                                                 psPDumpPDDevPAddrIN->hKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++                                                psPDumpPDDevPAddrIN->ui32Offset,
++                                                psPDumpPDDevPAddrIN->sPDDevPAddr,
++                                                MAKEUNIQUETAG(pvMemInfo),
++                                                PDUMP_PD_UNIQUETAG);
++      return 0;
++}
++
++static int
++PDumpBufferArrayBW(IMG_UINT32 ui32BridgeID,
++                                 PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++                                 IMG_VOID *psBridgeOut,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_UINT32 i;
++      PVR3DIF4_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++      IMG_UINT32 ui32BufferArrayLength =
++              psPDumpBufferArrayIN->ui32BufferArrayLength;
++      IMG_UINT32 ui32BufferArraySize =
++              ui32BufferArrayLength * sizeof(PVR3DIF4_KICKTA_DUMP_BUFFER);
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++      PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY);
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                ui32BufferArraySize, 
++                                (IMG_PVOID *)&psKickTADumpBuffer, 0) != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID,
++                                                 psKickTADumpBuffer,
++                                                 psPDumpBufferArrayIN->psBufferArray,
++                                                 ui32BufferArraySize) != PVRSRV_OK)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++              return -EFAULT;
++      }
++
++      for(i = 0; i < ui32BufferArrayLength; i++)
++      {
++              IMG_VOID *pvMemInfo;
++
++              eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                                      &pvMemInfo,
++                                                                      psKickTADumpBuffer[i].hKernelMemInfo,
++                                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY: "
++                                       "PVRSRVLookupHandle failed (%d)", eError));
++                      break;
++              }
++              psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++      }
++
++      if(eError == PVRSRV_OK)
++      {
++              DumpBufferArray(psKickTADumpBuffer,
++                                              ui32BufferArrayLength,
++                                              psPDumpBufferArrayIN->bDumpPolls);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++      return 0;
++}
++
++#endif 
++
++#if defined(SUPPORT_SGX1)
++static int
++SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID,
++                                 PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++                                 PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++      psGetClientInfoOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psGetClientInfoIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psGetClientInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetClientInfoOUT->eError =
++              SGXGetClientInfoKM(hDevCookieInt, 
++                                                 &psGetClientInfoOUT->sClientInfo);
++      return 0;
++}
++
++static int
++SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++      psRetOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psReleaseClientInfoIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++      psDevInfo->ui32ClientRefCount--;
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++
++static int
++SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID,
++                                              PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++                                              PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++                                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++      
++      psSGXGetInternalDevInfoOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psSGXGetInternalDevInfoIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXGetInternalDevInfoOUT->eError =
++              SGXGetInternalDevInfoKM(hDevCookieInt, 
++                                                              &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++      
++      psSGXGetInternalDevInfoOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hCtlKernelMemInfoHandle,
++                                                psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hCtlKernelMemInfoHandle,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++      return 0;
++}
++
++
++static int
++SGXDoKickBW(IMG_UINT32 ui32BridgeID,
++                      PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psDoKickIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++                                                 psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO); 
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psDoKickIN->sCCBKick.hDstKernelSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.hDstKernelSyncInfo,
++                                                         psDoKickIN->sCCBKick.hDstKernelSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++#if defined (NO_HARDWARE)
++      if(psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
++                                                         psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++#endif
++      for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++                                                         psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++                                                         psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      psRetOUT->eError =
++              SGXDoKickKM(hDevCookieInt, 
++                                      &psDoKickIN->sCCBKick);
++
++      return 0;
++}
++
++
++#if defined(TRANSFER_QUEUE)
++static int
++SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
++                      PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psSubmitTransferIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              SGXSubmitTransferKM(hDevCookieInt,
++                                                      psSubmitTransferIN->sHWRenderContextDevVAddr);
++
++      return 0;
++}
++#endif
++
++static int
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++                               PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      SGX_MISC_INFO *psMiscInfo;
++
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++      
++      psMiscInfo =
++              (SGX_MISC_INFO *)((IMG_UINT8 *)psSGXGetMiscInfoIN
++                                                + sizeof(PVRSRV_BRIDGE_IN_SGXGETMISCINFO));
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt, 
++                                                 psSGXGetMiscInfoIN->hDevCookie, 
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID,
++                                                 psMiscInfo,
++                                                 psSGXGetMiscInfoIN->psMiscInfo,
++                                                 sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      switch(psMiscInfo->eRequest)
++      {
++              default:
++                      break;
++      }
++
++      
++      psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, psMiscInfo);
++
++      
++      switch(psMiscInfo->eRequest)
++      {
++              default:
++                      break;
++      }
++
++      if(CopyToUserWrapper(psPerProc,
++                                   ui32BridgeID,
++                                               psSGXGetMiscInfoIN->psMiscInfo,
++                                               psMiscInfo,
++                                               sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int
++PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
++                                         IMG_VOID *psBridgeIn,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      if(!OSProcHasPrivSrvInit() || gbInitServerRunning || gbInitServerRan)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++#if defined (__linux__)
++      gbInitServerRunning = IMG_TRUE;
++#endif
++      psPerProc->bInitProcess = IMG_TRUE;
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++      if(!psPerProc->bInitProcess)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      PDUMPENDINITPHASE();
++
++      gbInitServerSuccessful = psInitSrvDisconnectIN->bInitSuccesful;
++
++      psPerProc->bInitProcess = IMG_FALSE;
++      gbInitServerRunning = IMG_FALSE;
++      gbInitServerRan = IMG_TRUE;
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++
++static int
++PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++      psRetOUT->eError = OSEventObjectWait(psEventObjectWaitIN->hOSEventKM, psEventObjectWaitIN->ui32MSTimeout);
++
++      return 0;
++}
++
++
++static int
++SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_ERROR eError;
++      IMG_BOOL bDissociateFailed = IMG_FALSE;
++      IMG_BOOL bLookupFailed = IMG_FALSE;
++      IMG_BOOL bReleaseFailed = IMG_FALSE;
++      IMG_HANDLE hDummy;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++      if(!psPerProc->bInitProcess)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt, 
++                                                 psSGXDevInitPart2IN->hDevCookie, 
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      
++      
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDummy,
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++#endif
++
++
++
++      for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++      {
++              IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++              if (hHandle == IMG_NULL)
++              {
++                      continue;
++              }
++
++              eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &hDummy, 
++                                                         hHandle, 
++                                                         PVRSRV_HANDLE_TYPE_MEM_INFO);
++              bLookupFailed |= (eError != PVRSRV_OK);
++      }
++
++      if (bLookupFailed)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++#endif
++
++
++
++      for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++      {
++              IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++              if (*phHandle == IMG_NULL)
++                      continue;
++
++              eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                         phHandle, 
++                                                         *phHandle, 
++                                                         PVRSRV_HANDLE_TYPE_MEM_INFO);
++              bReleaseFailed |= (eError != PVRSRV_OK);
++      }
++
++      if (bReleaseFailed)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              
++              PVR_DBG_BREAK;
++              return 0;
++      }
++
++      
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++#endif
++
++
++
++      for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++      {
++              IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++              if (hHandle == IMG_NULL)
++                      continue;
++
++              eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++              bDissociateFailed |= (eError != PVRSRV_OK);
++      }
++
++       
++      if(bDissociateFailed)
++      {
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, IMG_FALSE);
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, IMG_FALSE);
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, IMG_FALSE);
++
++              for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++              {
++                      IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++                      if (hHandle == IMG_NULL)
++                              continue;
++
++                      PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle, IMG_FALSE);
++
++              }
++
++              PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
++
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++              
++              PVR_DBG_BREAK;
++              return 0;
++      }
++
++      psRetOUT->eError =
++              DevInitSGXPart2KM(psPerProc,
++                                                hDevCookieInt,
++                                                &psSGXDevInitPart2IN->sInitInfo);
++
++      return 0;
++}
++
++static int
++SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
++                                                       PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_HANDLE hHWRenderContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++      psSGXRegHWRenderContextOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXRegHWRenderContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      hHWRenderContextInt =
++              SGXRegisterHWRenderContextKM(psDevInfo,
++                                                                       &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr);
++
++      if (hHWRenderContextInt == IMG_NULL)
++      {
++              psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXRegHWRenderContextOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXRegHWRenderContextOUT->hHWRenderContext,
++                                                hHWRenderContextInt,
++                                                PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXFlushHWRenderTargetIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      SGXFlushHWRenderTargetKM(psDevInfo, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++      return 0;
++}
++
++static int
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hHWRenderContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hHWRenderContextInt,
++                                                 psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      
++      return 0;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++static int
++SGX2DQueueBlitBW(IMG_UINT32 ui32BridgeID,
++                               PVRSRV_BRIDGE_IN_2DQUEUEBLT *ps2DQueueBltIN,
++                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[PVRSRV_MAX_BLT_SRC_SYNCS];
++      IMG_UINT32 i;
++      IMG_HANDLE hDevCookieInt;
++      IMG_VOID *pvSyncInfo;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUEUEBLT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 ps2DQueueBltIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(ps2DQueueBltIN->ui32DataByteSize > sizeof(aui322DBltData))
++      {
++              psRetOUT->eError = PVRSRV_ERROR_CMD_TOO_BIG;
++              return 0;
++      }
++
++      if(CopyFromUserWrapper(psPerProc,
++                                     ui32BridgeID,
++                                                 aui322DBltData,
++                                                 ps2DQueueBltIN->pui32BltData,
++                                                 ps2DQueueBltIN->ui32DataByteSize)
++        != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      for(i = 0; i < ps2DQueueBltIN->ui32NumSrcSyncs; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, 
++                                                         ps2DQueueBltIN->ahKernSrcSync[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if( psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++              apsSrcSync[i] = (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, 
++                                                 ps2DQueueBltIN->hKernDstSync,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if( psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      psRetOUT->eError =
++              SGX2DQueueBlitKM(psDevInfo,
++                                               (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++                                               ps2DQueueBltIN->ui32NumSrcSyncs,
++                                               apsSrcSync,
++                                               ps2DQueueBltIN->ui32DataByteSize,
++                                               aui322DBltData);
++
++      return 0;
++}
++
++#if defined(SGX2D_DIRECT_BLITS)
++static int
++SGX2DDirectBlitBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_2DDIRECTBLT *ps2DDirectBltIN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DDIRECTBLT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 ps2DDirectBltIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(ps2DDirectBltIN->ui32DataByteSize > sizeof(aui322DBltData))
++      {
++              psRetOUT->eError = PVRSRV_ERROR_CMD_TOO_BIG;
++              return 0;
++      }
++
++      if(CopyFromUserWrapper(psPerProc,
++                                     ui32BridgeID,
++                                                 aui322DBltData,
++                                                 ps2DDirectBltIN->pui32BltData,
++                                                 ps2DDirectBltIN->ui32DataByteSize)
++        != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      psRetOUT->eError =
++              SGX2DDirectBlitKM(psDevInfo,
++                                                ps2DDirectBltIN->ui32DataByteSize,
++                                                ps2DDirectBltIN->pui32BltData);
++
++      return 0;
++}
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++static int
++SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_VOID *pvSyncInfo;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 ps2DQueryBltsCompleteIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, 
++                                                 ps2DQueryBltsCompleteIN->hKernSyncInfo,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      psRetOUT->eError =
++              SGX2DQueryBlitsCompleteKM(psDevInfo,
++                                                                (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++                                                                ps2DQueryBltsCompleteIN->bWaitForComplete);
++
++      return 0;
++}
++#endif 
++
++static int
++SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++                                        PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL;
++      IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++      IMG_UINT32 i;
++      IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++      psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psSGXFindSharedPBDescIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      psSGXFindSharedPBDescOUT->eError =
++              SGXFindSharedPBDescKM(hDevCookieInt,
++                                                        psSGXFindSharedPBDescIN->ui32TotalPBSize,
++                                                        &hSharedPBDesc,
++                                                        &psSharedPBDescKernelMemInfo,
++                                                        &psHWPBDescKernelMemInfo,
++                                                        &psBlockKernelMemInfo,
++                                                        &ppsSharedPBDescSubKernelMemInfos,
++                                                        &ui32SharedPBDescSubKernelMemInfosCount);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
++                         <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++      psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++              ui32SharedPBDescSubKernelMemInfosCount;
++
++      if(hSharedPBDesc == IMG_NULL)
++      {   
++              psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++              
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++      }
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++                                                hSharedPBDesc,
++                                                PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
++                                                psSharedPBDescKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                psSGXFindSharedPBDescOUT->hSharedPBDesc);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
++                                                psHWPBDescKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                psSGXFindSharedPBDescOUT->hSharedPBDesc);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
++                                                psBlockKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                psSGXFindSharedPBDescOUT->hSharedPBDesc);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      
++      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++      {
++              PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
++                      psSGXFindSharedPBDescOUT;
++
++              psSGXFindSharedPBDescOut->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                        &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
++                                                        ppsSharedPBDescSubKernelMemInfos[i],
++                                                        PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                        psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
++              if(psSGXFindSharedPBDescOut->eError != PVRSRV_OK)
++                      goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                        * ui32SharedPBDescSubKernelMemInfosCount,
++                        ppsSharedPBDescSubKernelMemInfos,
++                        IMG_NULL);
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++      {
++              psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++              psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount = 0;
++
++              if(hSharedPBDesc != IMG_NULL)
++              {
++                      SGXUnrefSharedPBDescKM(hSharedPBDesc);
++              }
++              if (psSGXFindSharedPBDescOUT->hSharedPBDesc != IMG_NULL)
++              {
++                              PVRSRV_ERROR eError;
++
++                              eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                      psSGXFindSharedPBDescOUT->hSharedPBDesc,
++                                        PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescBW: Couldn't free shared PB description handle (%d)", eError));
++                              }
++              }
++      }
++
++      return 0;
++}
++
++static int
++SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++                                         PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hSharedPBDesc;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++      psSGXUnrefSharedPBDescOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hSharedPBDesc,
++                                                 psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++                                                 PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++      if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXUnrefSharedPBDescOUT->eError =
++              SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++      if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXUnrefSharedPBDescOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                 psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++                                                 PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++      
++      return 0;
++}
++
++static int
++SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++                                       PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++      IMG_UINT32 ui32KernelMemInfoHandlesCount =
++              psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++      IMG_BOOL bFault=IMG_FALSE;
++      IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
++      PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError=PVRSRV_OK;
++      IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++      psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++      PVR_ASSERT(ui32KernelMemInfoHandlesCount 
++                         <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              &hDevCookieInt,
++                                                              psSGXAddSharedPBDescIN->hDevCookie,
++                                                              PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              (IMG_VOID **)&psSharedPBDescKernelMemInfo,
++                                                              psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              (IMG_VOID **)&psHWPBDescKernelMemInfo,
++                                                              psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              (IMG_VOID **)&psBlockKernelMemInfo,
++                                                              psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++      
++      if(!OSAccessOK(PVR_VERIFY_READ,
++                                 psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++                                 ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++                               " Invalid phKernelMemInfos pointer", __FUNCTION__));
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++                                (IMG_VOID **)&phKernelMemInfoHandles,
++                                0) != PVRSRV_OK)
++      {
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID, 
++                                     phKernelMemInfoHandles,
++                                                 psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++                                                 ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))
++         != PVRSRV_OK)
++      {
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++                                (IMG_VOID **)&ppsKernelMemInfos,
++                                0) != PVRSRV_OK)
++      {
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++      {
++              eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                                      (IMG_VOID **)&ppsKernelMemInfos[i],
++                                                                      phKernelMemInfoHandles[i],
++                                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++              if(eError != PVRSRV_OK)
++              {
++                      goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++              }
++      }
++
++      eError = SGXAddSharedPBDescKM(hDevCookieInt,
++                                                                psSharedPBDescKernelMemInfo,
++                                                                psHWPBDescKernelMemInfo,
++                                                                psBlockKernelMemInfo,
++                                                                psSGXAddSharedPBDescIN->ui32TotalPBSize,
++                                                                &hSharedPBDesc,
++                                                                ppsKernelMemInfos,
++                                                                ui32KernelMemInfoHandlesCount);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++                                                hSharedPBDesc,
++                                                PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++      {
++              eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                                      phKernelMemInfoHandles[i],
++                                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++              if(eError != PVRSRV_OK)
++              {
++                      goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++              }
++      }
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++      psSGXAddSharedPBDescOUT->eError = eError;
++
++      if(phKernelMemInfoHandles)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount
++                                * sizeof(IMG_HANDLE),
++                                (IMG_VOID *)phKernelMemInfoHandles, 0);
++      }
++      if(ppsKernelMemInfos)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount
++                                * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++                                (IMG_VOID *)ppsKernelMemInfos, 0);
++      }
++
++      if(bFault || eError != PVRSRV_OK)
++      {
++              if(hSharedPBDesc != IMG_NULL)
++              {
++                      SGXUnrefSharedPBDescKM(hSharedPBDesc);
++              }
++
++              if(psSGXAddSharedPBDescOUT->hSharedPBDesc != IMG_NULL)
++              {
++                              PVRSRV_ERROR eError;
++
++                              eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                                                       psSGXAddSharedPBDescOUT->hSharedPBDesc,
++                                                                                       PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR,
++                                                      "SGXAddSharedPBDescBW: Couldn't free shared PB description handle (%d)",
++                                                      eError));
++                              }
++              }
++      }
++
++      if(bFault)
++              return -EFAULT;
++      else
++              return 0;
++}
++
++#endif 
++
++
++static int
++PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
++                                      PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++      
++      OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
++                        &psGetMiscInfoIN->sMiscInfo,
++                        sizeof(PVRSRV_MISC_INFO));
++
++      psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoIN->sMiscInfo);
++      psGetMiscInfoOUT->sMiscInfo = psGetMiscInfoIN->sMiscInfo;
++
++      return 0;
++}
++
++static int
++PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
++                              IMG_VOID *psBridgeIn,
++                              PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
++                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++      
++      psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++      psConnectServicesOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
++                                 IMG_VOID *psBridgeIn,
++                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++      
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
++                                      PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++      psEnumDispClassOUT->eError =
++              PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++                                                      &psEnumDispClassOUT->ui32NumDevices,
++                                                      &psEnumDispClassOUT->ui32DevID[0]);
++
++      return 0;
++}
++
++static int
++PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
++                                       PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++      psOpenDispClassDeviceOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt, 
++                                                 psOpenDispClassDeviceIN->hDevCookie, 
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenDispClassDeviceOUT->eError =
++              PVRSRVOpenDCDeviceKM(psOpenDispClassDeviceIN->ui32DeviceID, 
++                                                       hDevCookieInt,
++                                                       &hDispClassInfoInt);
++
++      if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenDispClassDeviceOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase, 
++                                                &psOpenDispClassDeviceOUT->hDeviceKM, 
++                                                hDispClassInfoInt, 
++                                                PVRSRV_HANDLE_TYPE_DISP_INFO, 
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      return 0;
++} 
++
++static int
++PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
++                                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psCloseDispClassDeviceIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psCloseDispClassDeviceIN->hDeviceKM,
++                                                      PVRSRV_HANDLE_TYPE_DISP_INFO);
++      return 0;
++} 
++
++static int
++PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
++                                        PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++      psEnumDispClassFormatsOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psEnumDispClassFormatsIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psEnumDispClassFormatsOUT->eError = 
++              PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++                                                        &psEnumDispClassFormatsOUT->ui32Count,
++                                                        psEnumDispClassFormatsOUT->asFormat);
++
++      return 0;
++} 
++
++static int
++PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
++                                 PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
++                                 PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++      psEnumDispClassDimsOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psEnumDispClassDimsIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++      if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psEnumDispClassDimsOUT->eError = 
++              PVRSRVEnumDCDimsKM(pvDispClassInfoInt, 
++                                                 &psEnumDispClassDimsIN->sFormat, 
++                                                 &psEnumDispClassDimsOUT->ui32Count,
++                                                 psEnumDispClassDimsOUT->asDim);
++
++      return 0;
++} 
++
++static int
++PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,  
++                                                PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hBufferInt;
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++      psGetDispClassSysBufferOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psGetDispClassSysBufferIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassSysBufferOUT->eError = 
++              PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, 
++                                                                &hBufferInt);
++
++      if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassSysBufferOUT->eError = 
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                       &psGetDispClassSysBufferOUT->hBuffer,
++                                                       hBufferInt,
++                                                       PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++                                                       (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++                                                       psGetDispClassSysBufferIN->hDeviceKM);
++
++      return 0;
++} 
++
++static int
++PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
++                                PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++      psGetDispClassInfoOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psGetDispClassInfoIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassInfoOUT->eError =
++              PVRSRVGetDCInfoKM(pvDispClassInfo,
++                                                &psGetDispClassInfoOUT->sDisplayInfo);
++
++      return 0;
++} 
++
++static int
++PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
++                                                PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_HANDLE hSwapChainInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++      psCreateDispClassSwapChainOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfo, 
++                                                 psCreateDispClassSwapChainIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++      if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psCreateDispClassSwapChainOUT->eError = 
++              PVRSRVCreateDCSwapChainKM(pvDispClassInfo, 
++                                                                psCreateDispClassSwapChainIN->ui32Flags,
++                                                                &psCreateDispClassSwapChainIN->sDstSurfAttrib,
++                                                                &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
++                                                                psCreateDispClassSwapChainIN->ui32BufferCount,
++                                                                psCreateDispClassSwapChainIN->ui32OEMFlags,
++                                                                &hSwapChainInt,
++                                                                &psCreateDispClassSwapChainOUT->ui32SwapChainID);
++
++      if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psCreateDispClassSwapChainOUT->eError = 
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase, 
++                                                &psCreateDispClassSwapChainOUT->hSwapChain, 
++                                                hSwapChainInt,
++                                                PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                psCreateDispClassSwapChainIN->hDeviceKM);
++
++      return 0;
++}
++
++static int
++PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++                                                 PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
++                                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain, 
++                                                 psDestroyDispClassSwapChainIN->hSwapChain, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = 
++              PVRSRVDestroyDCSwapChainKM(pvSwapChain, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase, 
++                                                      psDestroyDispClassSwapChainIN->hSwapChain, 
++                                                      PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
++                                       PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassDstRectIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassDstRectIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCDstRectKM(pvDispClassInfo,
++                                                       pvSwapChain,
++                                                       &psSetDispClassDstRectIN->sRect);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
++                                       PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassSrcRectIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassSrcRectIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++                                                       pvSwapChain,
++                                                       &psSetDispClassSrcRectIN->sRect);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassColKeyIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassColKeyIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++                                                                pvSwapChain,
++                                                                psSetDispClassColKeyIN->ui32CKColour);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassColKeyIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassColKeyIN->hSwapChain, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++                                                                pvSwapChain,
++                                                                psSetDispClassColKeyIN->ui32CKColour);
++
++      return 0;
++} 
++
++static int
++PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
++                                       PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++      psGetDispClassBuffersOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psGetDispClassBuffersIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassBuffersOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvSwapChain, 
++                                                 psGetDispClassBuffersIN->hSwapChain, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassBuffersOUT->eError = 
++              PVRSRVGetDCBuffersKM(pvDispClassInfo, 
++                                                       pvSwapChain,
++                                                       &psGetDispClassBuffersOUT->ui32BufferCount,
++                                                       psGetDispClassBuffersOUT->ahBuffer);
++
++      PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++      for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
++      {
++              IMG_HANDLE hBufferExt;
++
++              psGetDispClassBuffersOUT->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                               &hBufferExt,
++                                                               psGetDispClassBuffersOUT->ahBuffer[i],
++                                                               PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++                                                               (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++                                                               psGetDispClassBuffersIN->hSwapChain);
++              if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++              psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++      }
++
++      return 0;
++} 
++
++static int
++PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChainBuf;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfo, 
++                                                 psSwapDispClassBufferIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupSubHandle(psPerProc->psHandleBase, 
++                                                 &pvSwapChainBuf, 
++                                                 psSwapDispClassBufferIN->hBuffer, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++                                                 psSwapDispClassBufferIN->hDeviceKM);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = 
++              PVRSRVSwapToDCBufferKM(pvDispClassInfo, 
++                                                         pvSwapChainBuf,
++                                                         psSwapDispClassBufferIN->ui32SwapInterval,
++                                                         psSwapDispClassBufferIN->hPrivateTag,
++                                                         psSwapDispClassBufferIN->ui32ClipRectCount,
++                                                         psSwapDispClassBufferIN->sClipRect);
++
++      return 0;
++}
++
++static int
++PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfo, 
++                                                 psSwapDispClassSystemIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSwapDispClassSystemIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++                                                 psSwapDispClassSystemIN->hDeviceKM);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      psRetOUT->eError = 
++              PVRSRVSwapToDCSystemKM(pvDispClassInfo, 
++                                                         pvSwapChain);
++
++      return 0;
++}
++
++static int
++PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
++                                       PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hBufClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++      psOpenBufferClassDeviceOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psOpenBufferClassDeviceIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenBufferClassDeviceOUT->eError = 
++              PVRSRVOpenBCDeviceKM(psOpenBufferClassDeviceIN->ui32DeviceID,
++                                                       hDevCookieInt,
++                                                       &hBufClassInfo);
++      if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenBufferClassDeviceOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psOpenBufferClassDeviceOUT->hDeviceKM,
++                                                hBufClassInfo,
++                                                PVRSRV_HANDLE_TYPE_BUF_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
++                                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvBufClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvBufClassInfo, 
++                                                 psCloseBufferClassDeviceIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = 
++              PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                                                 psCloseBufferClassDeviceIN->hDeviceKM,
++                                                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++      return 0;
++}
++
++static int
++PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
++                                PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvBufClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++      psGetBufferClassInfoOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvBufClassInfo, 
++                                                 psGetBufferClassInfoIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++      if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetBufferClassInfoOUT->eError =
++              PVRSRVGetBCInfoKM(pvBufClassInfo, 
++                                                &psGetBufferClassInfoOUT->sBufferInfo);
++      return 0;
++}
++
++static int
++PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
++                                      PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvBufClassInfo;
++      IMG_HANDLE hBufferInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++      psGetBufferClassBufferOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvBufClassInfo, 
++                                                 psGetBufferClassBufferIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++      if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetBufferClassBufferOUT->eError = 
++              PVRSRVGetBCBufferKM(pvBufClassInfo, 
++                                                      psGetBufferClassBufferIN->ui32BufferIndex,
++                                                      &hBufferInt);
++
++      if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetBufferClassBufferOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                       &psGetBufferClassBufferOUT->hBuffer,
++                                                       hBufferInt,
++                                                       PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++                                                       (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |  PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++                                                       psGetBufferClassBufferIN->hDeviceKM);
++
++      return 0;
++}
++
++static int
++PVRSRVPowerControlBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_POWER_CONTROL *psPowerControlIN,
++                                       PVRSRV_BRIDGE_OUT_POWER_CONTROL *psPowerControlOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{     
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_POWER_CONTROL);
++
++      psPowerControlOUT->eError =
++              PVRSRVPowerControlKM(psPowerControlIN->eControlMode, 
++                                                       &psPowerControlIN->ePVRPowerState);
++      psPowerControlOUT->ePVRPowerState = psPowerControlIN->ePVRPowerState;
++      return 0;
++}
++
++
++static int
++PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
++                                                       PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++      psAllocSharedSysMemOUT->eError =
++              PVRSRVAllocSharedSysMemoryKM(psAllocSharedSysMemIN->ui32Flags,
++                                                                       psAllocSharedSysMemIN->ui32Size,
++                                                                       &psKernelMemInfo);
++      if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
++                       0,
++                       sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++      
++      if(psKernelMemInfo->pvLinAddrKM)
++      {
++              psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->pvLinAddrKM;
++      }
++      else
++      {
++              psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->sMemBlk.hOSMemHandle;
++      }
++      psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
++      psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++              psKernelMemInfo->ui32Flags;
++      psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++              psKernelMemInfo->ui32AllocSize; 
++      psAllocSharedSysMemOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                psKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                      PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
++                                                      PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
++                                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++      psFreeSharedSysMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 (IMG_VOID **)&psKernelMemInfo,
++                                                 psFreeSharedSysMemIN->psKernelMemInfo,
++                                                                                                                                 PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++      if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++              return 0;
++      
++      psFreeSharedSysMemOUT->eError =
++              PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++      if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++              return 0;
++
++      psFreeSharedSysMemOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psFreeSharedSysMemIN->psKernelMemInfo,
++                                                      PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      return 0;
++}
++
++static int
++PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
++                                        PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++      PVRSRV_HANDLE_TYPE eHandleType;
++      IMG_HANDLE      hParent;
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++      psMapMemInfoMemOUT->eError =
++              PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++                                                 (IMG_VOID **)&psKernelMemInfo,
++                                                 &eHandleType,
++                                                 psMapMemInfoMemIN->hKernelMemInfo);
++      if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      switch (eHandleType)
++      {
++#if defined(PVR_SECURE_HANDLES)
++              case PVRSRV_HANDLE_TYPE_MEM_INFO:
++              case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++              case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++#else
++              case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++                      break;
++              default:
++                      psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++                      return 0;
++      }
++
++      
++      psMapMemInfoMemOUT->eError =
++              PVRSRVGetParentHandle(psPerProc->psHandleBase,
++                                      &hParent,
++                                      psMapMemInfoMemIN->hKernelMemInfo,
++                                      eHandleType);
++      if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      if (hParent == IMG_NULL)
++      {
++              hParent = psMapMemInfoMemIN->hKernelMemInfo;
++      }
++
++      OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
++                       0,
++                       sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++      
++      if(psKernelMemInfo->pvLinAddrKM)
++      {
++              psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->pvLinAddrKM;
++      }
++      else
++      {
++              psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->sMemBlk.hOSMemHandle;
++      }
++
++      psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
++      psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++              psKernelMemInfo->sDevVAddr;
++      psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++              psKernelMemInfo->ui32Flags;
++      psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++              psKernelMemInfo->ui32AllocSize; 
++      psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++      psMapMemInfoMemOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                psKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                hParent);
++
++      if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++      {
++              
++              OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
++                               0,
++                               sizeof (PVRSRV_CLIENT_SYNC_INFO));
++              psMapMemInfoMemOUT->psKernelSyncInfo = IMG_NULL;
++      }
++      else
++      {
++              
++              psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++                      psKernelMemInfo->psKernelSyncInfo->psSyncData;
++              psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++                      psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++              psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++                      psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++              psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++              psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
++
++              psMapMemInfoMemOUT->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                               &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
++                                                               psKernelMemInfo->psKernelSyncInfo,
++                                                               PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                               PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                               psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
++              if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      return 0;
++}
++
++static int
++PVRSRVPollForValueBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_POLLFORVALUE *psPollForValueIN,
++                                       PVRSRV_BRIDGE_OUT_POLLFORVALUE *psPollForValueOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_POLLFORVALUE);
++
++      psPollForValueOUT->eError =
++              PollForValueKM(psPollForValueIN->pui32CpuVAddrKM,
++                                         psPollForValueIN->ui32Value,
++                                         psPollForValueIN->ui32Mask,
++                                         psPollForValueIN->ui32Waitus,
++                                         psPollForValueIN->ui32Tries
++                                        );
++      return 0;
++}
++
++static int
++MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
++                                      PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevMemContextInt;
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++      psGetMmuPDDevPAddrOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, 
++                                                 psGetMmuPDDevPAddrIN->hDevMemContext,
++                                                 PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++      if(psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++              MMU_GetPDDevPAddr(BM_GetMMUContextFromMemContext(hDevMemContextInt));
++      if(psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
++      {
++              psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++      }
++      else
++      {
++              psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++      }
++      return 0;
++}
++
++
++static int
++DummyBW(IMG_UINT32 ui32BridgeID,
++              IMG_VOID *psBridgeIn,
++              IMG_VOID *psBridgeOut,
++              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if !defined(DEBUG)
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++#endif
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++      PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++#if defined(DEBUG_BRIDGE_KM)
++      PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to "
++                       "Dummy Wrapper (probably not what you want!)",
++                       __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#else
++      PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to "
++                       "Dummy Wrapper (probably not what you want!)",
++                       __FUNCTION__, ui32BridgeID));
++#endif
++      return -ENOTTY;
++}
++
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++      _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++static IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++                                         const IMG_CHAR *pszIOCName,
++                                         BridgeWrapperFunction pfFunction,
++                                         const IMG_CHAR *pszFunctionName)
++{
++      static IMG_UINT32 ui32PrevIndex = (IMG_UINT32)-1;
++#if !defined(DEBUG)
++      PVR_UNREFERENCED_PARAMETER(pszIOCName);
++#endif
++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
++      PVR_UNREFERENCED_PARAMETER(pszFunctionName);
++#endif
++
++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
++      
++      PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
++#endif
++
++      
++      if(g_BridgeDispatchTable[ui32Index].pfFunction)
++      {
++#if defined(DEBUG_BRIDGE_KM)
++              PVR_DPF((PVR_DBG_ERROR,
++                               "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
++                               __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
++#else
++              PVR_DPF((PVR_DBG_ERROR,
++                               "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)",
++                               __FUNCTION__, pszIOCName, ui32Index));
++#endif
++              PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++                              __FUNCTION__));
++      }
++
++      
++      if((ui32PrevIndex != (IMG_UINT32)-1) &&
++         (ui32Index >= ui32PrevIndex+DISPATCH_TABLE_GAP_THRESHOLD ||
++              ui32Index <= ui32PrevIndex))
++      {
++#if defined(DEBUG_BRIDGE_KM)
++              PVR_DPF((PVR_DBG_WARNING,
++                               "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)",
++                               __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++                               ui32Index, pszIOCName));
++#else
++              PVR_DPF((PVR_DBG_WARNING,
++                               "%s: There is a gap in the dispatch table between indices %lu and %lu (%s)",
++                               __FUNCTION__, ui32PrevIndex, ui32Index, pszIOCName));
++#endif
++              PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++                              __FUNCTION__));
++      }
++
++      g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++      g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++      g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++      g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++      g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++      ui32PrevIndex = ui32Index;
++}
++
++
++PVRSRV_ERROR
++CommonBridgeInit(IMG_VOID)
++{
++      IMG_UINT32 i;
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_KV_TO_MMAP_DATA, PVRMMapKVIndexAddressToMMapDataBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_POWER_CONTROL, PVRSRVPowerControlBW);
++
++      
++#if defined (SUPPORT_INT_POWER_MAN)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INT_POWER_MAN, DummyBW);
++#endif
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++
++      
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
++#endif
++
++
++      
++#if defined(PDUMP)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY, PDumpBufferArrayBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
++#endif 
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_POLLFORVALUE, PVRSRVPollForValueBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, MMU_GetPDDevPAddrBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEMHEAPS, PVRSRVGetDeviceMemHeapsBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, PVRSRVInitSrvConnectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, PVRSRVInitSrvDisconnectBW);
++
++              
++      SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++
++
++#if defined(SUPPORT_SGX1)
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND, DummyBW);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUEUEBLT, SGX2DQueueBlitBW);
++#if defined(SGX2D_DIRECT_BLITS)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DDIRECTBLT, SGX2DDirectBlitBW);
++#endif 
++#endif 
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
++#endif 
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++#if defined(TRANSFER_QUEUE)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
++#endif        
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
++
++#endif 
++
++
++      
++      
++      for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
++      {
++              if(!g_BridgeDispatchTable[i].pfFunction)
++              {
++                      g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++                      g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
++                      g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++                      g_BridgeDispatchTable[i].ui32CallCount = 0;
++                      g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++                      g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++
++int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++                                        PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM)
++ {
++
++      IMG_VOID   * psBridgeIn;
++      IMG_VOID   * psBridgeOut;
++      BridgeWrapperFunction pfBridgeHandler;
++      IMG_UINT32   ui32BridgeID = psBridgePackageKM->ui32BridgeID;
++      int          err          = -EFAULT;
++
++#if defined(DEBUG_TRACE_BRIDGE_KM)
++      PVR_DPF((PVR_DBG_ERROR, "%s: %s",
++                       __FUNCTION__,
++                       g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++      g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
++      g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++
++      if(!psPerProc->bInitProcess)
++      {
++              if(gbInitServerRan)
++              {
++                      if(!gbInitServerSuccessful)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed.  Driver unusable.",
++                                               __FUNCTION__));
++                              goto return_fault;
++                      }
++              }
++              else
++              {
++                      if(gbInitServerRunning)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
++                                               __FUNCTION__));
++                              goto return_fault;
++                      }
++                      else
++                      {
++                              
++                              switch(ui32BridgeID)
++                              {
++                                      case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
++                                      case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++                                      case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
++                                      case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++                                              break;
++                                      default:
++                                              PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
++                                                               __FUNCTION__));
++                                              goto return_fault;
++                              }
++                      }
++              }
++      }
++
++
++
++#if defined(__linux__)
++      {
++              
++              SYS_DATA *psSysData;
++
++              if(SysAcquireData(&psSysData) != PVRSRV_OK)
++              {
++                      goto return_fault;
++              }
++
++              
++              psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
++              psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++              if(psBridgePackageKM->ui32InBufferSize > 0)
++              {
++                      if(!OSAccessOK(PVR_VERIFY_READ,
++                                                      psBridgePackageKM->pvParamIn,
++                                                      psBridgePackageKM->ui32InBufferSize))
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__)); goto return_fault;
++                      }
++
++                      if(CopyFromUserWrapper(psPerProc,
++                                                     ui32BridgeID,
++                                                                 psBridgeIn,
++                                                                 psBridgePackageKM->pvParamIn,
++                                                                 psBridgePackageKM->ui32InBufferSize)
++                        != PVRSRV_OK)
++                      {
++                              goto return_fault;
++                      }
++              }
++      }
++#else
++      psBridgeIn  = psBridgePackageKM->pvParamIn;
++      psBridgeOut = psBridgePackageKM->pvParamOut;
++#endif
++
++      if(ui32BridgeID >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out of range!",
++                               __FUNCTION__, ui32BridgeID));
++              goto return_fault;
++      }
++      pfBridgeHandler =
++              (BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
++      err = pfBridgeHandler(ui32BridgeID,
++                                                psBridgeIn,
++                                                psBridgeOut,
++                                                psPerProc);
++      if(err < 0)
++      {
++              goto return_fault;
++      }
++
++
++#if defined(__linux__)        
++      
++      if(CopyToUserWrapper(psPerProc, 
++                                               ui32BridgeID,
++                                               psBridgePackageKM->pvParamOut,
++                                               psBridgeOut,
++                                               psBridgePackageKM->ui32OutBufferSize)
++         != PVRSRV_OK)
++      {
++              goto return_fault;
++      }
++#endif
++
++      return 0;
++
++return_fault:
++      return err;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+--- git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,91 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++#define PVRSRV_GET_BRIDGE_ID(X)       _IOC_NR(X)
++#else
++#define PVRSRV_GET_BRIDGE_ID(X)       (X - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
++#endif
++
++typedef int (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID,
++                                                                       IMG_VOID *psBridgeIn,
++                                                                       IMG_VOID *psBridgeOut,
++                                                                       PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++{
++      BridgeWrapperFunction pfFunction; 
++#if defined(DEBUG_BRIDGE_KM)
++      const IMG_CHAR *pszIOCName; 
++      const IMG_CHAR *pszFunctionName; 
++      IMG_UINT32 ui32CallCount; 
++      IMG_UINT32 ui32CopyFromUserTotalBytes; 
++      IMG_UINT32 ui32CopyToUserTotalBytes; 
++#endif
++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
++
++
++#if defined(SUPPORT_SGX1)
++#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++#else
++#error "FIXME: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT unset"
++#endif
++
++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++
++#if defined(DEBUG_BRIDGE_KM)
++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
++{
++      IMG_UINT32 ui32IOCTLCount;
++      IMG_UINT32 ui32TotalCopyFromUserBytes;
++      IMG_UINT32 ui32TotalCopyToUserBytes;
++}PVRSRV_BRIDGE_GLOBAL_STATS;
++
++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++
++PVRSRV_ERROR CommonBridgeInit(IMG_VOID);
++
++int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++                                        PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1761 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a,b)       (((a) > (b)) ? (b) : (a))
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags);
++static void
++BM_FreeMemory (void *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
++static IMG_BOOL
++BM_ImportMemory(void *pH, IMG_SIZE_T uSize,
++                                      IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
++                                      IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase);
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++                              BM_MAPPING *pMapping, 
++                              IMG_SIZE_T *pActualSize,
++                              IMG_UINT32 uFlags,
++                              IMG_UINT32 dev_vaddr_alignment,
++                              IMG_DEV_VIRTADDR *pDevVAddr);
++static void
++DevMemoryFree (BM_MAPPING *pMapping);
++
++static IMG_BOOL
++AllocMemory (BM_CONTEXT                               *pBMContext,
++                              BM_HEAP                         *psBMHeap,
++                              IMG_DEV_VIRTADDR        *psDevVAddr,
++                              IMG_SIZE_T                      uSize,
++                              IMG_UINT32                      uFlags,
++                              IMG_UINT32                      uDevVAddrAlignment,
++                              BM_BUF                          *pBuf)
++{
++      BM_MAPPING                      *pMapping;
++      IMG_UINTPTR_T           uOffset;
++      RA_ARENA                        *pArena = IMG_NULL;
++
++      PVR_UNREFERENCED_PARAMETER(pBMContext);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)",
++                        pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf));
++
++      
++
++
++      if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++      {
++              if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));                  
++                      return IMG_FALSE;
++              }
++
++              
++
++              
++              if(psBMHeap->ui32Attribs
++                 &    (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++                 |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++              {
++                      
++                      pArena = psBMHeap->pImportArena;
++              }
++              else
++              {
++                      PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
++                      return IMG_FALSE;
++              }
++
++              
++              if (!RA_Alloc(pArena,
++                                        uSize,
++                                        IMG_NULL,
++                                        (void*) &pMapping,
++                                        uFlags,
++                                        uDevVAddrAlignment,
++                                        0,
++                                        (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
++                      return IMG_FALSE;
++              }
++
++              uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++              if(pMapping->CpuVAddr)
++              {
++                      pBuf->CpuVAddr = (void*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
++              }
++              else
++              {
++                      pBuf->CpuVAddr = IMG_NULL;
++              }
++
++              if(uSize == pMapping->uSize)
++              {
++                      pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++              }
++              else
++              {
++                      if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++                                                               uOffset,
++                                                               uSize,
++                                                               psBMHeap->ui32Attribs,
++                                                               &pBuf->hOSMemHandle)!=PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
++                              return IMG_FALSE;
++                      }
++              }
++
++              
++              pBuf->CpuPAddr = pMapping->CpuPAddr;
++
++              if(uFlags & PVRSRV_MEM_ZERO)
++              {
++                      if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
++                      {
++                              return IMG_FALSE;
++                      }
++              }
++      }
++      else
++      {
++              if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++              {
++                      
++                      PVR_ASSERT(psDevVAddr != IMG_NULL);
++
++                      
++                      pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++                                                                                                      uSize,
++                                                                                                      IMG_NULL,
++                                                                                                      PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++                                                                                                      uDevVAddrAlignment,
++                                                                                                      psDevVAddr);
++
++                      
++                      pBuf->DevVAddr = *psDevVAddr;
++              }
++              else
++              {
++                      
++
++                      
++                      pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++                                                                                                      uSize,
++                                                                                                      IMG_NULL,
++                                                                                                      0,
++                                                                                                      uDevVAddrAlignment,
++                                                                                                      &pBuf->DevVAddr);
++              }
++
++              
++              if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                      sizeof (struct _BM_MAPPING_),
++                                                      (IMG_PVOID *)&pMapping, IMG_NULL) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(struct _BM_MAPPING_)));
++                      return IMG_FALSE;
++              }
++
++              
++              pBuf->CpuVAddr = IMG_NULL;
++              pBuf->hOSMemHandle = 0;
++              pBuf->CpuPAddr.uiAddr = 0;
++
++              
++              pMapping->CpuVAddr = IMG_NULL;
++              pMapping->CpuPAddr.uiAddr = 0;
++              pMapping->DevVAddr = pBuf->DevVAddr;
++              pMapping->psSysAddr = IMG_NULL;
++              pMapping->uSize = uSize;
++              pMapping->hOSMemHandle = 0;
++      }
++
++      
++      pMapping->pArena = pArena;
++
++      
++      pMapping->pBMHeap = psBMHeap;
++      pBuf->pMapping = pMapping;
++
++      
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pMapping,
++                              pMapping->DevVAddr.uiAddr,
++                              pMapping->CpuVAddr,
++                              pMapping->CpuPAddr.uiAddr,
++                              pMapping->uSize));
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pBuf,
++                              pBuf->DevVAddr.uiAddr,
++                              pBuf->CpuVAddr,
++                              pBuf->CpuPAddr.uiAddr,
++                              uSize));
++
++      
++      PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++      return IMG_TRUE;
++}
++
++
++static IMG_BOOL
++WrapMemory (BM_HEAP *psBMHeap,
++                      IMG_SIZE_T uSize,
++                      IMG_UINT32 ui32BaseOffset,
++                      IMG_BOOL bPhysContig,
++                      IMG_SYS_PHYADDR *psAddr,
++                      IMG_VOID *pvCPUVAddr,
++                      IMG_UINT32 uFlags,
++                      BM_BUF *pBuf)
++{
++      IMG_DEV_VIRTADDR DevVAddr = {0};
++      BM_MAPPING *pMapping;
++      IMG_BOOL bResult;
++      IMG_UINT32 const ui32PageSize = HOST_PAGESIZE();
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++                        psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr, uFlags, pBuf));
++
++      PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++      
++      PVR_ASSERT(((IMG_UINT32)pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++      uSize += ui32BaseOffset;
++      uSize = HOST_PAGEALIGN (uSize);
++
++      
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(*pMapping),
++                                              (IMG_PVOID *)&pMapping, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
++              return IMG_FALSE;
++      }
++
++      OSMemSet(pMapping, 0, sizeof (*pMapping));
++      
++      pMapping->uSize = uSize;
++      pMapping->pBMHeap = psBMHeap;
++
++      if(!bPhysContig)
++      {
++              pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++              pMapping->psSysAddr = psAddr;
++              
++              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: Non phys-contig mapping starting at %p", (void *)psAddr));
++      }
++      else
++      {
++              pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++              if(pvCPUVAddr)
++              {
++                      pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++                      pMapping->CpuVAddr = pvCPUVAddr;
++              
++                      if(OSRegisterMem(pMapping->CpuPAddr, 
++                                                      pMapping->CpuVAddr,
++                                                      pMapping->uSize,
++                                                      uFlags,
++                                                      &pMapping->hOSMemHandle) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: RegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed",
++                                      pMapping->CpuPAddr, pMapping->CpuVAddr, pMapping->uSize));
++                              goto fail_cleanup;
++                      }
++
++              }
++              else
++              {
++                      pMapping->eCpuMemoryOrigin = hm_wrapped;
++
++                      if(OSReservePhys(pMapping->CpuPAddr,
++                                                       pMapping->uSize,
++                                                       uFlags,
++                                                       &pMapping->CpuVAddr,
++                                                       &pMapping->hOSMemHandle) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: Reserve/Map Phys=0x%08X, Size=%d) failed",
++                                      pMapping->CpuPAddr, pMapping->uSize));
++                              goto fail_cleanup;
++                      }
++              }
++      }
++
++
++      
++      bResult = DevMemoryAlloc(psBMHeap->pBMContext,
++                                                       pMapping,
++                                                       IMG_NULL,
++                                                       uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++                                                       ui32PageSize,
++                                                       &DevVAddr);
++      if (!bResult)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "WrapMemory: DevMemoryAlloc(0x%x) failed",
++                              pMapping->uSize));
++              goto fail_cleanup;
++      }
++
++      
++      pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++      if(!ui32BaseOffset)
++      {
++              pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++      }
++      else
++      {
++              if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++                                                       ui32BaseOffset,
++                                                       (pMapping->uSize-ui32BaseOffset),
++                                                       uFlags,
++                                                       &pBuf->hOSMemHandle)!=PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
++                      goto fail_cleanup;
++              }
++      }
++      if(pMapping->CpuVAddr)
++      {
++              pBuf->CpuVAddr = (void*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
++      }
++      pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + ui32BaseOffset;
++
++      if(uFlags & PVRSRV_MEM_ZERO)
++      {
++              if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
++              {
++                      return IMG_FALSE;
++              }
++      }
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pMapping, pMapping->DevVAddr.uiAddr,
++                              pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pBuf, pBuf->DevVAddr.uiAddr,
++                              pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize));
++
++      pBuf->pMapping = pMapping;
++      return IMG_TRUE;
++
++fail_cleanup:
++      if(ui32BaseOffset && pBuf->hOSMemHandle)
++      {
++              OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++      }
++
++      if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++      {
++              if(pMapping->eCpuMemoryOrigin == hm_wrapped)
++              {
++                      OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++              }
++              else if(pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++              {
++                      OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++      return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags)
++{
++      IMG_VOID *pvCpuVAddr;
++
++      if(pBuf->CpuVAddr)
++      {
++              OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
++      }
++      else if(pMapping->eCpuMemoryOrigin == hm_contiguous
++                      || pMapping->eCpuMemoryOrigin == hm_wrapped)
++      {
++              pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
++                                                                      ui32Bytes,
++                                                                      PVRSRV_HAP_KERNEL_ONLY
++                                                                      | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                                                      IMG_NULL);
++              if(!pvCpuVAddr)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
++                      return IMG_FALSE;
++              }
++              OSMemSet(pvCpuVAddr, 0, ui32Bytes);
++              OSUnMapPhysToLin(pvCpuVAddr,
++                                               ui32Bytes,
++                                               PVRSRV_HAP_KERNEL_ONLY
++                                               | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                               IMG_NULL);
++      }
++      else
++      {
++              IMG_UINT32 ui32BytesRemaining = ui32Bytes;
++              IMG_UINT32 ui32CurrentOffset = 0;
++              IMG_CPU_PHYADDR CpuPAddr;
++
++              
++              PVR_ASSERT(pBuf->hOSMemHandle);
++
++              CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, 0);
++
++              while(ui32BytesRemaining > 0)
++              {
++                      IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
++                      CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
++                      
++                      if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
++                      {
++                              ui32BlockBytes =
++                                      MIN(ui32BytesRemaining, HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++                      }
++
++                      pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
++                                                                              ui32BlockBytes,
++                                                                              PVRSRV_HAP_KERNEL_ONLY
++                                                                              | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                                                              IMG_NULL);
++                      if(!pvCpuVAddr)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
++                              return IMG_FALSE;
++                      }
++                      OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
++                      OSUnMapPhysToLin(pvCpuVAddr,
++                                                       ui32BlockBytes,
++                                                       PVRSRV_HAP_KERNEL_ONLY
++                                                       | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                                       IMG_NULL);
++
++                      ui32BytesRemaining -= ui32BlockBytes;
++                      ui32CurrentOffset += ui32BlockBytes;
++              }
++              PVR_ASSERT(ui32BytesRemaining == 0);
++      }
++
++      return IMG_TRUE;
++}
++
++static void
++FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags)
++{
++      BM_MAPPING *pMapping;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++                      pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
++
++      
++      pMapping = pBuf->pMapping;
++
++      if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++      {
++              
++              if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
++              }
++              else
++              {
++                      
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++              }
++      }
++      else
++      {
++              
++              if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
++              {
++                      OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++              }
++              if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++              {
++                      
++
++
++                      RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
++              }
++              else
++              {
++                      if(pMapping->eCpuMemoryOrigin == hm_wrapped)
++                      {
++                              OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++                      }
++                      else if(pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++                      {
++                              OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++                      }
++                      
++                      DevMemoryFree (pMapping);
++
++                      
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
++}
++
++
++
++PVRSRV_ERROR
++BM_DestroyContext(IMG_HANDLE hBMContext,
++                                IMG_BOOL bKernelContext,
++                                IMG_BOOL bResManCallback,
++                                IMG_BOOL *pbDestroyed)
++{
++      BM_CONTEXT **ppBMContext;
++      BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++      BM_HEAP *psBMHeap, *psTmpBMHeap;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));
++
++      if (pbDestroyed != IMG_NULL)
++      {
++              *pbDestroyed = IMG_FALSE;
++      }
++
++      
++
++      if (pBMContext == IMG_NULL)
++      {
++              PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++
++      psDeviceNode = pBMContext->psDeviceNode;
++
++      
++      if((!bKernelContext) && (pBMContext->ui32RefCount))
++      {
++              pBMContext->ui32RefCount--;
++
++              if(pBMContext->ui32RefCount > 0 && !bResManCallback)
++              {
++                      
++                      return PVRSRV_OK;
++              }
++      }
++
++      
++
++      psBMHeap = pBMContext->psBMHeap;
++      while(psBMHeap)
++      {
++              
++              if(psBMHeap->ui32Attribs 
++              &       (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++                      |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++              {
++                      if (psBMHeap->pImportArena)
++                      {
++                              RA_Delete (psBMHeap->pImportArena);
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              
++              psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++
++              
++              psTmpBMHeap = psBMHeap;
++
++              
++              psBMHeap = psBMHeap->psNext;
++
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psTmpBMHeap, IMG_NULL);
++      }
++
++      
++
++      if (pBMContext->psMMUContext)
++      {
++              psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++      }
++      
++      
++
++      if (pBMContext->pBufferHash)
++      {
++              HASH_Delete (pBMContext->pBufferHash);
++      }
++
++      
++
++      psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++      
++      if(bKernelContext)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDevMemoryInfo->pBMKernelContext, IMG_NULL);
++              psDevMemoryInfo->pBMKernelContext = IMG_NULL;
++      }
++      else
++      {
++              ppBMContext = &psDevMemoryInfo->pBMContext;
++              while(*ppBMContext)
++              {
++                      if(*ppBMContext == pBMContext)
++                      {
++                              
++                              *ppBMContext = pBMContext->psNext;
++
++                              
++                              if(!bResManCallback && pBMContext->hResItem)
++                              {
++                                      PVRSRV_ERROR eError;
++
++                                      eError = ResManFreeResByPtr(pBMContext->hResItem, IMG_FALSE);
++
++                                      if (eError != PVRSRV_OK)
++                                      {
++                                              PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
++                                              return eError;
++                                      }
++                              }
++
++                              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pBMContext, IMG_NULL);
++                              break;
++                      }
++                      ppBMContext = &((*ppBMContext)->psNext);
++              }
++      }
++      
++      if (pbDestroyed != IMG_NULL)
++      {
++              *pbDestroyed = IMG_TRUE;
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_UINT32 ui32ProcessID,
++                                                                                        IMG_PVOID pvParam,
++                                                                                        IMG_UINT32 ui32Param)
++{
++      BM_CONTEXT *pBMContext = (BM_CONTEXT*)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER (ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER (ui32Param);
++
++      return BM_DestroyContext(pBMContext, IMG_FALSE, IMG_TRUE, IMG_NULL);
++}
++
++
++IMG_HANDLE 
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++                               IMG_DEV_PHYADDR *psPDDevPAddr,
++                               IMG_BOOL bKernelContext,
++                               IMG_BOOL *pbCreated)
++{
++      BM_CONTEXT *pBMContext;
++      BM_HEAP *psBMHeap;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_CreateContext"));
++
++      if (pbCreated != IMG_NULL)
++      {
++              *pbCreated = IMG_FALSE;
++      }
++
++      
++      psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++      
++
++
++      if(bKernelContext && psDevMemoryInfo->pBMKernelContext)
++      {
++              
++              return (IMG_HANDLE)psDevMemoryInfo->pBMKernelContext;
++      }
++      
++      pBMContext = psDevMemoryInfo->pBMContext;
++
++      while(pBMContext)
++      {
++              if(ResManFindResourceByPtr(pBMContext->hResItem) == PVRSRV_OK)
++              {
++                      
++                      pBMContext->ui32RefCount++;
++
++                      return (IMG_HANDLE)pBMContext;
++              }
++
++              pBMContext = pBMContext->psNext;
++      }
++
++      
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof (struct _BM_CONTEXT_),
++                                       (IMG_PVOID *)&pBMContext, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
++              return IMG_NULL;
++      }
++      OSMemSet (pBMContext, 0, sizeof (BM_CONTEXT));
++
++      
++      pBMContext->psDeviceNode = psDeviceNode;
++
++      
++      if(bKernelContext)
++      {
++              
++              pBMContext->pBufferHash = HASH_Create (32);
++              if (pBMContext->pBufferHash==IMG_NULL)
++              {
++                      PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
++                      goto cleanup;
++              }
++      }
++
++      if(psDeviceNode->pfnMMUInitialise(psDeviceNode,
++                                                                              &pBMContext->psMMUContext,
++                                                                              psPDDevPAddr) != PVRSRV_OK)
++      {
++              PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
++              goto cleanup;
++      }
++
++      if(bKernelContext)
++      {
++              
++              psDevMemoryInfo->pBMKernelContext = pBMContext;
++      }
++      else
++      {
++              
++
++
++
++
++              PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++              PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++              
++
++
++
++              pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
++              
++              
++
++
++              psBMHeap = pBMContext->psBMSharedHeap;
++              while(psBMHeap)
++              {
++                      switch(psBMHeap->sDevArena.DevMemHeapType)
++                      {
++                              case DEVICE_MEMORY_HEAP_SHARED:
++                              case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                              {
++                                      
++                                      psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
++                                      break;
++                              }
++                      }
++                      
++                      psBMHeap = psBMHeap->psNext;
++              }
++
++              
++              pBMContext->hResItem = ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_CONTEXT,
++                                                                                              pBMContext,
++                                                                                              0,
++                                                                                              BM_DestroyContextCallBack,
++                                                                                              0);
++              if (pBMContext->hResItem == IMG_NULL)
++              {
++                      PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
++                      goto cleanup;
++              }
++
++              
++              pBMContext->ui32RefCount++;
++
++              
++              pBMContext->psNext = psDevMemoryInfo->pBMContext;
++              psDevMemoryInfo->pBMContext = pBMContext;
++      }
++
++      if (pbCreated != IMG_NULL)
++      {
++              *pbCreated = IMG_TRUE;
++      }
++      return (IMG_HANDLE)pBMContext;
++
++cleanup:
++      
++
++
++      BM_DestroyContext(pBMContext, bKernelContext, IMG_FALSE, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pBMContext, IMG_NULL);
++
++      return IMG_NULL;
++}
++
++
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++                         DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
++{
++      BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++      PVRSRV_DEVICE_NODE *psDeviceNode = pBMContext->psDeviceNode;
++      BM_HEAP *psBMHeap;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
++
++      if(!pBMContext)
++      {
++              return IMG_NULL;
++      }
++
++      
++      if(pBMContext->ui32RefCount > 1)
++      {
++              psBMHeap = pBMContext->psBMHeap;
++
++              while(psBMHeap)
++              {
++                      if(psBMHeap->sDevArena.ui32HeapID ==  psDevMemHeapInfo->ui32HeapID)
++                      
++                      {
++                              
++                              return psBMHeap;
++                      }
++                      psBMHeap = psBMHeap->psNext;
++              }
++      }
++
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof (BM_HEAP),
++                                              (IMG_PVOID *)&psBMHeap, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
++              return IMG_NULL;
++      }
++
++      OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));
++
++      psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++      psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++      psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++      psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++      psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++      psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++      psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++      
++      psBMHeap->pBMContext = pBMContext;
++
++      psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
++                                                                                                      &psBMHeap->sDevArena,
++                                                                                                      &psBMHeap->pVMArena);
++      if (!psBMHeap->pMMUHeap)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
++              goto ErrorExit;
++      }
++
++      
++      psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
++                                                                              0, 0, IMG_NULL,
++                                                                              HOST_PAGESIZE(),
++                                                                              BM_ImportMemory, 
++                                                                              BM_FreeMemory, 
++                                                                              IMG_NULL,
++                                                                              psBMHeap);
++      if(psBMHeap->pImportArena == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
++              goto ErrorExit;
++      }
++
++      if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++      {
++              
++
++
++
++              psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
++              if(psBMHeap->pLocalDevMemArena == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
++                      goto ErrorExit;
++              }
++      }
++
++      
++      psBMHeap->psNext = pBMContext->psBMHeap;
++      pBMContext->psBMHeap = psBMHeap;
++
++      return (IMG_HANDLE)psBMHeap;
++
++      
++ErrorExit:
++
++      
++      if (psBMHeap->pMMUHeap != IMG_NULL)
++      {
++              psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++              psDeviceNode->pfnMMUFinalise (pBMContext->psMMUContext);
++      }
++
++      
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBMHeap, IMG_NULL);
++
++      return IMG_NULL;
++}
++
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
++{
++      BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;
++      PVRSRV_DEVICE_NODE *psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
++
++      if(psBMHeap)
++      {
++              BM_HEAP **ppsBMHeap;
++              
++              
++              if(psBMHeap->ui32Attribs 
++              &       (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++                      |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++              {
++                      if (psBMHeap->pImportArena)
++                      {
++                              RA_Delete (psBMHeap->pImportArena);
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
++                      return;
++              }
++
++              
++              psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++              
++              
++              ppsBMHeap = &psBMHeap->pBMContext->psBMHeap;
++              while(*ppsBMHeap)
++              {
++                      if(*ppsBMHeap == psBMHeap)
++                      {
++                              
++                              *ppsBMHeap = psBMHeap->psNext;
++                              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBMHeap, IMG_NULL);
++                              break;
++                      }
++                      ppsBMHeap = &((*ppsBMHeap)->psNext);
++              }
++      }
++      else
++      {
++              PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));       
++      }
++}
++
++
++IMG_BOOL 
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++      PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
++      PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++#ifdef FIXME
++      BM_CONTEXT *pBMContext;
++
++      pBMContext = psDeviceNode->sDevMemoryInfo.pBMContext;
++
++      while(pBMContext)
++      {
++              MMU_Enable (pBMContext);
++              pBMContext = pBMContext->psNext;
++      }
++#endif
++
++      return IMG_TRUE;
++}
++
++IMG_BOOL
++BM_Alloc (  IMG_HANDLE                        hDevMemHeap,
++                      IMG_DEV_VIRTADDR        *psDevVAddr,
++                      IMG_SIZE_T                      uSize,
++                      IMG_UINT32                      *pui32Flags,
++                      IMG_UINT32                      uDevVAddrAlignment,
++                      BM_HANDLE                       *phBuf)
++{
++      BM_BUF *pBuf;
++      BM_CONTEXT *pBMContext;
++      BM_HEAP *psBMHeap;
++      SYS_DATA *psSysData;
++      IMG_UINT32 uFlags = 0;
++
++      if(pui32Flags)
++              uFlags = *pui32Flags;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++                      uSize, uFlags, uDevVAddrAlignment));
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++              return IMG_FALSE;
++
++      psBMHeap = (BM_HEAP*)hDevMemHeap;
++      pBMContext = psBMHeap->pBMContext;
++
++      if(uDevVAddrAlignment == 0)
++              uDevVAddrAlignment = 1;
++
++      
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                 sizeof (BM_BUF),
++                                 (IMG_PVOID *)&pBuf, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
++              return IMG_FALSE;
++      }
++      OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++      
++      if (AllocMemory(pBMContext,
++                                      psBMHeap,
++                                      psDevVAddr,
++                                      uSize,
++                                      uFlags,
++                                      uDevVAddrAlignment,
++                                      pBuf) != IMG_TRUE)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++              PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
++              return IMG_FALSE;
++      }
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++                uSize, uFlags, pBuf));
++
++      
++      pBuf->ui32RefCount = 1;
++      *phBuf = (BM_HANDLE)pBuf;
++      *pui32Flags = uFlags | psBMHeap->ui32Attribs;
++
++      return IMG_TRUE;
++}
++
++
++
++IMG_BOOL
++BM_Wrap (     IMG_HANDLE hDevMemHeap,
++                      IMG_UINT32 ui32Size,
++                      IMG_UINT32 ui32Offset,
++                      IMG_BOOL bPhysContig,
++                      IMG_SYS_PHYADDR *psSysAddr,
++                      IMG_VOID *pvCPUVAddr,
++                      IMG_UINT32 *pui32Flags,
++                      BM_HANDLE *phBuf)
++{
++      BM_BUF *pBuf;
++      BM_CONTEXT *psBMContext;
++      BM_HEAP *psBMHeap;
++      SYS_DATA *psSysData;
++      IMG_SYS_PHYADDR sHashAddress;
++      IMG_UINT32 uFlags;
++
++      psBMHeap = (BM_HEAP*)hDevMemHeap;
++      psBMContext = psBMHeap->pBMContext;
++
++      uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++      if(pui32Flags)
++              uFlags |= *pui32Flags;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++                      ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags));
++
++      if(SysAcquireData (&psSysData) != PVRSRV_OK)
++              return IMG_FALSE;
++
++      
++      sHashAddress = psSysAddr[0];
++      
++      
++      sHashAddress.uiAddr += ui32Offset;
++
++      
++      pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr);
++
++      if(pBuf)
++      {
++              IMG_UINT32 ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
++
++              
++              if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++                                                                                                              pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
++              {
++                      PVR_DPF((PVR_DBG_MESSAGE,
++                                      "BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
++                                      ui32Size, ui32Offset, sHashAddress.uiAddr));
++
++                      pBuf->ui32RefCount++;
++                      *phBuf = (BM_HANDLE)pBuf;
++                      if(pui32Flags)
++                              *pui32Flags = uFlags;
++
++                      return IMG_TRUE;
++              }
++      }
++
++      
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof (BM_BUF),
++                                              (IMG_PVOID *)&pBuf, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
++              return IMG_FALSE;
++      }
++      OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++      
++      if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++              return IMG_FALSE;
++      }
++
++      
++      if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++      {
++              
++              PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
++
++              if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
++              {
++                      FreeBuf (pBuf, uFlags);
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++                      PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
++                      return IMG_FALSE;
++              }
++      }
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++                      ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr));
++
++      
++      pBuf->ui32RefCount = 1;
++      *phBuf = (BM_HANDLE)pBuf;
++      if(pui32Flags)
++              *pui32Flags = uFlags;
++
++      return IMG_TRUE;
++}
++
++
++void
++BM_Free (BM_HANDLE hBuf,
++              IMG_UINT32 ui32Flags)
++{
++      BM_BUF *pBuf = (BM_BUF *)hBuf;
++      SYS_DATA *psSysData;
++      IMG_SYS_PHYADDR sHashAddr;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf));
++      PVR_ASSERT (pBuf!=IMG_NULL);
++
++      if(SysAcquireData (&psSysData) != PVRSRV_OK)
++              return;
++
++      pBuf->ui32RefCount--;
++
++      if(pBuf->ui32RefCount == 0)
++      {
++              if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++              {
++                      sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++                      HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash,  (IMG_UINTPTR_T)sHashAddr.uiAddr);
++              }
++              FreeBuf (pBuf, ui32Flags);
++      }
++}
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf)
++{
++      BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++      PVR_ASSERT (pBuf != IMG_NULL);
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "BM_HandleToCpuVaddr(h=%08X)=%08X",
++                              hBuf, pBuf->CpuVAddr));
++      return pBuf->CpuVAddr;
++}
++
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf)
++{
++      BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++      PVR_ASSERT (pBuf != IMG_NULL);
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf, pBuf->DevVAddr));
++      return pBuf->DevVAddr;
++}
++
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf)
++{
++      BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++      PVR_ASSERT (pBuf != IMG_NULL);
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, pBuf->CpuPAddr.uiAddr));
++      return SysCpuPAddrToSysPAddr (pBuf->CpuPAddr);
++}
++
++IMG_HANDLE
++BM_HandleToOSMemHandle(BM_HANDLE hBuf)
++{
++      BM_BUF *pBuf = (BM_BUF *)hBuf;
++
++      PVR_ASSERT (pBuf != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "BM_HandleToOSMemHandle(h=%08X)=%08X",
++                              hBuf, pBuf->hOSMemHandle));
++      return pBuf->hOSMemHandle;
++}
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++                                               IMG_UINT32 *pTotalBytes,
++                                               IMG_UINT32 *pAvailableBytes)
++{
++      if (pAvailableBytes || pTotalBytes || uFlags);
++      return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++                              BM_MAPPING *pMapping, 
++                              IMG_SIZE_T *pActualSize,
++                              IMG_UINT32 uFlags,
++                              IMG_UINT32 dev_vaddr_alignment,
++                              IMG_DEV_VIRTADDR *pDevVAddr)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++      IMG_UINT32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++      psDeviceNode = pBMContext->psDeviceNode;
++
++      if(uFlags & PVRSRV_MEM_INTERLEAVED)
++      {
++              
++              pMapping->uSize *= 2;
++      }
++      
++#ifdef PDUMP
++      if(uFlags & PVRSRV_MEM_DUMMY)
++      {
++              
++              ui32PDumpSize = HOST_PAGESIZE();
++      }
++#endif
++
++      
++      if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap, 
++                                                                      pMapping->uSize, 
++                                                                      pActualSize, 
++                                                                      0,
++                                                                      dev_vaddr_alignment, 
++                                                                      &(pMapping->DevVAddr)))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
++              return IMG_FALSE;
++      }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(pBMContext->psMMUContext);
++#endif
++
++      
++      
++      PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr, pMapping->hOSMemHandle, ui32PDumpSize, (IMG_HANDLE)pMapping);
++
++      switch (pMapping->eCpuMemoryOrigin)
++      {
++              case hm_wrapped:
++              case hm_wrapped_virtaddr:
++              case hm_contiguous:
++              {
++                      psDeviceNode->pfnMMUMapPages (  pMapping->pBMHeap->pMMUHeap,
++                                                      pMapping->DevVAddr,
++                                                      SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
++                                                      pMapping->uSize,
++                                                      uFlags,
++                                                      (IMG_HANDLE)pMapping);
++
++                      *pDevVAddr = pMapping->DevVAddr;
++                      break;
++              }
++              case hm_env:
++              {
++                      psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
++                                                      pMapping->DevVAddr,
++                                                      pMapping->uSize,
++                                                      pMapping->CpuVAddr,
++                                                      pMapping->hOSMemHandle,
++                                                      pDevVAddr,
++                                                      uFlags,
++                                                      (IMG_HANDLE)pMapping);
++                      break;
++              }
++              case hm_wrapped_scatter:
++              {
++                      psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
++                                                      pMapping->DevVAddr,
++                                                      pMapping->psSysAddr,
++                                                      pMapping->uSize,
++                                                      uFlags,
++                                                      (IMG_HANDLE)pMapping);
++
++                      *pDevVAddr = pMapping->DevVAddr;
++                      break;
++              }
++              default:
++                      PVR_DPF((PVR_DBG_ERROR,
++                              "Illegal value %d for pMapping->eCpuMemoryOrigin",
++                              pMapping->eCpuMemoryOrigin));
++                      return IMG_FALSE;
++      }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(pBMContext->psMMUContext);
++#endif
++
++      return IMG_TRUE;
++}
++
++static void
++DevMemoryFree (BM_MAPPING *pMapping)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++      IMG_UINT32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++      
++      if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++      {
++              
++              ui32PSize = HOST_PAGESIZE();
++      }
++      else
++      {
++              ui32PSize = pMapping->uSize;
++      }
++
++      PDUMPFREEPAGES(pMapping->pBMHeap, pMapping->DevVAddr,
++                                 ui32PSize, (IMG_HANDLE)pMapping,
++                                 (IMG_BOOL)(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED));
++#endif
++
++      psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++      psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, pMapping->uSize);
++}
++
++static IMG_BOOL
++BM_ImportMemory (void *pH,
++                        IMG_SIZE_T uRequestSize,
++                        IMG_SIZE_T *pActualSize,
++                        BM_MAPPING **ppsMapping,
++                        IMG_UINT32 uFlags,
++                        IMG_UINTPTR_T *pBase)
++{
++      BM_MAPPING *pMapping;
++      BM_HEAP *pBMHeap = pH;
++      BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++      IMG_BOOL bResult;
++      IMG_SIZE_T uSize;
++      IMG_SIZE_T uPSize;
++      IMG_UINT32 uDevVAddrAlignment = 0;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
++                        pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
++
++      PVR_ASSERT (ppsMapping != IMG_NULL);
++      PVR_ASSERT (pBMContext != IMG_NULL);
++
++      uSize = HOST_PAGEALIGN (uRequestSize);
++      PVR_ASSERT (uSize >= uRequestSize);
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof (BM_MAPPING),
++                                              (IMG_PVOID *)&pMapping, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
++              goto fail_exit;
++      }
++
++      pMapping->hOSMemHandle = 0;
++      pMapping->CpuVAddr = 0;
++      pMapping->DevVAddr.uiAddr = 0;
++      pMapping->CpuPAddr.uiAddr = 0;
++      pMapping->uSize = uSize;
++      pMapping->pBMHeap = pBMHeap;
++      pMapping->ui32Flags = uFlags;
++
++      
++      if (pActualSize)
++      {
++              *pActualSize = uSize;
++      }
++      
++      
++      if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++      {
++              uPSize = HOST_PAGESIZE();
++      }
++      else
++      {
++              uPSize = pMapping->uSize;
++      }
++
++      
++
++      if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++      {
++              
++              if (OSAllocPages(pBMHeap->ui32Attribs,
++                                               uPSize, 
++                                               (IMG_VOID **)&pMapping->CpuVAddr,
++                                               &pMapping->hOSMemHandle) != PVRSRV_OK) 
++              {
++                      PVR_DPF((PVR_DBG_ERROR,
++                                      "BM_ImportMemory: OSAllocPages(0x%x) failed",
++                                      uPSize));
++                      goto fail_mapping_alloc;
++              }
++
++              
++              pMapping->eCpuMemoryOrigin = hm_env;
++      }
++      else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++
++              
++              PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
++
++              if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
++                                         uPSize,
++                                         IMG_NULL,
++                                         IMG_NULL,
++                                         0,
++                                         HOST_PAGESIZE(),
++                                         0,
++                                         (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
++                      goto fail_mapping_alloc;
++              }
++
++              
++              pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++              if(OSReservePhys(pMapping->CpuPAddr,
++                                               uPSize,
++                                               pBMHeap->ui32Attribs,
++                                               &pMapping->CpuVAddr,
++                                               &pMapping->hOSMemHandle) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
++                      goto fail_dev_mem_alloc;
++              }
++
++              
++              pMapping->eCpuMemoryOrigin = hm_contiguous;
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
++              goto fail_mapping_alloc;
++      }
++
++      
++      bResult = DevMemoryAlloc (pBMContext, pMapping, IMG_NULL, uFlags,
++                                                        uDevVAddrAlignment, &pMapping->DevVAddr);
++      if (!bResult)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++                              pMapping->uSize));
++              goto fail_dev_mem_alloc;
++      }
++
++      PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
++
++      *pBase = pMapping->DevVAddr.uiAddr;
++      *ppsMapping = pMapping;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
++      return IMG_TRUE;
++
++fail_dev_mem_alloc:
++      if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++      {
++              
++              if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++              {
++                      pMapping->uSize /= 2;
++              }
++
++              if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++              {
++                      uPSize = HOST_PAGESIZE();
++              }
++              else
++              {
++                      uPSize = pMapping->uSize;
++              }
++
++              if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++              {
++                      OSFreePages(pBMHeap->ui32Attribs, 
++                                                uPSize, 
++                                                (void *) pMapping->CpuVAddr,
++                                                pMapping->hOSMemHandle);
++              }
++              else
++              {
++                      IMG_SYS_PHYADDR sSysPAddr;
++
++                      if(pMapping->CpuVAddr)
++                      {
++                              OSUnReservePhys(pMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, pMapping->hOSMemHandle);
++                      }
++                      sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);          
++                      RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);      
++              }
++      }
++fail_mapping_alloc:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++fail_exit:
++      return IMG_FALSE;
++}
++
++
++static void
++BM_FreeMemory (void *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
++{
++      BM_HEAP *pBMHeap = h;
++      IMG_SIZE_T uPSize;
++
++      PVR_UNREFERENCED_PARAMETER (_base);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base, psMapping));
++
++      PVR_ASSERT (psMapping != IMG_NULL);
++
++      DevMemoryFree (psMapping);
++
++      
++      if(psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++      {
++              psMapping->uSize /= 2;
++      }
++      
++      if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++      {
++              uPSize = HOST_PAGESIZE();
++      }
++      else
++      {
++              uPSize = psMapping->uSize;
++      }
++      
++      if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++      {
++              OSFreePages(pBMHeap->ui32Attribs,
++                                              uPSize,
++                                              (void *) psMapping->CpuVAddr,
++                                              psMapping->hOSMemHandle);
++      }
++      else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++
++              OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++              sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++              RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++                      h, _base, psMapping));
++}
++
++PVRSRV_ERROR BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                              IMG_DEV_VIRTADDR sDevVPageAddr,
++                                                              IMG_DEV_PHYADDR *psDevPAddr)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
++
++      if(!psMemInfo || !psDevPAddr)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_GetPhysPageAddr: Invalid params"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++      psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++      *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap, 
++                                                                                              sDevVPageAddr);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, PVRSRV_HEAP_INFO *psHeapInfo)
++{
++      BM_HEAP *psBMHeap = (BM_HEAP *)hDevMemHeap;
++
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo"));
++
++      psHeapInfo->hDevMemHeap = hDevMemHeap;
++      psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++      psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++      psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++      return PVRSRV_OK;
++}
++
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
++{
++      BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
++
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
++
++      return pBMHeap->pBMContext->psMMUContext;
++}
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
++{
++      BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
++
++      PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
++
++      return pBMContext->psMMUContext;
++}
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
++{
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
++
++      return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
++}
++
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
++{
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
++
++      return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
++}
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
++
++      return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1734 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/module.h>
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef struct PVRSRV_DC_BUFFER_TAG
++{
++      
++      PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++      struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++      struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++} PVRSRV_DC_BUFFER;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_TAG
++{
++      IMG_HANDLE                                                      hExtSwapChain;
++      PVRSRV_QUEUE_INFO                                       *psQueue;
++      PVRSRV_DC_BUFFER                                        asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++      IMG_UINT32                                                      ui32BufferCount;
++      PVRSRV_DC_BUFFER                                        *psLastFlipBuffer;
++      struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++      IMG_HANDLE                                                      hResItem;
++} PVRSRV_DC_SWAPCHAIN;
++
++typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG
++{
++      IMG_UINT32                                                      ui32RefCount;
++      IMG_UINT32                                                      ui32DeviceID;
++      IMG_HANDLE                                                      hExtDevice;
++      PPVRSRV_DC_SRV2DISP_KMJTABLE            psFuncTable;
++      IMG_HANDLE                                                      hDevMemContext;
++      PVRSRV_DC_BUFFER                                        sSystemBuffer;
++} PVRSRV_DISPLAYCLASS_INFO;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG
++{
++      PVRSRV_DISPLAYCLASS_INFO                        *psDCInfo;
++      IMG_HANDLE                                                      hResItem;
++} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
++
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++typedef struct PVRSRV_BC_BUFFER_TAG
++{
++      
++      PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++      struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
++} PVRSRV_BC_BUFFER;
++
++
++typedef struct PVRSRV_BUFFERCLASS_INFO_TAG
++{
++      IMG_UINT32                                                      ui32RefCount;
++      IMG_UINT32                                                      ui32DeviceID;
++      IMG_HANDLE                                                      hExtDevice;
++      PPVRSRV_BC_SRV2BUFFER_KMJTABLE          psFuncTable;
++      IMG_HANDLE                                                      hDevMemContext;
++      
++      IMG_UINT32                                                      ui32BufferCount;
++      PVRSRV_BC_BUFFER                                        *psBuffer;
++
++} PVRSRV_BUFFERCLASS_INFO;
++
++
++typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG
++{
++      PVRSRV_BUFFERCLASS_INFO                         *psBCInfo;
++      IMG_HANDLE                                                      hResItem;
++} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
++
++
++static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM)
++{
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++      psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      return psDCPerContextInfo->psDCInfo;
++}
++
++
++static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM)
++{
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++      psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      return psBCPerContextInfo->psBCInfo;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass,
++                                                                IMG_UINT32 *pui32DevCount,
++                                                                IMG_UINT32 *pui32DevID )
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      IMG_UINT                        ui32DevCount = 0;
++      SYS_DATA                        *psSysData;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              if      ((psDeviceNode->sDevId.eDeviceClass == DeviceClass)
++              &&      (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT))
++              {
++                      ui32DevCount++;
++                      if(pui32DevID)
++                      {
++                              *pui32DevID++ = psDeviceNode->sDevId.ui32DeviceIndex;
++                      }
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      if(pui32DevCount)
++      {
++              *pui32DevCount = ui32DevCount;
++      }
++      else if(pui32DevID == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters"));
++              return (PVRSRV_ERROR_INVALID_PARAMS);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
++                                                                         IMG_UINT32 *pui32DeviceID)
++{
++      PVRSRV_DISPLAYCLASS_INFO        *psDCInfo = IMG_NULL;
++      PVRSRV_DEVICE_NODE                      *psDeviceNode;
++      SYS_DATA                                        *psSysData;
++
++      
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++
++
++      
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(*psDCInfo),
++                                       (IMG_VOID **)&psDCInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet (psDCInfo, 0, sizeof(*psDCInfo));
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++                                       (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL) != PVRSRV_OK)
++      {               
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++      
++      *psDCInfo->psFuncTable = *psFuncTable;
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DEVICE_NODE),
++                                       (IMG_VOID **)&psDeviceNode, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++      psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo;
++      psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++      psDeviceNode->ui32RefCount = 1;
++      psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++      psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++      psDeviceNode->psSysData = psSysData;
++
++      
++      AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++      psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      if (pui32DeviceID)
++      {
++              *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      }
++      
++      
++      SysRegisterExternalDevice(psDeviceNode);
++
++      
++      psDeviceNode->psNext = psSysData->psDeviceNodeList;
++      psSysData->psDeviceNodeList = psDeviceNode;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      if(psDCInfo->psFuncTable)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo->psFuncTable, IMG_NULL);
++      }
++      
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo, IMG_NULL);
++
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++
++PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++      SYS_DATA                                        *psSysData;
++      PVRSRV_DEVICE_NODE                      **ppsDeviceNode, *psDeviceNode;
++      PVRSRV_DISPLAYCLASS_INFO        *psDCInfo;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      ppsDeviceNode = &psSysData->psDeviceNodeList;
++      while(*ppsDeviceNode)
++      {
++              switch((*ppsDeviceNode)->sDevId.eDeviceClass)
++              {
++                      case PVRSRV_DEVICE_CLASS_DISPLAY :
++                      {
++                              if((*ppsDeviceNode)->sDevId.ui32DeviceIndex == ui32DevIndex)
++                              {
++                                      goto FoundDevice;
++                              }
++                              break;
++                      }
++                      default:
++                      {
++                              break;
++                      }
++              }
++              ppsDeviceNode = &((*ppsDeviceNode)->psNext);
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++      
++      psDeviceNode = *ppsDeviceNode;
++      *ppsDeviceNode = psDeviceNode->psNext;
++
++      
++      SysRemoveExternalDevice(psDeviceNode);
++      
++      
++
++
++      psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++      PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++      FreeDeviceID(psSysData, ui32DevIndex);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo->psFuncTable, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDeviceNode, IMG_NULL);
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVRegisterBCDeviceKM
++ *
++ * Register an external buffer-class (BC) device with services.  Takes a
++ * private copy of the caller's jump table, builds a PVRSRV_DEVICE_NODE of
++ * class BUFFER, assigns a device index and links the node at the head of
++ * the system device list.
++ *
++ * psFuncTable   - server-to-buffer jump table supplied by the BC driver
++ *                 (copied; the caller's structure need not persist).
++ * pui32DeviceID - optional out-parameter; receives the new device index.
++ *
++ * Returns PVRSRV_OK on success, PVRSRV_ERROR_GENERIC if system data is
++ * unavailable, or PVRSRV_ERROR_OUT_OF_MEMORY on any allocation failure.
++ */
++PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
++                                                                         IMG_UINT32   *pui32DeviceID)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL;
++      PVRSRV_DEVICE_NODE              *psDeviceNode;
++      SYS_DATA                                *psSysData;
++      
++
++
++
++
++
++
++
++
++
++
++
++
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++
++
++      /* Per-device info block for this BC device. */
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(*psBCInfo),
++                                       (IMG_VOID **)&psBCInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet (psBCInfo, 0, sizeof(*psBCInfo));      
++
++      /* Private copy of the external driver's jump table. */
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++                                       (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++      /* Structure copy of the whole jump table. */
++      *psBCInfo->psFuncTable = *psFuncTable;
++
++      /* Device node that represents this BC device on the system list. */
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DEVICE_NODE),
++                                       (IMG_VOID **)&psDeviceNode, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++      psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo;
++      psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++      psDeviceNode->ui32RefCount = 1;
++      psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++      psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++      psDeviceNode->psSysData = psSysData;
++
++      /* NOTE(review): AllocateDeviceID's return value is ignored here -
++         confirm it cannot fail, or the index below may be invalid. */
++      AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++      psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      if (pui32DeviceID)
++      {
++              *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      }
++
++      /* Link at the head of the system device list. */
++      psDeviceNode->psNext = psSysData->psDeviceNodeList;
++      psSysData->psDeviceNodeList = psDeviceNode;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      /* psBCInfo is always valid here (first alloc returns directly). */
++      if(psBCInfo->psFuncTable)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psFuncTable, IMG_NULL);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo, IMG_NULL);
++
++      return PVRSRV_ERROR_OUT_OF_MEMORY;      
++}
++
++
++/*
++ * PVRSRVRemoveBCDeviceKM
++ *
++ * Unregister the buffer-class device with the given index: unlink its
++ * device node from the system list, release the device ID and free the
++ * per-device structures allocated by PVRSRVRegisterBCDeviceKM.
++ *
++ * ui32DevIndex - device index returned at registration time.
++ *
++ * Returns PVRSRV_OK, or PVRSRV_ERROR_GENERIC if system data is
++ * unavailable or no BUFFER-class node with that index exists.
++ */
++PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++      SYS_DATA                                        *psSysData;
++      PVRSRV_DEVICE_NODE                      **ppsDevNode, *psDevNode;
++      PVRSRV_BUFFERCLASS_INFO         *psBCInfo;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      /* Walk with a pointer-to-pointer so the matching node can be
++         unlinked without tracking a separate 'previous' node. */
++      ppsDevNode = &psSysData->psDeviceNodeList;
++      while(*ppsDevNode)
++      {
++              switch((*ppsDevNode)->sDevId.eDeviceClass)
++              {
++                      case PVRSRV_DEVICE_CLASS_BUFFER :
++                      {
++                              if((*ppsDevNode)->sDevId.ui32DeviceIndex == ui32DevIndex)
++                              {
++                                      goto FoundDevice;
++                              }
++                              break;
++                      }
++                      default:
++                      {
++                              break;
++                      }
++              }
++              ppsDevNode = &(*ppsDevNode)->psNext;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++      /* Unlink the node from the list before freeing anything. */
++      psDevNode = *(ppsDevNode);
++      *ppsDevNode = psDevNode->psNext;
++
++      
++
++
++      FreeDeviceID(psSysData, ui32DevIndex);
++      psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice;
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psFuncTable, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDevNode, IMG_NULL);
++      return PVRSRV_OK;
++}
++
++
++
++/*
++ * PVRSRVCloseDCDeviceKM
++ *
++ * Close a display-class device handle returned by PVRSRVOpenDCDeviceKM.
++ * The actual teardown (refcount drop, external close, sync-info free)
++ * happens in CloseDCDeviceCallBack when the resource manager releases
++ * the per-context item.
++ *
++ * hDeviceKM       - per-context DC handle from the open call.
++ * bResManCallback - unused; kept for interface symmetry with resman.
++ *
++ * Returns the result of the resman free (PVRSRV_OK on success).
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE        hDeviceKM,
++                                                                      IMG_BOOL        bResManCallback)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++      PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++      /* Reject a null handle before dereferencing it, matching the
++         validation done by every other DC entry point in this file. */
++      if(!hDeviceKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCloseDCDeviceKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      /* Free the resman item; this invokes CloseDCDeviceCallBack. */
++      eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem, IMG_TRUE);
++
++      return eError;
++}
++              
++
++/*
++ * CloseDCDeviceCallBack
++ *
++ * Resource-manager callback that tears down one per-context DC reference.
++ * Drops the DC device's refcount; on the last reference it closes the
++ * external display driver, frees the system-buffer sync info and clears
++ * the cached handles.  Always frees the per-context info block.
++ *
++ * ui32ProcessID / ui32Param - unused resman callback parameters.
++ * pvParam                   - the PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO.
++ */
++static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_UINT32  ui32ProcessID,
++                                                                                IMG_PVOID             pvParam,
++                                                                                IMG_UINT32    ui32Param)
++{
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam;
++      psDCInfo = psDCPerContextInfo->psDCInfo;
++
++      psDCInfo->ui32RefCount--;
++      if(psDCInfo->ui32RefCount == 0)
++      {       
++              /* Last reference: close the external DC device first, then
++                 release the sync info allocated at first open. */
++              psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
++
++              PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++              
++              psDCInfo->hDevMemContext = IMG_NULL;
++              psDCInfo->hExtDevice = IMG_NULL;
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCPerContextInfo, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVOpenDCDeviceKM
++ *
++ * Open a display-class device by index.  Allocates a per-context info
++ * block and registers it with the resource manager; on the first open of
++ * a given DC device it also allocates the system-buffer sync info and
++ * opens the external (3rd-party) display driver.
++ *
++ * ui32DeviceID - device index to open.
++ * hDevCookie   - device node cookie (used on first open for the kernel
++ *                memory context).
++ * phDeviceKM   - out: opaque per-context handle for later DC calls.
++ *
++ * Returns PVRSRV_OK, PVRSRV_ERROR_GENERIC (bad params / no such device),
++ * PVRSRV_ERROR_OUT_OF_MEMORY, or the error from first-open setup.
++ *
++ * Fix vs. original: psDCPerContextInfo was leaked on both first-open
++ * failure paths; it is now freed before returning.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM (IMG_UINT32 ui32DeviceID,
++                                                                 IMG_HANDLE hDevCookie,
++                                                                 IMG_HANDLE *phDeviceKM)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                        *psSysData;
++
++      if(!phDeviceKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      
++      /* Find the DISPLAY-class device node with the requested index. */
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              if ((psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) &&
++                      (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID))
++              {
++                      psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++                      goto FoundDevice;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++      /* Per-context tracking block, freed by CloseDCDeviceCallBack. */
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(*psDCPerContextInfo),
++                                (IMG_VOID **)&psDCPerContextInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++      if(psDCInfo->ui32RefCount++ == 0)
++      {
++              /* First open: set up the kernel memory context, the
++                 system-buffer sync info, and the external DC device. */
++              PVRSRV_ERROR eError;
++
++              psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++              PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++              psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++              eError = PVRSRVAllocSyncInfoKM(IMG_NULL, 
++                                                                      (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext,
++                                                                      &psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
++                      psDCInfo->ui32RefCount--;
++                      /* Don't leak the per-context block on failure. */
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCPerContextInfo, IMG_NULL);
++                      return eError;
++              }
++
++              eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
++                                                              &psDCInfo->hExtDevice,
++                                                              (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
++                      psDCInfo->ui32RefCount--;
++                      PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++                      /* Don't leak the per-context block on failure. */
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCPerContextInfo, IMG_NULL);
++                      return eError;
++              }
++      }
++
++      psDCPerContextInfo->psDCInfo = psDCInfo;
++      psDCPerContextInfo->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++                                                                                                                               psDCPerContextInfo,
++                                                                                                                              0,
++                                                                                                                              CloseDCDeviceCallBack,
++                                                                                                                              0);
++
++      /* Hand the per-context block back as the opaque device handle. */
++      *phDeviceKM = (IMG_HANDLE)psDCPerContextInfo;
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVEnumDCFormatsKM
++ *
++ * Enumerate the pixel formats supported by a display-class device by
++ * forwarding to the external driver's pfnEnumDCFormats.
++ *
++ * hDeviceKM  - per-context DC handle.
++ * pui32Count - out: number of formats.
++ * psFormat   - out: caller-provided format array.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM,
++                                                                      IMG_UINT32 *pui32Count,
++                                                                      DISPLAY_FORMAT *psFormat)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++      if(!hDeviceKM || !pui32Count || !psFormat)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Forward directly to the external display driver. */
++      return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat);
++}
++
++
++
++/*
++ * PVRSRVEnumDCDimsKM
++ *
++ * Enumerate the dimensions a display-class device supports for a given
++ * format, via the external driver's pfnEnumDCDims.
++ *
++ * hDeviceKM  - per-context DC handle.
++ * psFormat   - format to query.
++ * pui32Count - out: number of dimension entries.
++ * psDim      - out: caller-provided dimensions array.  NOTE(review):
++ *              psDim is deliberately not validated here - presumably the
++ *              external driver accepts NULL for a count-only query;
++ *              confirm against the DC driver interface.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM,
++                                                               DISPLAY_FORMAT *psFormat,
++                                                               IMG_UINT32 *pui32Count,
++                                                               DISPLAY_DIMS *psDim)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++      if(!hDeviceKM || !pui32Count || !psFormat)      
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Forward directly to the external display driver. */
++      return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim);
++}
++
++
++/*
++ * PVRSRVGetDCSystemBufferKM
++ *
++ * Fetch the display device's system (primary) buffer from the external
++ * driver and wrap it in the DC info's cached sSystemBuffer descriptor.
++ *
++ * hDeviceKM - per-context DC handle.
++ * phBuffer  - out: handle to the wrapped system buffer (points into
++ *             psDCInfo, so it stays valid while the device is open).
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM,
++                                                                              IMG_HANDLE *phBuffer)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      IMG_HANDLE hExtBuffer;
++
++      if(!hDeviceKM || !phBuffer)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Ask the external driver for its system buffer handle. */
++      eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
++              return eError;          
++      }
++
++      /* Populate the cached descriptor used by later mapping calls. */
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++      psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
++
++      /* The handle returned is the address of the cached descriptor. */
++      *phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer);
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVGetDCInfoKM
++ *
++ * Query display information from the external driver, clamping the
++ * advertised swap-chain buffer count to the services maximum so later
++ * fixed-size arrays (PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) cannot overflow.
++ *
++ * hDeviceKM     - per-context DC handle.
++ * psDisplayInfo - out: display capabilities.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM,
++                                                              DISPLAY_INFO *psDisplayInfo)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_ERROR eError;
++
++      if(!hDeviceKM || !psDisplayInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      /* Clamp to what services' fixed-size buffer arrays can hold. */
++      if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++      {
++              psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVDestroyDCSwapChainKM
++ *
++ * Destroy a DC swap chain: release the resman item (unless called from
++ * resman itself), destroy the command queue, tell the external driver to
++ * destroy its swap chain, free each buffer's sync info and finally the
++ * swap chain structure.
++ *
++ * hSwapChain      - swap chain handle from PVRSRVCreateDCSwapChainKM.
++ * bResManCallback - IMG_TRUE when invoked via DestroyDCSwapChainCallBack,
++ *                   to avoid re-entering the resource manager.
++ *
++ * Returns the external driver's destroy result (teardown of local state
++ * proceeds regardless).
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain, 
++                                                                              IMG_BOOL bResManCallback)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++      IMG_UINT32 i;
++
++      if(!hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++      psDCInfo = psSwapChain->psDCInfo;
++
++      /* Direct (non-resman) call: free the resman item, which re-enters
++         this function with bResManCallback == IMG_TRUE. */
++      if(!bResManCallback && psSwapChain->hResItem)
++      {
++              /* NOTE(review): this inner eError shadows the outer one;
++                 harmless, but worth cleaning up upstream. */
++              PVRSRV_ERROR eError;
++              
++              eError = ResManFreeResByPtr(psSwapChain->hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      /* Queue first, then the external swap chain, then the buffers. */
++      PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++
++      
++      eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++                                                                                                                      psSwapChain->hExtSwapChain);
++
++      
++      for(i=0; i<psSwapChain->ui32BufferCount; i++)
++      {
++              if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++              {
++                      PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psSwapChain, IMG_NULL);
++
++      return eError;
++}
++
++
++/*
++ * DestroyDCSwapChainCallBack
++ *
++ * Resource-manager callback: forwards to PVRSRVDestroyDCSwapChainKM with
++ * bResManCallback set so the resman item is not freed re-entrantly.
++ */
++static PVRSRV_ERROR DestroyDCSwapChainCallBack(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      IMG_HANDLE hSwapChain = (IMG_HANDLE)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVDestroyDCSwapChainKM(hSwapChain, IMG_TRUE);
++}
++
++
++
++/*
++ * PVRSRVCreateDCSwapChainKM
++ *
++ * Create a DC swap chain: allocates the swap-chain structure and a flip
++ * command queue, allocates per-buffer kernel sync info, then asks the
++ * external driver to create its swap chain and registers the result with
++ * the resource manager.
++ *
++ * hDeviceKM        - per-context DC handle.
++ * ui32Flags        - creation flags, passed through to the DC driver.
++ * psDstSurfAttrib  - destination surface attributes.
++ * psSrcSurfAttrib  - source surface attributes.
++ * ui32BufferCount  - number of back buffers (<= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS).
++ * ui32OEMFlags     - OEM flags, passed through to the DC driver.
++ * phSwapChain      - out: swap chain handle.
++ * pui32SwapChainID - out: ID assigned by the external driver.
++ *
++ * Fix vs. original: the ErrorExit path walked psSwapChain->asBuffer even
++ * when the psSwapChain allocation itself had failed (psSwapChain still
++ * IMG_NULL), dereferencing NULL; the cleanup loop is now guarded.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (IMG_HANDLE                            hDeviceKM,
++                                                                              IMG_UINT32                              ui32Flags,
++                                                                              DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++                                                                              DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++                                                                              IMG_UINT32                              ui32BufferCount,
++                                                                              IMG_UINT32                              ui32OEMFlags,
++                                                                              IMG_HANDLE                              *phSwapChain,
++                                                                              IMG_UINT32                              *pui32SwapChainID)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++      PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++      PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
++      PVRSRV_ERROR eError;
++      IMG_UINT32 i;
++
++
++      if(!hDeviceKM
++      || !psDstSurfAttrib
++      || !psSrcSurfAttrib
++      || !phSwapChain
++      || !pui32SwapChainID)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers"));
++              return PVRSRV_ERROR_TOOMANYBUFFERS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Swap-chain bookkeeping structure. */
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DC_SWAPCHAIN),
++                                       (IMG_VOID **)&psSwapChain, IMG_NULL) != PVRSRV_OK)     
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorExit;
++      }
++      OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
++
++      /* Command queue for flip commands on this swap chain. */
++      eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
++              goto ErrorExit;
++      }
++
++      
++      psSwapChain->psQueue = psQueue;
++
++      /* Per-buffer sync info and back-references. */
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++                                                                              psDCInfo->hDevMemContext,
++                                                                              &psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain"));
++                      goto ErrorExit;
++              }
++
++              
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++
++              
++              psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++              psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++
++              /* Sync data pointers handed to the external driver below. */
++              apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++      }
++
++      psSwapChain->ui32BufferCount = ui32BufferCount;
++      psSwapChain->psDCInfo = psDCInfo;
++
++      /* Ask the external (3rd-party) driver to create its swap chain. */
++      eError =  psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++                                                                                                              ui32Flags,
++                                                                                                              psDstSurfAttrib,
++                                                                                                              psSrcSurfAttrib,
++                                                                                                              ui32BufferCount,
++                                                                                                              apsSyncData,
++                                                                                                              ui32OEMFlags,
++                                                                                                              &psSwapChain->hExtSwapChain,
++                                                                                                              pui32SwapChainID);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
++              goto ErrorExit;
++      }
++      
++      
++      *phSwapChain = (IMG_HANDLE)psSwapChain;
++
++      /* Register with resman; teardown goes via DestroyDCSwapChainCallBack. */
++      psSwapChain->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN,
++                                                                                                                              psSwapChain,
++                                                                                                                              0,
++                                                                                                                              DestroyDCSwapChainCallBack,
++                                                                                                                              0);
++
++      return eError;
++
++ErrorExit:
++
++      /* psSwapChain is still IMG_NULL if its own allocation failed above;
++         only walk the buffer array when the swap chain exists. */
++      if(psSwapChain)
++      {
++              for(i=0; i<ui32BufferCount; i++)
++              {
++                      if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++                      {
++                              PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++                      }
++              }
++      }
++
++      if(psQueue)
++      {
++              PVRSRVDestroyCommandQueueKM(psQueue);
++      }
++
++      if(psSwapChain)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psSwapChain, IMG_NULL);
++      }
++
++      return eError;
++}
++
++
++
++
++/*
++ * PVRSRVSetDCDstRectKM
++ *
++ * Set the destination rectangle for a swap chain; thin forwarder to the
++ * external driver's pfnSetDCDstRect.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT              *psRect)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
++                                                                                                      psSwapChain->hExtSwapChain,
++                                                                                                      psRect);
++}
++
++
++/*
++ * PVRSRVSetDCSrcRectKM
++ *
++ * Set the source rectangle for a swap chain; thin forwarder to the
++ * external driver's pfnSetDCSrcRect.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT              *psRect)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
++                                                                                                      psSwapChain->hExtSwapChain,
++                                                                                                      psRect);
++}
++
++
++/*
++ * PVRSRVSetDCDstColourKeyKM
++ *
++ * Set the destination colour key for a swap chain; thin forwarder to the
++ * external driver's pfnSetDCDstColourKey.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                         IMG_HANDLE   hSwapChain,
++                                                                         IMG_UINT32   ui32CKColour)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
++                                                                                                              psSwapChain->hExtSwapChain,
++                                                                                                              ui32CKColour);
++}
++
++
++/*
++ * PVRSRVSetDCSrcColourKeyKM
++ *
++ * Set the source colour key for a swap chain; thin forwarder to the
++ * external driver's pfnSetDCSrcColourKey.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                         IMG_HANDLE   hSwapChain,
++                                                                         IMG_UINT32   ui32CKColour)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
++                                                                                                              psSwapChain->hExtSwapChain,
++                                                                                                              ui32CKColour);
++}
++
++
++/*
++ * PVRSRVGetDCBuffersKM
++ *
++ * Fetch the external buffer handles for a swap chain and hand back the
++ * corresponding services-side buffer wrappers.
++ *
++ * hDeviceKM        - per-context DC handle.
++ * hSwapChain       - swap chain handle.
++ * pui32BufferCount - out: number of buffers.
++ * phBuffer         - out: array of wrapper handles (one per buffer).
++ *
++ * Fix vs. original: the pfnGetDCBuffers error code was not checked
++ * before *pui32BufferCount and ahExtBuffer[] were consumed - on failure
++ * both may be uninitialized.  Bail out early instead.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_UINT32    *pui32BufferCount,
++                                                                IMG_HANDLE    *phBuffer)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++      IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++      PVRSRV_ERROR eError;
++      IMG_UINT32 i;
++
++      if(!hDeviceKM || !hSwapChain || !phBuffer)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters"));    
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      /* Ask the external driver for its buffer handles. */
++      eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++                                                                                                      psSwapChain->hExtSwapChain,
++                                                                                                      pui32BufferCount,
++                                                                                                      ahExtBuffer);
++      if(eError != PVRSRV_OK)
++      {
++              /* Don't touch the (possibly uninitialized) count/handles. */
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Failed to get DC buffers from external driver"));
++              return eError;
++      }
++
++      PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++      /* Cache each external handle and return the wrapper addresses. */
++      for(i=0; i<*pui32BufferCount; i++)
++      {
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i];
++              phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i];
++      }
++
++      return eError;
++}
++
++
++/*
++ * PVRSRVSwapToDCBufferKM
++ *
++ * Queue a flip to the given swap-chain buffer: builds a DC_FLIP_COMMAND
++ * (with trailing clip-rect array) on the swap chain's command queue,
++ * submits it, then pumps the queues until the command is processed or a
++ * timeout expires.
++ *
++ * hDeviceKM        - per-context DC handle.
++ * hBuffer          - buffer wrapper from PVRSRVGetDCBuffersKM.
++ * ui32SwapInterval - vsync intervals to wait before the flip.
++ * hPrivateTag      - opaque tag passed through to the DC driver.
++ * ui32ClipRectCount/psClipRect - clip rectangles copied into the command.
++ *
++ * NOTE(review): psBuffer->psSwapChain is dereferenced without a null
++ * check - presumably the system buffer (which has no swap chain) is
++ * never passed here; confirm against callers.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hBuffer,
++                                                                      IMG_UINT32      ui32SwapInterval,
++                                                                      IMG_HANDLE      hPrivateTag,
++                                                                      IMG_UINT32      ui32ClipRectCount,
++                                                                      IMG_RECT        *psClipRect)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_BUFFER *psBuffer;
++      PVRSRV_QUEUE_INFO *psQueue;
++      DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++      IMG_UINT32 i;
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0;
++      IMG_UINT32 ui32NumSrcSyncs = 1;
++      PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++      PVRSRV_COMMAND *psCommand;
++
++      if(!hDeviceKM || !hBuffer || !psClipRect)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++#if defined(SUPPORT_LMA)
++      eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif 
++      
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psBuffer = (PVRSRV_DC_BUFFER*)hBuffer;
++
++      
++      psQueue = psBuffer->psSwapChain->psQueue;
++
++      /* Source syncs: the target buffer, plus the previously flipped
++         buffer (if any) so the flips are serialized. */
++      apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++      if(psBuffer->psSwapChain->psLastFlipBuffer)
++      {
++              apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++              ui32NumSrcSyncs++;
++      }
++
++      /* Reserve queue space for the command plus its clip rects. */
++      eError = PVRSRVInsertCommandKM (psQueue,
++                                                                      &psCommand,
++                                                                      psDCInfo->ui32DeviceID,
++                                                                      DC_FLIP_COMMAND,
++                                                                      0,
++                                                                      IMG_NULL,
++                                                                      ui32NumSrcSyncs,
++                                                                      apsSrcSync,
++                                                                      sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount));
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
++              goto Exit;
++      }
++      
++      /* Fill in the flip command payload in-place in the queue. */
++      psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++      
++      psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++      
++      psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++
++      
++      psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++
++      
++      psFlipCmd->hPrivateTag = hPrivateTag;
++
++      
++      psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++      /* Clip rects live immediately after the command structure. */
++      psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
++      
++      for(i=0; i<ui32ClipRectCount; i++)
++      {
++              psFlipCmd->psClipRect[i] = psClipRect[i];
++      }
++
++      
++      psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++              
++      eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command"));
++              goto Exit;
++      }
++      
++      /* Pump the queues; retry with short waits until processing is no
++         longer blocked, bounded by MAX_HW_TIME_US overall. */
++
++
++
++
++
++
++      do
++      {
++              if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++              {
++                      goto ProcessedQueues;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);     
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to process queues"));
++
++      eError = PVRSRV_ERROR_GENERIC;
++      goto Exit;
++
++ProcessedQueues:
++      /* Remember this buffer so the next flip can sync against it. */
++      psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++#if defined(SUPPORT_LMA)
++      PVRSRVPowerUnlock(KERNEL_ID);
++#endif        
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hSwapChain)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_QUEUE_INFO *psQueue;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++      DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0;
++      IMG_UINT32 ui32NumSrcSyncs = 1;
++      PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++      PVRSRV_COMMAND *psCommand;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++#if defined(SUPPORT_LMA)
++      eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif 
++      
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      
++      psQueue = psSwapChain->psQueue;
++
++      
++      apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++      if(psSwapChain->psLastFlipBuffer)
++      {
++              apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++              ui32NumSrcSyncs++;
++      }
++
++      
++      eError = PVRSRVInsertCommandKM (psQueue,
++                                                                      &psCommand,
++                                                                      psDCInfo->ui32DeviceID,
++                                                                      DC_FLIP_COMMAND,
++                                                                      0,
++                                                                      IMG_NULL,
++                                                                      ui32NumSrcSyncs,
++                                                                      apsSrcSync,
++                                                                      sizeof(DISPLAYCLASS_FLIP_COMMAND));
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
++              goto Exit;
++      }
++
++      
++      psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++      
++      psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++      
++      psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++
++      
++      psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++
++      
++      psFlipCmd->hPrivateTag = IMG_NULL;
++
++      
++      psFlipCmd->ui32ClipRectCount = 0;
++
++      psFlipCmd->ui32SwapInterval = 1;
++
++      
++      eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
++              goto Exit;
++      }
++
++      
++
++
++
++
++
++
++      do
++      {
++              if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++              {
++                      goto ProcessedQueues;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to process queues"));
++      eError = PVRSRV_ERROR_GENERIC;
++      goto Exit;
++
++ProcessedQueues:
++      
++      psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++      eError = PVRSRV_OK;
++      
++Exit:
++#if defined(SUPPORT_LMA)
++      PVRSRVPowerUnlock(KERNEL_ID);
++#endif        
++      return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER  pfnISRHandler,
++                                                                                       IMG_VOID                       *pvISRHandlerData,
++                                                                                       IMG_UINT32                     ui32ISRSourceMask,
++                                                                                       IMG_UINT32                     ui32DeviceID)
++{
++      SYS_DATA                        *psSysData;
++      PVRSRV_DEVICE_NODE      *psDevNode;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psDevNode = psSysData->psDeviceNodeList;
++      while(psDevNode)
++      {
++              if(psDevNode->sDevId.ui32DeviceIndex == ui32DeviceID)
++              {
++                      break;
++              }
++              psDevNode = psDevNode->psNext;
++      }
++
++      
++      psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData;
++
++      
++      psDevNode->pfnDeviceISR = pfnISRHandler;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State)
++{
++      PVRSRV_DISPLAYCLASS_INFO        *psDCInfo;
++      PVRSRV_DEVICE_NODE                      *psDeviceNode;
++      SYS_DATA                                        *psSysData;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCState: Failed to get SysData"));
++              return;
++      }
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++              {
++                      psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
++                      if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice)
++                      {
++                              psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State);
++                      }
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
++{
++      psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
++      psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++      psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++      psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++      psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++      psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++      psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++      psJTable->pfnPVRSRVRegisterSystemISRHandler = PVRSRVRegisterSystemISRHandler;
++      psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++
++      return IMG_TRUE;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE        hDeviceKM,
++                                                                      IMG_BOOL        bResManCallback)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++      PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++      psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      
++      eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem, IMG_TRUE);
++
++      return eError;
++}
++
++
++static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_UINT32  ui32ProcessID,
++                                                                                IMG_PVOID             pvParam,
++                                                                                IMG_UINT32    ui32Param)
++{
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
++      psBCInfo = psBCPerContextInfo->psBCInfo;
++
++      psBCInfo->ui32RefCount--;
++      if(psBCInfo->ui32RefCount == 0)
++      {
++              IMG_UINT32 i;
++
++              
++              psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice);
++
++              
++              for(i=0; i<psBCInfo->ui32BufferCount; i++)
++              {
++                      if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++                      {
++                              PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++                      }
++              }
++
++              
++              if(psBCInfo->psBuffer)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psBuffer, IMG_NULL);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCPerContextInfo, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM (IMG_UINT32 ui32DeviceID,
++                                                                 IMG_HANDLE hDevCookie,
++                                                                 IMG_HANDLE *phDeviceKM)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO      *psBCPerContextInfo;
++      PVRSRV_DEVICE_NODE              *psDeviceNode;
++      SYS_DATA                                *psSysData;
++      IMG_UINT32                              i;
++      PVRSRV_ERROR                    eError = PVRSRV_OK;
++
++      if(!phDeviceKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              if ((psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_BUFFER) &&
++                      (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID))
++              {
++                      
++
++
++                      psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice;
++                      goto FoundDevice;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++      
++
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(*psBCPerContextInfo),
++                                (IMG_VOID **)&psBCPerContextInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++      if(psBCInfo->ui32RefCount++ == 0)
++      {
++              BUFFER_INFO sBufferInfo;
++
++              psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++              PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++              
++              psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++              
++              eError = psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo->hExtDevice);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
++                      return eError;
++              }
++
++              
++              eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info"));
++                      return eError;
++              }
++
++              
++              psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++              
++
++              
++              eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                        sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount,
++                                                        (IMG_VOID **)&psBCInfo->psBuffer, 
++                                                        IMG_NULL);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
++                      return eError;
++              }
++              OSMemSet (psBCInfo->psBuffer,
++                                      0,
++                                      sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
++      
++              for(i=0; i<psBCInfo->ui32BufferCount; i++)
++              {
++                      
++                      eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++                                                                              psBCInfo->hDevMemContext,
++                                                                              &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++                      if(eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
++                              goto ErrorExit;
++                      }
++                      
++                      
++
++
++                      eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice,
++                                                                                                                      i,
++                                                                                                                      psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData,
++                                                                                                                      &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer);
++                      if(eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
++                              goto ErrorExit;
++                      }
++
++                      
++                      psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr;
++                      psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext;
++                      psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice;
++              }
++      }
++
++      psBCPerContextInfo->psBCInfo = psBCInfo;
++      psBCPerContextInfo->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_BUFFERCLASS_DEVICE,
++                                                                                                                               psBCPerContextInfo,
++                                                                                                                              0,
++                                                                                                                              CloseBCDeviceCallBack,
++                                                                                                                              0);
++      
++      
++      *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      
++      for(i=0; i<psBCInfo->ui32BufferCount; i++)
++      {
++              if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++              {
++                      PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              }
++      }
++
++      
++      if(psBCInfo->psBuffer)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psBuffer, IMG_NULL);
++      }
++
++      return eError;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM,
++                                                              BUFFER_INFO *psBufferInfo)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++      PVRSRV_ERROR                    eError = PVRSRV_OK;
++
++      if(!hDeviceKM || !psBufferInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++      eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo);
++
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info"));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM,
++                                                                IMG_UINT32 ui32BufferIndex,
++                                                                IMG_HANDLE *phBuffer)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++      if(!hDeviceKM || !phBuffer)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters"));     
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++      if(ui32BufferIndex < psBCInfo->ui32BufferCount)
++      {
++              *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex];
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
++{
++      psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++      psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++      psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++      return IMG_TRUE;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/devicemem.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/devicemem.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1055 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "sgxmmu.h"
++#include "sgxapi_km.h"
++
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE         hDevCookie,
++                                                                      IMG_HANDLE              hDevMemHeap,
++                                                                      IMG_UINT32              ui32Flags,
++                                                                      IMG_UINT32              ui32Size,
++                                                                      IMG_UINT32              ui32Alignment,
++                                                                      PVRSRV_KERNEL_MEM_INFO  **ppsMemInfo);
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++                                                                                                               PVRSRV_HEAP_INFO *psHeapInfo
++                                                                                                               )
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_UINT32 ui32HeapCount;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++      IMG_UINT32 i;
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++      PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++      
++      ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++      psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++      
++      PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++      
++      for(i=0; i<ui32HeapCount; i++)
++      {
++              
++              psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++              psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++              psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++              psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++              psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++      }
++
++      for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++      {
++              OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++              psHeapInfo[i].ui32HeapID = (IMG_UINT32)SGX_UNDEFINED_HEAP_ID;
++      }
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                               IMG_HANDLE *phDevMemContext,
++                                                                                                               IMG_UINT32 *pui32ClientHeapCount,
++                                                                                                               PVRSRV_HEAP_INFO *psHeapInfo,
++                                                                                                               IMG_BOOL *pbCreated
++#if defined(PVR_SECURE_HANDLES)
++                                                                                                               , IMG_BOOL *pbShared
++#endif
++                                                                                                               )
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++      IMG_HANDLE hDevMemContext;
++      IMG_HANDLE hDevMemHeap;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++      IMG_UINT32 i;
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++      PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++      
++
++      ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++      psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++      
++
++      PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++      
++
++      hDevMemContext = BM_CreateContext(psDeviceNode,
++                                                                        &sPDDevPAddr,
++                                                                        IMG_FALSE,
++                                                                        pbCreated);
++      if (hDevMemContext == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      for(i=0; i<ui32HeapCount; i++)
++      {
++              switch(psDeviceMemoryHeap[i].DevMemHeapType)
++              {
++                      case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                      {
++                              
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++                              psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++                              psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++                              psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++                              pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++                              ui32ClientHeapCount++;
++                              break;
++                      }
++                      case DEVICE_MEMORY_HEAP_PERCONTEXT:
++                      {
++                              hDevMemHeap = BM_CreateHeap(hDevMemContext,
++                                                                                      &psDeviceMemoryHeap[i]);
++
++                              
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++                              psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++                              psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++                              psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++                              pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++                              ui32ClientHeapCount++;
++                              break;
++                      }
++              }
++      }
++
++      
++      *pui32ClientHeapCount = ui32ClientHeapCount;
++      *phDevMemContext = hDevMemContext;
++      
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                                IMG_HANDLE hDevMemContext,
++                                                                                                                IMG_BOOL *pbDestroyed)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      return BM_DestroyContext(hDevMemContext, IMG_FALSE, IMG_FALSE, pbDestroyed);
++}
++
++
++
++
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE         hDevCookie,
++                                                                      IMG_HANDLE              hDevMemHeap,
++                                                                      IMG_UINT32              ui32Flags,
++                                                                      IMG_UINT32              ui32Size,
++                                                                      IMG_UINT32              ui32Alignment,
++                                                                      PVRSRV_KERNEL_MEM_INFO  **ppsMemInfo)
++{
++      PVRSRV_KERNEL_MEM_INFO  *psMemInfo;
++      BM_HANDLE               hBuffer;
++      
++      PVRSRV_MEMBLK   *psMemBlock;
++      IMG_BOOL                bBMError;
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      *ppsMemInfo = IMG_NULL;
++
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                       (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      
++      psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++      bBMError = BM_Alloc (hDevMemHeap,
++                                                      IMG_NULL,
++                                                      ui32Size,
++                                                      &psMemInfo->ui32Flags,
++                                                      ui32Alignment,
++                                                      &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++      
++
++      psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++
++      psMemInfo->ui32AllocSize = ui32Size;
++
++      
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++      
++      *ppsMemInfo = psMemInfo;
++
++
++      
++      return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++      BM_HANDLE               hBuffer;
++
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++      
++      BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++      if(psMemInfo->pvSysBackupBuffer)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo->pvSysBackupBuffer, IMG_NULL);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++
++      return(PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE                                    hDevCookie,
++                                                                                              IMG_HANDLE                                      hDevMemContext,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO         **ppsKernelSyncInfo)
++{
++      IMG_HANDLE hSyncDevMemHeap;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      BM_CONTEXT *pBMContext;
++      PVRSRV_ERROR eError;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_SYNC_DATA *psSyncData;
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_SYNC_INFO ),
++                                                (IMG_VOID **)&psKernelSyncInfo, IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      pBMContext = (BM_CONTEXT*)hDevMemContext;
++      psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++      
++      hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap;
++
++      eError = AllocDeviceMem(hDevCookie,
++                                                      hSyncDevMemHeap,
++                                                      0,
++                                                      sizeof(PVRSRV_SYNC_DATA),
++                                                      sizeof(IMG_UINT32),
++                                                      &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++      if (eError != PVRSRV_OK)
++      {
++
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psKernelSyncInfo, IMG_NULL);
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++      psSyncData = psKernelSyncInfo->psSyncData;
++
++      psSyncData->ui32WriteOpsPending = 0;
++      psSyncData->ui32WriteOpsComplete = 0;
++      psSyncData->ui32ReadOpsPending = 0;
++      psSyncData->ui32ReadOpsComplete = 0;
++      psSyncData->ui32LastOpDumpVal = 0;
++
++      psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++      psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++      
++      psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL;
++
++      
++      *ppsKernelSyncInfo = psKernelSyncInfo;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO        *psKernelSyncInfo)
++{
++      FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psKernelSyncInfo, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE                            hDevCookie,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  *psMemInfo, 
++                                                                                              IMG_BOOL                                bResManCallback)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      if (psMemInfo->psKernelSyncInfo)
++      {
++              PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++      }
++
++      if (eError == PVRSRV_OK)
++      {
++              eError = FreeDeviceMem(psMemInfo);
++      }
++
++      return eError;
++}
++
++
++static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVFreeDeviceMemKM(IMG_NULL, psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemKM(IMG_HANDLE                   hDevCookie,
++                                                                                               IMG_HANDLE                     hDevMemHeap,
++                                                                                               IMG_UINT32                     ui32Flags,
++                                                                                               IMG_UINT32                     ui32Size,
++                                                                                               IMG_UINT32                     ui32Alignment,
++                                                                                               PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++      PVRSRV_KERNEL_MEM_INFO  *psMemInfo;
++      PVRSRV_ERROR                    eError;
++      BM_HEAP                                 *psBMHeap;
++      IMG_HANDLE                              hDevMemContext;
++
++      if (!hDevMemHeap)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      eError = AllocDeviceMem(hDevCookie,
++                                                      hDevMemHeap,
++                                                      ui32Flags,
++                                                      ui32Size,
++                                                      ui32Alignment,
++                                                      &psMemInfo);
++
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++      {
++              psMemInfo->psKernelSyncInfo = IMG_NULL;
++      }
++      else
++      {
++              
++
++
++              psBMHeap = (BM_HEAP*)hDevMemHeap;
++              hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++              eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++                                                                         hDevMemContext,
++                                                                         &psMemInfo->psKernelSyncInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      goto free_mainalloc;
++              }
++      }
++
++      
++      *ppsMemInfo = psMemInfo;
++
++      if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
++      {
++              psMemInfo->sMemBlk.hResItem = IMG_NULL;
++      }
++      else
++      {
++              
++              psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              FreeDeviceMemCallBack,
++                                                                                                                              0);
++              if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
++              {
++                      
++                      eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++                      goto free_mainalloc;
++              }
++      }               
++
++      
++      return (PVRSRV_OK);
++
++free_mainalloc:
++      FreeDeviceMem(psMemInfo);
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE                              hDevCookie,
++                                                                                                        PVRSRV_KERNEL_MEM_INFO        *psMemInfo)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(psMemInfo->sMemBlk.hResItem)
++      {
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      psMemInfo->sMemBlk.hResItem = ResManRegisterRes(RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION | RESMAN_TYPE_USE_PROCESSID,
++                                                psMemInfo,
++                                                0,
++                                                FreeDeviceMemCallBack,
++                                                RESMAN_KERNEL_PROCESSID);
++
++      if (psMemInfo->sMemBlk.hResItem == IMG_NULL)                                      
++      {
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      
++      return eError;
++}
++
++
++IMG_EXPORT                    
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++                                                                                                 IMG_UINT32 *pui32Total,
++                                                                                                 IMG_UINT32 *pui32Free,
++                                                                                                 IMG_UINT32 *pui32LargestBlock)
++{
++      
++
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      PVR_UNREFERENCED_PARAMETER(pui32Total);
++      PVR_UNREFERENCED_PARAMETER(pui32Free);
++      PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
++
++      return PVRSRV_OK;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO     *psMemInfo,
++                                                                                                      IMG_BOOL                                bResManCallback)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      if (psMemInfo->psKernelSyncInfo)
++      {
++              PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++      }
++
++      if (eError == PVRSRV_OK)
++      {
++              eError = FreeDeviceMem(psMemInfo);
++      }
++
++      return eError;
++}
++
++
++static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_UINT32 ui32ProcessID, 
++                                                                                      IMG_PVOID pvParam, 
++                                                                                      IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVUnwrapExtMemoryKM(psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE                            hDevCookie,
++                                                                                              IMG_UINT32                              ui32ByteSize,
++                                                                                              IMG_UINT32                              ui32PageOffset,
++                                                                                              IMG_BOOL                                bPhysContig,
++                                                                                              IMG_SYS_PHYADDR                 *psSysAddr,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  **ppsMemInfo)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      IMG_HANDLE hDevMemHeap, hDevMemContext;
++      PVRSRV_DEVICE_NODE* psDeviceNode;
++      BM_HANDLE                       hBuffer;
++      PVRSRV_MEMBLK           *psMemBlock;
++      IMG_BOOL                        bBMError;
++      BM_HEAP                         *psBMHeap;
++      PVRSRV_ERROR            eError;
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
++      PVR_ASSERT(psDeviceNode != IMG_NULL)
++
++      
++      psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++      hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32MappingHeapID].hDevMemHeap;
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                      (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      bBMError = BM_Wrap(hDevMemHeap,
++                                         ui32ByteSize,
++                                         ui32PageOffset,
++                                         bPhysContig,
++                                         psSysAddr,
++                                         IMG_NULL,
++                                         &psMemInfo->ui32Flags,
++                                         &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++      
++      psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++      psMemInfo->ui32AllocSize = ui32ByteSize;
++
++      
++
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++              
++
++
++      psBMHeap = (BM_HEAP*)hDevMemHeap;
++      hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++      eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++                                                                      hDevMemContext,
++                                                                      &psMemInfo->psKernelSyncInfo);
++      if(eError != PVRSRV_OK)
++      {
++              goto free_mainwrap;
++      }
++
++
++      
++      psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_WRAP,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              UnwrapExtMemoryCallBack,
++                                                                                                                              0);
++
++      
++      *ppsMemInfo = psMemInfo;
++
++      return PVRSRV_OK;
++
++free_mainwrap:
++      FreeDeviceMem(psMemInfo);
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                              IMG_BOOL bResManCallback)
++{
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              PVRSRV_ERROR eError;
++              
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      return FreeDeviceMem(psMemInfo);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_UINT32 ui32ProcessID, 
++                                                                                      IMG_PVOID pvParam, 
++                                                                                      IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVUnmapDeviceMemoryKM(psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++                                                                                                IMG_HANDLE hDstDevMemHeap,
++                                                                                                PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
++{
++      PVRSRV_ERROR            eError;
++      IMG_UINT32                      i;
++      IMG_UINT32                      ui32PageCount, ui32PageOffset;
++      IMG_UINT32                      ui32HostPageSize = HOST_PAGESIZE();
++      IMG_SYS_PHYADDR         *psSysPAddr = IMG_NULL;
++      IMG_DEV_PHYADDR         sDevPAddr;
++      BM_BUF                          *psBuf;
++      IMG_DEV_VIRTADDR        sDevVAddr;
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++      BM_HANDLE                       hBuffer;
++      PVRSRV_MEMBLK           *psMemBlock;
++      IMG_BOOL                        bBMError;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++
++      
++      if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      *ppsDstMemInfo = IMG_NULL;
++      
++      ui32PageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
++      ui32PageCount = HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + ui32PageOffset) / ui32HostPageSize;
++
++      
++
++
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      ui32PageCount*sizeof(IMG_SYS_PHYADDR),
++                                      (IMG_VOID **)&psSysPAddr, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++      
++      psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++      
++      sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - ui32PageOffset;
++      for(i=0; i<ui32PageCount; i++)
++      {
++              eError = BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to retrieve page list from device"));
++                      goto ErrorExit;
++              }
++
++              
++              psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);
++
++              
++              sDevVAddr.uiAddr += ui32HostPageSize;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                      (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorExit;
++      }
++
++      OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      bBMError = BM_Wrap(psBuf->pMapping->pBMHeap,
++                                         psSrcMemInfo->ui32AllocSize,
++                                         ui32PageOffset,
++                                         IMG_FALSE,
++                                         psSysPAddr,
++                                         IMG_NULL,
++                                         &psSrcMemInfo->ui32Flags,
++                                         &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
++              eError = PVRSRV_ERROR_BAD_MAPPING;
++              goto ErrorExit;         
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++      
++      psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++      
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++      psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++      psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++      
++
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++      
++      psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_MAPPING,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              UnmapDeviceMemoryCallBack,
++                                                                                                                              0);
++
++      *ppsDstMemInfo = psMemInfo;
++
++      return PVRSRV_OK;
++
++      
++      
++ErrorExit:
++
++      if(psSysPAddr)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psSysPAddr, IMG_NULL);
++      }
++
++      if(psMemInfo)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                                      IMG_BOOL bResManCallback)
++{
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              PVRSRV_ERROR eError;
++              
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      return FreeDeviceMem(psMemInfo);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_UINT32 ui32ProcessID, 
++                                                                                      IMG_PVOID pvParam, 
++                                                                                      IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVUnmapDeviceClassMemoryKM(psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(IMG_HANDLE hDeviceClassBuffer,
++                                                                                                         PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++                                                                                                         IMG_HANDLE *phOSMapInfo)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++      IMG_SYS_PHYADDR *psSysPAddr;
++      IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++      IMG_BOOL bPhysContig;
++      BM_CONTEXT *psBMContext;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      IMG_HANDLE hDevMemHeap;
++      IMG_UINT32 ui32ByteSize;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32PageSize = HOST_PAGESIZE();
++      BM_HANDLE               hBuffer;
++      PVRSRV_MEMBLK   *psMemBlock;
++      IMG_BOOL                bBMError;
++
++      if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++      
++      psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;
++      
++      
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++      eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
++                                                                                                 psDeviceClassBuffer->hExtBuffer,
++                                                                                                 &psSysPAddr,
++                                                                                                 &ui32ByteSize,
++                                                                                                 &pvCPUVAddr,
++                                                                                                 phOSMapInfo,
++                                                                                                 &bPhysContig);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));  
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
++      psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++      hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32MappingHeapID].hDevMemHeap;
++
++      
++      ui32Offset = ((IMG_UINT32)pvCPUVAddr) & (ui32PageSize - 1);
++      pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINT8 *)pvCPUVAddr - ui32Offset);
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                      (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      bBMError = BM_Wrap(hDevMemHeap,
++                                         ui32ByteSize,
++                                         ui32Offset,
++                                         bPhysContig,
++                                         psSysPAddr,
++                                         pvPageAlignedCPUVAddr,
++                                         &psMemInfo->ui32Flags,
++                                         &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++      
++
++      psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++      
++      
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++      psMemInfo->ui32AllocSize = ui32ByteSize;
++      psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++      
++
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++      
++      psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              UnmapDeviceClassMemoryCallBack,
++                                                                                                                              0);
++
++      
++      *ppsMemInfo = psMemInfo;
++
++      return PVRSRV_OK;       
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/handle.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/handle.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/handle.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/handle.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,973 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifdef        PVR_SECURE_HANDLES
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef        DEBUG
++#define       HANDLE_BLOCK_SIZE       1
++#else
++#define       HANDLE_BLOCK_SIZE       256
++#endif
++
++#define       HANDLE_HASH_TAB_INIT_SIZE       32
++
++#define       INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define       INDEX_TO_HANDLE(psBase, idx) ((IMG_HANDLE)((idx) + 1))
++#define       HANDLE_TO_INDEX(psBase, hand) ((IMG_UINT32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++#define       HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define       HANDLE_PTR_TO_INDEX(psBase, psHandle) ((psHandle) - ((psBase)->psHandleArray))
++#define       HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++      INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++enum eHandKey {
++      HAND_KEY_DATA = 0,
++      HAND_KEY_TYPE,
++      HAND_KEY_PARENT,
++      HAND_KEY_LEN                    
++};
++
++PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInit)
++#endif
++static INLINE
++IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
++{
++      psList->ui32Next = ui32Index;
++      psList->ui32Prev = ui32Index;
++      psList->hParent = hParent;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitParentList)
++#endif
++static INLINE
++IMG_VOID InitParentList(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++      HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitChildEntry)
++#endif
++static INLINE
++IMG_VOID InitChildEntry(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, IMG_NULL);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIsEmpty)
++#endif
++static INLINE
++IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList)
++{
++      IMG_BOOL bIsEmpty;
++
++      bIsEmpty = (psList->ui32Next == ui32Index);
++
++#ifdef        DEBUG
++      {
++              IMG_BOOL bIsEmpty2;
++
++              bIsEmpty2 = (psList->ui32Prev == ui32Index);
++              PVR_ASSERT(bIsEmpty == bIsEmpty2);
++      }
++#endif
++
++      return bIsEmpty;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoChildren)
++#endif
++static INLINE
++IMG_BOOL NoChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++      return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sChildren);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoParent)
++#endif
++static INLINE
++IMG_BOOL NoParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings))
++      {
++              PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL);
++
++              return IMG_TRUE;
++      }
++      else
++      {
++              PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL);
++      }
++      return IMG_FALSE;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentHandle)
++#endif
++static INLINE
++IMG_HANDLE ParentHandle(struct sHandle *psHandle)
++{
++      return psHandle->sSiblings.hParent;
++}
++
++#define       LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++              ((struct sHandleList *)((char *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInsertBefore)
++#endif
++static INLINE
++IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex)
++{
++      struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset);
++
++      PVR_ASSERT(psEntry->hParent == IMG_NULL);
++      PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++      PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase, ui32ParentIndex));
++
++      psEntry->ui32Prev = psIns->ui32Prev;
++      psIns->ui32Prev = ui32EntryIndex;
++      psEntry->ui32Next = ui32InsIndex;
++      psPrevIns->ui32Next = ui32EntryIndex;
++
++      psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(AdoptChild)
++#endif
++static INLINE
++IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild)
++{
++      IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++      PVR_ASSERT(ui32Parent == (IMG_UINT32)HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++      HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psBase, psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent);
++
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListRemove)
++#endif
++static INLINE
++IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset)
++{
++      if (!HandleListIsEmpty(ui32EntryIndex, psEntry))
++      {
++              struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++              struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++
++              
++              PVR_ASSERT(psEntry->hParent != IMG_NULL);
++
++              psPrev->ui32Next = psEntry->ui32Next;
++              psNext->ui32Prev = psEntry->ui32Prev;
++
++              HandleListInit(ui32EntryIndex, psEntry, IMG_NULL);
++      }
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(UnlinkFromParent)
++#endif
++static INLINE
++IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIterate)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++      IMG_UINT32 ui32Index;
++      IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++      PVR_ASSERT(psHead->hParent != IMG_NULL);
++
++      
++      for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; )
++      {
++              struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++              struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset);
++              PVRSRV_ERROR eError;
++
++              PVR_ASSERT(psEntry->hParent == psHead->hParent);
++              
++              ui32Index = psEntry->ui32Next;
++
++              eError = (*pfnIterFunc)(psBase, psHandle);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(IterateOverChildren)
++#endif
++static INLINE
++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++       return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(GetHandleStructure)
++#endif
++static INLINE
++PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      IMG_UINT32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++      struct sHandle *psHandle;
++
++      
++      if (!INDEX_IS_VALID(psBase, ui32Index))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%d >= %d)", ui32Index, psBase->ui32TotalHandCount));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psHandle =  INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++      if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_ASSERT(psBase->ui32PID == psHandle->ui32PID);
++
++      
++      *ppsHandle = psHandle;
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentIfPrivate)
++#endif
++static INLINE
++IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle)
++{
++      return (psHandle->eFlag & PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++                      ParentHandle(psHandle) : IMG_NULL;
++}
++                      
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitKey)
++#endif
++static INLINE
++IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
++      aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
++      aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
++}
++
++static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (psBase->psHandleArray != IMG_NULL)
++      {
++              eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                      psBase->ui32TotalHandCount * sizeof(struct sHandle),
++                      psBase->psHandleArray,
++                      psBase->hHandBlockAlloc);
++
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreeHandleArray: Error freeing memory (%d)", eError));
++              }
++              else
++              {
++                      psBase->psHandleArray = IMG_NULL;
++              }
++      }
++
++      return eError;
++}
++
++static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      HAND_KEY aKey;
++      IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(psBase->ui32PID == psHandle->ui32PID);
++
++      InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
++
++      if (!(psHandle->eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              IMG_HANDLE hHandle;
++              hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++              PVR_ASSERT(hHandle != IMG_NULL);
++              PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++      }
++
++      
++      UnlinkFromParent(psBase, psHandle);
++
++      
++      eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
++              return eError;
++      }
++
++      if (psBase->ui32FreeHandCount == 0)
++      {
++              PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++              PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++              psBase->ui32FirstFreeIndex =  ui32Index;
++      }
++      else
++      {
++              
++              PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++              INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne =  ui32Index + 1;
++      }
++
++      PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++
++      
++      psHandle->ui32NextIndexPlusOne = psBase->ui32LastFreeIndexPlusOne;
++
++      psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++
++      psBase->ui32FreeHandCount++;
++
++      
++      psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++      {
++              return eError;
++      }
++
++      for (i = 0; i < psBase->ui32TotalHandCount; i++)
++      {
++              struct sHandle *psHandle;
++
++              psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++              if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
++              {
++                      eError = FreeHandle(psBase, psHandle);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
++                              break;
++                      }
++
++                      
++                      if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++                      {
++                              break;
++                      }
++              }
++      }
++
++      return eError;
++}
++
++static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVRSRV_ERROR eError;
++      
++      
++      eError = FreeAllHandles(psBase);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
++              return eError;
++      }
++
++      
++      eError = FreeHandleArray(psBase);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
++              return eError;
++      }
++
++      if (psBase->psHashTab != IMG_NULL)
++      {
++              
++              HASH_Delete(psBase->psHashTab);
++      }
++
++      eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psBase),
++              psBase,
++              psBase->hBaseBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(FindHandle)
++#endif
++static INLINE
++IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++      HAND_KEY aKey;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      InitKey(aKey, psBase, pvData, eType, hParent);
++
++      return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase)
++{
++      struct sHandle *psNewHandleArray;
++      IMG_HANDLE hNewHandBlockAlloc;
++      PVRSRV_ERROR eError;
++      struct sHandle *psHandle;
++
++      
++      eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              (psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE) * sizeof(struct sHandle),
++              (IMG_PVOID *)&psNewHandleArray,
++              &hNewHandBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Couldn't allocate new handle array (%d)", eError));
++              return eError;
++      }
++
++      
++      if (psBase->psHandleArray != IMG_NULL)
++              OSMemCopy(psNewHandleArray,
++                      psBase->psHandleArray,
++                      psBase->ui32TotalHandCount *  sizeof(struct sHandle));
++
++      
++      for(psHandle = psNewHandleArray + psBase->ui32TotalHandCount;
++              psHandle < psNewHandleArray + psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE;
++              psHandle++)
++      {
++              psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++              psHandle->ui32NextIndexPlusOne  = 0;
++      }
++
++      
++      eError = FreeHandleArray(psBase);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psBase->psHandleArray = psNewHandleArray;
++      psBase->hHandBlockAlloc = hNewHandBlockAlloc;
++
++      
++      PVR_ASSERT(psBase->ui32FreeHandCount == 0);
++      psBase->ui32FreeHandCount = HANDLE_BLOCK_SIZE;
++
++      PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++      psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++
++      psBase->ui32TotalHandCount += HANDLE_BLOCK_SIZE;
++
++      PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++      psBase->ui32LastFreeIndexPlusOne = psBase->ui32TotalHandCount;
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++	IMG_UINT32 ui32NewIndex;
++	struct sHandle *psNewHandle;
++	IMG_HANDLE hHandle;
++	HAND_KEY aKey;
++
++	
++	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++	PVR_ASSERT(psBase->psHashTab != NULL);
++
++	if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++	{
++		
++		PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL);
++	}
++
++	
++	if (psBase->ui32FreeHandCount == 0)
++	{
++		PVRSRV_ERROR eError = IncreaseHandleArraySize(psBase);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't increase handle array size (%d)", eError));
++			return eError;
++		}
++	}
++	PVR_ASSERT(psBase->ui32FreeHandCount != 0);
++
++	
++	ui32NewIndex = psBase->ui32FirstFreeIndex;
++
++	
++	psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++
++	
++	hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++	
++	
++	if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++	{
++		
++		InitKey(aKey, psBase, pvData, eType, hParent);
++
++		
++		if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
++		{
++			PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
++
++			return PVRSRV_ERROR_GENERIC;
++		}
++	}
++
++	psBase->ui32FreeHandCount--;
++
++	
++	if (psBase->ui32FreeHandCount == 0)
++	{
++		PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++		PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1));
++
++		psBase->ui32LastFreeIndexPlusOne = 0;
++		psBase->ui32FirstFreeIndex = 0;
++	}
++	else
++	{
++		
++		psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ?
++			ui32NewIndex + 1 :
++			psNewHandle->ui32NextIndexPlusOne - 1;
++	}
++
++	
++	psNewHandle->eType = eType;
++	psNewHandle->pvData = pvData;
++	psNewHandle->ui32NextIndexPlusOne = 0;
++	psNewHandle->eFlag = eFlag;
++	psNewHandle->ui32PID = psBase->ui32PID;
++	psNewHandle->ui32Index = ui32NewIndex;
++
++	InitParentList(psBase, psNewHandle);
++	PVR_ASSERT(NoChildren(psBase, psNewHandle));
++
++	InitChildEntry(psBase, psNewHandle);
++	PVR_ASSERT(NoParent(psBase, psNewHandle));
++
++	
++	*phHandle = hHandle;
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++      IMG_HANDLE hHandle;
++
++      
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              
++              hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
++              if (hHandle != IMG_NULL)
++              {
++                      struct sHandle *psHandle;
++                      PVRSRV_ERROR eError;
++
++                      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
++                              return eError;
++                      }
++              
++                      
++                      if ((psHandle->eFlag & eFlag & PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
++                      {
++                              *phHandle = hHandle;
++                              return PVRSRV_OK;
++                      }
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      return AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);
++}
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++	struct sHandle *psPHand;
++	struct sHandle *psCHand;
++	PVRSRV_ERROR eError;
++	IMG_HANDLE hParentKey;
++	IMG_HANDLE hHandle;
++
++	
++	PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++	hParentKey = (eFlag & PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++			hParent : IMG_NULL;
++
++	
++	eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
++	if (eError != PVRSRV_OK)
++	{
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++	{
++		
++		hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++		if (hHandle != IMG_NULL)
++		{
++			struct sHandle *psCHandFound;
++			PVRSRV_ERROR eLookupError;
++
++			eLookupError = GetHandleStructure(psBase, &psCHandFound, hHandle, eType);
++			if (eLookupError != PVRSRV_OK)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
++				return eLookupError;
++			}
++		
++			PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent);
++
++			
++			if ((psCHandFound->eFlag & eFlag & PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent)
++			{
++				*phHandle = hHandle;
++				return PVRSRV_OK;
++			}
++			return PVRSRV_ERROR_GENERIC;
++		}
++	}
++
++	eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
++	if (eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++
++	
++	psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++	psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++	AdoptChild(psBase, psPHand, psCHand);
++
++	*phHandle = hHandle;
++
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++      IMG_HANDLE hHandle;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      
++      hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
++      if (hHandle == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVFindHandle: couldn't find handle"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      *phHandle = hHandle;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      *ppvData = psHandle->pvData;
++      *peType = psHandle->eType;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      *ppvData = psHandle->pvData;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++      struct sHandle *psPHand;
++      struct sHandle *psCHand;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError));
++              return eError;
++      }
++
++      
++      for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; )
++      {
++              eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      *ppvData = psCHand->pvData;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError));
++              return eError;
++      }
++
++      *phParent = ParentHandle(psHandle);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      *ppvData = psHandle->pvData;
++
++      eError = FreeHandle(psBase, psHandle);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      eError = FreeHandle(psBase, psHandle);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, IMG_UINT32 ui32PID)
++{
++      PVRSRV_HANDLE_BASE *psBase;
++      IMG_HANDLE hBlockAlloc;
++      PVRSRV_ERROR eError;
++
++      eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psBase),
++              (IMG_PVOID *)&psBase,
++              &hBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError));
++              return eError;
++      }
++      OSMemSet(psBase, 0, sizeof(*psBase));
++
++      
++      psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default);
++      if (psBase->psHashTab == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
++              goto failure;
++      }
++
++      psBase->hBaseBlockAlloc = hBlockAlloc;
++      psBase->ui32PID = ui32PID;
++
++      *ppsBase = psBase;
++
++      return PVRSRV_OK;
++failure:
++      (void)PVRSRVFreeHandleBase(psBase);
++      return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++      eError = FreeHandleBase(psBase);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
++
++      eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase, KERNEL_ID);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (gpsKernelHandleBase != IMG_NULL)
++      {
++              eError = FreeHandleBase(gpsKernelHandleBase);
++              if (eError == PVRSRV_OK)
++              {
++                      gpsKernelHandleBase = IMG_NULL;
++              }
++      }
++
++      return eError;
++}
++#else
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/hash.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/hash.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/hash.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/hash.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,406 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
++
++#define       KEY_TO_INDEX(pHash, key, uSize) \
++      ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define       KEY_COMPARE(pHash, pKey1, pKey2) \
++      ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct _BUCKET_
++{
++      
++      struct _BUCKET_ *pNext;
++
++      
++      IMG_UINTPTR_T v;
++
++      
++      IMG_UINTPTR_T k[];
++};
++typedef struct _BUCKET_ BUCKET;
++
++struct _HASH_TABLE_ 
++{
++      
++      BUCKET **ppBucketTable;
++      
++      
++      IMG_UINT32 uSize;       
++
++      
++      IMG_UINT32 uCount;
++
++      
++      IMG_UINT32 uMinimumSize;
++
++      
++      IMG_UINT32 uKeySize;
++
++      
++      HASH_FUNC *pfnHashFunc;
++
++      
++      HASH_KEY_COMP *pfnKeyComp;
++};
++
++IMG_UINT32
++HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
++{ 
++      IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
++      IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++      IMG_UINT32 ui;
++      IMG_UINT32 uHashKey = 0;
++
++      PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++      PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++      for (ui = 0; ui < uKeyLen; ui++)
++      {
++              IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
++
++              uHashPart += (uHashPart << 12);
++              uHashPart ^= (uHashPart >> 22);
++              uHashPart += (uHashPart << 4);
++              uHashPart ^= (uHashPart >> 9);
++              uHashPart += (uHashPart << 10);
++              uHashPart ^= (uHashPart >> 2);
++              uHashPart += (uHashPart << 7);
++              uHashPart ^= (uHashPart >> 12);
++
++              uHashKey += uHashPart;
++      }
++
++      return uHashKey;
++}
++
++IMG_BOOL
++HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
++{ 
++      IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
++      IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
++      IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++      IMG_UINT32 ui;
++
++      PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++      for (ui = 0; ui < uKeyLen; ui++)
++      {
++              if (*p1++ != *p2++)
++                      return IMG_FALSE;
++      }
++
++      return IMG_TRUE;
++}
++
++static void
++_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
++{
++      IMG_UINT32 uIndex;
++
++      PVR_ASSERT (pBucket != IMG_NULL);
++      PVR_ASSERT (ppBucketTable != IMG_NULL);
++      PVR_ASSERT (uSize != 0);
++
++      uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++      pBucket->pNext = ppBucketTable[uIndex];
++      ppBucketTable[uIndex] = pBucket;
++}
++
++static void
++_Rehash (HASH_TABLE *pHash,
++       BUCKET **ppOldTable, IMG_UINT32 uOldSize,
++         BUCKET **ppNewTable, IMG_UINT32 uNewSize)
++{
++      IMG_UINT32 uIndex;
++      for (uIndex=0; uIndex< uOldSize; uIndex++)
++    {
++              BUCKET *pBucket;
++              pBucket = ppOldTable[uIndex];
++              while (pBucket != IMG_NULL)
++              {
++                      BUCKET *pNextBucket = pBucket->pNext;
++                      _ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
++                      pBucket = pNextBucket;
++              }
++    }
++}
++
++static IMG_BOOL
++_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
++{
++      if (uNewSize != pHash->uSize)
++    {
++              BUCKET **ppNewTable;
++        IMG_UINT32 uIndex;
++
++              PVR_DPF ((PVR_DBG_MESSAGE,
++                  "HASH_Resize: oldsize=0x%x  newsize=0x%x  count=0x%x",
++                              pHash->uSize, uNewSize, pHash->uCount));
++
++              OSAllocMem (PVRSRV_OS_PAGEABLE_HEAP, 
++                      sizeof (BUCKET *) * uNewSize, 
++                      (IMG_PVOID*)&ppNewTable, IMG_NULL);
++              if (ppNewTable == IMG_NULL)
++            return IMG_FALSE;
++        
++        for (uIndex=0; uIndex<uNewSize; uIndex++)
++            ppNewTable[uIndex] = IMG_NULL;
++        _Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
++        OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pHash->ppBucketTable, IMG_NULL);
++        pHash->ppBucketTable = ppNewTable;
++        pHash->uSize = uNewSize;
++    }
++    return IMG_TRUE;
++}
++
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
++{
++      HASH_TABLE *pHash;
++      IMG_UINT32 uIndex;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(HASH_TABLE), 
++                                      (IMG_VOID **)&pHash, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pHash->uCount = 0;
++      pHash->uSize = uInitialLen;
++      pHash->uMinimumSize = uInitialLen;
++      pHash->uKeySize = uKeySize;
++      pHash->pfnHashFunc = pfnHashFunc;
++      pHash->pfnKeyComp = pfnKeyComp;
++
++      OSAllocMem (PVRSRV_OS_PAGEABLE_HEAP, 
++                  sizeof (BUCKET *) * pHash->uSize, 
++                  (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL);       
++
++      if (pHash->ppBucketTable == IMG_NULL)
++    {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(HASH_TABLE), pHash, IMG_NULL);
++              return IMG_NULL;
++    }
++
++      for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++              pHash->ppBucketTable[uIndex] = IMG_NULL;
++      return pHash;
++}
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
++{
++      return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
++              &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++IMG_VOID
++HASH_Delete (HASH_TABLE *pHash)
++{
++      if (pHash != IMG_NULL)
++    {
++              PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
++              
++              PVR_ASSERT (pHash->uCount==0);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pHash->ppBucketTable, IMG_NULL);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(HASH_TABLE), pHash, IMG_NULL);
++    }
++}
++
++IMG_BOOL
++HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
++{
++      BUCKET *pBucket;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash, pKey, v));
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(BUCKET) + pHash->uKeySize, 
++                                      (IMG_VOID **)&pBucket, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_FALSE;
++      }
++
++      pBucket->v = v;
++      OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
++      _ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
++      pHash->uCount++;
++
++      
++      if (pHash->uCount << 1 > pHash->uSize)
++    {
++        
++
++        _Resize (pHash, pHash->uSize << 1);
++    }
++    
++      
++      return IMG_TRUE;
++}
++
++IMG_BOOL
++HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v));
++
++      return HASH_Insert_Extended(pHash, &k, v);
++}
++
++IMG_UINTPTR_T
++HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++      BUCKET **ppBucket;
++      IMG_UINT32 uIndex;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, pKey=%08X", pHash, pKey));
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      
++      uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++  
++      for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++      {
++              if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++              {
++                      BUCKET *pBucket = *ppBucket;
++                      IMG_UINTPTR_T v = pBucket->v;
++                      (*ppBucket) = pBucket->pNext;
++
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL);
++
++                      pHash->uCount--;
++
++                      
++                      if (pHash->uSize > (pHash->uCount << 2) &&
++                pHash->uSize > pHash->uMinimumSize)
++            {
++                
++
++                              _Resize (pHash,
++                         PRIVATE_MAX (pHash->uSize >> 1,
++                                      pHash->uMinimumSize));
++            }
++            
++                      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++                      pHash, pKey, v));
++                      return v;
++              }
++      }
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++      return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k));
++
++      return HASH_Remove_Extended(pHash, &k);
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++      BUCKET **ppBucket;
++      IMG_UINT32 uIndex;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, pKey=%08X", pHash,pKey));
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      
++      uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++  
++      for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++      {
++              if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++              {         
++                      BUCKET *pBucket = *ppBucket;
++                      IMG_UINTPTR_T v = pBucket->v;
++
++                      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++                      pHash, pKey, v));
++                      return v;
++              }
++      }
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++      return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash,k));
++      return HASH_Retrieve_Extended(pHash, &k);
++}
++
++#ifdef HASH_TRACE
++void
++HASH_Dump (HASH_TABLE *pHash)
++{
++      IMG_UINT32 uIndex;
++      IMG_UINT32 uMaxLength=0;
++      IMG_UINT32 uEmptyCount=0;
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++      {
++              BUCKET *pBucket;
++              IMG_UINT32 uLength = 0;
++              if (pHash->ppBucketTable[uIndex] == IMG_NULL)
++                      uEmptyCount++;
++              for (pBucket=pHash->ppBucketTable[uIndex];
++                      pBucket != IMG_NULL;
++                      pBucket = pBucket->pNext)
++                              uLength++;
++              uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
++      }
++
++      PVR_TRACE(("hash table: uMinimumSize=%d  size=%d  count=%d",
++                      pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++      PVR_TRACE(("  empty=%d  max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/mem.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/mem.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/mem.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/mem.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "pvr_bridge_km.h"
++
++
++static PVRSRV_ERROR
++FreeSharedSysMemCallBack(IMG_UINT32 ui32ProcessID,
++                                               IMG_PVOID pvParam,
++                                               IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(IMG_UINT32 ui32Flags,
++                                                       IMG_UINT32 ui32Size,
++                                                       PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++                  PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_KERNEL_MEM_INFO),
++                                (IMG_VOID **)&psKernelMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++      ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++      psKernelMemInfo->ui32Flags = ui32Flags;
++      psKernelMemInfo->ui32AllocSize = ui32Size;
++
++      if(OSAllocPages(psKernelMemInfo->ui32Flags,
++                                      psKernelMemInfo->ui32AllocSize,
++                                      &psKernelMemInfo->pvLinAddrKM,
++                                      &psKernelMemInfo->sMemBlk.hOSMemHandle)
++              != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_KERNEL_MEM_INFO),
++                                psKernelMemInfo,
++                                0);
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psKernelMemInfo->sMemBlk.hResItem =
++              (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_SHARED_MEM_INFO,
++                                                                        psKernelMemInfo,
++                                                                        0,
++                                                                        FreeSharedSysMemCallBack,
++                                                                        0);
++
++      *ppsKernelMemInfo = psKernelMemInfo;
++
++      return PVRSRV_OK; 
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++      PVRSRV_ERROR eError;
++
++      if(psKernelMemInfo->sMemBlk.hResItem)
++      {
++              eError =
++                      ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if(eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      OSFreePages(psKernelMemInfo->ui32Flags,
++                              psKernelMemInfo->ui32AllocSize,
++                              psKernelMemInfo->pvLinAddrKM,
++                              psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(PVRSRV_KERNEL_MEM_INFO),
++                        psKernelMemInfo,
++                        IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(!psKernelMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(psKernelMemInfo->sMemBlk.hResItem)
++      {
++              eError =
++                      ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++              psKernelMemInfo->sMemBlk.hResItem = IMG_NULL;
++      }
++
++      return eError;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/metrics.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/metrics.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/metrics.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/metrics.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,153 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "metrics.h"
++
++#if defined(DEBUG) || defined(TIMING)
++
++static volatile IMG_UINT32 *pui32TimerRegister = 0;
++
++#define PVRSRV_TIMER_TOTAL_IN_TICKS(X)        asTimers[X].ui32Total
++#define PVRSRV_TIMER_TOTAL_IN_MS(X)           ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
++#define PVRSRV_TIMER_COUNT(X)                 asTimers[X].ui32Count
++
++
++Temporal_Data asTimers[PVRSRV_NUM_TIMERS]; 
++
++
++IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
++{
++      if (!pui32TimerRegister)
++      {
++              static IMG_BOOL bFirstTime = IMG_TRUE;
++
++              if (bFirstTime)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
++
++                      bFirstTime = IMG_FALSE;
++              }
++
++              return 0;
++      }
++
++#if defined(__sh__)
++
++      return (0xffffffff-*pui32TimerRegister);
++
++#else 
++
++      return 0;
++
++#endif 
++}
++
++
++static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
++{
++      IMG_UINT32 ui32Time1, ui32Time2;
++
++      ui32Time1 = PVRSRVTimeNow();
++
++      OSWaitus(1000000);
++
++      ui32Time2 = PVRSRVTimeNow();
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
++
++      return (ui32Time2 - ui32Time1);
++}
++
++
++IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
++{
++      IMG_UINT32 ui32Loop;
++
++      PVR_UNREFERENCED_PARAMETER(pvDevInfo);
++
++      for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++      {
++              asTimers[ui32Loop].ui32Total = 0;
++              asTimers[ui32Loop].ui32Count = 0;
++      }
++
++
++      #if defined(__sh__)
++
++              
++              
++              
++              
++              *TCR_2 = TIMER_DIVISOR;
++
++              
++              *TCOR_2 = *TCNT_2 = (unsigned int)0xffffffff;
++
++              
++              *TST_REG |= (unsigned char)0x04;
++
++              pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
++
++      #else 
++
++              pui32TimerRegister = 0;
++
++      #endif 
++
++}
++
++
++IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
++{
++      IMG_UINT32 ui32TicksPerMS, ui32Loop;
++
++      ui32TicksPerMS = PVRSRVGetCPUFreq();
++
++      if (!ui32TicksPerMS)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
++              return;
++      }
++
++      for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++      {
++              if (asTimers[ui32Loop].ui32Count & 0x80000000L)
++              {
++                      PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
++              }
++      }
++#if 0
++      
++      PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
++      PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
++      PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
++#endif
++}
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/perproc.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/perproc.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/perproc.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/perproc.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,213 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++
++#define       HASH_TAB_INIT_SIZE 32
++
++static HASH_TABLE *psHashTab = IMG_NULL;
++
++static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_ERROR eError;
++      IMG_UINTPTR_T uiPerProc;
++
++      PVR_ASSERT(psPerProc != IMG_NULL);
++
++      uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
++      if (uiPerProc == 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
++              
++              PVR_ASSERT(psPerProc->ui32PID == 0);
++      }
++      else
++      {
++              PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
++              PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
++      }
++
++      
++      if (psPerProc->psHandleBase != IMG_NULL)
++      {
++              eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
++                      return eError;
++              }
++      }
++
++      
++      if (psPerProc->hPerProcData != IMG_NULL)
++      {
++              eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
++                      return eError;
++              }
++      }
++
++      eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psPerProc),
++              psPerProc,
++              psPerProc->hBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR ResMgrFreeProcessData(IMG_UINT32 ui32ProcessID,
++                                                                                IMG_PVOID pvParam,
++                                                                                IMG_UINT32 ui32Param)
++{
++      PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
++      
++#ifdef        DEBUG
++      PVR_ASSERT(psPerProc->ui32PID == ui32ProcessID);
++#else
++      PVR_UNREFERENCED_PARAMETER (ui32ProcessID);
++#endif
++      PVR_UNREFERENCED_PARAMETER (ui32Param);
++
++      return FreePerProcessData(psPerProc);
++}
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID, IMG_BOOL bAlloc)
++{
++      PVRSRV_PER_PROCESS_DATA *psPerProc;
++      IMG_HANDLE hBlockAlloc;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(psHashTab != IMG_NULL);
++
++      
++      psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++
++      if (psPerProc != IMG_NULL || !bAlloc)
++      {
++              PVR_ASSERT(psPerProc == IMG_NULL || psPerProc->ui32PID == ui32PID);
++              return psPerProc;
++      }
++
++      
++      eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psPerProc),
++              (IMG_PVOID *)&psPerProc,
++              &hBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't allocate per-process data (%d)", eError));
++              return IMG_NULL;
++      }
++      OSMemSet(psPerProc, 0, sizeof(*psPerProc));
++      psPerProc->hBlockAlloc = hBlockAlloc;
++
++      if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't insert per-process data into hash table"));
++              goto failure;
++      }
++
++      psPerProc->ui32PID = ui32PID;
++
++      
++      eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++                                                         &psPerProc->hPerProcData,
++                                                         psPerProc,
++                                                         PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++                                                         PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't allocate handle for per-process data (%d)", eError));
++              goto failure;
++      }
++
++      
++      eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase, ui32PID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't allocate handle base for process (%d)", eError));
++              goto failure;
++      }
++
++      
++      psPerProc->psResManItem = ResManRegisterRes(RESMAN_TYPE_USE_PROCESSID | RESMAN_TYPE_RESOURCE_PERPROC_DATA,
++                      psPerProc,
++                      0,
++                      ResMgrFreeProcessData,
++                      ui32PID);
++
++      if (psPerProc->psResManItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't register handles with the resource manager"));
++              goto failure;
++      }
++
++      return psPerProc;
++
++failure:
++      (void)FreePerProcessData(psPerProc);
++      return IMG_NULL;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
++{
++      PVR_ASSERT(psHashTab == IMG_NULL);
++
++      
++      psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++      if (psHashTab == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
++{
++      
++      if (psHashTab != IMG_NULL)
++      {
++              
++              HASH_Delete(psHashTab);
++              psHashTab = IMG_NULL;
++      }
++
++      return PVRSRV_OK;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/power.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/power.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/power.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/power.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,595 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++static IMG_BOOL _IsSystemStatePowered(PVR_POWER_STATE eSystemPowerState)
++{
++      return (IMG_BOOL)(eSystemPowerState < PVRSRV_POWER_STATE_D2);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32       ui32CallerID,
++                                                       IMG_BOOL       bSystemPowerEvent)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++      IMG_UINT32              ui32Timeout = 1000000;
++
++#if defined(SUPPORT_LMA)
++      
++      ui32Timeout *= 60;
++#endif 
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      do
++      {
++              eError = OSLockResource(&psSysData->sPowerStateChangeResource,
++                                                              ui32CallerID);
++              if (eError == PVRSRV_OK)
++              {
++                      break;
++              }
++              else if (ui32CallerID == ISR_ID)
++              {
++                      
++
++                      eError = PVRSRV_ERROR_RETRY;
++                      break;
++              }
++
++              OSWaitus(1);
++              ui32Timeout--;
++      } while (ui32Timeout > 0);
++
++      if ((eError == PVRSRV_OK) &&
++              !bSystemPowerEvent &&
++              !_IsSystemStatePowered(psSysData->eCurrentPowerState))
++      {
++              
++              PVRSRVPowerUnlock(ui32CallerID);
++              eError = PVRSRV_ERROR_RETRY;
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID)
++{
++      OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
++}
++
++
++static
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL                     bAllDevices,
++                                                                               IMG_UINT32                     ui32DeviceIndex,
++                                                                               PVR_POWER_STATE        eNewPowerState)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psPowerDevice = psSysData->psPowerDeviceList;
++      while (psPowerDevice)
++      {
++              if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++              {
++                      eNewDevicePowerState = (eNewPowerState == PVRSRV_POWER_Unspecified) ?
++                                                                      psPowerDevice->eDefaultPowerState : eNewPowerState;
++                      
++                      if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++                      {
++                              if (psPowerDevice->pfnPrePower != IMG_NULL)
++                              {
++                                      
++                                      eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie,
++                                                                                                              eNewDevicePowerState,
++                                                                                                              psPowerDevice->eCurrentPowerState);
++                                      if (eError != PVRSRV_OK)
++                                      {
++                                              return eError;
++                                      }
++                              }
++
++                              
++                              eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex,
++                                                                                              eNewDevicePowerState,
++                                                                                              psPowerDevice->eCurrentPowerState);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      return eError;
++                              }
++                      }
++              }
++
++              psPowerDevice = psPowerDevice->psNext;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++static
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL                    bAllDevices,
++                                                                                IMG_UINT32            ui32DeviceIndex,
++                                                                                PVR_POWER_STATE       eNewPowerState)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psPowerDevice = psSysData->psPowerDeviceList;
++      while (psPowerDevice)
++      {
++              if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++              {
++                      eNewDevicePowerState = (eNewPowerState == PVRSRV_POWER_Unspecified) ?
++                                                                      psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++                      if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++                      {
++                              
++                              eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex,
++                                                                                               eNewDevicePowerState,
++                                                                                               psPowerDevice->eCurrentPowerState);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      return eError;
++                              }
++
++                              if (psPowerDevice->pfnPostPower != IMG_NULL)
++                              {
++                                      
++                                      eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie,
++                                                                                                               eNewDevicePowerState,
++                                                                                                               psPowerDevice->eCurrentPowerState);
++                                      if (eError != PVRSRV_OK)
++                                      {
++                                              return eError;
++                                      }
++                              }
++
++                              psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
++                      }
++              }
++
++              psPowerDevice = psPowerDevice->psNext;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32                   ui32DeviceIndex,
++                                                                               PVR_POWER_STATE        eNewPowerState,
++                                                                               IMG_UINT32                     ui32CallerID,
++                                                                               IMG_BOOL                       bRetainMutex)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              goto Exit;
++      }
++
++      eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++
++Exit:
++
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError));
++      }
++
++      if (!bRetainMutex || (eError != PVRSRV_OK))
++      {
++              PVRSRVPowerUnlock(ui32CallerID);
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      SYS_DATA                        *psSysData;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      if (_IsSystemStatePowered(eNewPowerState))
++      {
++              
++              eNewDevicePowerState = PVRSRV_POWER_Unspecified;
++      }
++      else
++      {
++              eNewDevicePowerState = PVRSRV_POWER_STATE_D3;
++      }
++
++      
++      eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++      if (eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      if (eNewPowerState != psSysData->eCurrentPowerState)
++      {
++              
++              eError = SysSystemPrePowerState(eNewPowerState);
++              if (eError != PVRSRV_OK)
++              {
++                      goto ErrorExit;
++              }
++      }
++
++      return eError;
++
++ErrorExit:
++
++      PVR_DPF((PVR_DBG_ERROR,
++                      "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
++                      psSysData->eCurrentPowerState, eNewPowerState, eError));
++
++      
++      psSysData->eFailedPowerState = eNewPowerState;
++
++      PVRSRVPowerUnlock(KERNEL_ID);
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      SYS_DATA                        *psSysData;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              goto Exit;
++      }
++
++      if (eNewPowerState != psSysData->eCurrentPowerState)
++      {
++              
++              eError = SysSystemPostPowerState(eNewPowerState);
++              if (eError != PVRSRV_OK)
++              {
++                      goto Exit;
++              }
++      }
++
++      if (_IsSystemStatePowered(eNewPowerState))
++      {
++              
++              eNewDevicePowerState = PVRSRV_POWER_Unspecified;
++      }
++      else
++      {
++              eNewDevicePowerState = PVRSRV_POWER_STATE_D3;
++      }
++
++      
++      eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++      if (eError != PVRSRV_OK)
++      {
++              goto Exit;
++      }
++
++      PVR_DPF((PVR_DBG_WARNING,
++                      "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
++                      psSysData->eCurrentPowerState, eNewPowerState));
++
++      psSysData->eCurrentPowerState = eNewPowerState;
++
++Exit:
++
++      PVRSRVPowerUnlock(KERNEL_ID);
++
++      if (_IsSystemStatePowered(eNewPowerState))
++      {
++              
++
++
++              PVRSRVCommandCompleteCallbacks();
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVSystemPrePowerStateKM(eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      eError = PVRSRVSystemPostPowerStateKM(eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      
++      psSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      PVR_DPF((PVR_DBG_ERROR,
++                      "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
++                      psSysData->eCurrentPowerState, eNewPowerState, eError));
++
++      
++      psSysData->eFailedPowerState = eNewPowerState;
++
++      return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32             ui32DeviceIndex,
++                                                                         PFN_PRE_POWER        pfnPrePower,
++                                                                         PFN_POST_POWER       pfnPostPower,
++                                                                         IMG_HANDLE           hDevCookie,
++                                                                         PVR_POWER_STATE      eCurrentPowerState,
++                                                                         PVR_POWER_STATE      eDefaultPowerState)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++
++      if (pfnPrePower == IMG_NULL &&
++              pfnPostPower == IMG_NULL)
++      {
++              return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++      }
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                               sizeof(PVRSRV_POWER_DEV),
++                                               (IMG_VOID **)&psPowerDevice, IMG_NULL);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
++              return eError;
++      }
++
++      
++      psPowerDevice->pfnPrePower = pfnPrePower;
++      psPowerDevice->pfnPostPower = pfnPostPower;
++      psPowerDevice->hDevCookie = hDevCookie;
++      psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++      psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++      psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++      
++      psPowerDevice->psNext = psSysData->psPowerDeviceList;
++      psSysData->psPowerDeviceList = psPowerDevice;
++
++      return (PVRSRV_OK);
++}
++
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psCurrent, *psPrevious;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psCurrent = psSysData->psPowerDeviceList;
++      psPrevious = IMG_NULL;
++
++      while (psCurrent)
++      {
++              if (psCurrent->ui32DeviceIndex == ui32DeviceIndex)
++              {
++                      
++                      if (psPrevious)
++                      {
++                              psPrevious->psNext = psCurrent->psNext;
++                      }
++                      else
++                      {
++                              
++                              psSysData->psPowerDeviceList = psCurrent->psNext;
++                      }
++
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCurrent, IMG_NULL);
++                      
++                      break;
++              }
++              else
++              {
++                      psPrevious = psCurrent;
++                      psCurrent = psCurrent->psNext;
++              }
++      }
++
++      return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerControlKM(PVR_POWER_CONTROL ePowerControl, PVR_POWER_STATE *pePVRPowerState)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      switch(ePowerControl)
++      {
++              case PVRSRV_POWER_CONTROL_SET :
++              {
++                      eError = PVRSRVSetPowerStateKM(*pePVRPowerState);
++                      break;
++              }
++              case PVRSRV_POWER_CONTROL_RETRY :
++              {
++                      eError = PVRSRVSetPowerStateKM(psSysData->eFailedPowerState);
++#ifdef DEBUG
++                      if(eError == PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVPowerControlKM: Power Transition Re-Try success"));
++                      }
++#endif
++                      break;
++              }
++              case PVRSRV_POWER_CONTROL_QUERY :
++              {
++                      *pePVRPowerState = psSysData->eCurrentPowerState;
++                      break;
++              }
++              default :
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVPowerControlKM: Invalid Power control mode %d", ePowerControl));
++                      return PVRSRV_ERROR_GENERIC;
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return IMG_FALSE;
++      }
++
++      if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) ||
++              OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
++      {
++              return IMG_FALSE;
++      }
++
++      psPowerDevice = psSysData->psPowerDeviceList;
++      while (psPowerDevice)
++      {
++              if (psPowerDevice->ui32DeviceIndex == ui32DeviceIndex)
++              {
++                      return (IMG_BOOL)(psPowerDevice->eCurrentPowerState == PVRSRV_POWER_STATE_D0);
++              }
++
++              psPowerDevice = psPowerDevice->psNext;
++      }
++
++      
++      return IMG_FALSE;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,948 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "handle.h"
++#include "perproc.h"
++
++
++#include "ra.h"
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID)
++{
++      SYS_DEVICE_ID* psDeviceWalker;
++      SYS_DEVICE_ID* psDeviceEnd;
++      
++      psDeviceWalker = &psSysData->sDeviceID[0];
++      psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++      
++      while (psDeviceWalker < psDeviceEnd)
++      {
++              if (!psDeviceWalker->bInUse)
++              {
++                      psDeviceWalker->bInUse = IMG_TRUE;
++                      *pui32DevID = psDeviceWalker->uiID;
++                      return PVRSRV_OK;
++              }
++              psDeviceWalker++;
++      }
++      
++      PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
++
++      
++      PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID)
++{
++      SYS_DEVICE_ID* psDeviceWalker;
++      SYS_DEVICE_ID* psDeviceEnd;
++
++      psDeviceWalker = &psSysData->sDeviceID[0];
++      psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++      
++      while (psDeviceWalker < psDeviceEnd)
++      {
++              
++              if      (
++                              (psDeviceWalker->uiID == ui32DevID) &&
++                              (psDeviceWalker->bInUse)
++                      )
++              {
++                      psDeviceWalker->bInUse = IMG_FALSE;
++                      return PVRSRV_OK;
++              }
++              psDeviceWalker++;
++      }
++      
++      PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
++
++      
++      PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++
++#ifndef ReadHWReg
++IMG_EXPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++      return *(volatile IMG_UINT32*)((IMG_UINT32)pvLinRegBaseAddr+ui32Offset);
++}
++#endif
++
++
++#ifndef WriteHWReg
++IMG_EXPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++      PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x",pvLinRegBaseAddr,ui32Offset,ui32Value));
++
++      *(IMG_UINT32*)((IMG_UINT32)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++}
++#endif
++
++
++#ifndef WriteHWRegs
++IMG_EXPORT
++IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs)
++{
++      while (ui32Count--)
++      {
++              WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal);
++              psHWRegs++;
++      }
++}
++#endif
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++                                                                                                 PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      IMG_UINT32                      i;
++      
++      if (!pui32NumDevices || !psDevIdList)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Failed to get SysData"));
++              return eError;
++      }
++
++      
++
++      for (i=0; i<PVRSRV_MAX_DEVICES; i++)
++      {
++              psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++      }
++      
++      
++      *pui32NumDevices = 0;
++      
++      
++
++
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      for (i=0; psDeviceNode != IMG_NULL; i++)
++      {
++              
++              if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
++              {
++                      
++                      *psDevIdList++ = psDeviceNode->sDevId;
++                      
++                      (*pui32NumDevices)++;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData)
++{
++      PVRSRV_ERROR    eError;
++
++      
++      eError = ResManInit();
++      if (eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      eError = PVRSRVPerProcessDataInit();
++      if(eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      
++      eError = PVRSRVHandleInit();
++      if(eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      
++      eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++      if (eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      
++      gpsSysData->eCurrentPowerState = PVRSRV_POWER_STATE_D0;
++      gpsSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++
++      return eError;
++      
++Error:
++      PVRSRVDeInit(psSysData);
++      return eError;
++}
++
++
++
++IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData)
++{
++      PVRSRV_ERROR    eError;
++      
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++
++      eError = PVRSRVHandleDeInit();
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
++      }
++
++      eError = PVRSRVPerProcessDataDeInit();
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
++      }
++      
++      ResManDeInit();
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,  
++                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                IMG_UINT32 ui32SOCInterruptBit,
++                                                                IMG_UINT32 *pui32DeviceIndex)
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      
++      
++      if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                       sizeof(PVRSRV_DEVICE_NODE), 
++                                       (IMG_VOID **)&psDeviceNode, IMG_NULL) != PVRSRV_OK)    
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++      OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); 
++      
++      eError = pfnRegisterDevice(psDeviceNode);
++      if (eError != PVRSRV_OK)        
++      {
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                      0, psDeviceNode, IMG_NULL);
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
++              return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
++      }
++
++      
++
++
++
++
++      psDeviceNode->ui32RefCount = 1;
++      psDeviceNode->psSysData = psSysData;
++      psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++      
++      
++      AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++              
++      
++      psDeviceNode->psNext = psSysData->psDeviceNodeList;
++      psSysData->psDeviceNodeList = psDeviceNode;
++
++      
++      *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed to get SysData"));
++              return(eError);
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++
++      while (psDeviceNode)
++      {
++              if (psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++              {
++                      goto FoundDevice;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present"));
++      return PVRSRV_ERROR_INIT_FAILURE;
++      
++FoundDevice:
++
++      PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++      
++      
++      if(psDeviceNode->pfnInitDevice != IMG_NULL)
++      {
++              eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call"));
++                      return eError;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32                       ui32DevIndex,
++                                                                                                       PVRSRV_DEVICE_TYPE     eDeviceType,
++                                                                                                       IMG_HANDLE                     *phDevCookie)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: Failed to get SysData"));
++              return(eError);
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++
++      if (eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN)
++      {
++              while (psDeviceNode)
++              {
++                      if (psDeviceNode->sDevId.eDeviceType == eDeviceType)
++                      {
++                              goto FoundDevice;
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++      }
++      else
++      {
++              while (psDeviceNode)
++              {
++                      if (psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++                      {
++                              goto FoundDevice;
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++      }
++
++      
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
++      return PVRSRV_ERROR_INIT_FAILURE;               
++      
++FoundDevice:
++
++      PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++      
++      if (phDevCookie)
++      {
++              *phDevCookie = (IMG_HANDLE)psDeviceNode;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++/* Tear down and unregister the device with system index ui32DevIndex:
++ * power it off (active-power-management builds), run its per-device
++ * de-init hook, unlink it from the system device list and release its
++ * device index and node memory.
++ *
++ * ui32DevIndex - system-assigned index of the device to remove.
++ * Returns PVRSRV_OK on success, PVRSRV_ERROR_GENERIC if no device with
++ * that index is registered, or the error from the first failing
++ * sub-call (in which case the node is left in the list).
++ */
++PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++{
++	PVRSRV_DEVICE_NODE	*psDeviceNode;
++	PVRSRV_DEVICE_NODE	**ppsDevNode;
++	SYS_DATA			*psSysData;
++	PVRSRV_ERROR		eError;
++
++	eError = SysAcquireData(&psSysData);
++	if(eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed to get SysData"));
++		return(eError);
++	}
++
++	/* Walk the list via the link fields themselves so the matching node
++	 * can later be unlinked with a single assignment through ppsDevNode. */
++	ppsDevNode = &psSysData->psDeviceNodeList;
++	while(*ppsDevNode)
++	{
++		if((*ppsDevNode)->sDevId.ui32DeviceIndex == ui32DevIndex)
++		{
++			psDeviceNode = *ppsDevNode;
++			goto FoundDevice;
++		}
++		ppsDevNode = &((*ppsDevNode)->psNext);
++	}
++
++	PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
++	
++	return PVRSRV_ERROR_GENERIC;
++	
++FoundDevice:
++
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++	/* Force the device into D3 (off) before running its de-init hook. */
++	eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++										 PVRSRV_POWER_STATE_D3,
++										 KERNEL_ID,
++										 IMG_FALSE);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
++		return eError;
++	}
++#endif 
++
++	/* Per-device de-init hook registered when the device was created. */
++	if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
++	{
++		eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
++			return eError;
++		}
++	}
++
++	/* Unlink the node from the system device list. */
++	*ppsDevNode = psDeviceNode->psNext;
++
++	/* Return the index to the free pool, then release the node itself.
++	 * NOTE(review): a size of 0 is passed to OSFreeMem here rather than
++	 * the node's allocation size — presumably tolerated for this heap;
++	 * confirm against the OSFreeMem implementation. */
++	FreeDeviceID(psSysData, ui32DevIndex);	
++	OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++				0, psDeviceNode, IMG_NULL);
++	
++	return (PVRSRV_OK);
++}
++
++
++/* Busy-wait until the value at pui32LinMemAddr, masked with ui32Mask,
++ * equals ui32Value.  Polls every ui32Waitus microseconds for a total
++ * budget of ui32Tries * ui32Waitus microseconds.
++ * Returns PVRSRV_OK on a match, PVRSRV_ERROR_GENERIC on timeout.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++									  IMG_UINT32 ui32Value,
++									  IMG_UINT32 ui32Mask,
++									  IMG_UINT32 ui32Waitus,
++									  IMG_UINT32 ui32Tries)
++{
++	IMG_BOOL	bTimerRunning = IMG_FALSE;
++	IMG_UINT32	ui32TimeStart = 0;
++	IMG_UINT32	ui32TimeNow = 0;
++	IMG_UINT32	ui32TimeOutUs = ui32Tries * ui32Waitus;
++
++	for (;;)
++	{
++		/* Success as soon as the masked value matches. */
++		if ((*pui32LinMemAddr & ui32Mask) == ui32Value)
++		{
++			return PVRSRV_OK;
++		}
++
++		/* Start timing only after the first failed check. */
++		if (!bTimerRunning)
++		{
++			bTimerRunning = IMG_TRUE;
++			ui32TimeStart = OSClockus();
++		}
++
++		OSWaitus(ui32Waitus);
++
++		ui32TimeNow = OSClockus();
++		if (ui32TimeNow < ui32TimeStart)
++		{
++			/* Microsecond clock wrapped: restart the window from zero
++			 * (matches the original driver's behaviour). */
++			ui32TimeStart = 0;
++		}
++
++		if ((ui32TimeNow - ui32TimeStart) >= ui32TimeOutUs)
++		{
++			break;
++		}
++	}
++
++	return PVRSRV_ERROR_GENERIC;
++}
++
++
++#if defined (USING_ISR_INTERRUPTS)
++
++/* Event-status word written by the services ISR. */
++extern IMG_UINT32 gui32EventStatusServicesByISR;
++
++/* Busy-wait until the ISR event-status word, masked with ui32Mask,
++ * equals ui32Value; on a match the status word is reset to 0.  Polls
++ * every ui32Waitus microseconds for at most ui32Tries * ui32Waitus
++ * microseconds in total.  Duplicates PollForValueKM's timeout loop but
++ * polls the ISR global instead of a caller-supplied address.
++ * Returns PVRSRV_OK on a match, PVRSRV_ERROR_GENERIC on timeout.
++ */
++PVRSRV_ERROR PollForInterruptKM (IMG_UINT32 ui32Value,
++								 IMG_UINT32 ui32Mask,
++								 IMG_UINT32 ui32Waitus,
++								 IMG_UINT32 ui32Tries)
++{
++	IMG_BOOL	bStart = IMG_FALSE;
++	IMG_UINT32	uiStart = 0, uiCurrent=0, uiMaxTime;
++
++	uiMaxTime = ui32Tries * ui32Waitus;
++
++	
++	do
++	{
++		if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value)
++		{
++			/* Consume the event so the next poll starts clean. */
++			gui32EventStatusServicesByISR = 0;
++			return PVRSRV_OK;
++		}
++
++		if (bStart == IMG_FALSE)
++		{
++			/* Start timing only after the first failed check. */
++			bStart = IMG_TRUE;
++			uiStart = OSClockus();
++		}
++
++		OSWaitus(ui32Waitus);
++
++		uiCurrent = OSClockus();
++		if (uiCurrent < uiStart)
++		{
++			/* Microsecond clock wrapped: restart the timing window. */
++			uiStart = 0;
++		}
++
++	} while ((uiCurrent - uiStart) < uiMaxTime); 
++
++	return PVRSRV_ERROR_GENERIC;
++}
++#endif  
++
++
++
++
++/* Return miscellaneous system information selected by the request bits
++ * in psMiscInfo->ui32StateRequest; each bit actually satisfied is set
++ * in psMiscInfo->ui32StatePresent:
++ *   PVRSRV_MISC_INFO_TIMER_PRESENT     - SOC timer register address.
++ *   PVRSRV_MISC_INFO_CLOCKGATE_PRESENT - SOC clock-gate register block.
++ *   PVRSRV_MISC_INFO_MEMSTATS_PRESENT  - textual memory statistics
++ *                                        written into pszMemoryStr
++ *                                        (at most ui32MemoryStrLen bytes).
++ * Returns PVRSRV_ERROR_INVALID_PARAMS for a null psMiscInfo or unknown
++ * request bits, otherwise PVRSRV_OK.
++ */
++IMG_EXPORT			
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
++{
++	SYS_DATA *psSysData;
++	PVRSRV_ERROR eError;
++	
++	if(!psMiscInfo)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));		
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++	
++	/* Reject any request bit this function does not understand. */
++	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
++										|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
++										|PVRSRV_MISC_INFO_MEMSTATS_PRESENT))
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
++		return PVRSRV_ERROR_INVALID_PARAMS;			
++	}
++
++	eError = SysAcquireData(&psSysData);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: Failed to get SysData"));		
++		return eError;	
++	}
++	
++	psMiscInfo->ui32StatePresent = 0;
++
++	/* SOC timer register, when the platform provides one. */
++	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT)
++	&& psSysData->pvSOCTimerRegisterKM)
++	{
++		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
++	}
++
++	/* SOC clock-gating registers, when the platform provides them. */
++	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT)
++	&& psSysData->pvSOCClockGateRegsBase)
++	{
++		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
++		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
++	}
++
++	/* Textual memory statistics: local backing-store arenas first, then
++	 * each device's kernel and application memory contexts.
++	 * CHECK_SPACE / UPDATE_SPACE are macros defined elsewhere;
++	 * CHECK_SPACE presumably bails out when too little buffer remains —
++	 * confirm, since each OSSNPrintf below writes with a fixed bound of
++	 * 100 bytes rather than the remaining length ui32StrLen. */
++	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT)
++	&& psMiscInfo->pszMemoryStr)
++	{
++		RA_ARENA			**ppArena;
++		BM_HEAP				*psBMHeap;
++	BM_CONTEXT			*psBMContext;
++		PVRSRV_DEVICE_NODE	*psDeviceNode;
++		IMG_CHAR			*pszStr;
++		IMG_UINT32			ui32StrLen;
++		IMG_INT32			i32Count;
++		
++		/* Cursor and remaining length advance together through the
++		 * caller's buffer. */
++		pszStr = psMiscInfo->pszMemoryStr;
++		ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++  
++		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++		/* Local (card-attached) backing-store arenas. */
++		ppArena = &psSysData->apsLocalDevMemArena[0];
++		while(*ppArena)
++		{
++			CHECK_SPACE(ui32StrLen);
++			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
++			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++			
++			/* RA_GetStats appends into the buffer, advancing both the
++			 * cursor and the remaining length. */
++			RA_GetStats(*ppArena,
++							&pszStr, 
++							&ui32StrLen);
++			
++			ppArena++;
++		}
++
++		/* Per-device statistics. */
++		psDeviceNode = psSysData->psDeviceNodeList;
++		while(psDeviceNode)
++		{
++			CHECK_SPACE(ui32StrLen);
++			i32Count = OSSNPrintf(pszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
++			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++			/* Kernel memory context: dump each heap's import and VM
++			 * arenas, when present. */
++			if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++			{
++				CHECK_SPACE(ui32StrLen);
++				i32Count = OSSNPrintf(pszStr, 100, "\nKernel Context:\n");
++				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++				
++				psBMHeap = psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap;
++				while(psBMHeap)
++				{		
++					if(psBMHeap->pImportArena)
++					{
++						RA_GetStats(psBMHeap->pImportArena,
++										&pszStr, 
++										&ui32StrLen);
++					}
++
++					if(psBMHeap->pVMArena)
++					{
++						RA_GetStats(psBMHeap->pVMArena,
++										&pszStr, 
++										&ui32StrLen);
++					}
++					psBMHeap = psBMHeap->psNext;
++				}
++			}
++		
++			/* Application (per-process) memory contexts, same layout as
++			 * the kernel context above. */
++			psBMContext = psDeviceNode->sDevMemoryInfo.pBMContext;
++			while(psBMContext)
++			{
++				CHECK_SPACE(ui32StrLen);
++				i32Count = OSSNPrintf(pszStr, 100, "\nApplication Context (hDevMemContext) 0x%08X:\n", (IMG_HANDLE)psBMContext);
++				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++				psBMHeap = psBMContext->psBMHeap;
++				while(psBMHeap)
++				{
++					if(psBMHeap->pImportArena)
++					{
++						RA_GetStats(psBMHeap->pImportArena,
++										&pszStr, 
++										&ui32StrLen);
++					}
++
++					if(psBMHeap->pVMArena)
++					{
++						RA_GetStats(psBMHeap->pVMArena,
++										&pszStr, 
++										&ui32StrLen);
++					}
++					psBMHeap = psBMHeap->psNext;
++				}
++				psBMContext = psBMContext->psNext;
++			}
++			psDeviceNode = psDeviceNode->psNext;
++		}
++
++		/* Terminating newline. */
++		i32Count = OSSNPrintf(pszStr, 100, "\n\0");
++		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++	}
++	
++	return PVRSRV_OK;
++}
++
++
++/* Sum framebuffer memory statistics over every contiguous region.
++ *
++ * pui32Total     - receives the total framebuffer bytes across regions.
++ * pui32Available - receives the free framebuffer bytes across regions.
++ * Always returns PVRSRV_OK; both outputs are zero when no regions exist.
++ */
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32		*pui32Total, 
++								IMG_UINT32		*pui32Available)
++{
++	IMG_UINT32 ui32RegionTotal = 0;
++	IMG_UINT32 ui32RegionFree = 0;
++	IMG_UINT32 ui32Region;
++
++	*pui32Total		= 0;
++	*pui32Available = 0;
++
++	/* BM_ContiguousStatistics reports one region per index and returns
++	 * IMG_FALSE once the index runs off the end. */
++	for (ui32Region = 0;
++		 BM_ContiguousStatistics(ui32Region, &ui32RegionTotal, &ui32RegionFree) == IMG_TRUE;
++		 ui32Region++)
++	{
++		*pui32Total		+= ui32RegionTotal;
++		*pui32Available += ui32RegionFree;
++	}
++
++	return PVRSRV_OK;
++}
++
++
++/* Low-level ISR for a single device.  Checks whether this device's SOC
++ * interrupt bit is pending, runs the device's registered ISR if so, and
++ * clears the bit afterwards.
++ * Returns IMG_TRUE when the device ISR reported the interrupt handled.
++ */
++IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	SYS_DATA	*psSysData;
++	IMG_BOOL	bHandled = IMG_FALSE;
++	IMG_UINT32	ui32Pending;
++
++	if (psDeviceNode == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
++		return IMG_FALSE;
++	}
++
++	psSysData = psDeviceNode->psSysData;
++
++	/* Only service the interrupt if this device's SOC bit is raised. */
++	ui32Pending = SysGetInterruptSource(psSysData, psDeviceNode);
++	if (ui32Pending & psDeviceNode->ui32SOCInterruptBit)
++	{
++		if (psDeviceNode->pfnDeviceISR != IMG_NULL)
++		{
++			bHandled = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);
++		}
++
++		SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
++	}
++
++	return bHandled;
++}
++
++
++/* System-wide low-level ISR.  Reads the pending interrupt sources once,
++ * dispatches to every device whose SOC bit is raised and which has a
++ * registered ISR, then clears exactly the bits that were dispatched.
++ *
++ * pvSysData - SYS_DATA pointer cast to void (as registered with the OS).
++ * Returns IMG_TRUE when at least one device ISR handled its interrupt.
++ */
++IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData)
++{
++	SYS_DATA			*psSysData = pvSysData;
++	PVRSRV_DEVICE_NODE	*psNode;
++	IMG_UINT32			ui32Pending;
++	IMG_UINT32			ui32HandledBits = 0;
++	IMG_BOOL			bHandled = IMG_FALSE;
++
++	if (psSysData == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
++		return IMG_FALSE;
++	}
++
++	/* Snapshot the pending sources for the whole system. */
++	ui32Pending = SysGetInterruptSource(psSysData, IMG_NULL);
++	if (ui32Pending == 0)
++	{
++		/* Nothing pending for us: don't touch the interrupt registers. */
++		return IMG_FALSE;
++	}
++
++	for (psNode = psSysData->psDeviceNodeList;
++		 psNode != IMG_NULL;
++		 psNode = psNode->psNext)
++	{
++		if (psNode->pfnDeviceISR == IMG_NULL)
++		{
++			continue;
++		}
++		if ((ui32Pending & psNode->ui32SOCInterruptBit) == 0)
++		{
++			continue;
++		}
++
++		if ((*psNode->pfnDeviceISR)(psNode->pvISRData))
++		{
++			bHandled = IMG_TRUE;
++		}
++
++		/* Clear the bit even if the device ISR did not claim it. */
++		ui32HandledBits |= psNode->ui32SOCInterruptBit;
++	}
++
++	SysClearInterrupts(psSysData, ui32HandledBits);
++
++	return bHandled;
++}
++
++
++/* Mid-level ISR: runs every device's registered MISR handler and then
++ * kicks the command-queue processor.
++ *
++ * pvSysData - SYS_DATA pointer cast to void (as registered with the OS).
++ */
++IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData)
++{
++	SYS_DATA			*psSysData = pvSysData;
++	PVRSRV_DEVICE_NODE	*psDeviceNode;
++
++	if(!psSysData)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
++		return;
++	}
++
++	/* Give every device with a registered MISR a chance to run. */
++	psDeviceNode = psSysData->psDeviceNodeList;
++	while(psDeviceNode != IMG_NULL)
++	{
++		if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
++		{
++			(*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
++		}
++		psDeviceNode = psDeviceNode->psNext;
++	}
++
++	/* Process queued commands; if another caller held the queues on the
++	 * first attempt (PROCESSING_BLOCKED), retry once.  NOTE(review): the
++	 * retry's result is ignored — presumably a second block just leaves
++	 * the work for the next MISR invocation; confirm this is intended. */
++	if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED)
++	{
++		PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
++	}
++}
++
++
++/* Save or restore the contents of every live segment in hArena.
++ *
++ * hArena     - resource arena whose live segments are walked.
++ * pbyBuffer  - save/restore buffer; when IMG_NULL the function only
++ *              computes the required buffer size (sizing pass).
++ * puiBufSize - in: capacity of pbyBuffer; out (sizing pass only): the
++ *              number of bytes required.
++ * bSave      - IMG_TRUE copies segment contents into the buffer,
++ *              IMG_FALSE copies buffer contents back into the segments.
++ *
++ * Buffer layout: a sequence of (uiSize, payload) records, one per live
++ * segment, in arena walk order.
++ * Returns PVRSRV_OK, PVRSRV_ERROR_INVALID_PARAMS for a null arena, or
++ * PVRSRV_ERROR_OUT_OF_MEMORY when the buffer is too small or a segment
++ * cannot be mapped.
++ */
++PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave)
++{
++	IMG_UINT32         uiBytesSaved = 0;
++	IMG_PVOID          pvLocalMemCPUVAddr;
++	RA_SEGMENT_DETAILS sSegDetails;
++
++	if (hArena == IMG_NULL)
++	{
++		return (PVRSRV_ERROR_INVALID_PARAMS);
++	}
++
++	/* Zeroed details ask RA_GetNextLiveSegment for the first segment. */
++	sSegDetails.uiSize = 0;
++	sSegDetails.sCpuPhyAddr.uiAddr = 0;
++	sSegDetails.hSegment = 0;
++
++	while (RA_GetNextLiveSegment(hArena, &sSegDetails))
++	{
++		if (pbyBuffer == IMG_NULL)
++		{
++			/* Sizing pass: header word plus payload per segment. */
++			uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++		}
++		else
++		{
++			if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize)
++			{
++				return (PVRSRV_ERROR_OUT_OF_MEMORY);
++			}
++
++			PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize));
++
++			/* Map the segment's physical range so the CPU can copy it. */
++			pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++									sSegDetails.uiSize,
++									PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++									IMG_NULL);
++			if (pvLocalMemCPUVAddr == IMG_NULL)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
++				return (PVRSRV_ERROR_OUT_OF_MEMORY);
++			}
++
++			if (bSave)
++			{
++				/* Write the record header (segment size), then payload. */
++				OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize));
++				pbyBuffer += sizeof(sSegDetails.uiSize);
++
++				OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize);
++				pbyBuffer += sSegDetails.uiSize;
++			}
++			else
++			{
++				IMG_UINT32 uiSize;
++				/* Restore: the stored record must match this segment. */
++				OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize));
++
++				if (uiSize != sSegDetails.uiSize)
++				{
++					/* NOTE(review): on a size mismatch pbyBuffer is not
++					 * advanced, yet uiBytesSaved below still grows, so
++					 * any later segment would read from a stale offset —
++					 * confirm this only occurs on a corrupt buffer. */
++					PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error"));
++				}
++				else
++				{
++					pbyBuffer += sizeof(sSegDetails.uiSize);
++
++					OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize);
++					pbyBuffer += sSegDetails.uiSize;
++				}
++			}
++
++			uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++			OSUnMapPhysToLin(pvLocalMemCPUVAddr,
++                   sSegDetails.uiSize,
++                   PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                   IMG_NULL);
++		}
++	}
++
++	/* Sizing pass: report the total bytes required. */
++	if (pbyBuffer == IMG_NULL)
++	{
++		*puiBufSize = uiBytesSaved;
++	}
++
++	return (PVRSRV_OK);
++}
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/queue.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/queue.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/queue.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/queue.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,966 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include "proc.h"
++
++/* procfs helper: append one line per command currently held in psQueue
++ * to 'buffer', walking from ui32ReadOffset to ui32WriteOffset with the
++ * power-of-two size mask; prints "<empty>" when no commands are queued.
++ * Returns the buffer offset after the last appended line.
++ *
++ * BUGFIX: the return type was 'int' although the function returns the
++ * off_t accumulator produced by printAppend, and the caller
++ * (QueuePrintQueues) returns the value as off_t — on builds where off_t
++ * is wider than int the offset was silently truncated.  The return type
++ * now matches the arithmetic. */
++static off_t
++QueuePrintCommands (PVRSRV_QUEUE_INFO * psQueue, char * buffer, size_t size)
++{
++	off_t off = 0;
++	int cmds = 0;
++	IMG_UINT32 ui32ReadOffset  = psQueue->ui32ReadOffset;
++	IMG_UINT32 ui32WriteOffset = psQueue->ui32WriteOffset;
++	PVRSRV_COMMAND * psCmd;
++
++	while (ui32ReadOffset != ui32WriteOffset)
++	{
++		psCmd = (PVRSRV_COMMAND *)((IMG_UINT32)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++		off = printAppend(buffer, size, off, "%p %p  %5lu  %6lu  %3lu  %5lu   %2lu   %2lu    %3lu  \n",
++							psQueue,
++							psCmd,
++							psCmd->ui32ProcessID,
++							psCmd->CommandType,
++							psCmd->ui32CmdSize,
++							psCmd->ui32DevIndex,
++							psCmd->ui32DstSyncCount,
++							psCmd->ui32SrcSyncCount,
++							psCmd->ui32DataSize);
++		
++		/* Step to the next command; the queue size is a power of two,
++		 * so masking with (size - 1) wraps the offset. */
++		ui32ReadOffset += psCmd->ui32CmdSize;
++		ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++		cmds++;
++	}
++	if (cmds == 0)
++		off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++	return off;
++} 
++
++
++/* procfs read callback for the command-queue table.  At offset 0 the
++ * column header is printed; each subsequent offset maps to one queue on
++ * the system queue list, whose commands are dumped via
++ * QueuePrintCommands.  Offsets past the end of the list (or failure to
++ * acquire SysData) yield END_OF_FILE.
++ */
++off_t
++QueuePrintQueues (char * buffer, size_t size, off_t off)
++{
++	SYS_DATA * psSysData;
++	PVRSRV_QUEUE_INFO * psQueue;
++	
++	if (SysAcquireData(&psSysData) != PVRSRV_OK)
++		return END_OF_FILE;
++
++	 if (!off)
++		  return printAppend (buffer, size, 0,
++								"Command Queues\n"
++								"Queue    CmdPtr      Pid Command Size DevInd  DSC  SSC  #Data ...\n");
++
++	/* Skip (off - 1) queues so that read offset N addresses the Nth
++	 * queue on the list; psQueue is NULL once off runs past the end. */
++ 
++	for (psQueue = psSysData->psQueueList; --off && psQueue; psQueue = psQueue->psNextKM)
++		;
++
++	return psQueue ? QueuePrintCommands (psQueue, buffer, size) : END_OF_FILE;
++} 
++#endif 
++
++/* Free space (bytes) in the circular command queue.  The "- 1" keeps
++ * one byte permanently unused so a full queue can be distinguished from
++ * an empty one; ui32QueueSize must be a power of two for the mask. */
++#define GET_SPACE_IN_CMDQ(psQueue)										\
++	(((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset)				\
++	+ (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++/* Advance the queue's write offset by ui32Size, wrapping at the
++ * (power-of-two) queue size. */
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size)							\
++	psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size)	\
++	& (psQueue->ui32QueueSize - 1);
++
++/* True once every pending sync operation has completed. */
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending)					\
++	(ui32OpsComplete >= ui32OpsPending)
++
++/* Round ui32Value up to the nearest power of two; 0 maps to 0. */
++IMG_UINT32 NearestPower2(IMG_UINT32 ui32Value)
++{
++	IMG_UINT32 ui32Rounded;
++	IMG_UINT32 ui32Shifter;
++
++	if (ui32Value == 0)
++	{
++		return 0;
++	}
++
++	/* Shift the result left once per significant bit of (value - 1),
++	 * which yields the smallest power of two >= ui32Value. */
++	for (ui32Rounded = 1, ui32Shifter = ui32Value - 1;
++		 ui32Shifter != 0;
++		 ui32Shifter >>= 1)
++	{
++		ui32Rounded <<= 1;
++	}
++
++	return ui32Rounded;
++}
++
++
++/* Allocate and initialise a command queue of at least ui32QueueSize
++ * bytes (rounded up to a power of two) and link it onto the system
++ * queue list, creating the list's lock resource for the first queue.
++ *
++ * ui32QueueSize - requested queue size in bytes.
++ * ppsQueueInfo  - receives the new queue on success.
++ * Returns PVRSRV_OK, or PVRSRV_ERROR_GENERIC on any allocation or
++ * locking failure (partially constructed state is released).
++ */
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 ui32QueueSize,
++													 PVRSRV_QUEUE_INFO **ppsQueueInfo)
++{
++	/* BUGFIX: initialise to IMG_NULL so ErrorExit does not test an
++	 * uninitialised pointer when the first allocation fails. */
++	PVRSRV_QUEUE_INFO	*psQueueInfo = IMG_NULL;
++	IMG_UINT32			ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++	SYS_DATA			*psSysData;
++	PVRSRV_ERROR		eError;
++	IMG_HANDLE			hMemBlock;
++
++	eError = SysAcquireData(&psSysData);
++	if (eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++
++	/* Queue descriptor. */
++	if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++					 sizeof(PVRSRV_QUEUE_INFO),
++					 (IMG_VOID **)&psQueueInfo, &hMemBlock) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
++		goto ErrorExit;
++	}
++	OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
++
++	psQueueInfo->hMemBlock[0] = hMemBlock;
++	psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++	/* Queue buffer, oversized by PVRSRV_MAX_CMD_SIZE so a maximal
++	 * command can be written past the wrap point. */
++	if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++					 ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE, 
++					 &psQueueInfo->pvLinQueueKM, &hMemBlock) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
++		goto ErrorExit;
++	}
++
++	psQueueInfo->hMemBlock[1] = hMemBlock;
++	psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++	/* OSMemSet above guarantees a fresh, empty queue. */
++	PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++	PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++	psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++	/* First queue in the system: create the lock protecting the list. */
++	if (psSysData->psQueueList == IMG_NULL)
++	{
++		eError = OSCreateResource(&psSysData->sQProcessResource);
++		if (eError != PVRSRV_OK)
++		{
++			goto ErrorExit;
++		}
++	}
++	
++	/* Link the new queue at the head of the list under the lock. */
++	if (OSLockResource(&psSysData->sQProcessResource, 
++							KERNEL_ID) != PVRSRV_OK)
++	{
++		goto ErrorExit;
++	}
++
++	psQueueInfo->psNextKM = psSysData->psQueueList;
++	psSysData->psQueueList = psQueueInfo;
++
++	if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) != PVRSRV_OK)
++	{
++		/* BUGFIX: the queue is already linked on psQueueList, so it must
++		 * not be freed here — the previous code jumped to ErrorExit and
++		 * freed a node still reachable from the list. */
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	*ppsQueueInfo = psQueueInfo;
++
++	return PVRSRV_OK;
++	
++ErrorExit:
++
++	if(psQueueInfo)
++	{
++		if(psQueueInfo->pvLinQueueKM)
++		{
++			/* BUGFIX: free with the size actually allocated above
++			 * (ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE), not the
++			 * possibly-unset psQueueInfo->ui32QueueSize. */
++			OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++						ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
++						psQueueInfo->pvLinQueueKM,
++						psQueueInfo->hMemBlock[1]);
++		}
++
++		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++					sizeof(PVRSRV_QUEUE_INFO), 
++					psQueueInfo, 
++					psQueueInfo->hMemBlock[0]);
++	}
++
++	return PVRSRV_ERROR_GENERIC;
++}
++
++
++/* Wait (up to MAX_HW_TIME_US) for psQueueInfo to drain, then unlink it
++ * from the system queue list and free it; the list's lock resource is
++ * destroyed when the last queue goes away.
++ *
++ * psQueueInfo - the queue to destroy (must be on the system list).
++ * Returns PVRSRV_OK, PVRSRV_ERROR_INVALID_PARAMS when the queue is not
++ * found on the list, or a lock/resource error.
++ */
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
++{
++	PVRSRV_QUEUE_INFO	*psQueue;
++	SYS_DATA			*psSysData;
++	PVRSRV_ERROR		eError;
++	IMG_BOOL			bTimeout = IMG_TRUE;
++	IMG_BOOL			bStart = IMG_FALSE;
++	IMG_UINT32			uiStart = 0;
++
++	eError = SysAcquireData(&psSysData);
++	if (eError != PVRSRV_OK)
++	{
++		return eError;
++	}
++
++	psQueue = psSysData->psQueueList;
++
++	/* Wait for the queue to drain (read offset catching up with the
++	 * write offset) before tearing it down. */
++	do
++	{
++		if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
++		{
++			bTimeout = IMG_FALSE;
++			break;
++		}
++
++		if (bStart == IMG_FALSE)
++		{
++			bStart = IMG_TRUE;
++			uiStart = OSClockus();
++		}
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++	if (bTimeout)
++	{
++		/* NOTE(review): eError is overwritten by OSLockResource below,
++		 * so a flush timeout can be silently lost when the remaining
++		 * teardown succeeds — confirm whether that is intended. */
++		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
++		eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++	}
++
++	/* Unlink and free the queue under the list lock. */
++	eError = OSLockResource(&psSysData->sQProcessResource, 
++								KERNEL_ID);
++	if (eError != PVRSRV_OK)
++	{
++		goto ErrorExit;
++	}
++	
++	/* Head of the list: relink directly.  NOTE(review): this free logic
++	 * is duplicated in the interior-node branch below. */
++	if(psQueue == psQueueInfo)
++	{
++		psSysData->psQueueList = psQueueInfo->psNextKM;
++
++		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++					psQueueInfo->ui32QueueSize,
++					psQueueInfo->pvLinQueueKM,
++					psQueueInfo->hMemBlock[1]);
++		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++					sizeof(PVRSRV_QUEUE_INFO),
++					psQueueInfo,
++					psQueueInfo->hMemBlock[0]);
++	}
++	else
++	{
++		/* Interior node: find the predecessor and splice it out. */
++		while(psQueue)
++		{
++			if(psQueue->psNextKM == psQueueInfo)
++			{
++				psQueue->psNextKM = psQueueInfo->psNextKM;
++
++				OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++							psQueueInfo->ui32QueueSize,
++							psQueueInfo->pvLinQueueKM,
++							psQueueInfo->hMemBlock[1]);
++				OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++							sizeof(PVRSRV_QUEUE_INFO),
++							psQueueInfo,
++							psQueueInfo->hMemBlock[0]);
++				break;
++			}
++			psQueue = psQueue->psNextKM;
++		}
++
++		/* Queue was never on the list: release the lock and fail. */
++		if(!psQueue)
++		{
++			eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++			if (eError != PVRSRV_OK)
++			{
++				goto ErrorExit;
++			}
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			goto ErrorExit;
++		}
++	}
++
++	eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++	if (eError != PVRSRV_OK)
++	{
++		goto ErrorExit;
++	}
++
++	/* Last queue gone: destroy the list's lock resource. */
++	if (psSysData->psQueueList == IMG_NULL)
++	{
++		eError = OSDestroyResource(&psSysData->sQProcessResource);
++		if (eError != PVRSRV_OK)
++		{
++			goto ErrorExit;
++		}
++	}
++	
++ErrorExit:
++
++	return eError;	
++}
++
++
++/* Reserve space for a command of ui32ParamSize bytes (rounded up to a
++ * 4-byte boundary) in psQueue, waiting up to MAX_HW_TIME_US for space
++ * to appear.  On success *ppvSpace points at the current write position
++ * in the user-mapped queue buffer.
++ * Returns PVRSRV_OK, PVRSRV_ERROR_CMD_TOO_BIG for oversized commands,
++ * or PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE on timeout (*ppvSpace NULL).
++ */
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++												IMG_UINT32 ui32ParamSize,
++												IMG_VOID **ppvSpace)
++{
++	IMG_BOOL bGotSpace = IMG_FALSE;
++	IMG_BOOL bTicking = IMG_FALSE;
++	IMG_UINT32 ui32TimeStart = 0;
++	IMG_UINT32 ui32TimeNow = 0;
++
++	/* Round the request up to a 4-byte boundary. */
++	ui32ParamSize =  (ui32ParamSize+3) & 0xFFFFFFFC;
++
++	if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
++	{
++		PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
++		return PVRSRV_ERROR_CMD_TOO_BIG;
++	}
++
++	for (;;)
++	{
++		/* Strictly greater: one byte of the circular queue is always
++		 * kept unused (see GET_SPACE_IN_CMDQ). */
++		if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
++		{
++			bGotSpace = IMG_TRUE;
++			break;
++		}
++
++		/* Start timing only after the first failed check. */
++		if (!bTicking)
++		{
++			bTicking = IMG_TRUE;
++			ui32TimeStart = OSClockus();
++		}
++
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++
++		ui32TimeNow = OSClockus();
++		if (ui32TimeNow < ui32TimeStart)
++		{
++			/* Clock wrapped: restart the window (as original). */
++			ui32TimeStart = 0;
++		}
++
++		if ((ui32TimeNow - ui32TimeStart) >= MAX_HW_TIME_US)
++		{
++			break;
++		}
++	}
++
++	if (!bGotSpace)
++	{
++		*ppvSpace = IMG_NULL;
++
++		return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++	}
++
++	/* Hand back the write position in the user-mode mapping. */
++	*ppvSpace = (IMG_VOID *)(psQueue->ui32WriteOffset + (IMG_UINT32)psQueue->pvLinQueueUM);
++
++	return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO     *psQueue,
++                                                                                              PVRSRV_COMMAND          **ppsCommand,
++                                                                                              IMG_UINT32                      ui32DevIndex,
++                                                                                              IMG_UINT16                      CommandType,
++                                                                                              IMG_UINT32                      ui32DstSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++                                                                                              IMG_UINT32                      ui32SrcSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                                                              IMG_UINT32                      ui32DataByteSize )
++{
++      PVRSRV_ERROR    eError;
++      PVRSRV_COMMAND  *psCommand;
++      IMG_UINT32              ui32CommandSize;
++      IMG_UINT32              i;
++
++      
++      ui32DataByteSize = (ui32DataByteSize + 3) & 0xFFFFFFFC;
++
++      
++      ui32CommandSize = sizeof(PVRSRV_COMMAND) 
++                                      + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
++                                      + ui32DataByteSize;
++
++      
++      eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      psCommand->ui32ProcessID        = OSGetCurrentProcessIDKM();
++
++      
++      psCommand->ui32CmdSize          = ui32CommandSize; 
++      psCommand->ui32DevIndex         = ui32DevIndex;
++      psCommand->CommandType          = CommandType;
++      psCommand->ui32DstSyncCount     = ui32DstSyncCount;
++      psCommand->ui32SrcSyncCount     = ui32SrcSyncCount;
++      psCommand->psDstSync            = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psCommand) + sizeof(PVRSRV_COMMAND));     
++
++
++      psCommand->psSrcSync            = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psCommand->psDstSync) 
++                                                              + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++      psCommand->pvData                       = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psCommand->psSrcSync) 
++                                                              + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++      psCommand->ui32DataSize         = ui32DataByteSize;
++
++      
++      for (i=0; i<ui32DstSyncCount; i++)
++      {
++              psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++              psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
++              psCommand->psDstSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
++      }
++
++      
++      for (i=0; i<ui32SrcSyncCount; i++)
++      {
++              psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++              psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
++              psCommand->psSrcSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);  
++      }
++
++      
++      *ppsCommand = psCommand;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              PVRSRV_COMMAND *psCommand)
++{
++      
++      if (psCommand->ui32DstSyncCount > 0)
++      {
++              psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psQueue->pvLinQueueKM) 
++                                                                      + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
++      }
++
++      if (psCommand->ui32SrcSyncCount > 0)
++      {
++              psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psQueue->pvLinQueueKM) 
++                                                                      + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++                                                                      + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++      }
++
++      psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psQueue->pvLinQueueKM) 
++                                                                      + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++                                                                      + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
++                                                                      + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++      
++      UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++      
++      return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA                    *psSysData,
++                                                                PVRSRV_COMMAND        *psCommand,
++                                                                IMG_BOOL                      bFlush)
++{
++      PVRSRV_SYNC_OBJECT              *psWalkerObj;
++      PVRSRV_SYNC_OBJECT              *psEndObj;
++      IMG_UINT32                              i;
++      COMMAND_COMPLETE_DATA   *psCmdCompleteData;
++      PVRSRV_ERROR                    eError = PVRSRV_OK;
++      IMG_UINT32                              ui32WriteOpsComplete;
++      IMG_UINT32                              ui32ReadOpsComplete;
++
++      
++      psWalkerObj = psCommand->psDstSync;
++      psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++      while (psWalkerObj < psEndObj)
++      {
++              PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++              ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++              ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++              
++              if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++              ||      (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++              {
++                      if (!bFlush ||
++                              !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++                              !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++                      {
++                              return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++                      }
++              }
++
++              psWalkerObj++;
++      }
++
++      
++      psWalkerObj = psCommand->psSrcSync;
++      psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++      while (psWalkerObj < psEndObj)
++      {
++              PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++              ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++              ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++              
++              if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++              || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++              {
++                      if (!bFlush &&
++                              SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
++                              SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++                      {
++                              PVR_DPF((PVR_DBG_WARNING,
++                                              "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++                                              psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
++                      }
++
++                      if (!bFlush ||
++                              !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++                              !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++                      {
++                              return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++                      }
++              }
++              psWalkerObj++;
++      }
++
++      
++      if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                                      "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++                                      psCommand->ui32DevIndex));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      psCmdCompleteData = psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->CommandType];
++      if (psCmdCompleteData->bInUse)
++      {
++              
++              return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++      }
++
++      
++      psCmdCompleteData->bInUse = IMG_TRUE;
++
++      
++      psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++      for (i=0; i<psCommand->ui32DstSyncCount; i++)
++      {
++              psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++      }
++
++      
++      psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++      for (i=0; i<psCommand->ui32SrcSyncCount; i++)
++      {
++              psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++      }
++
++      
++
++
++
++
++
++
++
++
++
++      if (psSysData->ppfnCmdProcList[psCommand->ui32DevIndex][psCommand->CommandType]((IMG_HANDLE)psCmdCompleteData, 
++                                                                                                                                                              psCommand->ui32DataSize, 
++                                                                                                                                                              psCommand->pvData) == IMG_FALSE)
++      {
++              
++
++
++              psCmdCompleteData->bInUse = IMG_FALSE;
++              eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++      }
++      
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32   ui32CallerID,
++                                                               IMG_BOOL       bFlush)
++{
++      PVRSRV_QUEUE_INFO       *psQueue;
++      SYS_DATA                        *psSysData;
++      PVRSRV_COMMAND          *psCommand;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_ERROR            eError;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psSysData->bReProcessQueues = IMG_FALSE;
++
++      
++      eError = OSLockResource(&psSysData->sQProcessResource,
++                                                      ui32CallerID);
++      if(eError != PVRSRV_OK)
++      {
++              
++              psSysData->bReProcessQueues = IMG_TRUE;
++
++              
++              if(ui32CallerID == ISR_ID)
++              {
++                      if (bFlush)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH"));
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Couldn't acquire queue processing lock"));                       
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Queue processing failed when called from Services - not expected behaviour!"));
++              }
++              
++              return PVRSRV_OK;
++      }
++
++      psQueue = psSysData->psQueueList;
++
++      if(!psQueue)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
++      }
++
++      if (bFlush)
++      {
++              PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++      }
++
++      while (psQueue)
++      {
++              while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
++              {
++                      psCommand = (PVRSRV_COMMAND*)((IMG_UINT32)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);
++
++                      if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
++                      {
++                                      
++                              UPDATE_QUEUE_ROFF(psQueue, psCommand->ui32CmdSize)
++                              
++                              if (bFlush)
++                              {
++                                      continue;
++                              }
++                      }
++
++                      break;
++              }
++              psQueue = psQueue->psNextKM;
++      }
++
++      if (bFlush)
++      {
++              PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if (psDeviceNode->bReProcessDeviceCommandComplete &&
++                      psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++              {
++                      (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      
++      OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++      
++      
++      if(psSysData->bReProcessQueues)
++      {
++              return PVRSRV_ERROR_PROCESSING_BLOCKED;
++      }
++      
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR)
++{
++      IMG_UINT32                              i;
++      COMMAND_COMPLETE_DATA   *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
++      SYS_DATA                                *psSysData;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return;
++      }
++
++      
++      for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
++      {
++              psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;
++      }
++
++      
++      for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
++      {
++              psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete++;
++      }
++      
++      
++      psCmdCompleteData->bInUse = IMG_FALSE;
++      
++      
++      PVRSRVCommandCompleteCallbacks();
++      
++#if defined(SYS_USING_INTERRUPTS)
++      if(bScheduleMISR)
++      {
++              OSScheduleMISR(psSysData);
++      }
++#else
++      PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
++#endif 
++}
++
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID)
++{
++      SYS_DATA                                *psSysData;
++      PVRSRV_DEVICE_NODE              *psDeviceNode;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCommandCompleteCallbacks: SysAcquireData failed"));
++              return;
++      }
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++              {
++                      
++                      (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32           ui32DevIndex,
++                                                                               PFN_CMD_PROC   *ppfnCmdProcList,
++                                                                               IMG_UINT32             ui32MaxSyncsPerCmd[][2],
++                                                                               IMG_UINT32             ui32CmdCount)
++{
++      SYS_DATA                                *psSysData;
++      PVRSRV_ERROR                    eError;
++      IMG_UINT32                              i;
++      IMG_UINT32                              ui32AllocSize;
++      PFN_CMD_PROC                    *ppfnCmdProc;
++      COMMAND_COMPLETE_DATA   *psCmdCompleteData;
++
++      
++      if(ui32DevIndex >= SYS_DEVICE_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                                      "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++                                      ui32DevIndex));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: SysAcquireData failed"));
++              return eError;
++      }
++
++      
++      eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       ui32CmdCount * sizeof(PFN_CMD_PROC), 
++                                       (IMG_VOID **)&psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc queue"));
++              return eError;
++      }
++
++      
++      ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++      
++      for (i=0; i<ui32CmdCount; i++)
++      {
++              ppfnCmdProc[i] = ppfnCmdProcList[i];
++      }
++
++      
++      ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++      eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                       ui32AllocSize, 
++                                       (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
++              goto ErrorExit;
++      }
++
++      for (i=0; i<ui32CmdCount; i++)
++      {
++              
++
++              ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA) 
++                                        + ((ui32MaxSyncsPerCmd[i][0]
++                                        +     ui32MaxSyncsPerCmd[i][1])
++                                        * sizeof(PVRSRV_SYNC_OBJECT));         
++
++              eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                      ui32AllocSize,
++                                                      (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++                                                      IMG_NULL);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d",i));
++                      goto ErrorExit;
++              }
++
++              
++              OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00, ui32AllocSize);
++
++              psCmdCompleteData = psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++              
++              psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
++                                                                              (((IMG_UINT32)psCmdCompleteData) 
++                                                                              + sizeof(COMMAND_COMPLETE_DATA));
++              psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
++                                                                              (((IMG_UINT32)psCmdCompleteData->psDstSync) 
++                                                                              + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0]));
++      }
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      
++
++      if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++      {
++              for (i=0; i<ui32CmdCount; i++)
++              {
++                      if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++                      {
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++                      }
++              }
++
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++      }
++
++      if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++      {
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++      }
++      
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++                                                                         IMG_UINT32 ui32CmdCount)
++{
++      SYS_DATA                *psSysData;
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              i;
++
++      
++      if(ui32DevIndex >= SYS_DEVICE_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                                      "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++                                      ui32DevIndex));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveCmdProcListKM: SysAcquireData failed"));
++              return eError;
++      }
++
++      if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++      {
++              for(i=0; i<ui32CmdCount; i++)
++              {
++                      
++                      if(psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++                      {
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++                      }
++              }
++
++              
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++      }
++
++      
++      if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++      {
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++      }
++
++      return PVRSRV_OK;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/ra.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/ra.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/ra.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/ra.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1181 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#ifdef __linux__
++#include <linux/kernel.h>
++#include "proc.h"
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++#include <stdio.h>
++#endif
++
++#define MINIMUM_HASH_SIZE (64)
++
++struct _BT_
++{
++      enum bt_type
++      {
++              btt_span,                               
++              btt_free,                               
++              btt_live                                
++      } type;
++
++      
++      IMG_UINTPTR_T base;
++      IMG_SIZE_T uSize;
++
++      
++      struct _BT_ *pNextSegment;
++      struct _BT_ *pPrevSegment;
++      
++      struct _BT_ *pNextFree;
++      struct _BT_ *pPrevFree;
++      
++      BM_MAPPING *psMapping;
++};
++typedef struct _BT_ BT;
++
++
++struct _RA_ARENA_
++{
++      
++      char *name;
++
++      
++      IMG_UINT32 uQuantum;
++
++      
++      IMG_BOOL (*pImportAlloc)(void *,
++                                                       IMG_SIZE_T uSize,
++                                                       IMG_SIZE_T *pActualSize,
++                                                       BM_MAPPING **ppsMapping,
++                                                       IMG_UINT32 uFlags,
++                                                       IMG_UINTPTR_T *pBase);
++      void (*pImportFree) (void *,
++                                               IMG_UINTPTR_T,
++                                               BM_MAPPING *psMapping);
++      void (*pBackingStoreFree) (void *, IMG_UINT32, IMG_UINT32, IMG_HANDLE);
++
++      
++      void *pImportHandle;
++
++      
++#define FREE_TABLE_LIMIT 32
++
++      
++      BT *aHeadFree [FREE_TABLE_LIMIT];
++
++      
++      BT *pHeadSegment;
++      BT *pTailSegment;
++
++      
++      HASH_TABLE *pSegmentHash;
++
++#ifdef RA_STATS
++      RA_STATISTICS sStatistics;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE                32
++      char szProcInfoName[PROC_NAME_SIZE];
++      char szProcSegsName[PROC_NAME_SIZE];
++#endif
++};
++
++void
++RA_Dump (RA_ARENA *pArena);
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static int
++RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof, void *data);
++static int
++RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++void CheckBMFreespace(void);
++#endif
++
++static IMG_BOOL
++_RequestAllocFail (void *_h,
++                                IMG_SIZE_T _uSize,
++                                IMG_SIZE_T *_pActualSize, 
++                                BM_MAPPING **_ppsMapping,
++                                IMG_UINT32 _uFlags,
++                                IMG_UINTPTR_T *_pBase)
++{
++      PVR_UNREFERENCED_PARAMETER (_h);
++      PVR_UNREFERENCED_PARAMETER (_uSize);
++      PVR_UNREFERENCED_PARAMETER (_pActualSize);
++      PVR_UNREFERENCED_PARAMETER (_ppsMapping);
++      PVR_UNREFERENCED_PARAMETER (_uFlags);
++      PVR_UNREFERENCED_PARAMETER (_pBase);
++
++      return IMG_FALSE;
++}
++
++static IMG_UINT32
++pvr_log2 (IMG_SIZE_T n)
++{
++      IMG_UINT32 l = 0;
++      n>>=1;
++      while (n>0)
++      {
++              n>>=1;
++              l++;
++      }
++      return l;
++}
++
++static void
++_SegmentListInsertAfter (RA_ARENA *pArena,
++                                               BT *pInsertionPoint,
++                                               BT *pBT)
++{
++      PVR_ASSERT (pArena != IMG_NULL);
++      PVR_ASSERT (pInsertionPoint != IMG_NULL);
++
++      pBT->pNextSegment = pInsertionPoint->pNextSegment;
++      pBT->pPrevSegment = pInsertionPoint;
++      if (pInsertionPoint->pNextSegment == IMG_NULL)
++              pArena->pTailSegment = pBT;
++      else
++              pInsertionPoint->pNextSegment->pPrevSegment = pBT; 
++      pInsertionPoint->pNextSegment = pBT;
++}
++
++static void
++_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
++{
++      
++      if (pArena->pHeadSegment == IMG_NULL)
++      {
++              pArena->pHeadSegment = pArena->pTailSegment = pBT;
++              pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
++      }
++      else
++      {
++              BT *pBTScan;
++              pBTScan = pArena->pHeadSegment;
++              while (pBTScan->pNextSegment != IMG_NULL 
++                         && pBT->base >= pBTScan->pNextSegment->base)
++                      pBTScan = pBTScan->pNextSegment;
++              _SegmentListInsertAfter (pArena, pBTScan, pBT);
++      }
++}
++
++static void
++_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
++{
++      if (pBT->pPrevSegment == IMG_NULL)
++              pArena->pHeadSegment = pBT->pNextSegment;
++      else
++              pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++      if (pBT->pNextSegment == IMG_NULL)
++              pArena->pTailSegment = pBT->pPrevSegment;
++      else
++              pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static BT *
++_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
++{
++      BT *pNeighbour;
++
++      PVR_ASSERT (pArena != IMG_NULL);
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(BT), 
++                                      (IMG_VOID **)&pNeighbour, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pNeighbour->pPrevSegment = pBT;
++      pNeighbour->pNextSegment = pBT->pNextSegment;
++      if (pBT->pNextSegment == IMG_NULL)
++              pArena->pTailSegment = pNeighbour;
++      else
++              pBT->pNextSegment->pPrevSegment = pNeighbour;
++      pBT->pNextSegment = pNeighbour;
++
++      pNeighbour->type = btt_free;
++      pNeighbour->uSize = pBT->uSize - uSize;
++      pNeighbour->base = pBT->base + uSize;
++      pNeighbour->psMapping = pBT->psMapping;
++      pBT->uSize = uSize;
++      return pNeighbour;
++}
++
++static void
++_FreeListInsert (RA_ARENA *pArena, BT *pBT)
++{
++      IMG_UINT32 uIndex;
++      uIndex = pvr_log2 (pBT->uSize);
++      pBT->type = btt_free;
++      pBT->pNextFree = pArena->aHeadFree [uIndex];
++      pBT->pPrevFree = IMG_NULL;
++      if (pArena->aHeadFree[uIndex] != IMG_NULL)
++              pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++      pArena->aHeadFree [uIndex] = pBT;
++}
++
++static void
++_FreeListRemove (RA_ARENA *pArena, BT *pBT)
++{
++      IMG_UINT32 uIndex;
++      uIndex = pvr_log2 (pBT->uSize);
++      if (pBT->pNextFree != IMG_NULL)
++              pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++      if (pBT->pPrevFree == IMG_NULL)
++              pArena->aHeadFree[uIndex] = pBT->pNextFree;
++      else
++              pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++/* Allocate a btt_span marker tag at [base, base+uSize) with no mapping.
++ * Span markers bracket imported regions in the segment list.
++ * Returns the new tag, or IMG_NULL on allocation failure. */
++static BT *
++_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pBT;
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(BT),
++                                      (IMG_VOID **)&pBT, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pBT->type = btt_span;
++      pBT->base = base;
++      pBT->uSize = uSize;
++      pBT->psMapping = IMG_NULL;
++
++      return pBT;
++}
++
++/* Allocate a btt_free boundary tag covering [base, base+uSize).
++ * Returns the new tag, or IMG_NULL on allocation failure.
++ * NOTE(review): psMapping is NOT initialised here — callers that need it
++ * (e.g. RA_Create after _InsertResource) must assign it themselves. */
++static BT *
++_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pBT;
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(BT), 
++                                      (IMG_VOID **)&pBT, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pBT->type = btt_free;
++      pBT->base = base;
++      pBT->uSize = uSize;
++
++      return pBT;
++}
++
++/* Build a free boundary tag for [base, base+uSize) and link it into both
++ * the segment list and the free list; updates the RA_STATS counters.
++ * Returns the tag, or IMG_NULL if _BuildBT failed.
++ * NOTE(review): the tag's psMapping is left unset by _BuildBT; RA_Create
++ * assigns it after this call, but RA_Add does not — verify callers. */
++static BT *
++_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pBT;
++      PVR_ASSERT (pArena!=IMG_NULL);
++      pBT = _BuildBT (base, uSize);
++      if (pBT != IMG_NULL)
++      {
++              _SegmentListInsert (pArena, pBT);
++              _FreeListInsert (pArena, pBT);
++#ifdef RA_STATS
++              pArena->sStatistics.uTotalResourceCount+=uSize;
++              pArena->sStatistics.uFreeResourceCount+=uSize;
++              pArena->sStatistics.uSpanCount++;
++#endif
++      }
++      return pBT;
++}
++
++/* Insert an imported span into the arena: a zero-size btt_span marker at
++ * each end with a free boundary tag for [base, base+uSize) between them.
++ * The markers let _FreeBT recognise when a whole import becomes free so
++ * it can be returned via pImportFree.  Returns the free tag, or IMG_NULL
++ * on allocation failure (partial allocations are released via the goto
++ * unwind chain). */
++static BT *
++_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pSpanStart;
++      BT *pSpanEnd;
++      BT *pBT;
++
++      PVR_ASSERT (pArena != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++                        pArena->name, base, uSize));
++
++      pSpanStart = _BuildSpanMarker (base, uSize);
++      if (pSpanStart == IMG_NULL)
++      {
++              goto fail_start;
++      }
++      pSpanEnd = _BuildSpanMarker (base + uSize, 0);
++      if (pSpanEnd == IMG_NULL)
++      {
++              goto fail_end;
++      }
++
++      pBT = _BuildBT (base, uSize);
++      if (pBT == IMG_NULL)
++      {
++              goto fail_bt;
++      }
++
++      /* Segment-list order: span-start, free tag, span-end. */
++      _SegmentListInsert (pArena, pSpanStart);
++      _SegmentListInsertAfter (pArena, pSpanStart, pBT);
++      _FreeListInsert (pArena, pBT);
++      _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
++#ifdef RA_STATS
++      pArena->sStatistics.uTotalResourceCount+=uSize;
++#endif
++      return pBT;
++
++  fail_bt:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
++  fail_end:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
++  fail_start:
++      return IMG_NULL;
++}
++
++/* Return boundary tag pBT to the arena:
++ *   1. coalesce with an adjacent free predecessor and/or successor;
++ *   2. if bFreeBackingStore and the arena has a pBackingStoreFree hook,
++ *      release the whole quanta covered only by the originally-freed
++ *      range (rounded inward so partially-shared quanta are kept);
++ *   3. if the coalesced block is bracketed by two btt_span markers the
++ *      whole import is free — return it via pImportFree and destroy the
++ *      tags; otherwise put pBT back on the free list. */
++static void
++_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
++{
++      BT *pNeighbour;
++      IMG_UINTPTR_T uOrigBase;
++      IMG_SIZE_T uOrigSize;
++
++      PVR_ASSERT (pArena!=IMG_NULL);
++      PVR_ASSERT (pBT!=IMG_NULL);
++
++#ifdef RA_STATS
++      pArena->sStatistics.uLiveSegmentCount--;
++      pArena->sStatistics.uFreeSegmentCount++;
++      pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++#endif
++
++      /* Remember the pre-coalesce extent for the backing-store rounding below. */
++      uOrigBase = pBT->base;
++      uOrigSize = pBT->uSize;
++
++      
++      /* Merge with a free predecessor that ends exactly at our base. */
++      pNeighbour = pBT->pPrevSegment;
++      if (pNeighbour!=IMG_NULL
++              && pNeighbour->type == btt_free
++              && pNeighbour->base + pNeighbour->uSize == pBT->base)
++      {
++              _FreeListRemove (pArena, pNeighbour);
++              _SegmentListRemove (pArena, pNeighbour);
++              pBT->base = pNeighbour->base;
++              pBT->uSize += pNeighbour->uSize;
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++#ifdef RA_STATS
++              pArena->sStatistics.uFreeSegmentCount--;
++#endif
++      }
++
++      
++      /* Merge with a free successor that starts exactly at our end. */
++      pNeighbour = pBT->pNextSegment;
++      if (pNeighbour!=IMG_NULL
++              && pNeighbour->type == btt_free
++              && pBT->base + pBT->uSize == pNeighbour->base)
++      {
++              _FreeListRemove (pArena, pNeighbour);
++              _SegmentListRemove (pArena, pNeighbour);
++              pBT->uSize += pNeighbour->uSize;
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++#ifdef RA_STATS
++              pArena->sStatistics.uFreeSegmentCount--;
++#endif
++      }
++
++      
++      if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
++      {
++              IMG_UINTPTR_T   uRoundedStart, uRoundedEnd;
++
++              
++              /* Round the original range inward to whole quanta so that
++               * quanta shared with still-allocated neighbours are kept. */
++              uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
++              
++              if (uRoundedStart < pBT->base)
++              {
++                      uRoundedStart += pArena->uQuantum;
++              }
++
++              
++              uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
++              
++              if (uRoundedEnd > (pBT->base + pBT->uSize))
++              {
++                      uRoundedEnd -= pArena->uQuantum;
++              }
++              
++              if (uRoundedStart < uRoundedEnd)
++              {
++                      pArena->pBackingStoreFree(pArena->pImportHandle, uRoundedStart, uRoundedEnd, (IMG_HANDLE)0);
++              }
++      }
++
++      /* Bracketed by two span markers => the entire import is now free. */
++      if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
++              && pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
++      {
++              BT *next = pBT->pNextSegment;
++              BT *prev = pBT->pPrevSegment;
++              _SegmentListRemove (pArena, next);
++              _SegmentListRemove (pArena, prev);
++              _SegmentListRemove (pArena, pBT);
++              pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
++#ifdef RA_STATS
++              pArena->sStatistics.uSpanCount--;
++              pArena->sStatistics.uExportCount++;
++              pArena->sStatistics.uFreeSegmentCount--;
++              pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++              pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
++#endif
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++      }
++      else
++              _FreeListInsert (pArena, pBT);
++}
++
++
++/* Try to allocate uSize bytes from the arena's existing free space.
++ * Scans the power-of-two free-list buckets from pvr_log2(uSize) upward,
++ * looking for a free tag that can hold uSize bytes at the requested
++ * alignment (aligned_base satisfies (aligned_base + uAlignmentOffset)
++ * % uAlignment == 0) and whose mapping flags match uFlags.  The chosen
++ * tag is split at front/back as needed, marked btt_live, and recorded
++ * in the segment hash keyed by base.  On success writes *base (and
++ * *ppsMapping if non-NULL) and returns IMG_TRUE; returns IMG_FALSE when
++ * no suitable free tag exists or a split/hash insert fails. */
++static IMG_BOOL
++_AttemptAllocAligned (RA_ARENA *pArena,
++                                        IMG_SIZE_T uSize,
++                                        BM_MAPPING **ppsMapping,
++                                        IMG_UINT32 uFlags,
++                                        IMG_UINT32 uAlignment,
++                                        IMG_UINT32 uAlignmentOffset,
++                                        IMG_UINTPTR_T *base)
++{
++      IMG_UINT32 uIndex;
++      PVR_ASSERT (pArena!=IMG_NULL);
++
++      PVR_UNREFERENCED_PARAMETER (uFlags);
++
++      if (uAlignment>1)
++              uAlignmentOffset %= uAlignment;
++
++      
++
++      /* Start at the size class that could contain uSize. */
++      uIndex = pvr_log2 (uSize);
++
++#if 0
++      
++      if (1u<<uIndex < uSize)
++              uIndex++;
++#endif
++
++      while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
++              uIndex++;
++
++      while (uIndex < FREE_TABLE_LIMIT)
++      {
++              if (pArena->aHeadFree[uIndex]!=IMG_NULL)
++              {
++                      
++                      BT *pBT;
++
++                      pBT = pArena->aHeadFree [uIndex];
++                      while (pBT!=IMG_NULL)
++                      {
++                              IMG_UINTPTR_T aligned_base;
++
++                              /* Round base up to the requested alignment (offset-adjusted). */
++                              if (uAlignment>1)
++                                      aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
++                              else
++                                      aligned_base = pBT->base;
++                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                "RA_AttemptAllocAligned: pBT-base=0x%x "
++                                                "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++                                              pBT->base, pBT->uSize, aligned_base, uSize));
++
++                              if (pBT->base + pBT->uSize >= aligned_base + uSize)
++                              {
++                                      if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
++                                      {
++                                              _FreeListRemove (pArena, pBT);
++
++                                              PVR_ASSERT (pBT->type == btt_free);
++
++#ifdef RA_STATS
++                                              pArena->sStatistics.uLiveSegmentCount++;
++                                              pArena->sStatistics.uFreeSegmentCount--;
++                                              pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++#endif
++
++                                              
++                                              /* Split off the unused front part before aligned_base. */
++                                              if (aligned_base > pBT->base)
++                                              {
++                                                      BT *pNeighbour;
++
++                                                      pNeighbour = _SegmentSplit (pArena, pBT, aligned_base-pBT->base);
++                                                      
++                                                      if (pNeighbour==IMG_NULL)
++                                                      {
++                                                              PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
++                                                              
++                                                              _FreeListInsert (pArena, pBT); 
++                                                              return IMG_FALSE;
++                                                      }
++
++                                                      _FreeListInsert (pArena, pBT);
++      #ifdef RA_STATS
++                                                      pArena->sStatistics.uFreeSegmentCount++;
++                                                      pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++      #endif
++                                                      pBT = pNeighbour;
++                                              }
++
++                                              
++                                              /* Split off the unused tail beyond uSize. */
++                                              if (pBT->uSize > uSize)
++                                              {
++                                                      BT *pNeighbour;
++                                                      pNeighbour = _SegmentSplit (pArena, pBT, uSize);
++                                                      
++                                                      if (pNeighbour==IMG_NULL)
++                                                      {
++                                                              PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
++                                                              
++                                                              _FreeListInsert (pArena, pBT); 
++                                                              return IMG_FALSE;
++                                                      }
++
++                                                      _FreeListInsert (pArena, pNeighbour);
++      #ifdef RA_STATS
++                                                      pArena->sStatistics.uFreeSegmentCount++;
++                                                      pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
++      #endif
++                                              }
++
++                                              pBT->type = btt_live;
++
++                                              /* Register the live segment so RA_Free can find it by base. */
++                                              if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
++                                              {
++                                                      _FreeBT (pArena, pBT, IMG_FALSE);
++                                                      return IMG_FALSE;
++                                              }
++
++                                              if (ppsMapping!=IMG_NULL)
++                                                      *ppsMapping = pBT->psMapping;
++
++                                              *base = pBT->base;
++                                              
++                                              return IMG_TRUE;
++                                      }
++                                      else
++                                      {
++                                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                              "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));
++
++                                      }
++                              }
++                              pBT = pBT->pNextFree;
++                      }
++                      
++              }
++              uIndex++;
++      }
++
++      return IMG_FALSE;
++}
++
++
++
++/* Create a resource arena.
++ *   name             - label used in debug output and /proc entry names
++ *   base/uSize       - optional initial resource span (uSize==0 => none;
++ *                      uSize is rounded up to a multiple of uQuantum)
++ *   psMapping        - mapping assigned to the initial span's tag
++ *   uQuantum         - import/rounding granularity
++ *   alloc/free       - import callbacks (alloc defaults to
++ *                      _RequestAllocFail when NULL)
++ *   backingstore_free- optional hook invoked by _FreeBT
++ *   pImportHandle    - opaque handle passed to the callbacks
++ * Returns the arena, or IMG_NULL on failure (all partial allocations are
++ * released via the goto unwind chain). */
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++                 IMG_UINTPTR_T base, 
++                 IMG_SIZE_T uSize, 
++                 BM_MAPPING *psMapping,
++                 IMG_SIZE_T uQuantum,
++                 IMG_BOOL (*alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
++                                   BM_MAPPING **ppsMapping, IMG_UINT32 _flags, IMG_UINTPTR_T *pBase),
++                 IMG_VOID (*free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *psMapping),
++                 IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_UINT32, IMG_UINT32, IMG_HANDLE),
++                 IMG_VOID *pImportHandle)
++{
++      RA_ARENA *pArena;
++      BT *pBT;
++      int i;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++                        name, base, uSize, alloc, free));
++
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof (*pArena),
++                                       (IMG_VOID **)&pArena, IMG_NULL) != PVRSRV_OK)
++      {
++              goto arena_fail;
++      }
++
++      pArena->name = name;
++      pArena->pImportAlloc = alloc!=IMG_NULL ? alloc : _RequestAllocFail;
++      pArena->pImportFree = free;
++      pArena->pBackingStoreFree = backingstore_free;
++      pArena->pImportHandle = pImportHandle;
++      for (i=0; i<FREE_TABLE_LIMIT; i++)
++              pArena->aHeadFree[i] = IMG_NULL;
++      pArena->pHeadSegment = IMG_NULL;
++      pArena->pTailSegment = IMG_NULL;
++      pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++      pArena->sStatistics.uSpanCount = 0;
++      pArena->sStatistics.uLiveSegmentCount = 0;
++      pArena->sStatistics.uFreeSegmentCount = 0;
++      pArena->sStatistics.uFreeResourceCount = 0;
++      pArena->sStatistics.uTotalResourceCount = 0;
++      pArena->sStatistics.uCumulativeAllocs = 0;
++      pArena->sStatistics.uCumulativeFrees = 0;
++      pArena->sStatistics.uImportCount = 0;
++      pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++      /* Expose ra_info_<name> and ra_segs_<name> in /proc for debugging.
++       * NOTE(review): sprintf into fixed-size szProc* buffers — assumes
++       * arena names are short enough; confirm buffer sizes. */
++      if(strcmp(pArena->name,"") != 0)
++      {
++              sprintf(pArena->szProcInfoName, "ra_info_%s", pArena->name);
++              CreateProcEntry(pArena->szProcInfoName, RA_DumpInfo, 0, pArena);
++              sprintf(pArena->szProcSegsName, "ra_segs_%s", pArena->name);
++              CreateProcEntry(pArena->szProcSegsName, RA_DumpSegs, 0, pArena);
++      }
++#endif
++
++      pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE);
++      if (pArena->pSegmentHash==IMG_NULL)
++      {
++              goto hash_fail;
++      }
++      if (uSize>0)
++      {
++              uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++              pBT = _InsertResource (pArena, base, uSize);
++              if (pBT == IMG_NULL)
++              {
++                      goto insert_fail;
++              }
++              pBT->psMapping = psMapping;
++              
++      }
++      return pArena;
++
++  insert_fail:
++      HASH_Delete (pArena->pSegmentHash);
++  hash_fail:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pArena, IMG_NULL);
++  arena_fail:
++      return IMG_NULL;
++}
++
++/* Destroy an arena: free every remaining segment tag (asserting each is
++ * btt_free, i.e. no live allocations remain), remove the /proc entries,
++ * delete the segment hash, and free the arena itself. */
++void
++RA_Delete (RA_ARENA *pArena)
++{
++      IMG_UINT32 uIndex;
++
++      PVR_ASSERT(pArena != IMG_NULL);
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Delete: name='%s'", pArena->name));
++
++      for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
++              pArena->aHeadFree[uIndex] = IMG_NULL;
++
++      while (pArena->pHeadSegment != IMG_NULL)
++      {
++              BT *pBT = pArena->pHeadSegment;
++              PVR_ASSERT (pBT->type == btt_free);
++              _SegmentListRemove (pArena, pBT);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++#ifdef RA_STATS
++              pArena->sStatistics.uSpanCount--;
++#endif
++      }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++      RemoveProcEntry(pArena->szProcInfoName);
++      RemoveProcEntry(pArena->szProcSegsName);
++#endif
++      HASH_Delete (pArena->pSegmentHash);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pArena, IMG_NULL);
++}
++
++/* Add a further resource span [base, base+uSize) to the arena; uSize is
++ * rounded up to a multiple of the arena quantum.  Returns IMG_TRUE on
++ * success.
++ * NOTE(review): the inserted tag's psMapping is never initialised on
++ * this path (_BuildBT leaves it unset) — verify before relying on it. */
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      PVR_ASSERT (pArena != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize));
++
++      uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum;
++      return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL));
++}
++
++/* Allocate uRequestSize bytes from the arena.  First tries the existing
++ * free space via _AttemptAllocAligned; if that fails, imports a new span
++ * through pImportAlloc (padded for alignment and rounded up to whole
++ * quanta), inserts it, and retries.  On success writes *base (and
++ * *ppsMapping if non-NULL) and returns IMG_TRUE.
++ * NOTE(review): *pActualSize is set to the request size up front, not to
++ * any adjusted size — confirm that is the intended contract. */
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++                IMG_SIZE_T uRequestSize,
++                IMG_SIZE_T *pActualSize,
++                BM_MAPPING **ppsMapping,
++                IMG_UINT32 uFlags,
++                IMG_UINT32 uAlignment,
++                IMG_UINT32 uAlignmentOffset,
++                IMG_UINTPTR_T *base)
++{
++      IMG_BOOL bResult = IMG_FALSE;
++      IMG_SIZE_T uSize = uRequestSize;
++
++      PVR_ASSERT (pArena!=IMG_NULL);
++
++#ifdef USE_BM_FREESPACE_CHECK
++      CheckBMFreespace();
++#endif
++
++      if (pActualSize != IMG_NULL)
++              *pActualSize = uSize;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x", 
++                 pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset));
++
++      
++
++      /* First attempt: satisfy from existing free space. */
++      bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags,
++                                                                      uAlignment, uAlignmentOffset, base);
++      if (!bResult)
++      {
++              BM_MAPPING *psImportMapping;
++              IMG_UINTPTR_T import_base;
++              IMG_SIZE_T uImportSize = uSize;
++
++              
++
++
++              /* Pad so the import can satisfy the alignment regardless of
++               * where the imported span happens to start. */
++              if (uAlignment > pArena->uQuantum)
++              {
++                      uImportSize += (uAlignment - 1);
++              }
++
++              
++              uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum;
++              
++              bResult =
++                      pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize,
++                                                               &psImportMapping, uFlags, &import_base);
++              if (bResult)
++              {
++                      BT *pBT;
++                      pBT = _InsertResourceSpan (pArena, import_base, uImportSize);
++                      
++                      if (pBT == IMG_NULL)
++                      {
++                              /* Could not track the span — hand it straight back. */
++                              pArena->pImportFree(pArena->pImportHandle, import_base,
++                                                                      psImportMapping);
++                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                "RA_Alloc: name='%s', size=0x%x failed!", 
++                                                pArena->name, uSize));
++                              
++                              return IMG_FALSE;
++                      }
++                      pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++                      pArena->sStatistics.uFreeSegmentCount++;
++                      pArena->sStatistics.uFreeResourceCount += uImportSize;
++                      pArena->sStatistics.uImportCount++;
++                      pArena->sStatistics.uSpanCount++;
++#endif
++                      /* Retry now that the new span is in the free lists. */
++                      bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++                                                                                 uAlignment, uAlignmentOffset,
++                                                                                 base);
++                      if (!bResult)
++                      {
++                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                "RA_Alloc: name='%s' uAlignment failed!",
++                                                pArena->name));
++                      }
++              }
++      }
++#ifdef RA_STATS
++      if (bResult)
++              pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++                        pArena->name, uSize, *base, bResult));
++
++      
++
++      return bResult;
++}
++
++/* Free the allocation whose base address is 'base'.  Looks the live
++ * boundary tag up in (and removes it from) the segment hash, then hands
++ * it to _FreeBT.  bFreeBackingStore is forwarded to _FreeBT to control
++ * whether the backing-store hook is invoked.  An unknown base trips the
++ * assert and is otherwise ignored. */
++void
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore)
++{
++      BT *pBT;
++
++      PVR_ASSERT (pArena != IMG_NULL);
++
++#ifdef USE_BM_FREESPACE_CHECK
++      CheckBMFreespace();
++#endif
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Free: name='%s', base=0x%x", pArena->name, base));
++      
++      pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base);
++      PVR_ASSERT (pBT != IMG_NULL);
++
++      if (pBT)
++      {
++              PVR_ASSERT (pBT->base == base);
++
++#ifdef RA_STATS
++              pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++/* Debug aid: fill the freed range with 0xAA so CheckBMFreespace can
++ * later detect writes to supposedly-free memory. */
++{
++unsigned char* p;
++unsigned char* endp;
++
++      p = (unsigned char*)pBT->base + SysGetDevicePhysOffset();
++      endp = (unsigned char*)((IMG_UINT32)(p + pBT->uSize));
++      while ((IMG_UINT32)p & 3)
++      {
++              *p++ = 0xAA;
++      }
++      while (p < (unsigned char*)((IMG_UINT32)endp & 0xfffffffc))
++      {
++              *(IMG_UINT32*)p = 0xAAAAAAAA;
++              p += sizeof(IMG_UINT32);
++      }
++      while (p < endp)
++      {
++              *p++ = 0xAA;
++      }
++      PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(unsigned char*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize));
++}
++#endif
++              _FreeBT (pArena, pBT, bFreeBackingStore);
++      }
++}
++
++
++/* Iterate over the arena's live (allocated) segments.  On the first call
++ * psSegDetails->hSegment must be NULL/0; each successful call fills in
++ * uiSize and sCpuPhyAddr for one live segment and stores the next search
++ * position in hSegment.  Returns IMG_FALSE (with hSegment = -1) when no
++ * further live segment exists. */
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails)
++{
++      BT        *pBT;
++
++      if (psSegDetails->hSegment)
++      {
++              pBT = (BT *)psSegDetails->hSegment;
++      }
++      else
++      {
++              RA_ARENA *pArena = (RA_ARENA *)hArena;
++
++              pBT = pArena->pHeadSegment;
++      }
++      
++      while (pBT != IMG_NULL)
++      {
++              if (pBT->type == btt_live)
++              {
++                      psSegDetails->uiSize = pBT->uSize;      
++                      psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++                      psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment;
++
++                      return IMG_TRUE;
++              }
++
++              pBT = pBT->pNextSegment;
++      }
++
++      psSegDetails->uiSize = 0;       
++      psSegDetails->sCpuPhyAddr.uiAddr = 0;
++      psSegDetails->hSegment = (IMG_HANDLE)-1;
++
++      return IMG_FALSE;
++}
++      
++
++#ifdef USE_BM_FREESPACE_CHECK
++/* Arena registered for free-space poison checking (set externally). */
++RA_ARENA* pJFSavedArena = IMG_NULL;
++
++/* Debug aid: walk every free segment of pJFSavedArena and verify the
++ * 0xAA poison pattern written by RA_Free is intact; on corruption, log
++ * and spin forever so the state can be inspected.
++ * NOTE(review): uses fprintf(stderr,...) — unusual in kernel-side code;
++ * presumably only built in a user-space/debug configuration. */
++void CheckBMFreespace(void)
++{
++BT *pBT;
++unsigned char* p;
++unsigned char* endp;
++
++      if (pJFSavedArena != IMG_NULL)
++      {
++              for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++              {
++                      if (pBT->type == btt_free)
++                      {
++                              p = (unsigned char*)pBT->base + SysGetDevicePhysOffset();
++                              endp = (unsigned char*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc);
++
++                              while ((IMG_UINT32)p & 3)
++                              {
++                                      if (*p++ != 0xAA)
++                                      {
++                                              fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(unsigned long*)p);
++                                              for (;;);
++                                              break;
++                                      }
++                              }
++                              while (p < endp)
++                              {
++                                      if (*(unsigned long*)p != 0xAAAAAAAA)
++                                      {
++                                              fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(unsigned long*)p);
++                                              for (;;);
++                                              break;
++                                      }
++                                      p += 4;
++                              }
++                      }
++              }
++      }
++}
++#endif
++
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
++/* Map a boundary-tag type to a printable name for debug dumps;
++ * unknown values yield "junk". */
++static char *
++_BTType (int eType)
++{
++      switch (eType)
++      {
++      case btt_span: return "span";
++      case btt_free: return "free";
++      case btt_live: return "live";
++      }
++      return "junk";
++}
++#endif 
++
++
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++/* Legacy procfs read_proc handler for ra_segs_<name>: one line per call.
++ * off==0 prints the header; off==N prints the N-th segment tag; sets
++ * *eof past the last tag.  *start=(char*)1 selects the read_proc
++ * convention where 'off' counts items rather than bytes; requires a
++ * buffer of at least 80 bytes. */
++static int
++RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      BT *pBT = 0;
++      int len = 0;
++      RA_ARENA *pArena = (RA_ARENA *)data;
++
++      if (count < 80)
++      {
++              *start = (char *)0;
++              return (0);
++      }
++      *eof = 0;
++      *start = (char *)1;
++      if (off == 0)
++      {
++              return printAppend(page, count, 0, "Arena \"%s\"\nBase         Size Type Ref\n", pArena->name);
++      }
++      /* Advance to the off-th segment in the list. */
++      for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment)
++              ;
++      if (pBT)
++      {
++              len = printAppend(page, count, 0, "%08x %8x %4s %08x\n", 
++                                                      (unsigned int)pBT->base, (unsigned int)pBT->uSize, _BTType (pBT->type),
++                                                      (unsigned int)pBT->psMapping);
++      }
++      else
++      {
++              *eof = 1;
++      }
++      return (len);
++}
++
++/* Legacy procfs read_proc handler for ra_info_<name>: 'off' selects one
++ * statistic line per call (quantum, import handle, then the RA_STATS
++ * counters); sets *eof when off is past the last line.  Requires a
++ * buffer of at least 80 bytes. */
++static int
++RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      int len = 0;
++      RA_ARENA *pArena = (RA_ARENA *)data;
++
++      if (count < 80)
++      {
++              *start = (char *)0;
++              return (0);
++      }
++      *eof = 0;
++      switch (off)
++      {
++      case 0:
++              len = printAppend(page, count, 0, "quantum\t\t\t%lu\n", pArena->uQuantum);
++              break;
++      case 1:
++              len = printAppend(page, count, 0, "import_handle\t\t%08X\n", (unsigned int)pArena->pImportHandle);
++              break;
++#ifdef RA_STATS
++      case 2:
++              len = printAppend(page, count, 0, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++              break;
++      case 3:
++              len = printAppend(page, count, 0, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++              break;
++      case 4:
++              len = printAppend(page, count, 0, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++              break;
++      case 5:
++              len = printAppend(page, count, 0, "free resource count\t%lu (0x%x)\n",
++                                                      pArena->sStatistics.uFreeResourceCount,
++                                                      (unsigned int)pArena->sStatistics.uFreeResourceCount);
++              break;
++      case 6:
++              len = printAppend(page, count, 0, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++              break;
++      case 7:
++              len = printAppend(page, count, 0, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++              break;
++      case 8:
++              len = printAppend(page, count, 0, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++              break;
++      case 9:
++              len = printAppend(page, count, 0, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++              break;
++#endif
++
++      default:
++              *eof = 1;
++      }
++      *start = (char *)1;
++      return (len);
++}
++#endif
++
++
++#ifdef RA_STATS
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++                                                      IMG_CHAR **ppszStr, 
++                                                      IMG_UINT32 *pui32StrLen)
++{
++      IMG_CHAR        *pszStr = *ppszStr;
++      IMG_UINT32      ui32StrLen = *pui32StrLen;
++      IMG_INT32       i32Count;
++      BT                      *pBT;
++      
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      
++              
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "  allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n", 
++                                                       pArena->pImportAlloc, 
++                                                       pArena->pImportFree, 
++                                                       pArena->pImportHandle,
++                                                       pArena->uQuantum);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n",
++                                                      pArena->sStatistics.uFreeResourceCount,
++                                                      (unsigned int)pArena->sStatistics.uFreeResourceCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "  segment Chain:\n");
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      
++      if (pArena->pHeadSegment != IMG_NULL &&
++          pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++      {
++              CHECK_SPACE(ui32StrLen);
++              i32Count = OSSNPrintf(pszStr, 100, "  error: head boundary tag has invalid pPrevSegment\n");
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++      
++      if (pArena->pTailSegment != IMG_NULL &&
++          pArena->pTailSegment->pNextSegment != IMG_NULL)
++      {
++              CHECK_SPACE(ui32StrLen);
++              i32Count = OSSNPrintf(pszStr, 100, "  error: tail boundary tag has invalid pNextSegment\n");
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++      
++      for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++      {
++              CHECK_SPACE(ui32StrLen);
++              i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%08X\n", 
++                                                                                       (unsigned long) pBT->base,
++                                                                                       pBT->uSize,
++                                                                                       _BTType(pBT->type),
++                                                                                       pBT->psMapping);
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++
++      *ppszStr = pszStr;
++      *pui32StrLen = ui32StrLen;
++      
++      return PVRSRV_OK;
++}
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/resman.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/resman.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/resman.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/resman.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,958 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++
++#ifdef __linux__
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/semaphore.h>
++#include <linux/version.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++#include <linux/semaphore.h>
++#endif
++#include <linux/sched.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#include <linux/hardirq.h>
++#else
++#include <asm/hardirq.h>
++#endif
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ  do {                                                        \
++              if (in_interrupt()) {                                                   \
++                      printk ("ISR cannot take RESMAN mutex\n");      \
++                      BUG();                                                                          \
++              }                                                                                               \
++              else down (&lock);                                                              \
++} while (0)
++#define RELEASE_SYNC_OBJ up (&lock)
++
++#else
++
++#define ACQUIRE_SYNC_OBJ
++#define RELEASE_SYNC_OBJ
++
++#endif
++
++#define RESMAN_SIGNATURE 0x12345678
++
++typedef struct _RESMAN_ITEM_	/* one registered resource, linked into its owning process's list */
++{
++#ifdef DEBUG
++	IMG_UINT32				ui32Signature;	/* RESMAN_SIGNATURE; integrity check in debug builds */
++#endif
++	struct _RESMAN_ITEM_	**ppsThis;	/* address of the pointer that points at this item (allows O(1) unlink) */
++	struct _RESMAN_ITEM_	*psNext;	/* next item in the owning process's list */
++
++	IMG_UINT32				ui32Flags;	
++	IMG_UINT32				ui32ResType;	/* RESMAN_TYPE_* resource class */
++
++	IMG_PVOID				pvParam;	/* opaque args handed back to pfnFreeResource */
++	IMG_UINT32				ui32Param;	
++
++	RESMAN_FREE_FN			pfnFreeResource;	/* cleanup callback run when the item is freed */
++
++	IMG_UINT32				ui32ProcessID;	/* PID the resource is registered against */
++
++} RESMAN_ITEM;
++
++
++typedef struct _RESMAN_PROCESS_	/* per-process record: PID, connect ref-count and owned resources */
++{
++#ifdef DEBUG
++	IMG_UINT32					ui32Signature;	/* RESMAN_SIGNATURE; integrity check in debug builds */
++#endif
++	struct	_RESMAN_PROCESS_	**ppsThis;	/* back-pointer for O(1) unlink, as in RESMAN_ITEM */
++	struct	_RESMAN_PROCESS_	*psNext;
++
++	IMG_UINT32					ui32ProcessID;
++	IMG_UINT32					ui32RefCount;	/* connect/disconnect balance; record freed when it reaches zero */ 
++	RESMAN_ITEM					*psResItemList;	/* singly-linked list of this process's resources */
++
++} RESMAN_PROCESS, *PRESMAN_PROCESS;
++
++
++typedef struct	/* global anchor: list of all connected processes */
++{
++	RESMAN_PROCESS	*psProcessList;	/* head of the process list (IMG_NULL when empty) */
++
++} RESMAN_LIST, *PRESMAN_LIST;
++
++
++PRESMAN_LIST  gpsResList=IMG_NULL;
++
++
++#define PRINT_RESLIST(x, y, z)
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback);
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_PROCESS    psProcess,
++                                                                                 IMG_UINT32           ui32SearchCriteria, 
++                                                                                 IMG_UINT32           ui32ResType, 
++                                                                                 IMG_PVOID            pvParam, 
++                                                                                 IMG_UINT32           ui32Param, 
++                                                                                 IMG_BOOL                     bExecuteCallback);
++
++static PRESMAN_PROCESS FindProcess(IMG_UINT32 ui32ProcessID);
++static IMG_VOID SaveRestoreBuffers(IMG_BOOL bSaveBuffers);
++
++#ifdef DEBUG
++      static IMG_VOID ValidateResList(PRESMAN_LIST psResList);
++      #define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++      #define VALIDATERESLIST()
++#endif
++
++#ifdef __linux__
++#include "proc.h"
++#endif
++
++#if defined(__linux__)
++
++static const char *
++resourceType (IMG_UINT32 type)	/* debug/proc helper: map a RESMAN_TYPE_* value to a printable name */
++{
++	static char buf[32];	/* NOTE(review): static scratch buffer for the default case - not reentrant */
++	switch (type)
++	{
++		
++		case RESMAN_TYPE_HW_RENDER_CONTEXT:
++			return "HW Render Context Resource";
++		case RESMAN_TYPE_SHARED_PB_DESC:
++			return "Shared Parameter Buffer Description Resource";
++		
++		
++		
++		
++		
++		case RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN:
++			return "Display Class Swapchain Resource";
++		case RESMAN_TYPE_DISPLAYCLASS_DEVICE:
++			return "Display Class Device Resource";
++		
++		
++		case RESMAN_TYPE_BUFFERCLASS_DEVICE:
++			return "Buffer Class Device Resource";
++
++		
++		case RESMAN_TYPE_OS_USERMODE_MAPPING:
++			return "OS specific User mode mappings";
++		
++		
++		case RESMAN_TYPE_DEVICEMEM_CONTEXT:
++			return "Device Memory Context Resource";
++		case RESMAN_TYPE_DEVICECLASSMEM_MAPPING:
++			return "Device Memory Mapping Resource";	/* NOTE(review): same text as DEVICEMEM_MAPPING below - the two types are indistinguishable in proc output */
++		case RESMAN_TYPE_DEVICEMEM_MAPPING:
++			return "Device Memory Mapping Resource";
++		case RESMAN_TYPE_DEVICEMEM_WRAP:
++			return "Device Memory Wrap Resource";
++		case RESMAN_TYPE_DEVICEMEM_ALLOCATION:
++			return "Device Memory Allocation Resource";
++		default:					
++			sprintf(buf, "Unknown (type %lu)", type);	/* assumes IMG_UINT32 is unsigned long here - TODO confirm %lu width */
++			return buf;
++	}
++}
++#endif
++
++#ifdef __linux__
++static off_t
++ResManPrintProcessResources (char * buffer, size_t size,
++							 PRESMAN_PROCESS psProcess)	/* /proc helper: append one process's resource table to buffer */
++{
++    off_t off = 0;
++    
++	
++	PRESMAN_ITEM psCurItem = psProcess->psResItemList;
++
++	off = printAppend (buffer, size, 0,
++					   "  pid=%ld ref count=%ld\n"
++					   "    Flags    pParam   Param    FreeFn   Type\n",
++					   psProcess->ui32ProcessID,
++					   psProcess->ui32RefCount);	/* NOTE(review): %ld with IMG_UINT32 args - confirm widths match on this platform */
++	while(psCurItem)	/* one output line per registered resource item */
++	{
++		off  = printAppend (buffer, size, off,
++							"    %8lx %8p %8lx %8p %s\n",
++							psCurItem->ui32Flags,
++							psCurItem->pvParam,
++							psCurItem->ui32Param,
++							psCurItem->pfnFreeResource,
++							resourceType(psCurItem->ui32ResType));
++		psCurItem = psCurItem->psNext;
++	}
++	return off;
++} 
++
++static off_t
++ResManPrintAllProcessResources (char * buffer, size_t size, off_t off)	/* /proc read handler: off==0 prints the header, off==n prints the n'th process */
++{
++	PRESMAN_PROCESS psProcess = IMG_NULL;	/* fix: was read uninitialised when gpsResList == IMG_NULL */
++
++	VALIDATERESLIST();
++
++	if (size < 80)	/* not enough room for a useful output line */
++		return 0;
++	
++	if (!off)
++		return printAppend (buffer, size, 0, "Registered resources\n");
++	
++	if (gpsResList != IMG_NULL)
++	{
++		/* walk to the off'th process record */
++		psProcess = gpsResList->psProcessList;
++		while (--off && psProcess)
++			psProcess = psProcess->psNext;
++	}
++	if (psProcess == IMG_NULL)	/* fix: end of list (or resman torn down) - stop instead of passing NULL to a dereferencing callee */
++		return 0;
++	return ResManPrintProcessResources (buffer, size, psProcess);
++} 
++
++#endif 
++
++
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID)	/* allocate the global resource list; idempotent across repeated calls */
++{
++	
++	if(gpsResList == IMG_NULL)
++	{	
++		if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++						sizeof(*gpsResList),
++						(IMG_VOID **)&gpsResList, IMG_NULL) != PVRSRV_OK)
++		{
++			return PVRSRV_ERROR_OUT_OF_MEMORY;
++		}
++
++		
++		gpsResList->psProcessList = IMG_NULL;	/* start with no connected processes */
++	
++		
++		VALIDATERESLIST();
++
++	}
++	return PVRSRV_OK;
++}
++
++
++IMG_VOID ResManDeInit(IMG_VOID)	/* release the global resource list allocated by ResManInit */
++{
++	if (gpsResList != IMG_NULL)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL);
++		gpsResList = IMG_NULL;	/* fix: clear the global so FindProcess/ResManInit cannot use the freed block (ResManProcessDisconnect already does this) */
++	}
++}
++
++
++static PVRSRV_ERROR ResManProcessConnect(IMG_UINT32 ui32ProcID)	/* create or ref-count the per-process record for ui32ProcID */
++{
++	PRESMAN_PROCESS		psProcess;
++	PVRSRV_ERROR		eError;
++
++	
++	ACQUIRE_SYNC_OBJ;
++
++	
++	VALIDATERESLIST();
++
++	
++	psProcess = FindProcess(ui32ProcID);
++	if(psProcess == IMG_NULL)
++	{
++		/* first connect from this PID: allocate and link a new record */
++		eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_PROCESS), (IMG_VOID **)&psProcess, IMG_NULL);
++		if(eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "ResManProcessConnect: ERROR allocating new RESMAN process struct"));
++			
++			VALIDATERESLIST();
++
++			
++			RELEASE_SYNC_OBJ;
++
++			return eError;
++		}
++
++#ifdef DEBUG
++		psProcess->ui32Signature = RESMAN_SIGNATURE;
++#endif 
++		psProcess->ui32ProcessID	= ui32ProcID;
++		psProcess->ui32RefCount		= 0;
++		psProcess->psResItemList	= IMG_NULL;
++
++		/* push onto the head of the global process list, fixing up back-pointers */
++		psProcess->psNext		= gpsResList->psProcessList;
++		psProcess->ppsThis		= &gpsResList->psProcessList;
++		gpsResList->psProcessList	= psProcess;
++		if (psProcess->psNext)
++		{
++			psProcess->psNext->ppsThis = &(psProcess->psNext);			
++		}
++	}
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++			"ResManProcessConnect: Process 0x%x has ref-count %d",
++			psProcess->ui32ProcessID, psProcess->ui32RefCount));
++	psProcess->ui32RefCount++;	/* one ref per connect; dropped in ResManProcessDisconnect */
++
++	
++	VALIDATERESLIST();
++
++	
++	RELEASE_SYNC_OBJ;
++
++	return PVRSRV_OK;
++}
++static PVRSRV_ERROR ResManProcessDisconnect(IMG_UINT32 ui32ProcID)	/* drop one connect ref; on the last ref, free all of the process's resources and its record */
++{	
++	PRESMAN_PROCESS		psProcess;
++	
++	
++	ACQUIRE_SYNC_OBJ;
++
++	
++	VALIDATERESLIST();
++
++	
++	psProcess = FindProcess(ui32ProcID);
++	if(psProcess == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "ResManProcessDisconnect: "
++				 "ERROR finding process struct for 0x%x", ui32ProcID));
++		
++		
++		VALIDATERESLIST();
++
++		
++		PRINT_RESLIST(gpsResList, psProcess, IMG_FALSE);
++
++		
++		RELEASE_SYNC_OBJ;
++
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	
++	if(--psProcess->ui32RefCount == 0)	/* NOTE(review): an unbalanced disconnect would wrap this unsigned count - confirm callers pair connect/disconnect */
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "ResManProcessDisconnect: "
++				"Last close from process 0x%x received", psProcess->ui32ProcessID));
++
++		
++		PRINT_RESLIST(gpsResList, psProcess, IMG_TRUE);
++
++		
++
++		/* free by type, presumably in dependency order (mappings/contexts before the memory they refer to) - TODO confirm intended ordering */
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
++
++		
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);			
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
++
++		
++		
++		
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE);
++		
++		
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++		
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE);
++
++		
++		FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_RESOURCE_PERPROC_DATA, 0, 0, IMG_TRUE);
++
++		/* unlink and free the now-empty process record */
++		*(psProcess->ppsThis) = psProcess->psNext;
++		if (psProcess->psNext)
++		{
++			psProcess->psNext->ppsThis	= psProcess->ppsThis;
++		}
++
++		
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psProcess, IMG_NULL);	/* NOTE(review): size arg 0 here vs sizeof(RESMAN_PROCESS) at alloc - confirm OSFreeMem ignores it */
++
++	}
++
++	
++	VALIDATERESLIST();
++
++	
++	PRINT_RESLIST(gpsResList, psProcess, IMG_FALSE);
++
++	/* last process gone: tear down the global anchor as well */
++	if (gpsResList->psProcessList == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "ResManProcessDisconnect: Releasing Resource List"));
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, gpsResList, IMG_NULL);
++
++		gpsResList = IMG_NULL;
++	}
++
++	
++	RELEASE_SYNC_OBJ;
++
++	return PVRSRV_OK;
++}
++
++
++PRESMAN_ITEM ResManRegisterRes(IMG_UINT32		ui32ResType, 
++							   IMG_PVOID		pvParam, 
++							   IMG_UINT32		ui32Param, 
++							   RESMAN_FREE_FN	pfnFreeResource, 
++							   IMG_UINT32		ui32ProcessID)	/* record a resource against its owning process; returns IMG_NULL on failure */
++{
++	IMG_UINT32		ui32CurProcessID;
++	PRESMAN_ITEM	psNewResItem;
++	PRESMAN_PROCESS psProcess;
++
++	
++	ACQUIRE_SYNC_OBJ;
++
++	
++	VALIDATERESLIST();
++
++	
++	if(ui32ResType & RESMAN_TYPE_USE_PROCESSID)	/* flag bit: caller names the owning PID explicitly */
++	{
++		ui32CurProcessID = ui32ProcessID;
++		ui32ResType &= ~RESMAN_TYPE_USE_PROCESSID;
++	}
++	else
++	{
++		/* default: the calling process owns the resource */
++		ui32CurProcessID = OSGetCurrentProcessIDKM();
++	}
++
++	PVR_ASSERT(ui32ResType != 0);		
++
++	PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++			"Proc 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++			"FreeFunc %08X",
++			ui32CurProcessID, ui32ResType, (IMG_UINT32)pvParam,
++			ui32Param, pfnFreeResource));
++
++	
++	psProcess = FindProcess(ui32CurProcessID);
++
++	if(psProcess == IMG_NULL)
++	{
++		/* owning process never connected - nothing to attach the resource to */
++
++
++
++
++		
++		PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: "
++				"Could not find process info for process 0x%x",
++				ui32CurProcessID));
++
++		
++		VALIDATERESLIST();
++
++		
++		RELEASE_SYNC_OBJ;
++
++		return((PRESMAN_ITEM)IMG_NULL);
++	}							
++
++	
++	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++					sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
++					IMG_NULL) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
++				"ERROR allocating new resource item"));
++
++		
++		RELEASE_SYNC_OBJ;
++
++		return((PRESMAN_ITEM)IMG_NULL);
++	}
++
++	
++#ifdef DEBUG
++	psNewResItem->ui32Signature	= RESMAN_SIGNATURE;
++#endif 
++	psNewResItem->ui32ResType		= ui32ResType;
++	psNewResItem->pvParam			= pvParam;
++	psNewResItem->ui32Param			= ui32Param;
++	psNewResItem->pfnFreeResource	= pfnFreeResource;
++	psNewResItem->ui32ProcessID		= ui32CurProcessID;
++	psNewResItem->ui32Flags		    = 0;
++	
++	/* insert at the head of the owning process's item list, fixing up back-pointers */
++	psNewResItem->ppsThis	= &psProcess->psResItemList;
++	psNewResItem->psNext	= psProcess->psResItemList;
++	psProcess->psResItemList = psNewResItem;
++	if (psNewResItem->psNext)
++	{
++		psNewResItem->psNext->ppsThis = &psNewResItem->psNext;
++	}
++
++	
++	VALIDATERESLIST();
++
++	
++	RELEASE_SYNC_OBJ;
++
++	return(psNewResItem);
++}
++
++PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM	*psResItem,
++								IMG_BOOL	bExecuteCallback)	/* free one registered resource; optionally run its cleanup callback */
++{
++	PVRSRV_ERROR eError;
++
++	if (psResItem == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
++		return PVRSRV_OK;	/* NULL is tolerated as a no-op */
++	}
++
++	PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X", psResItem));
++
++	
++	ACQUIRE_SYNC_OBJ;
++
++	
++	VALIDATERESLIST();
++
++	
++	eError = FreeResourceByPtr(psResItem, bExecuteCallback);
++
++	
++	VALIDATERESLIST();
++
++	
++	RELEASE_SYNC_OBJ;
++
++	return(eError);
++}
++
++
++PVRSRV_ERROR ResManFreeResByCriteria(IMG_UINT32	ui32SearchCriteria, 
++									 IMG_UINT32	ui32ResType, 
++									 IMG_PVOID	pvParam, 
++									 IMG_UINT32	ui32Param, 
++									 IMG_BOOL	bExecuteCallback)	/* free every calling-process resource matching the criteria mask */
++{
++	IMG_UINT32		ui32CurProcessID;
++	PRESMAN_PROCESS psProcess;
++	PVRSRV_ERROR eError;
++
++	
++	ACQUIRE_SYNC_OBJ;
++
++	
++	VALIDATERESLIST();
++
++	/* criteria always apply to the calling process's list */
++	ui32CurProcessID = OSGetCurrentProcessIDKM();
++
++	PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++			"Proc 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++			ui32CurProcessID, ui32SearchCriteria, ui32ResType,
++			(IMG_UINT32)pvParam, ui32Param));
++
++	
++	psProcess = FindProcess(ui32CurProcessID);
++
++	if(psProcess == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++				"ERROR finding process struct for 0x%x",
++				ui32CurProcessID));
++
++		
++		VALIDATERESLIST();
++		
++		
++		RELEASE_SYNC_OBJ;
++
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	
++	eError = FreeResourceByCriteria(psProcess, ui32SearchCriteria,
++									ui32ResType, pvParam, ui32Param,
++									bExecuteCallback);
++
++	
++	VALIDATERESLIST();
++
++	
++	RELEASE_SYNC_OBJ;
++
++	return eError;
++}
++
++
++PVRSRV_ERROR ResManPrePower(PVR_POWER_STATE eNewPowerState, 
++						PVR_POWER_STATE eCurrentPowerState)	/* called before a power transition: save buffers when dropping into D3 */
++{
++	if ((eNewPowerState != eCurrentPowerState) &&
++		(eNewPowerState==PVRSRV_POWER_STATE_D3))
++	{
++		SaveRestoreBuffers(IMG_TRUE);	/* IMG_TRUE = save (bSaveBuffers) */	
++	}
++	
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR ResManPostPower(PVR_POWER_STATE eNewPowerState, 
++						 PVR_POWER_STATE eCurrentPowerState)	/* called after a power transition: restore buffers when coming out of D3 */
++{
++	if ((eNewPowerState != eCurrentPowerState) 
++	&&	(eCurrentPowerState == PVRSRV_POWER_STATE_D3))
++	{
++		SaveRestoreBuffers(IMG_FALSE);	/* IMG_FALSE = restore (bSaveBuffers) */
++	}
++	
++	return PVRSRV_OK;
++}
++
++
++IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(RESMAN_ITEM *psItem)	/* PVRSRV_OK if psItem is registered to the calling process, PVRSRV_ERROR_NOT_OWNER otherwise */
++{
++	RESMAN_PROCESS *psProcess;
++	RESMAN_ITEM *psCurItem;
++	IMG_UINT32 ui32CurProcessID;
++
++	
++	ACQUIRE_SYNC_OBJ;
++
++	
++	
++	ui32CurProcessID = OSGetCurrentProcessIDKM();
++
++	psProcess = FindProcess(ui32CurProcessID);
++	if(psProcess == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "FindResourceByPtr: "
++				 "ERROR finding process struct for 0x%x", ui32CurProcessID));
++		
++		
++		VALIDATERESLIST();
++
++		
++		RELEASE_SYNC_OBJ;
++
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++
++	PVR_ASSERT(psItem != IMG_NULL);
++	PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);	/* NOTE(review): ui32Signature exists only #ifdef DEBUG - assumes PVR_ASSERT is compiled out otherwise; confirm */
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++			"FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++			psItem, psItem->psNext));
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++			"FindResourceByPtr: Resource Proc 0x%x, Type 0x%x, Addr 0x%x, "
++			"Param 0x%x, FnCall %08X, Flags 0x%x",
++			OSGetCurrentProcessIDKM(),
++			psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++			psItem->pfnFreeResource, psItem->ui32Flags));
++
++	/* linear scan of the process's item list for a pointer match */
++	psCurItem	= psProcess->psResItemList;
++
++	while(psCurItem != IMG_NULL)
++	{
++		
++		if(psCurItem != psItem)
++		{
++			
++			psCurItem = psCurItem->psNext;
++		}
++		else
++		{
++			/* found: the item is owned by this process */
++			RELEASE_SYNC_OBJ;
++			return PVRSRV_OK;
++		}
++	}
++
++	
++	RELEASE_SYNC_OBJ;
++
++	return PVRSRV_ERROR_NOT_OWNER;
++}
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback)	/* unlink psItem, optionally run its callback, then free it; caller holds the resman lock */
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	PVR_ASSERT(psItem != IMG_NULL);
++	PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++			"FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++			psItem, psItem->psNext));
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++			"FreeResourceByPtr: Resource Proc 0x%x, Type 0x%x, Addr 0x%x, "
++			"Param 0x%x, FnCall %08X, Flags 0x%x",
++			OSGetCurrentProcessIDKM(),
++			psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++			psItem->pfnFreeResource, psItem->ui32Flags));
++
++	/* unlink from the owner's list first so the list stays consistent while unlocked */
++	if (psItem->psNext)
++	{
++		psItem->psNext->ppsThis = psItem->ppsThis;
++	}
++	*psItem->ppsThis = psItem->psNext;
++
++	/* drop the lock around the callback - presumably so it may re-enter resman; TODO confirm */
++	RELEASE_SYNC_OBJ;
++
++	
++	if (bExecuteCallback)
++	{
++		eError = psItem->pfnFreeResource(psItem->ui32ProcessID,
++										 psItem->pvParam, psItem->ui32Param);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
++		}
++	}
++
++	/* re-take the lock before touching shared state again */
++	ACQUIRE_SYNC_OBJ;
++
++	
++	if(OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psItem, IMG_NULL) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR freeing resource list item memory"));
++		eError = PVRSRV_ERROR_GENERIC;
++	}
++
++	return(eError);
++}
++
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_PROCESS	psProcess,
++										   IMG_UINT32		ui32SearchCriteria, 
++										   IMG_UINT32		ui32ResType, 
++										   IMG_PVOID		pvParam, 
++										   IMG_UINT32		ui32Param, 
++										   IMG_BOOL			bExecuteCallback)	/* free every item in psProcess's list matching all criteria selected in the mask */
++{
++	PRESMAN_ITEM	psCurItem;
++	IMG_BOOL		bMatch;
++	PVRSRV_ERROR	eError = PVRSRV_OK;
++
++	
++	psCurItem	= psProcess->psResItemList;
++
++	while(psCurItem != IMG_NULL)
++	{
++		/* an item matches only if it passes every criterion selected in the mask */
++		bMatch = IMG_TRUE;
++
++		
++		if((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) &&
++				psCurItem->ui32ResType != ui32ResType)
++		{
++			bMatch = IMG_FALSE;
++		}
++			
++		
++		else if((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) &&
++				psCurItem->pvParam != pvParam)
++		{
++			bMatch = IMG_FALSE;
++		}
++
++		
++		else if((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) &&
++				psCurItem->ui32Param != ui32Param)
++		{
++			bMatch = IMG_FALSE;
++		}		
++		
++		if(!bMatch)
++		{
++			
++			psCurItem = psCurItem->psNext;
++		}
++		else
++		{
++			
++			eError = FreeResourceByPtr(psCurItem, bExecuteCallback);
++
++			if(eError != PVRSRV_OK)
++			{
++				return eError;
++			}
++
++			/* the callback may have altered the list while the lock was dropped, */
++			/* so restart the scan from the head (quadratic worst case, but safe) */
++
++			psCurItem = psProcess->psResItemList;
++		}
++	}
++
++	return eError;
++}
++
++
++
++
++static PRESMAN_PROCESS FindProcess(IMG_UINT32 ui32ProcessID)	/* look up the per-process record for a PID; IMG_NULL if absent or resman uninitialised */
++{
++	PRESMAN_PROCESS psCurProcess;
++
++	
++	
++	if (gpsResList == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "FindProcess: resman not initialised yet"));
++		return ((PRESMAN_PROCESS)IMG_NULL);
++	}
++	
++	psCurProcess = gpsResList->psProcessList;
++
++	/* linear search of the global process list */
++	while(psCurProcess != IMG_NULL)
++	{
++		if(psCurProcess->ui32ProcessID == ui32ProcessID)
++		{
++			return(psCurProcess);
++		}
++		psCurProcess = psCurProcess->psNext;
++	}
++
++	return((PRESMAN_PROCESS)IMG_NULL);
++}
++
++
++#ifdef DEBUG
++static IMG_VOID ValidateResList(PRESMAN_LIST psResList)	/* debug-only: walk every process and item, checking signatures and ppsThis back-pointers */
++{
++	PRESMAN_ITEM	psCurItem, *ppsThisItem;
++	PRESMAN_PROCESS	psCurProcess, *ppsThisProcess;
++
++	/* tolerate calls before ResManInit / after teardown */
++	if (psResList == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
++		return;
++	}
++
++	psCurProcess = psResList->psProcessList;
++	ppsThisProcess = &psResList->psProcessList;
++
++	
++	while(psCurProcess != IMG_NULL)
++	{
++		/* each record's ppsThis must point at the pointer that reached it */
++		PVR_ASSERT(psCurProcess->ui32Signature == RESMAN_SIGNATURE);
++		if (psCurProcess->ppsThis != ppsThisProcess)
++		{
++			PVR_DPF((PVR_DBG_WARNING,
++					"psCP=%08X pid=0x%x psCP->ppsThis=%08X psCP->psNext=%08X ppsTP=%08X",
++					psCurProcess, psCurProcess->ui32ProcessID, psCurProcess->ppsThis,
++					psCurProcess->psNext, ppsThisProcess));
++			PVR_ASSERT(psCurProcess->ppsThis == ppsThisProcess);
++		}
++	
++		/* validate this process's resource items the same way */
++		psCurItem = psCurProcess->psResItemList;
++		ppsThisItem = &psCurProcess->psResItemList;
++		while(psCurItem != IMG_NULL)
++		{
++			
++			PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
++			if (psCurItem->ppsThis != ppsThisItem)
++			{
++				PVR_DPF((PVR_DBG_WARNING,
++						"psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
++						psCurItem, psCurItem->ppsThis, psCurItem->psNext, ppsThisItem));
++				PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++			}
++
++			
++			ppsThisItem = &psCurItem->psNext;
++			psCurItem = psCurItem->psNext;
++		}
++
++		
++		ppsThisProcess = &psCurProcess->psNext;
++		psCurProcess = psCurProcess->psNext;
++	}
++}
++#endif 
++
++
++
++IMG_INTERNAL
++PVRSRV_ERROR PVRSRVResManConnect(IMG_UINT32   ui32ProcID,
++                                                               IMG_BOOL       bConnect)
++{
++      if (ui32ProcID == RESMAN_PROCESSID_FIND)
++      {
++              ui32ProcID = OSGetCurrentProcessIDKM();
++      }
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVResManConnect(%s): ProcID:%lu",
++                       bConnect ? "T" : "F", ui32ProcID));
++
++      if(bConnect)
++      {
++              return ResManProcessConnect(ui32ProcID);
++      }
++      else
++      {
++              return ResManProcessDisconnect(ui32ProcID);
++      }
++}
++
++
++static IMG_VOID SaveRestoreBuffers(IMG_BOOL bSaveBuffers)
++{
++      PVR_UNREFERENCED_PARAMETER(bSaveBuffers);
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2020 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++
++
++typedef struct _MMU_PT_INFO_
++{
++      
++      IMG_VOID *hPTPageOSMemHandle;
++      IMG_CPU_VIRTADDR PTPageCpuVAddr;
++      IMG_UINT32 ui32ValidPTECount;
++} MMU_PT_INFO;
++
++struct _MMU_CONTEXT_
++{
++      
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      
++      IMG_CPU_VIRTADDR pvPDCpuVAddr;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++
++      IMG_VOID *hPDOSMemHandle;
++
++      
++      MMU_PT_INFO *apsPTInfoList[1024];
++
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      struct _MMU_CONTEXT_ *psNext;
++};
++
++struct _MMU_HEAP_
++{
++      MMU_CONTEXT *psMMUContext;
++
++      IMG_UINT32 ui32PTBaseIndex;
++      IMG_UINT32 ui32PTPageCount;
++      IMG_UINT32 ui32PTEntryCount;
++
++      
++      RA_ARENA *psVMArena;
++
++      DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
++#define DUMMY_DATA_PAGE_SIGNATURE     0xDEADBEEF
++#endif
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables   (MMU_HEAP *pMMUHeap,
++                                       IMG_DEV_VIRTADDR DevVAddr,
++                                       IMG_SIZE_T uSize,
++                                       IMG_BOOL bForUnmap,
++                                       IMG_HANDLE hUniqueTag);
++#endif 
++
++#define PAGE_TEST                                     0
++#if PAGE_TEST
++static void PageTest(void* pMem, IMG_DEV_PHYADDR sDevPAddr);
++#endif
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++      IMG_UINT32 ui32RegVal;
++      IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++      
++
++
++      ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++      OSWriteHWReg(pvRegsBaseKM,
++                              EUR_CR_BIF_CTRL,
++                              ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++      
++      PDUMPREG(EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++}
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++      IMG_UINT32 ui32RegVal;
++      IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++      
++
++
++
++
++      OSWriteHWReg(pvRegsBaseKM,
++                              EUR_CR_BIF_CTRL,
++                              ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++      
++      PDUMPREG(EUR_CR_BIF_CTRL, 0);
++}
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;      
++}
++
++
++IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;      
++}
++
++
++static IMG_BOOL
++_AllocPageTables (MMU_HEAP *pMMUHeap)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE, "_AllocPageTables()"));
++
++      PVR_ASSERT (pMMUHeap!=IMG_NULL);
++      PVR_ASSERT (HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
++
++      
++
++
++
++
++
++      
++      pMMUHeap->ui32PTEntryCount = pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
++
++      
++      pMMUHeap->ui32PTBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >> SGX_MMU_PAGE_SHIFT;
++
++      
++
++
++      pMMUHeap->ui32PTPageCount = (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >> SGX_MMU_PT_SHIFT;
++
++      return IMG_TRUE;
++}
++
++static IMG_VOID
++_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex)
++{
++      IMG_UINT32 *pui32PDEntry;
++      IMG_UINT32 i;
++      IMG_UINT32 ui32PDIndex;
++      SYS_DATA *psSysData;
++      MMU_PT_INFO **ppsPTInfoList;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePageTables: ERROR call to SysAcquireData failed"));
++              return;
++      }
++
++      
++      ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++      {
++              
++              PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
++      }
++
++      
++      PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PTPageCount);
++      if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
++      {
++              PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++      }
++
++      switch(pMMUHeap->psDevArena->DevMemHeapType)
++      {
++              case DEVICE_MEMORY_HEAP_SHARED :
++              case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++              {
++                      
++                      MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++                      while(psMMUContext)
++                      {
++                              
++                              pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++                              pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++                              
++                              pui32PDEntry[ui32PTIndex] = psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++#else
++                              
++                              pui32PDEntry[ui32PTIndex] = 0;
++#endif
++
++                              
++                              PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++                              
++                              psMMUContext = psMMUContext->psNext;
++                      }
++                      break;
++              }
++              case DEVICE_MEMORY_HEAP_PERCONTEXT :
++              case DEVICE_MEMORY_HEAP_KERNEL :
++              {
++                      
++                      pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++                      pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++                      
++                      pui32PDEntry[ui32PTIndex] = pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++#else
++                      
++                      pui32PDEntry[ui32PTIndex] = 0;
++#endif
++
++                      
++                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++                      break;
++              }
++              default:
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
++                      return;
++              }
++      }
++
++      
++      if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
++      {
++              if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
++              {
++                      IMG_PUINT32 pui32Tmp;
++
++                      pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
++
++                      
++
++
++                      for(i=0; (i<pMMUHeap->ui32PTEntryCount) && (i<1024); i++)
++                      {
++                              pui32Tmp[i] = 0;
++                      }
++
++                      
++
++
++
++                      if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++                      {
++                              OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                        SGX_MMU_PAGE_SIZE,
++                                                        ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
++                                                        ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle);
++                      }
++                      else
++                      {
++                              IMG_SYS_PHYADDR sSysPAddr;
++                              IMG_CPU_PHYADDR sCpuPAddr;
++
++                              
++                              sCpuPAddr = OSMapLinToCPUPhys(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
++                              sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
++
++                              
++                              OSUnMapPhysToLin(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
++                                 SGX_MMU_PAGE_SIZE,
++                                 PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                 ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle);
++
++                              
++
++
++                              RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++                      }
++
++                      
++
++
++                      pMMUHeap->ui32PTEntryCount -= i;
++              }
++              else
++              {
++                      
++                      pMMUHeap->ui32PTEntryCount -= 1024;
++              }
++
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(MMU_PT_INFO),
++                                      ppsPTInfoList[ui32PTIndex],
++                                      IMG_NULL);
++              ppsPTInfoList[ui32PTIndex] = IMG_NULL;
++      }
++      else
++      {
++              
++              pMMUHeap->ui32PTEntryCount -= 1024;
++      }
++
++      PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PTPageCount);
++}
++
++static IMG_VOID
++_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
++{
++      IMG_UINT32 i;
++
++      for(i=0; i<pMMUHeap->ui32PTPageCount; i++)
++      {
++              _DeferredFreePageTable(pMMUHeap, i);
++      }
++      MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++
++static IMG_BOOL
++_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++      IMG_UINT32 ui32PTPageCount;
++      IMG_UINT32 ui32PDIndex;
++      IMG_UINT32 i;
++      IMG_UINT32 *pui32PDEntry;
++      MMU_PT_INFO **ppsPTInfoList;
++      SYS_DATA *psSysData;
++
++      
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
++      PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
++#endif
++
++      
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return IMG_FALSE;
++      }
++
++      
++      ui32PDIndex = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ui32PTPageCount = (DevVAddr.uiAddr + ui32Size + (1<<(SGX_MMU_PAGE_SHIFT+SGX_MMU_PT_SHIFT)) - 1)
++                                              >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++      ui32PTPageCount -= ui32PDIndex;
++
++      
++      pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++      pui32PDEntry += ui32PDIndex;
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++      PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
++      PDUMPCOMMENT("Page directory mods (page count == %08X)", ui32PTPageCount);
++
++      
++      for(i=0; i<ui32PTPageCount; i++)
++      {
++              if(ppsPTInfoList[i] == IMG_NULL)
++              {
++                      OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                               sizeof (MMU_PT_INFO),
++                                               (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL);
++                      if (ppsPTInfoList[i] == IMG_NULL)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
++                              return IMG_FALSE;
++                      }
++                      OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
++              }
++
++              if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
++              && ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
++              {
++                      IMG_CPU_PHYADDR sCpuPAddr;
++                      IMG_DEV_PHYADDR sDevPAddr;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++                      IMG_UINT32 *pui32Tmp;
++                      IMG_UINT32 j;
++#else
++                      
++                      PVR_ASSERT(pui32PDEntry[i] == 0);
++#endif
++              
++                      
++
++
++                      if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++                      {
++                              if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                                 SGX_MMU_PAGE_SIZE,
++                                                                 (IMG_VOID **)&ppsPTInfoList[i]->PTPageCpuVAddr,
++                                                                 &ppsPTInfoList[i]->hPTPageOSMemHandle) != PVRSRV_OK)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocPages failed"));        
++                                      return IMG_FALSE;
++                              }
++
++                              
++                              if(ppsPTInfoList[i]->PTPageCpuVAddr)
++                              {
++                                      sCpuPAddr = OSMapLinToCPUPhys(ppsPTInfoList[i]->PTPageCpuVAddr);
++                              }
++                              else
++                              {
++                                      
++                                      sCpuPAddr = OSMemHandleToCpuPAddr(ppsPTInfoList[i]->hPTPageOSMemHandle, 0);
++                              }
++                              sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++                      }
++                      else
++                      {
++                              IMG_SYS_PHYADDR sSysPAddr;
++
++                              
++
++
++                              if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
++                                                      SGX_MMU_PAGE_SIZE,
++                                                      IMG_NULL,
++                                                      IMG_NULL,
++                                                      0,
++                                                      SGX_MMU_PAGE_SIZE, 
++                                                      0, 
++                                                      &(sSysPAddr.uiAddr))!= IMG_TRUE)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to RA_Alloc failed"));
++                                      return IMG_FALSE;
++                              }
++
++                              
++                              sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++                              ppsPTInfoList[i]->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++                                                                                                                      SGX_MMU_PAGE_SIZE,
++                                                                                                                      PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                                                                                      &ppsPTInfoList[i]->hPTPageOSMemHandle);
++                              if(!ppsPTInfoList[i]->PTPageCpuVAddr)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR failed to map page tables"));
++                                      return IMG_FALSE;
++                              }
++
++                              
++                              sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++                              #if PAGE_TEST
++                              PageTest(ppsPTInfoList[i]->PTPageCpuVAddr, sDevPAddr);
++                              #endif
++                      }
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++                      pui32Tmp = (IMG_UINT32*)ppsPTInfoList[i]->PTPageCpuVAddr;
++                      
++                      for(j=0; j<SGX_MMU_PT_SIZE; j++)
++                      {
++                              pui32Tmp[j] = pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++                      }
++#else
++                      
++                      OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0, SGX_MMU_PAGE_SIZE);
++#endif
++                      
++                      PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[i]->PTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++                      
++                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[i]->PTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++                      switch(pMMUHeap->psDevArena->DevMemHeapType)
++                      {
++                              case DEVICE_MEMORY_HEAP_SHARED :
++                              case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++                              {
++                                      
++                                      MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++                                      while(psMMUContext)
++                                      {
++                                              
++                                              pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++                                              pui32PDEntry += ui32PDIndex;
++
++                                              
++                                              pui32PDEntry[i] = sDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++
++                                              
++                                              PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++                                              
++                                              psMMUContext = psMMUContext->psNext;
++                                      }
++                                      break;
++                              }
++                              case DEVICE_MEMORY_HEAP_PERCONTEXT :
++                              case DEVICE_MEMORY_HEAP_KERNEL :
++                              {
++                                      
++                                      pui32PDEntry[i] = sDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++
++                                      
++                                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++                                      break;
++                              }
++                              default:
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
++                                      return IMG_FALSE;
++                              }
++                      }
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++                      
++
++
++
++                      MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++#endif
++              }
++              else
++              {
++                      
++                      PVR_ASSERT(pui32PDEntry[i] != 0);
++              }
++      }
++
++      return IMG_TRUE;
++}
++
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++      IMG_UINT32 *pui32Tmp;
++      IMG_UINT32 i;
++      IMG_CPU_VIRTADDR pvPDCpuVAddr;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      MMU_CONTEXT *psMMUContext;
++      IMG_HANDLE hPDOSMemHandle;
++      SYS_DATA *psSysData;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
++
++      
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to SysAcquireData failed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                               sizeof (MMU_CONTEXT),
++                               (IMG_VOID **)&psMMUContext, IMG_NULL);
++      if (psMMUContext == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
++
++      
++      psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      psMMUContext->psDevInfo = psDevInfo;
++
++      
++      psMMUContext->psDeviceNode = psDeviceNode;
++
++      
++      if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
++      {
++              if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                      SGX_MMU_PAGE_SIZE,
++                                                      &pvPDCpuVAddr,
++                                                      &hPDOSMemHandle) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              if(pvPDCpuVAddr)
++              {
++                      sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++              }
++              else
++              {
++                      
++                      sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++              }
++              sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++              #if PAGE_TEST
++              PageTest(pvPDCpuVAddr, sPDDevPAddr);
++              #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              if(!psDevInfo->pvMMUContextList)
++              {
++                      
++                      if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                              SGX_MMU_PAGE_SIZE, 
++                                                              &psDevInfo->pvDummyPTPageCpuVAddr, 
++                                                              &psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++
++                      if(psDevInfo->pvDummyPTPageCpuVAddr)
++                      {
++                              sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++                      }
++                      else
++                      {
++                              
++                              sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
++                      }
++                      psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++                      
++                      if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, 
++                                                              SGX_MMU_PAGE_SIZE, 
++                                                              &psDevInfo->pvDummyDataPageCpuVAddr, 
++                                                              &psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++
++                      if(psDevInfo->pvDummyDataPageCpuVAddr)
++                      {
++                              sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++                      }
++                      else
++                      {
++                              sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
++                      }
++                      psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++              }
++#endif 
++      }
++      else
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++
++              
++              if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++                                      SGX_MMU_PAGE_SIZE,
++                                      IMG_NULL,
++                                      IMG_NULL,
++                                      0,
++                                      SGX_MMU_PAGE_SIZE,
++                                      0,
++                                      &(sSysPAddr.uiAddr))!= IMG_TRUE)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              
++              sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++              sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++              pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr, 
++                                                                              SGX_MMU_PAGE_SIZE, 
++                                                                              PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                                              &hPDOSMemHandle);
++              if(!pvPDCpuVAddr)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              #if PAGE_TEST
++              PageTest(pvPDCpuVAddr, sPDDevPAddr);
++              #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              if(!psDevInfo->pvMMUContextList)
++              {
++                      
++                      if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++                                              SGX_MMU_PAGE_SIZE,
++                                              IMG_NULL,
++                                              IMG_NULL,
++                                              0,
++                                              SGX_MMU_PAGE_SIZE,
++                                              0,
++                                              &(sSysPAddr.uiAddr))!= IMG_TRUE)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++
++                      
++                      sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++                      psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++                      psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++                                                                                                                              SGX_MMU_PAGE_SIZE,
++                                                                                                                              PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                                                                                              &psDevInfo->hDummyPTPageOSMemHandle);
++                      if(!psDevInfo->pvDummyPTPageCpuVAddr)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++
++                      
++                      if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++                                              SGX_MMU_PAGE_SIZE,
++                                              IMG_NULL,
++                                              IMG_NULL,
++                                              0,
++                                              SGX_MMU_PAGE_SIZE,
++                                              0,
++                                              &(sSysPAddr.uiAddr))!= IMG_TRUE)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++
++                      
++                      sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++                      psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++                      psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, 
++                                                                                                                              SGX_MMU_PAGE_SIZE, 
++                                                                                                                              PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                                                                                              &psDevInfo->hDummyDataPageOSMemHandle);
++                      if(!psDevInfo->pvDummyDataPageCpuVAddr)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++              }
++#endif 
++      }
++
++      
++      PDUMPCOMMENT("Alloc page directory");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(psMMUContext);
++#endif
++
++      PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++      pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      for(i=0; i<SGX_MMU_PD_SIZE; i++)
++      {
++              pui32Tmp[i] = psDevInfo->sDummyPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++      }
++
++      if(!psDevInfo->pvMMUContextList)
++      {
++              
++
++
++              pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
++              for(i=0; i<SGX_MMU_PT_SIZE; i++)
++              {
++                      pui32Tmp[i] = psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++              }
++              
++              PDUMPCOMMENT("Dummy Page table contents");
++              PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++              
++
++              pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
++              for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
++              {
++                      pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
++              }
++              
++              PDUMPCOMMENT("Dummy Data Page contents");
++              PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++      }
++#else 
++      
++      for(i=0; i<SGX_MMU_PD_SIZE; i++)
++      {
++              
++              pui32Tmp[i] = 0;
++      }
++#endif 
++
++      
++      PDUMPCOMMENT("Page directory contents");
++      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++      
++      psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++      psMMUContext->sPDDevPAddr = sPDDevPAddr;
++      psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++      
++      *ppsMMUContext = psMMUContext;
++
++      
++      *psPDDevPAddr = sPDDevPAddr;
++
++      
++      psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++      psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(psMMUContext);
++#endif
++
++      return PVRSRV_OK;
++}
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext)
++{
++      IMG_UINT32 *pui32Tmp, i;
++      SYS_DATA *psSysData;
++      MMU_CONTEXT **ppsMMUContext;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
++      MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++#endif
++      
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Finalise: ERROR call to SysAcquireData failed"));
++              return;
++      }
++
++      
++      PDUMPCOMMENT("Free page directory");
++      PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++      PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#endif
++
++      pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
++
++      
++
++
++      for(i=0; i<SGX_MMU_PD_SIZE; i++)
++      {
++              
++              pui32Tmp[i] = 0;
++      }
++
++      
++
++
++
++      if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
++      {
++              OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                              SGX_MMU_PAGE_SIZE,
++                                              psMMUContext->pvPDCpuVAddr,
++                                              psMMUContext->hPDOSMemHandle);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              if(!psMMUContextList->psNext)
++              {
++                      OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                      SGX_MMU_PAGE_SIZE,
++                                                      psDevInfo->pvDummyPTPageCpuVAddr, 
++                                                      psDevInfo->hDummyPTPageOSMemHandle);
++                      OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                      SGX_MMU_PAGE_SIZE,
++                                                      psDevInfo->pvDummyDataPageCpuVAddr,
++                                                      psDevInfo->hDummyDataPageOSMemHandle);
++              }
++#endif
++      }
++      else
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++              IMG_CPU_PHYADDR sCpuPAddr;
++
++              
++              sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++              sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++              
++              OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr, 
++                                                      SGX_MMU_PAGE_SIZE,
++                            PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                      psMMUContext->hPDOSMemHandle);
++              
++              RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              if(!psMMUContextList->psNext)
++              {
++                      
++                      sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++                      sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++      
++                      
++                      OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr, 
++                                                              SGX_MMU_PAGE_SIZE,
++                                PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                              psDevInfo->hDummyPTPageOSMemHandle);
++                      
++                      RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++                      
++                      sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++                      sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++      
++                      
++                      OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr, 
++                                                              SGX_MMU_PAGE_SIZE,
++                                PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                              psDevInfo->hDummyDataPageOSMemHandle);
++                      
++                      RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);                  
++              }
++#endif
++      }
++      
++      PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
++
++      
++      ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
++      while(*ppsMMUContext)
++      {
++              if(*ppsMMUContext == psMMUContext)
++              {
++                      
++                      *ppsMMUContext = psMMUContext->psNext;
++                      break;
++              }
++              
++              
++              ppsMMUContext = &((*ppsMMUContext)->psNext);
++      }
++
++      
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
++}
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
++{
++      IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
++      IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
++      IMG_UINT32 ui32PDEntry;
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++#endif
++
++      
++      pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++      pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++
++
++      PDUMPCOMMENT("Page directory shared heap range copy");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(psMMUContext);
++#endif
++
++      for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount; ui32PDEntry++)
++      {
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++#endif
++
++              
++              pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
++              if (pui32PDCpuVAddr[ui32PDEntry])
++              {
++                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++                      bInvalidateDirectoryCache = IMG_TRUE;
++#endif
++              }
++      }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(psMMUContext);
++#endif
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      if (bInvalidateDirectoryCache)
++      {
++              
++
++
++
++              MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++      }
++#endif
++}
++
++
++static IMG_VOID
++MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
++                                                IMG_DEV_VIRTADDR sDevVAddr,
++                                                IMG_UINT32 ui32PageCount,
++                                                IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32                      uPageSize = HOST_PAGESIZE();
++      IMG_DEV_VIRTADDR        sTmpDevVAddr;
++      IMG_UINT32                      i;
++      IMG_UINT32                      ui32PDIndex;
++      IMG_UINT32                      ui32PTIndex;
++      IMG_UINT32                      *pui32Tmp;
++      IMG_BOOL                        bInvalidateDirectoryCache = IMG_FALSE;
++
++#if !defined (PDUMP)
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++      
++      sTmpDevVAddr = sDevVAddr;
++
++      for(i=0; i<ui32PageCount; i++)
++      {
++              MMU_PT_INFO **ppsPTInfoList;
++
++              
++              ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++              
++              ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++              {
++                      
++                      ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++      
++                      
++                      if (!ppsPTInfoList[0])
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++      
++                              
++                              sTmpDevVAddr.uiAddr += uPageSize;
++      
++                              
++                              continue;
++                      }
++      
++                      
++                      pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++                      
++                      if (!pui32Tmp)
++                      {
++                              continue;
++                      }
++      
++                      
++                      if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++                      {
++                              ppsPTInfoList[0]->ui32ValidPTECount--;
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++                      }
++      
++                      
++                      PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++                      
++                      pui32Tmp[ui32PTIndex] = psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++#else
++                      
++                      pui32Tmp[ui32PTIndex] = 0;
++#endif
++              }
++
++              
++
++              if (ppsPTInfoList[0] && ppsPTInfoList[0]->ui32ValidPTECount == 0)
++              {
++                      _DeferredFreePageTable(psMMUHeap, ui32PDIndex - (psMMUHeap->ui32PTBaseIndex >> SGX_MMU_PT_SHIFT));
++                      bInvalidateDirectoryCache = IMG_TRUE;
++              }
++
++              
++              sTmpDevVAddr.uiAddr += uPageSize;
++      }
++
++      if(bInvalidateDirectoryCache)
++      {
++              MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
++      }
++      else
++      {
++              MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif 
++}
++
++
++IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
++                            IMG_UINT32 ui32Start,
++                            IMG_UINT32 ui32End,
++                            IMG_HANDLE hUniqueTag)
++{
++      MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
++      IMG_DEV_VIRTADDR Start;
++
++      Start.uiAddr = ui32Start;
++
++      MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE, hUniqueTag);
++}
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++                      DEV_ARENA_DESCRIPTOR *psDevArena,
++                      RA_ARENA **ppsVMArena)
++{
++      MMU_HEAP *pMMUHeap;
++      IMG_BOOL bRes;
++
++      PVR_ASSERT (psDevArena != IMG_NULL);
++
++      OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                               sizeof (MMU_HEAP),
++                               (IMG_VOID **)&pMMUHeap, IMG_NULL);
++      if (pMMUHeap == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
++              return IMG_NULL;
++      }
++
++      pMMUHeap->psMMUContext = psMMUContext;
++      pMMUHeap->psDevArena = psDevArena;
++
++      bRes = _AllocPageTables (pMMUHeap);
++      if (!bRes)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to _AllocPageTables failed"));
++              OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pMMUHeap, IMG_NULL);
++              return IMG_NULL;
++      }
++
++      
++      pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++                                                                      psDevArena->BaseDevVAddr.uiAddr,
++                                                                      psDevArena->ui32Size,
++                                                                      IMG_NULL,
++                                                                      SGX_MMU_PAGE_SIZE,
++                                                                      IMG_NULL,
++                                                                      IMG_NULL,
++                                                                      MMU_FreePageTables,
++                                                                      pMMUHeap);
++
++      if (pMMUHeap->psVMArena == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
++              _DeferredFreePageTables (pMMUHeap);
++              OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pMMUHeap, IMG_NULL);
++              return IMG_NULL;
++      }
++
++#if 0 
++      
++      if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
++      {
++              IMG_UINT32 ui32RegVal;
++              IMG_UINT32 ui32XTileStride;
++
++              
++
++
++
++
++              ui32XTileStride = 2;
++
++              ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
++                                              & ((psDevArena->BaseDevVAddr.uiAddr>>20)
++                                              << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
++                                      |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
++                                              & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
++                                              << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
++                                      |(EUR_CR_BIF_TILE0_CFG_MASK
++                                              & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
++              PDUMPREG(EUR_CR_BIF_TILE0, ui32RegVal);
++      }
++#endif
++
++      
++
++      *ppsVMArena = pMMUHeap->psVMArena;
++
++      return pMMUHeap;
++}
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMUHeap)
++{
++      if (pMMUHeap != IMG_NULL)
++      {
++              PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
++
++              if(pMMUHeap->psVMArena)
++              {
++                      RA_Delete (pMMUHeap->psVMArena);
++              }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++              EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++              _DeferredFreePageTables (pMMUHeap);
++#ifdef SUPPORT_SGX_MMU_BYPASS
++              DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++              OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pMMUHeap, IMG_NULL);
++      }
++}
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMUHeap,
++                 IMG_SIZE_T uSize,
++                 IMG_SIZE_T *pActualSize,
++                 IMG_UINT32 uFlags,
++                 IMG_UINT32 uDevVAddrAlignment,
++                 IMG_DEV_VIRTADDR *psDevVAddr)
++{
++      IMG_BOOL bStatus;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++              uSize, uFlags, uDevVAddrAlignment));
++
++      
++
++      if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++      {
++              bStatus = RA_Alloc (pMMUHeap->psVMArena,
++                                                      uSize,
++                                                      pActualSize,
++                                                      IMG_NULL,
++                                                      0,
++                                                      uDevVAddrAlignment,
++                                                      0,
++                                                      &(psDevVAddr->uiAddr));
++              if(!bStatus)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
++                      return bStatus;
++              }
++      }
++
++      #ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(pMMUHeap->psMMUContext);
++      #endif
++
++      
++      bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++      
++      #ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(pMMUHeap->psMMUContext);
++      #endif
++
++      if (!bStatus)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
++              if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++              {
++                      
++                      RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
++              }
++      }
++
++      return bStatus;
++}
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++      PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap, DevVAddr.uiAddr));
++
++      if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) && 
++              (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
++      {
++              RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
++              return;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't find DevVAddr %08X in a DevArena",DevVAddr.uiAddr));
++}
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMUHeap)
++{
++      PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++      
++}
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMUHeap)
++{
++      PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++              
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables   (MMU_HEAP *pMMUHeap,
++                                       IMG_DEV_VIRTADDR DevVAddr,
++                                       IMG_SIZE_T uSize,
++                                       IMG_BOOL bForUnmap,
++                                       IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32      ui32NumPTEntries;
++      IMG_UINT32      ui32PTIndex;
++      IMG_UINT32      *pui32PTEntry;
++
++      MMU_PT_INFO **ppsPTInfoList;
++      IMG_UINT32 ui32PDIndex;
++      IMG_UINT32 ui32PTDumpCount;
++
++      
++      ui32NumPTEntries = (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      ui32PDIndex = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++      
++      ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++      
++      while(ui32NumPTEntries > 0)
++      {
++              MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
++
++              if(ui32NumPTEntries <= 1024 - ui32PTIndex)
++              {
++                      ui32PTDumpCount = ui32NumPTEntries;
++              }
++              else
++              {
++                      ui32PTDumpCount = 1024 - ui32PTIndex;
++              }
++
++              if (psPTInfo)
++              {
++                      pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr; 
++                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
++              }
++
++              
++              ui32NumPTEntries -= ui32PTDumpCount;
++
++              
++              ui32PTIndex = 0;
++      }
++
++      PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
++}
++#endif 
++
++
++static IMG_VOID
++MMU_MapPage (MMU_HEAP *pMMUHeap,
++                       IMG_DEV_VIRTADDR DevVAddr,
++                       IMG_DEV_PHYADDR DevPAddr,
++                       IMG_UINT32 ui32MemFlags)
++{
++      IMG_UINT32 ui32Index;
++      IMG_UINT32 *pui32Tmp;
++      IMG_UINT32 ui32MMUFlags = 0;
++      MMU_PT_INFO **ppsPTInfoList;
++
++      
++
++      if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
++      {
++              
++              ui32MMUFlags = 0;
++      }
++      else if(PVRSRV_MEM_READ & ui32MemFlags)
++      {
++              
++              ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++      }
++      else if(PVRSRV_MEM_WRITE & ui32MemFlags)
++      {
++              
++              ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++      }
++      
++      
++      if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
++      {
++              ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++      }
++
++      
++      if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
++      {
++              ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++      }
++      
++      
++
++
++      
++      ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++      
++      ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u",DevVAddr.uiAddr, DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT), ui32Index ));
++      }
++
++      PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++#endif
++
++      
++      ppsPTInfoList[0]->ui32ValidPTECount++;
++      
++      
++      pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
++                                              | SGX_MMU_PTE_VALID
++                                              | ui32MMUFlags;
++
++}
++
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMUHeap,
++                              IMG_DEV_VIRTADDR DevVAddr,
++                              IMG_SYS_PHYADDR *psSysAddr,
++                              IMG_SIZE_T uSize,
++                              IMG_UINT32 ui32MemFlags,
++                              IMG_HANDLE hUniqueTag)
++{
++#if defined(PDUMP)
++      IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif 
++      IMG_UINT32 uCount, i;
++      IMG_DEV_PHYADDR DevPAddr;
++
++      PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++#if defined(PDUMP)
++      MapBaseDevVAddr = DevVAddr;
++#else
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif 
++
++      for (i=0, uCount=0; uCount<uSize; i++, uCount+=SGX_MMU_PAGE_SIZE)
++      {
++              IMG_SYS_PHYADDR sSysAddr;
++
++              sSysAddr = psSysAddr[i];
++
++
++              DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++              MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++              DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
++
++              PVR_DPF ((PVR_DBG_MESSAGE, 
++                               "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++                                DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif 
++}
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMUHeap,
++                        IMG_DEV_VIRTADDR DevVAddr,
++                        IMG_SYS_PHYADDR SysPAddr,
++                        IMG_SIZE_T uSize,
++                        IMG_UINT32 ui32MemFlags,
++                        IMG_HANDLE hUniqueTag)
++{
++      IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++      IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif 
++      IMG_UINT32 uCount;
++      IMG_UINT32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
++      IMG_UINT32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
++
++      PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++                pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize));
++
++#if defined(PDUMP)
++      MapBaseDevVAddr = DevVAddr;
++#else
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif 
++
++      DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++#if defined(FIX_HW_BRN_23281)
++      if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++      {
++              ui32VAdvance *= 2;
++      }
++#endif
++
++      
++
++
++      if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++      {
++              ui32PAdvance = 0;
++      }
++
++      for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
++      {
++              MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++              DevVAddr.uiAddr += ui32VAdvance;
++              DevPAddr.uiAddr += ui32PAdvance;
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif 
++}
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP          *pMMUHeap,
++                         IMG_DEV_VIRTADDR   MapBaseDevVAddr,
++                         IMG_SIZE_T         uByteSize,
++                         IMG_CPU_VIRTADDR   CpuVAddr,
++                         IMG_HANDLE         hOSMemHandle,
++                         IMG_DEV_VIRTADDR  *pDevVAddr,
++                         IMG_UINT32         ui32MemFlags,
++                         IMG_HANDLE         hUniqueTag)
++{
++      IMG_UINT32                      i;
++      IMG_UINT32                      uOffset = 0;
++      IMG_DEV_VIRTADDR        MapDevVAddr;
++      IMG_UINT32                      ui32VAdvance = SGX_MMU_PAGE_SIZE;
++      IMG_UINT32                      ui32PAdvance = SGX_MMU_PAGE_SIZE;
++
++#if !defined (PDUMP)
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "MMU_MapShadow: %08X, 0x%x, %08X",
++                      MapBaseDevVAddr.uiAddr,
++                      uByteSize,
++                      CpuVAddr));
++
++      PVR_ASSERT(((IMG_UINT32)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32)uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++#if defined(FIX_HW_BRN_23281)
++      if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++      {
++              ui32VAdvance *= 2;
++      }
++#endif
++
++      
++
++
++      if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++      {
++              ui32PAdvance = 0;
++      }
++
++      
++      MapDevVAddr = MapBaseDevVAddr;
++      for (i=0; i<uByteSize; i+=ui32VAdvance)
++      {
++              IMG_CPU_PHYADDR CpuPAddr;
++              IMG_DEV_PHYADDR DevPAddr;
++
++              if(CpuVAddr)
++              {
++                      CpuPAddr = OSMapLinToCPUPhys ((IMG_VOID *)((IMG_UINT32)CpuVAddr + uOffset));
++              }
++              else
++              {
++                      CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++              }
++              DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++              PVR_DPF ((PVR_DBG_MESSAGE,
++                              "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++                              uOffset, 
++                              (IMG_UINTPTR_T)CpuVAddr + uOffset, 
++                              CpuPAddr.uiAddr, 
++                              MapDevVAddr.uiAddr, 
++                              DevPAddr.uiAddr));
++
++              MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++              
++              MapDevVAddr.uiAddr += ui32VAdvance;
++              uOffset += ui32PAdvance;
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
++#endif 
++}
++
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *psMMUHeap,
++                              IMG_DEV_VIRTADDR sDevVAddr,
++                              IMG_UINT32 ui32PageCount,
++                              IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32                      uPageSize = HOST_PAGESIZE();
++      IMG_DEV_VIRTADDR        sTmpDevVAddr;
++      IMG_UINT32                      i;
++      IMG_UINT32                      ui32PDIndex;
++      IMG_UINT32                      ui32PTIndex;
++      IMG_UINT32                      *pui32Tmp;
++
++#if !defined (PDUMP)
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++      
++      sTmpDevVAddr = sDevVAddr;
++
++      for(i=0; i<ui32PageCount; i++)
++      {
++              MMU_PT_INFO **ppsPTInfoList;
++
++              
++              ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++              
++              ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++              
++              ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++              
++              if (!ppsPTInfoList[0])
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++
++                      
++                      sTmpDevVAddr.uiAddr += uPageSize;
++
++                      
++                      continue;
++              }
++
++              
++              pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++              
++              if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++              {
++                      ppsPTInfoList[0]->ui32ValidPTECount--;
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++              }
++
++              
++              PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              pui32Tmp[ui32PTIndex] = psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++#else
++              
++              pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++              
++              sTmpDevVAddr.uiAddr += uPageSize;
++      }
++
++      MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif 
++}
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++      IMG_UINT32 *pui32PageTable;
++      IMG_UINT32 ui32Index;
++      IMG_DEV_PHYADDR sDevPAddr;
++      MMU_PT_INFO **ppsPTInfoList;
++
++      
++      ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++      if (!ppsPTInfoList[0])
++      {
++              PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
++              sDevPAddr.uiAddr = 0;
++              return sDevPAddr;
++      }
++
++      
++      ui32Index = (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++      
++      sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++      
++      sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
++
++      return sDevPAddr;
++}
++
++
++IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
++{
++      return (pMMUContext->sPDDevPAddr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
++                                                                 IMG_DEV_VIRTADDR sDevVAddr,
++                                                                 IMG_DEV_PHYADDR *pDevPAddr,
++                                                                 IMG_CPU_PHYADDR *pCpuPAddr)
++{
++      MMU_HEAP *pMMUHeap;
++      IMG_DEV_PHYADDR DevPAddr;
++
++      
++
++      pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
++
++      DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++      pCpuPAddr->uiAddr = DevPAddr.uiAddr; 
++      pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++      return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE             hDevCookie,
++                                                              IMG_HANDLE              hDevMemContext,
++                                                              IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++      if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie); 
++
++      
++      *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
++      
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++      SYS_DATA *psSysData;
++      RA_ARENA *psLocalDevMemArena;
++      IMG_HANDLE hOSMemHandle = IMG_NULL;
++      IMG_BYTE *pui8MemBlock = IMG_NULL;
++      IMG_SYS_PHYADDR sMemBlockSysPAddr;
++      IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed"));
++              return eError;
++      }
++
++      psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++      
++      if(psLocalDevMemArena == IMG_NULL)
++      {
++              
++              eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                    3 * SGX_MMU_PAGE_SIZE,
++                                                    (IMG_VOID **)&pui8MemBlock,
++                                                    &hOSMemHandle);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));     
++                      return eError;
++              }
++
++              
++              if(pui8MemBlock)
++              {
++                      sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++              }
++              else
++              {
++                      
++                      sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++              }
++      }
++      else
++      {
++              
++
++              if(RA_Alloc(psLocalDevMemArena,
++                                      3 * SGX_MMU_PAGE_SIZE,
++                                      IMG_NULL,
++                                      IMG_NULL,
++                                      0,
++                                      SGX_MMU_PAGE_SIZE,
++                                      0,
++                                      &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++              pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
++                                                                        SGX_MMU_PAGE_SIZE * 3,
++                                                                        PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                                        &hOSMemHandle);
++              if(!pui8MemBlock)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
++                      return PVRSRV_ERROR_BAD_MAPPING;
++              }
++      }
++
++      psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++      psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++      psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++      psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++      psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
++      psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
++      
++      
++      OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++      OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++      
++      OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
++
++      return PVRSRV_OK;
++}
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++      SYS_DATA *psSysData;
++      RA_ARENA *psLocalDevMemArena;
++      IMG_SYS_PHYADDR sPDSysPAddr;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDFree: ERROR call to SysAcquireData failed"));
++              return;
++      }
++
++      psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++      
++      if(psLocalDevMemArena == IMG_NULL)
++      {
++              OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                      3 * SGX_MMU_PAGE_SIZE,
++                                      psDevInfo->pui32BIFResetPD,
++                                      psDevInfo->hBIFResetPDOSMemHandle);
++      }
++      else
++      {
++              OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
++                         3 * SGX_MMU_PAGE_SIZE,
++                         PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                         psDevInfo->hBIFResetPDOSMemHandle);
++                                               
++              sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
++              RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
++      }
++}
++
++
++#if PAGE_TEST
++static void PageTest(void* pMem, IMG_DEV_PHYADDR sDevPAddr)
++{
++      volatile IMG_UINT32 ui32WriteData;
++      volatile IMG_UINT32 ui32ReadData;
++      volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
++      int n;
++      IMG_BOOL bOK=IMG_TRUE;
++
++      ui32WriteData = 0xffffffff;
++
++      for (n=0; n<1024; n++)
++      {
++              pMem32[n] = ui32WriteData;
++              ui32ReadData = pMem32[n];
++
++              if (ui32WriteData != ui32ReadData)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++                      PVR_DBG_BREAK;
++                      bOK = IMG_FALSE;
++              }
++      }
++
++      ui32WriteData = 0;
++
++      for (n=0; n<1024; n++)
++      {
++              pMem32[n] = ui32WriteData;
++              ui32ReadData = pMem32[n];
++
++              if (ui32WriteData != ui32ReadData)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++                      PVR_DBG_BREAK;
++                      bOK = IMG_FALSE;
++              }
++      }
++
++      if (bOK)
++      {
++              PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
++      }
++      else
++      {
++              PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
++      }
++}
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,123 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++                      DEV_ARENA_DESCRIPTOR *psDevArena,
++                      RA_ARENA **ppsVMArena);
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMU);
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMU,
++           IMG_SIZE_T uSize,
++           IMG_SIZE_T *pActualSize,
++           IMG_UINT32 uFlags,
++                 IMG_UINT32 uDevVAddrAlignment,
++           IMG_DEV_VIRTADDR *pDevVAddr);
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMU,
++          IMG_DEV_VIRTADDR DevVAddr,
++                IMG_UINT32 ui32Size);
++
++IMG_VOID 
++MMU_Enable (MMU_HEAP *pMMU);
++
++IMG_VOID 
++MMU_Disable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMU,
++                        IMG_DEV_VIRTADDR devVAddr,
++                        IMG_SYS_PHYADDR SysPAddr,
++                        IMG_SIZE_T uSize,
++                        IMG_UINT32 ui32MemFlags,
++                        IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP          * pMMU,
++               IMG_DEV_VIRTADDR    MapBaseDevVAddr,
++               IMG_SIZE_T          uSize, 
++               IMG_CPU_VIRTADDR    CpuVAddr,
++               IMG_HANDLE          hOSMemHandle,
++               IMG_DEV_VIRTADDR  * pDevVAddr,
++               IMG_UINT32          ui32MemFlags,
++               IMG_HANDLE          hUniqueTag);
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *pMMU,
++             IMG_DEV_VIRTADDR dev_vaddr,
++             IMG_UINT32 ui32PageCount,
++             IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMU,
++                              IMG_DEV_VIRTADDR DevVAddr,
++                              IMG_SYS_PHYADDR *psSysAddr,
++                              IMG_SIZE_T uSize,
++                              IMG_UINT32 ui32MemFlags,
++                              IMG_HANDLE hUniqueTag);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext);
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,408 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "pdump_km.h"
++
++#ifndef __linux__
++#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
++#endif
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++IMG_EXPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                        IMG_UINT32 ui32TotalPBSize,
++                                        IMG_HANDLE *phSharedPBDesc,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
++{
++      PVRSRV_STUB_PBDESC *psStubPBDesc;
++      PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
++      PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++      psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      for(psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++              psStubPBDesc != IMG_NULL;
++              psStubPBDesc = psStubPBDesc->psNext)
++      {
++              if(psStubPBDesc->ui32TotalPBSize == ui32TotalPBSize)
++              {
++                      IMG_UINT32 i;
++                      PRESMAN_ITEM psResItem;
++
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                                      * psStubPBDesc->ui32SubKernelMemInfosCount,
++                                                (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
++                                                IMG_NULL) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
++
++                              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++                              goto ExitNotFound;
++                      }
++                      psResItem = ResManRegisterRes(RESMAN_TYPE_SHARED_PB_DESC,
++                                      (IMG_VOID *)psStubPBDesc,
++                                      0,
++                                      &SGXCleanupSharedPBDescCallback,
++                                      0);
++
++                      if (psResItem == IMG_NULL)
++                      {
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                                      * psStubPBDesc->ui32SubKernelMemInfosCount,
++                                                ppsSharedPBDescSubKernelMemInfos,
++                                                0);
++
++                              PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++                              eError = PVRSRV_ERROR_GENERIC;
++                              goto ExitNotFound;
++                      }
++
++                      *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
++                      *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
++                      *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
++
++                      *ui32SharedPBDescSubKernelMemInfosCount =
++                              psStubPBDesc->ui32SubKernelMemInfosCount;
++
++                      *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
++
++                      for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++                      {
++                              ppsSharedPBDescSubKernelMemInfos[i] =
++                                      psStubPBDesc->ppsSubKernelMemInfos[i];
++                      }
++
++                      psStubPBDesc->ui32RefCount++;
++                      *phSharedPBDesc = (IMG_HANDLE)psResItem;
++                      return PVRSRV_OK;
++              }
++      }
++
++      
++      eError = PVRSRV_OK;
++ExitNotFound:
++      *phSharedPBDesc = IMG_NULL;
++
++      return eError;
++}
++
++IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO* psSGXDevInfo) 
++{
++      PVRSRV_STUB_PBDESC **ppsStubPBDesc;
++      
++      for(ppsStubPBDesc = (PVRSRV_STUB_PBDESC **)&psSGXDevInfo->psStubPBDescListKM;
++              *ppsStubPBDesc != IMG_NULL;
++              ppsStubPBDesc = &(*ppsStubPBDesc)->psNext)
++      {
++              PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
++              IMG_UINT32* pui32Flags = (IMG_UINT32*)psStubPBDesc->psHWPBDescKernelMemInfo->pvLinAddrKM;
++              *pui32Flags |= 1;
++      }
++}
++
++
++static PVRSRV_ERROR
++SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
++{
++      PVRSRV_STUB_PBDESC **ppsStubPBDesc;
++      IMG_UINT32 i;
++      PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++
++      psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)psStubPBDescIn->hDevCookie)->pvDevice;
++
++      for(ppsStubPBDesc = (PVRSRV_STUB_PBDESC **)&psSGXDevInfo->psStubPBDescListKM;
++              *ppsStubPBDesc != IMG_NULL;
++              ppsStubPBDesc = &(*ppsStubPBDesc)->psNext)
++      {
++              PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
++
++              if(psStubPBDesc == psStubPBDescIn)
++              {
++                      psStubPBDesc->ui32RefCount--;
++                      PVR_ASSERT((IMG_INT32)psStubPBDesc->ui32RefCount >= 0);
++
++                      if(psStubPBDesc->ui32RefCount == 0)
++                      {
++                              PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psSGXDevInfo->psSGXHostCtl;
++#if defined (PDUMP)
++                              IMG_HANDLE hUniqueTag = MAKEUNIQUETAG(psSGXDevInfo->psKernelSGXHostCtlMemInfo);
++#endif
++
++                              
++                              
++                              psSGXHostCtl->sTAHWPBDesc.uiAddr = 0;
++                              psSGXHostCtl->s3DHWPBDesc.uiAddr = 0;
++
++                              
++                              PDUMPCOMMENT("TA/3D CCB Control - Reset HW PBDesc records");
++                              PDUMPMEM(IMG_NULL, psSGXDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, sTAHWPBDesc), sizeof(IMG_DEV_VIRTADDR), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++                              PDUMPMEM(IMG_NULL, psSGXDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, s3DHWPBDesc), sizeof(IMG_DEV_VIRTADDR), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++
++                              *ppsStubPBDesc = psStubPBDesc->psNext;
++
++                              for(i=0 ; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++                              {
++                                      
++                                      PVRSRVFreeDeviceMemKM(psStubPBDesc->hDevCookie,
++                                                                                psStubPBDesc->ppsSubKernelMemInfos[i],
++                                                                                IMG_FALSE);
++                              }
++
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                                * psStubPBDesc->ui32SubKernelMemInfosCount,
++                                                psStubPBDesc->ppsSubKernelMemInfos,
++                                                0);
++
++                              PVRSRVFreeSharedSysMemoryKM(psStubPBDesc->psBlockKernelMemInfo);
++
++                              PVRSRVFreeDeviceMemKM(psStubPBDesc->hDevCookie, psStubPBDesc->psHWPBDescKernelMemInfo, IMG_FALSE);
++
++                              PVRSRVFreeSharedSysMemoryKM(psStubPBDesc->psSharedPBDescKernelMemInfo);
++              
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_STUB_PBDESC),
++                                                psStubPBDesc,
++                                                0);
++
++                      }
++                      return PVRSRV_OK;
++              }
++      }
++
++      return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++IMG_EXPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
++{
++      PVR_ASSERT(hSharedPBDesc != IMG_NULL);
++
++      return ResManFreeResByPtr((PRESMAN_ITEM)hSharedPBDesc, IMG_TRUE);
++}
++
++IMG_EXPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++                                       IMG_UINT32 ui32TotalPBSize,
++                                       IMG_HANDLE *phSharedPBDesc,
++                                       PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
++                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++      PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
++      PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++      IMG_UINT32 i;
++      PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++      PRESMAN_ITEM psResItem;
++
++      psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      for(psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++              psStubPBDesc != IMG_NULL;
++              psStubPBDesc = psStubPBDesc->psNext)
++      {
++              
++              if(psStubPBDesc->ui32TotalPBSize == ui32TotalPBSize)
++              {
++                      
++                      psResItem = ResManRegisterRes(
++                                      RESMAN_TYPE_SHARED_PB_DESC,
++                                      (IMG_VOID *)psStubPBDesc,
++                                      0,
++                                      &SGXCleanupSharedPBDescCallback,
++                                      0);
++                      if (psResItem == IMG_NULL)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,
++                                      "SGXAddSharedPBDescKM: "
++                                      "Failed to register exisitng shared "
++                                      "PBDesc with the resource manager"));
++                              goto NoAddKeepPB;
++                      }
++
++                      
++                      psStubPBDesc->ui32RefCount++;
++
++                      *phSharedPBDesc = (IMG_HANDLE)psResItem;
++                      eRet = PVRSRV_OK;
++                      goto NoAddKeepPB;
++              }
++              if(psStubPBDesc->psSharedPBDescKernelMemInfo
++                 == psSharedPBDescKernelMemInfo)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                                       "Tried to add an already managed "
++                                       "meminfo"));
++                      eRet = PVRSRV_ERROR_INVALID_PARAMS;
++                      goto NoAddKeepPB;
++              }
++      }
++
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_STUB_PBDESC),
++                                (IMG_VOID **)&psStubPBDesc,
++                                0) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++                                      "StubPBDesc"));
++              eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto NoAdd;
++      }
++
++
++      psStubPBDesc->ppsSubKernelMemInfos=IMG_NULL;
++
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                * ui32SharedPBDescSubKernelMemInfosCount,
++                                (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
++                                0) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                               "Failed to alloc "
++                               "StubPBDesc->ppsSubKernelMemInfos"));
++              eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto NoAdd;
++      }
++
++      if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++         != PVRSRV_OK)
++      {
++              goto NoAdd;
++      }
++
++      if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++         != PVRSRV_OK)
++      {
++              goto NoAdd;
++      }
++
++      if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++         != PVRSRV_OK)
++      {
++              goto NoAdd;
++      }
++      
++      psStubPBDesc->ui32RefCount = 1;
++      psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++      psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++      psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++      psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++
++      psStubPBDesc->ui32SubKernelMemInfosCount =
++              ui32SharedPBDescSubKernelMemInfosCount;
++      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++      {
++              psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
++              if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
++                 != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                                       "Failed to dissociate shared PBDesc "
++                                       "from process"));
++                      goto NoAdd;
++              }
++      }
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_SHARED_PB_DESC,
++                                      (IMG_VOID *)psStubPBDesc,
++                                      0,
++                                      &SGXCleanupSharedPBDescCallback,
++                                      0);
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                                       "Failed to register shared PBDesc "
++                                       " with the resource manager"));
++              goto NoAdd;
++      }
++      psStubPBDesc->hDevCookie = hDevCookie;
++
++      
++      psStubPBDesc->psNext = psSGXDevInfo->psStubPBDescListKM;
++      psSGXDevInfo->psStubPBDescListKM = psStubPBDesc;
++
++      *phSharedPBDesc = (IMG_HANDLE)psResItem;
++
++      return PVRSRV_OK;
++
++NoAdd:
++      if(psStubPBDesc)
++      {
++              if(psStubPBDesc->ppsSubKernelMemInfos)
++              {
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                        sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                        * ui32SharedPBDescSubKernelMemInfosCount,
++                                        psStubPBDesc->ppsSubKernelMemInfos,
++                                        0);
++              }
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_STUB_PBDESC),
++                                psStubPBDesc,
++                                0);
++      }
++
++NoAddKeepPB:
++      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++              PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i], IMG_FALSE);
++
++      PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++      PVRSRVFreeDeviceMemKM(hDevCookie, psStubPBDesc->psHWPBDescKernelMemInfo, IMG_FALSE);
++
++      PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++
++      return eRet;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,877 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxinfo.h"
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++#include "sgx2dcore.h"
++
++#define SGX2D_FLUSH_BH                                                        (0xF0000000) 
++#define SGX2D_QUEUED_BLIT_PAD 4
++
++#define SGX2D_COMMAND_QUEUE_SIZE 1024
++
++#define SGX2D_2D_NOT_IDLE(psDevInfo)  ((psDevInfo)->ui322DFifoSize > SGX2DFifoFreeSpace(psDevInfo) || SGX2DIsBusy(psDevInfo))
++
++static IMG_VOID SGX2DHardwareKick(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS, EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK | EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK);
++}
++
++IMG_VOID SGX2DHWRecoveryStart(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->b2DHWRecoveryInProgress = IMG_TRUE;
++      psDevInfo->b2DHWRecoveryEndPending = IMG_FALSE;
++}
++
++IMG_VOID SGX2DHWRecoveryEnd(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->b2DHWRecoveryEndPending = IMG_TRUE;
++      psDevInfo->b2DHWRecoveryInProgress = IMG_FALSE;
++      SGX2DHardwareKick(psDevInfo);
++}
++
++#if !defined(NO_HARDWARE)
++static IMG_VOID SGX2DKick(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0;
++
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      
++      do
++      {
++              if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++              {
++                      break;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++}
++#endif 
++
++IMG_BOOL SGX2DIsBusy(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_UINT32 ui32BlitStatus;
++
++      ui32BlitStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM,
++              EUR_CR_2D_BLIT_STATUS);
++
++      return (ui32BlitStatus & EUR_CR_2D_BLIT_STATUS_BUSY_MASK) != 0;
++}
++
++IMG_UINT32 SGX2DCompletedBlits(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_UINT32 ui32BlitStatus;
++
++      ui32BlitStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM,
++              EUR_CR_2D_BLIT_STATUS);
++
++      return (ui32BlitStatus & EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK) >>
++                                      EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DAcquireSlavePort)
++#endif
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DAcquireSlavePort)
++#endif
++static INLINE
++PVRSRV_ERROR SGX2DAcquireSlavePort(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                                 IMG_BOOL                       bBlock)
++{
++#if defined(SGX2D_DIRECT_BLITS)
++      PVR_UNREFERENCED_PARAMETER(bBlock);
++      return OSLockResource(&psDevInfo->s2DSlaveportResource, ISR_ID);
++#else
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++      PVR_UNREFERENCED_PARAMETER(bBlock);
++
++      return PVRSRV_OK;
++#endif
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DReleaseSlavePort)
++#endif
++static INLINE
++PVRSRV_ERROR SGX2DReleaseSlavePort(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++#if defined(SGX2D_DIRECT_BLITS)
++      return OSUnlockResource(&psDevInfo->s2DSlaveportResource, ISR_ID);
++#else
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      return PVRSRV_OK;
++#endif
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DAcquireFifoSpace)
++#endif
++static INLINE
++PVRSRV_ERROR SGX2DAcquireFifoSpace(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                                 IMG_UINT32                   ui32MinBytesRequired,
++                                                                 IMG_UINT32                   *pui32BytesObtained)
++{
++      PVRSRV_ERROR    eError = PVRSRV_ERROR_FIFO_SPACE;
++      IMG_UINT32              ui32FifoBytes;
++
++#if defined(DEBUG) && defined(SGX2D_DIRECT_BLITS)
++      
++      if (OSIsResourceLocked(&psDevInfo->s2DSlaveportResource, ISR_ID) == IMG_FALSE)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGX2DAcquireFifoSpace: 2D slaveport is not locked"));
++              return PVRSRV_ERROR_PROCESSING_BLOCKED;
++      }
++#endif 
++
++      
++      ui32FifoBytes = SGX2DFifoFreeSpace(psDevInfo);
++
++      
++      if (ui32FifoBytes >= ui32MinBytesRequired)
++      {
++              if (pui32BytesObtained)
++                      *pui32BytesObtained = ui32FifoBytes;
++              
++              eError = PVRSRV_OK;
++      }
++
++      return eError;
++}
++
++#if defined(DEBUG) && defined (SGX2D_TRACE_BLIT)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DTraceBlt)
++#endif
++FORCE_INLINE
++IMG_VOID SGX2DTraceBlt(IMG_UINT32 *pui32BltData, IMG_UINT32 ui32Count)
++{
++      IMG_UINT32 i;
++
++      PVR_TRACE(("----SGX 2D BLIT----"));
++
++      for (i = 0; i < ui32Count; i++)
++      {
++              PVR_TRACE(("word[%02d]: 0x%08x", i, pui32BltData[i]));
++      }
++}
++#else
++#define SGX2DTraceBlt(pui32BltData, ui32Count)
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DWriteSlavePort)
++#endif
++FORCE_INLINE
++IMG_VOID SGX2DWriteSlavePort(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                       IMG_UINT32 ui32Value)
++{
++      SGX_SLAVE_PORT          *psSlavePort= &psDevInfo->s2DSlavePortKM;
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      IMG_UINT32 *pui32Offset = psSlavePort->pui32Offset;
++
++      
++      if(*pui32Offset > (psSlavePort->ui32DataRange >> 1))
++      {
++              
++              *pui32Offset = 0;
++      }
++#endif
++
++      SGX2DTraceBlt(&ui32Value, 1);
++
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      *((IMG_UINT32*)((IMG_UINT32)psSlavePort->pvData + *pui32Offset)) = ui32Value;
++#else
++      *((IMG_UINT32*)psSlavePort->pvData) = ui32Value;
++#endif
++
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      *pui32Offset += 4;
++#endif
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DWriteSlavePortBatch)
++#endif
++FORCE_INLINE
++PVRSRV_ERROR SGX2DWriteSlavePortBatch(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                                        IMG_UINT32                    *pui32LinDataAddr,
++                                                                        IMG_UINT32                    ui32Bytes)
++{
++      IMG_INT32       i;
++      SGX_SLAVE_PORT  *psSlavePort= &psDevInfo->s2DSlavePortKM;
++      IMG_UINT32      *pui32LinPortAddrBase = (IMG_UINT32*) psSlavePort->pvData;
++      IMG_UINT32      ui32DWORDs = ui32Bytes >> 2;
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      IMG_UINT32      *pui32Offset = psSlavePort->pui32Offset;
++      IMG_UINT32      *pui32LinPortAddr;
++
++      
++      if (ui32Bytes > (psSlavePort->ui32DataRange >> 1))
++      {
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      
++      if(*pui32Offset > (psSlavePort->ui32DataRange >> 1))
++      {
++              
++              *pui32Offset = 0;
++      }
++
++      
++      pui32LinPortAddr = (IMG_UINT32*)((IMG_UINT32)pui32LinPortAddrBase + *pui32Offset);
++#endif
++      
++      SGX2DTraceBlt(pui32LinDataAddr, ui32DWORDs);
++
++      
++      for (i = ui32DWORDs; i != 0 ; i -= ui32DWORDs)
++      {
++              ui32DWORDs = (i < 32) ? i : 32;
++
++              switch(ui32DWORDs)
++              {
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++                      case 32:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 31:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 30:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 29:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 28:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 27:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 26:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 25:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 24:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 23:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 22:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 21:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 20:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 19:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 18:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 17:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 16:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 15:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 14:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 13:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 12:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 11:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 10:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 9:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 8:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 7:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 6:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 5:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 4:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 3:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 2:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 1:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++#else
++                      case 32:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 31:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 30:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 29:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 28:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 27:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 26:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 25:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 24:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 23:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 22:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 21:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 20:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 19:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 18:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 17:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 16:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 15:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 14:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 13:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 12:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 11:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 10:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 9:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 8:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 7:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 6:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 5:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 4:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 3:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 2:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 1:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++#endif
++              }
++      }
++
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      
++      *pui32Offset += ui32Bytes;
++#endif
++
++      return PVRSRV_OK;
++}
++
++IMG_BOOL SGX2DProcessBlit(IMG_HANDLE          hCmdCookie,
++                                                      IMG_UINT32              ui32DataSize,
++                                                      IMG_VOID                *pvData)
++{
++      PVRSRV_BLT_CMD_INFO             *psBltCmd;
++      PVRSRV_SGXDEV_INFO              *psDevInfo;
++      IMG_UINT32                      ui32BytesRequired;
++      IMG_UINT32                      ui32BytesObtained = 0;
++      IMG_BOOL                        bError = IMG_TRUE;
++      PVRSRV_ERROR                    eError;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DProcessBlit: Start"));
++
++      psBltCmd = (PVRSRV_BLT_CMD_INFO*)pvData;
++
++      
++      if (psBltCmd == IMG_NULL || psBltCmd->ui32CmdSize != ui32DataSize)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"ProcessBlit: Data packet size is incorrect"));
++              return IMG_FALSE;
++      }
++
++      
++      psDevInfo = psBltCmd->psDevInfo;
++
++      if (psDevInfo->h2DCmdCookie != IMG_NULL)
++      {
++              return IMG_FALSE;
++      }
++
++      
++      if (psDevInfo->b2DHWRecoveryInProgress)
++      {
++              psDevInfo->h2DCmdCookie = hCmdCookie;
++              SGX2DHardwareKick(psDevInfo);
++              return IMG_TRUE;
++      }
++
++      
++      if (SGX2DAcquireSlavePort(psDevInfo, IMG_FALSE) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "ProcessBlit: Couldn't acquire slaveport"));
++              return IMG_FALSE;
++      }
++
++#ifdef        FIXME
++      
++
++#endif
++
++      
++      if (psDevInfo->b2DHWRecoveryEndPending && SGX2D_2D_NOT_IDLE(psDevInfo))
++      {
++                              psDevInfo->h2DCmdCookie = hCmdCookie;
++                              SGX2DHardwareKick(psDevInfo);
++                              PVR_ASSERT(bError);
++                              goto ErrorExit;
++      }
++      psDevInfo->b2DHWRecoveryEndPending = IMG_FALSE;
++
++      ui32BytesRequired = psBltCmd->ui32DataByteSize + SGX2D_QUEUED_BLIT_PAD;
++
++      
++      eError = SGX2DAcquireFifoSpace(psDevInfo, ui32BytesRequired,    &ui32BytesObtained);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "ProcessBlit: Get Fifo Space failed"));
++              bError = IMG_FALSE;
++              goto ErrorExit;
++      }
++
++      
++      SGX2DWriteSlavePortBatch(psDevInfo,
++                                                       psBltCmd->aui32BltData,
++                                                       psBltCmd->ui32DataByteSize);
++
++      
++      psDevInfo->h2DCmdCookie = hCmdCookie;
++
++      
++      SGX2DWriteSlavePort(psDevInfo, SGX2D_FLUSH_BH);
++
++      PVR_ASSERT(bError);
++ErrorExit:
++
++      
++      if(SGX2DReleaseSlavePort(psDevInfo) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGX2DReleaseSlavePort: failed to release slaveport"));
++      }
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DProcessBlit: Exit.  Error %d", (int)bError));
++
++      return bError;
++}
++
++IMG_VOID SGX2DHandle2DComplete(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_HANDLE hCmdCookie;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DHandle2DComplete: Start"));
++
++      hCmdCookie = psDevInfo->h2DCmdCookie;
++      psDevInfo->h2DCmdCookie = IMG_NULL;
++
++      
++      if (hCmdCookie != IMG_NULL)
++      {
++              PVRSRVCommandCompleteKM(hCmdCookie, IMG_FALSE);
++      }
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DHandle2DComplete: Exit"));
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
++                                                        IMG_UINT32                            ui32NumSrcSyncs,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *apsSrcSync[],
++                                                        IMG_UINT32                            ui32DataByteSize,
++                                                        IMG_UINT32                            *pui32BltData)
++{
++#if defined(NO_HARDWARE)
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++      PVR_UNREFERENCED_PARAMETER(psDstSync);
++      PVR_UNREFERENCED_PARAMETER(ui32NumSrcSyncs);
++      PVR_UNREFERENCED_PARAMETER(apsSrcSync);
++      PVR_UNREFERENCED_PARAMETER(ui32DataByteSize);
++      PVR_UNREFERENCED_PARAMETER(pui32BltData);
++
++      return PVRSRV_OK;
++#else
++      PVRSRV_COMMAND          *psCommand;
++      PVRSRV_BLT_CMD_INFO     *psBltCmd;
++      IMG_UINT32              ui32CmdByteSize;
++      IMG_UINT32              i;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueueBlitKM: Start"));
++
++      
++      if (psDevInfo->b2DHWRecoveryInProgress == IMG_TRUE)
++      {
++              return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++      }
++
++      
++      if ((ui32DataByteSize + SGX2D_QUEUED_BLIT_PAD) > psDevInfo->ui322DFifoSize)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: Blit too big for FIFO. Blit size: %d (+ padding %d), FIFO size: %d", ui32DataByteSize, SGX2D_QUEUED_BLIT_PAD, psDevInfo->ui322DFifoSize));
++
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      ui32CmdByteSize = sizeof(PVRSRV_BLT_CMD_INFO)
++                              + ui32DataByteSize
++                              - sizeof(IMG_UINT32);
++
++      eError = PVRSRVInsertCommandKM((PVRSRV_QUEUE_INFO *)psDevInfo->h2DQueue,
++                                      &psCommand,
++                                      SYS_DEVICE_SGX, 
++                                      SGX_2D_BLT_COMMAND,
++                                      (psDstSync == IMG_NULL) ? 0 : 1,
++                                      &psDstSync,
++                                      ui32NumSrcSyncs,
++                                      apsSrcSync,
++                                      ui32CmdByteSize );
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: PVRSRVInsertCommandKM failed. Error %d", eError));
++#ifdef DEBUG
++              if (eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++              {
++                      if (!SGX2DIsBusy(psDevInfo))
++                      {
++                              
++                              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: 2D core not busy, command queue full - lockup suspected"));
++                      }
++              }
++#endif
++              return eError;
++      }
++
++      
++      psBltCmd                = (PVRSRV_BLT_CMD_INFO*) psCommand->pvData;
++      psBltCmd->ui32CmdSize   = ui32CmdByteSize;
++      psBltCmd->psDevInfo     = psDevInfo;
++
++      
++      psBltCmd->psDstSync = psDstSync;
++
++      psBltCmd->ui32NumSrcSyncInfos = ui32NumSrcSyncs;
++      for(i = 0; i < ui32NumSrcSyncs; i++)
++      {
++              
++              psBltCmd->apsSrcSync[i] = apsSrcSync[i];
++      }
++
++      if (pui32BltData != IMG_NULL)
++      {
++              for(i = 0; i < (ui32DataByteSize>>2); i++)
++              {
++                      psBltCmd->aui32BltData[i] = pui32BltData[i];
++              }
++      }
++
++      psBltCmd->ui32DataByteSize = ui32DataByteSize;
++
++      
++      eError = PVRSRVSubmitCommandKM((PVRSRV_QUEUE_INFO *)psDevInfo->h2DQueue, psCommand);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: PVRSRVSubmitCommandKM failed. Error %d", eError));
++      }
++
++      SGX2DKick(psDevInfo);
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueueBlitKM: Exit. Error: %d", eError));
++
++      return eError;
++#endif        
++}
++
++#if defined(SGX2D_DIRECT_BLITS)
++IMG_EXPORT
++PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                         IMG_UINT32                   ui32DataByteSize,
++                                                         IMG_UINT32                   *pui32BltData)
++{
++#if defined(NO_HARDWARE)
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++      PVR_UNREFERENCED_PARAMETER(ui32DataByteSize);
++      PVR_UNREFERENCED_PARAMETER(pui32BltData);
++
++      return PVRSRV_OK;
++#else
++      PVRSRV_ERROR    eError;
++      PVRSRV_ERROR    eSrvErr;
++      
++      IMG_UINT32              ui32CmdByteSize = ui32DataByteSize + 4;
++      IMG_BOOL                bStart = IMG_FALSE;
++      IMG_UINT32              uiStart = 0;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DDirectBlitKM: Start"));
++
++      
++      if (psDevInfo->b2DHWRecoveryInProgress == IMG_TRUE)
++      {
++              return PVRSRV_ERROR_FIFO_SPACE;
++      }
++
++      
++      if ( ui32CmdByteSize > psDevInfo->ui322DFifoSize)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Blit too big for FIFO. Blit size: %d (+ padding %d), FIFO size: %d", ui32DataByteSize, 4, psDevInfo->ui322DFifoSize));
++
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      eSrvErr = SGX2DAcquireSlavePort (psDevInfo, IMG_TRUE);
++      if (eSrvErr != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Cannot acquire slaveport. Error %d", eSrvErr));
++              return eSrvErr;
++      }
++
++#ifdef        FIXME
++      
++
++#endif
++      do
++      {
++              eSrvErr = SGX2DAcquireFifoSpace(psDevInfo,
++                                        ui32CmdByteSize,
++                                        IMG_NULL);
++              if (eSrvErr == PVRSRV_OK)
++              {
++                      break;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      if (eSrvErr != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Cannot acquire FIFO space. Error %d", eSrvErr));
++              
++              eError = eSrvErr;
++      }
++      else
++      {
++              
++              if (psDevInfo->b2DHWRecoveryEndPending && SGX2D_2D_NOT_IDLE(psDevInfo))
++              {
++                      eError = PVRSRV_ERROR_FIFO_SPACE;
++              }
++              else
++              {
++                      eError = PVRSRV_OK;
++
++                      psDevInfo->b2DHWRecoveryEndPending = IMG_FALSE;
++
++                      SGX2DWriteSlavePortBatch(psDevInfo, pui32BltData, ui32DataByteSize);
++
++                      SGX2DWriteSlavePort(psDevInfo, EURASIA2D_FENCE_BH);
++              }
++      }
++
++      eSrvErr = SGX2DReleaseSlavePort(psDevInfo);
++      if (eSrvErr != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Cannot release slave port.  Error %d", eSrvErr));
++
++              if (eError != PVRSRV_OK)
++              {
++                      eError = eSrvErr;
++              }
++      }
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DDirectBlitKM: Exit.  Error: %d", eError));
++
++      
++      SGX2DKick(psDevInfo);
++
++      return eError;
++#endif        
++}
++#endif 
++
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DQuerySyncOpsComplete)
++#endif
++static INLINE
++IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo)
++{
++      PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++      return (IMG_BOOL)(
++                                        (psSyncData->ui32ReadOpsComplete == psSyncData->ui32ReadOpsPending) &&
++                                        (psSyncData->ui32WriteOpsComplete == psSyncData->ui32WriteOpsPending)
++                                       );
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                                         PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++                                                                         IMG_BOOL bWaitForComplete)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0;
++
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
++
++      if(SGX2DQuerySyncOpsComplete(psSyncInfo))
++      {
++              
++              PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
++              return PVRSRV_OK;
++      }
++
++      
++      if (!bWaitForComplete)
++      {
++              
++              PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
++              return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++      }
++
++       
++      PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
++      do
++      {
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++
++              if(SGX2DQuerySyncOpsComplete(psSyncInfo))
++              {
++                      
++                      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over.  Blits complete."));
++                      return PVRSRV_OK;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      
++      PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++
++      return PVRSRV_ERROR_TIMEOUT;
++}
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++PVRSRV_ERROR SGX2DInit(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++
++      
++      PVR_ASSERT(psDevInfo->ui322DFifoSize == 0);
++      psDevInfo->ui322DFifoSize =  SGX2DFifoFreeSpace(psDevInfo);
++
++      PVR_TRACE(("SGX2DInit: 2D FIFO size: %d", psDevInfo->ui322DFifoSize));
++
++      
++      PVR_ASSERT(psDevInfo->s2DSlavePortKM.pui32Offset == 0);
++      PVR_ASSERT(psDevInfo->ui322DFifoOffset == 0);
++      psDevInfo->s2DSlavePortKM.pui32Offset = &psDevInfo->ui322DFifoOffset;
++
++      PVR_ASSERT(psDevInfo->h2DQueue == IMG_NULL);
++      eError = PVRSRVCreateCommandQueueKM(SGX2D_COMMAND_QUEUE_SIZE,
++                                              (PVRSRV_QUEUE_INFO **)&psDevInfo->h2DQueue);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DInit: PVRSRVCreateCommandQueueKM failed (%d)", eError));
++
++              return eError;
++      }
++
++      PVR_ASSERT(psDevInfo->h2DCmdCookie == IMG_NULL);
++      PVR_ASSERT(!psDevInfo->b2DHWRecoveryInProgress);
++      PVR_ASSERT(!psDevInfo->b2DHWRecoveryEndPending);
++      PVR_ASSERT(psDevInfo->ui322DCompletedBlits == 0);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGX2DDeInit(PVRSRV_SGXDEV_INFO      *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++
++      if (psDevInfo->h2DQueue != IMG_NULL)
++      {
++              eError = PVRSRVDestroyCommandQueueKM((PVRSRV_QUEUE_INFO *)psDevInfo->h2DQueue);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGX2DDeInit: PVRSRVDestroyCommandQueueKM failed (%d)", eError));
++
++                      return eError;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,131 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#define DEV_DEVICE_TYPE                       PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS              PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION             1
++#define DEV_MINOR_VERSION             0
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
++      #define SGX_ADDRESS_SPACE_SIZE                          32
++
++      #define SGX_GENERAL_HEAP_BASE                           0x00400000
++      #define SGX_GENERAL_HEAP_SIZE                           (0x78000000-0x00800000)
++
++      #define SGX_TADATA_HEAP_BASE                            0x78000000
++      #define SGX_TADATA_HEAP_SIZE                            (0x08000000-0x00400000)
++
++      #define SGX_KERNEL_CODE_HEAP_BASE                       0x80000000
++      #define SGX_KERNEL_CODE_HEAP_SIZE                       0x00080000
++
++      #define SGX_VIDEO_CODE_HEAP_BASE                        0x81000000
++      #define SGX_VIDEO_CODE_HEAP_SIZE                        0x00080000
++
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_BASE         0x82000000
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_SIZE         0x05000000
++
++      #define SGX_PIXELSHADER_HEAP_BASE                       0x88000000
++      #define SGX_PIXELSHADER_HEAP_SIZE                       0x00500000
++      
++      #define SGX_VERTEXSHADER_HEAP_BASE                      0x89000000
++      #define SGX_VERTEXSHADER_HEAP_SIZE                      0x00200000
++
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE         0x8A000000
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE         0x02000000
++
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE        0x8C000000
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE        0x02000000
++
++      #define SGX_SYNCINFO_HEAP_BASE                          0xA0000000
++      #define SGX_SYNCINFO_HEAP_SIZE                          0x01000000
++
++      #define SGX_3DPARAMETERS_HEAP_BASE                      0xC0000000
++      #define SGX_3DPARAMETERS_HEAP_SIZE                      (0x10000000-0x00400000)
++
++      #define SGX_2D_HEAP_BASE                                        0xD0000000
++      #define SGX_2D_HEAP_SIZE                                        (0x08000000-0x00400000)
++
++      #define SGX_GENERAL_MAPPING_HEAP_BASE           0xD8000000
++      #define SGX_GENERAL_MAPPING_HEAP_SIZE           (0x08000000-0x00400000)
++
++      
++      #define SGX_CORE_IDENTIFIED
++#endif
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
++      #define SGX_ADDRESS_SPACE_SIZE                          28
++
++      #define SGX_GENERAL_HEAP_BASE                           0x00400000
++      #define SGX_GENERAL_HEAP_SIZE                           (0x07000000-0x00401000)
++
++      
++      #define SGX_TADATA_HEAP_BASE                            0x07000000
++      #define SGX_TADATA_HEAP_SIZE                            (0x01000000-0x00001000)
++
++      #define SGX_3DPARAMETERS_HEAP_BASE                      0x08000000
++      #define SGX_3DPARAMETERS_HEAP_SIZE                      (0x04000000-0x00001000)
++
++      #define SGX_GENERAL_MAPPING_HEAP_BASE           0x0C000000
++      #define SGX_GENERAL_MAPPING_HEAP_SIZE           (0x01000000-0x00001000)
++
++      #define SGX_PIXELSHADER_HEAP_BASE                       0x0D000000
++      #define SGX_PIXELSHADER_HEAP_SIZE                       0x00500000
++
++      #define SGX_VERTEXSHADER_HEAP_BASE                      0x0D800000
++      #define SGX_VERTEXSHADER_HEAP_SIZE                      0x00200000
++
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE         0x0E000000
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE         (0x00800000-0x00001000)
++
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE        0x0E800000
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE        (0x00800000-0x00001000)
++
++      #define SGX_KERNEL_CODE_HEAP_BASE                       0x0F000000
++      #define SGX_KERNEL_CODE_HEAP_SIZE                       0x00080000
++
++      #define SGX_VIDEO_CODE_HEAP_BASE                        0x0F400000
++      #define SGX_VIDEO_CODE_HEAP_SIZE                        0x00080000
++
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_BASE         0x0F800000
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_SIZE         (0x00400000-0x00001000)
++              
++      #define SGX_SYNCINFO_HEAP_BASE                          0x0FC00000
++      #define SGX_SYNCINFO_HEAP_SIZE                          (0x00400000-0x00001000)
++
++      
++      #define SGX_CORE_IDENTIFIED
++#endif
++
++#if !defined(SGX_CORE_IDENTIFIED)
++      #error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif        
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include "sgxdefs.h"
++#include "device.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++#define               SGX_HOSTPORT_PRESENT                    0x00000001
++
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST               (1 << 0)        
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE      (1 << 1)        
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE     (1 << 2)        
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK                                                (1 << 3)        
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR                 (1 << 0)        
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER        (1 << 1)        
++
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST     0x01    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST     0x02    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE       0x04    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD                0x10    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT                0x20    
++
++
++
++
++
++
++typedef struct _SGX_TIMING_INFORMATION_
++{
++      IMG_UINT32                      ui32CoreClockSpeed;
++      IMG_UINT32                      ui32HWRecoveryFreq;
++      IMG_UINT32                      ui32ActivePowManLatencyms;
++      IMG_UINT32                      ui32uKernelFreq;
++} SGX_TIMING_INFORMATION;
++
++typedef struct _SGX_DEVICE_MAP_
++{     
++      IMG_UINT32                      ui32Flags;
++
++      
++      IMG_SYS_PHYADDR         sRegsSysPBase;
++      IMG_DEV_PHYADDR         sRegsDevPBase;
++      IMG_CPU_PHYADDR         sRegsCpuPBase;
++      IMG_UINT32                      ui32RegsSize;
++      
++      
++      IMG_SYS_PHYADDR         sSPSysPBase;
++      IMG_DEV_PHYADDR         sSPDevPBase;
++      IMG_CPU_PHYADDR         sSPCpuPBase;
++      IMG_UINT32                      ui32SPSize;
++
++
++      
++      IMG_SYS_PHYADDR         sLocalMemSysPBase;
++      IMG_DEV_PHYADDR         sLocalMemDevPBase;
++      IMG_CPU_PHYADDR         sLocalMemCpuPBase;
++      IMG_UINT32                      ui32LocalMemSize;
++
++      
++      IMG_UINT32                      ui32IRQ;
++
++      
++      SGX_TIMING_INFORMATION sTimingInfo;
++} SGX_DEVICE_MAP;
++
++
++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
++struct _PVRSRV_STUB_PBDESC_
++{
++      IMG_UINT32              ui32RefCount;
++      IMG_UINT32              ui32TotalPBSize;
++      PVRSRV_KERNEL_MEM_INFO  *psSharedPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psHWPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO  **ppsSubKernelMemInfos;
++      IMG_UINT32              ui32SubKernelMemInfosCount;
++      IMG_HANDLE              hDevCookie;
++      PVRSRV_KERNEL_MEM_INFO  *psBlockKernelMemInfo;
++      PVRSRV_STUB_PBDESC      *psNext;
++};
++
++typedef struct _PVRSRV_SGX_CCB_INFO_
++{
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo;                  
++      PVRSRV_KERNEL_MEM_INFO  *psCCBCtlMemInfo;               
++      PVRSRV_SGX_COMMAND              *psCommands;                    
++      IMG_UINT32                              *pui32WriteOffset;              
++      volatile IMG_UINT32             *pui32ReadOffset;               
++#if defined(PDUMP)
++      IMG_UINT32                              ui32CCBDumpWOff;                
++#endif
++} PVRSRV_SGX_CCB_INFO;
++
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++IMG_VOID SGXOSTimer(IMG_VOID *pvData);
++
++IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO  *psDevInfo);
++#if defined(NO_HARDWARE)
++static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO             *psDevInfo,
++                                                                                              IMG_UINT32 ui32StatusRegister,
++                                                                                              IMG_UINT32 ui32StatusValue,
++                                                                                              IMG_UINT32 ui32StatusMask)
++{
++      IMG_UINT32 ui32RegVal;
++
++      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++      ui32RegVal &= ~ui32StatusMask;
++      ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1809 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#ifdef        SGX_FEATURE_2D_HARDWARE
++#include "sgx2dcore.h"
++#endif
++
++#include "sgxutils.h"
++
++#if defined (SGX_FEATURE_2D_HARDWARE)
++#define       SGX_USING_CMD_PROC_LIST
++#endif
++
++IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
++IMG_VOID SGXScheduleProcessQueues(IMG_VOID *pvData);
++
++IMG_UINT32 gui32EventStatusServicesByISR = 0;
++
++static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
++                                               IMG_UINT32                      ui32PDUMPFlags);
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                 IMG_BOOL                             bHardwareRecovery);
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM    15
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE15
++#else
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE0
++#endif
++
++static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      SGXScheduleProcessQueues(psDeviceNode);
++}
++
++static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      if (psDevInfo->psKernelCCBInfo != IMG_NULL)
++      {
++              
++
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                                              PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                              SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++      PVRSRV_ERROR            eError;
++
++      PVRSRV_SGX_CCB_INFO     *psKernelCCBInfo = IMG_NULL;
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      psDevInfo->sScripts = psInitInfo->sScripts;
++
++      psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
++      psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
++
++      psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
++      psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
++
++      psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
++      psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++      psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
++      psDevInfo->psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
++#endif
++
++      
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                              sizeof(PVRSRV_SGX_CCB_INFO),
++                                              (IMG_VOID **)&psKernelCCBInfo, 0);
++      if (eError != PVRSRV_OK)        
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to alloc memory"));
++              goto failed_allockernelccb;
++      }
++
++
++      OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
++      psKernelCCBInfo->psCCBMemInfo           = psDevInfo->psKernelCCBMemInfo;
++      psKernelCCBInfo->psCCBCtlMemInfo        = psDevInfo->psKernelCCBCtlMemInfo;
++      psKernelCCBInfo->psCommands                     = psDevInfo->psKernelCCB->asCommands;
++      psKernelCCBInfo->pui32WriteOffset       = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++      psKernelCCBInfo->pui32ReadOffset        = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++      psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++      
++
++      psDevInfo->ui32TAKickAddress = psInitInfo->ui32TAKickAddress;
++
++      
++
++      psDevInfo->ui32VideoHandlerAddress = psInitInfo->ui32VideoHandlerAddress;
++
++      psDevInfo->bForcePTOff = IMG_FALSE;
++      psDevInfo->ui32RegFlags = 0;
++      psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++      psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++      psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++      psDevInfo->ui32ClockGateMask = psInitInfo->ui32ClockGateMask;   
++
++
++      
++      OSMemCopy(&psDevInfo->asSGXDevData,  &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
++
++      return PVRSRV_OK;
++
++failed_allockernelccb:
++      DeinitDevInfo(psDevInfo);
++
++      return eError;
++}
++
++
++
++
++PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE             hDevHandle, 
++                                                         PVR_POWER_STATE      eNewPowerState, 
++                                                         PVR_POWER_STATE      eCurrentPowerState)
++{
++      if (eNewPowerState != eCurrentPowerState)
++      {
++              PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++              PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++
++              
++
++
++              if (eNewPowerState == PVRSRV_POWER_STATE_D3)
++              {
++                      PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++                      #if defined (SGX_FEATURE_AUTOCLOCKGATING) && (!defined(NO_HARDWARE) || defined(PDUMP))
++                      IMG_UINT32 ui32ClockMask = psDevInfo->ui32ClockGateMask;
++                      #endif
++
++                      
++                      psSGXHostCtl->ui32PowManFlags |= PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST;
++
++                      
++                      PDUMPCOMMENT("TA/3D CCB Control - Trigger power down event on uKernel...");
++                      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++
++                      
++                      #if !defined(NO_HARDWARE)
++                      if (PollForValueKM((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags),
++                                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                                              MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                              WAIT_TRY_COUNT) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip power off failed."));
++                      }
++                      #endif
++
++                      #ifdef PDUMP
++                      
++                      PDUMPCOMMENT("TA/3D CCB Control - Wait for power down event on uKernel...");
++                      PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++                                              offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags),
++                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                              PDUMP_POLL_OPERATOR_EQUAL,
++                                              IMG_FALSE, IMG_FALSE,
++                                              MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++                      #endif
++
++                      SGXDeinitialise(psDevInfo);
++
++                      #if defined(SGX_FEATURE_AUTOCLOCKGATING)
++                      
++                      #if !defined(NO_HARDWARE)
++                      if (PollForValueKM((volatile IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_CLKGATESTATUS),
++                                                              0,
++                                                              ui32ClockMask,
++                                                              MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                              WAIT_TRY_COUNT) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip idle failed."));
++                      }
++                      #endif
++                      PDUMPREGPOL(EUR_CR_CLKGATESTATUS, 0, ui32ClockMask);
++                      #endif
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE            hDevHandle, 
++                                                              PVR_POWER_STATE eNewPowerState, 
++                                                              PVR_POWER_STATE eCurrentPowerState)
++{
++      if (eNewPowerState != eCurrentPowerState)
++      {
++              PVRSRV_ERROR            eError;
++              PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++              PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++              SYS_DATA                        *psSysData;
++
++              
++
++              eError = SysAcquireData(&psSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++              
++              
++
++              if(eCurrentPowerState == PVRSRV_POWER_STATE_D3)
++              {
++                      PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++                      
++                      psSGXHostCtl->ui32PowManFlags = 0;
++
++                      
++                      PDUMPCOMMENT("TA/3D CCB Control - Reset Power Manager flags");
++                      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++
++                      eError = SGXInitialise(psDevInfo, IMG_FALSE);
++
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
++                              return eError;
++                      }
++              }
++
++              PVR_DPF((PVR_DBG_WARNING,
++                              "SGXPostPowerState : SGX Power Transition from %d to %d OK",
++                              eCurrentPowerState, eNewPowerState));
++      }
++
++      return PVRSRV_OK;
++}
++
++#define       SCRIPT_DATA(pData, offset, type) (*((type *)(((char *)pData) + offset)))
++#define       SCRIPT_DATA_UI32(pData, offset) SCRIPT_DATA(pData, offset, IMG_UINT32)
++
++static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
++{
++      IMG_UINT32 ui32PC;
++      SGX_INIT_COMMAND *psComm;
++
++      for (ui32PC = 0, psComm = psScript;
++              ui32PC < ui32NumInitCommands;
++              ui32PC++, psComm++)
++      {
++              switch (psComm->eOp)
++              {
++                      case SGX_INIT_OP_WRITE_HW_REG:
++                      {
++                              OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++                              PDUMPREG(psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++                              break;
++                      }
++#if defined(PDUMP)
++                      case SGX_INIT_OP_PDUMP_HW_REG:
++                      {
++                              PDUMPREG(psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
++                              break;
++                      }
++#endif
++                      case SGX_INIT_OP_HALT:
++                      {
++                              return PVRSRV_OK;
++                      }
++                      case SGX_INIT_OP_ILLEGAL:
++                      
++                      default:
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++              }
++
++      }
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                 IMG_BOOL                             bHardwareRecovery)
++{
++      PVRSRV_ERROR            eError;
++      IMG_UINT32                      ui32ReadOffset, ui32WriteOffset;
++
++      
++      ResetSGX(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++      
++      *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++                       sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
++                       MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++#endif 
++
++      
++
++
++      psDevInfo->psSGXHostCtl->sTAHWPBDesc.uiAddr = 0;
++      psDevInfo->psSGXHostCtl->s3DHWPBDesc.uiAddr = 0;
++#if defined(PDUMP)
++      PDUMPCOMMENT(" CCB Control - Reset HW PBDesc records");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++                       offsetof(PVRSRV_SGX_HOST_CTL, sTAHWPBDesc), sizeof(IMG_DEV_VIRTADDR),
++                       PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++                       offsetof(PVRSRV_SGX_HOST_CTL, s3DHWPBDesc), sizeof(IMG_DEV_VIRTADDR),
++                       PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif 
++
++      eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommands, SGX_MAX_INIT_COMMANDS);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript failed (%d)", eError));
++              return (PVRSRV_ERROR_GENERIC);
++      }
++
++      if (bHardwareRecovery)
++      {
++              PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++              
++              if (PollForValueKM((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32InterruptClearFlags),
++                                                 0,
++                                                 PVRSRV_USSE_EDM_INTERRUPT_HWR,
++                                                 MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                 WAIT_TRY_COUNT) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGXEDM: Wait for uKernel HW Recovery failed"));
++              }
++      }
++
++      
++
++
++      for (ui32ReadOffset = psDevInfo->psKernelCCBCtl->ui32ReadOffset,
++                       ui32WriteOffset = psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++               ui32ReadOffset != ui32WriteOffset;
++               ui32ReadOffset = (ui32ReadOffset + 1) & 0xFF)
++      {
++              *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK);
++      }
++
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
++
++{
++      PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
++      PVRSRV_ERROR            eError;
++
++      
++      if (psDevInfo->pvRegsBaseKM == IMG_NULL)
++      {
++              return PVRSRV_OK;
++      }
++
++      eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
++              return (PVRSRV_ERROR_GENERIC);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++static IMG_VOID ResetSGXSleep(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        IMG_UINT32                    ui32PDUMPFlags,
++                                                        IMG_BOOL                              bPDump)
++{
++#if !defined(PDUMP)
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      
++      OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++      if (bPDump)
++      {
++              PDUMPIDLWITHFLAGS(1000, ui32PDUMPFlags);
++      }
++}
++
++
++static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
++                                               IMG_UINT32                      ui32PDUMPFlags)
++{
++      IMG_UINT32 ui32RegVal;
++
++      const IMG_UINT32 ui32SoftResetRegVal =
++                                      #ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++                                      EUR_CR_SOFT_RESET_TWOD_RESET_MASK       |
++                                      #endif
++                                      EUR_CR_SOFT_RESET_DPM_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TA_RESET_MASK         |
++                                      EUR_CR_SOFT_RESET_USE_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_ISP_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++      const IMG_UINT32 ui32BifInvalDCVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++
++      const IMG_UINT32 ui32BifFaultMask =
++                                              EUR_CR_BIF_INT_STAT_FAULT_MASK;
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      IMG_UINT32                      ui32BIFCtrl;
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      IMG_UINT32                      ui32BIFMemArb;
++#endif 
++#endif 
++
++#ifndef PDUMP
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      psDevInfo->ui32NumResets++;
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++      
++      ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++      if (ui32RegVal & ui32BifFaultMask)
++      {
++              
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      }
++#endif 
++
++      
++      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      
++
++      ui32BIFMemArb   = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++                                        (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++                                        (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb, ui32PDUMPFlags);
++#endif 
++#endif 
++
++
++      
++
++
++
++
++      ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      
++      ui32RegVal = ui32SoftResetRegVal;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++      
++
++      for (;;)
++      {
++              IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++              IMG_DEV_VIRTADDR sBifFault;
++              IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++              if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++              {
++                      break;
++              }
++              
++              
++
++
++              
++
++
++              sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++              PVR_DPF((PVR_DBG_WARNING, "ResetSGX: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++              ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++              ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++              
++              ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++
++              
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              ui32RegVal = ui32SoftResetRegVal;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++              ui32RegVal = 0;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++      }
++
++
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      ui32BIFCtrl = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++#endif
++#if defined(FIX_HW_BRN_23410)
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++#endif
++
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32BIFCtrl);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32BIFCtrl, ui32PDUMPFlags);
++#endif 
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr);
++      PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++      
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      ui32RegVal = ui32SoftResetRegVal;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++#endif 
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32BifInvalDCVal, ui32PDUMPFlags);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++      
++      PVR_DPF((PVR_DBG_WARNING,"Soft Reset of SGX"));
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
++{
++      PVRSRV_SGXDEV_INFO      *psDevInfo;     
++      IMG_HANDLE              hKernelDevMemContext;
++      IMG_DEV_PHYADDR         sPDDevPAddr;
++      IMG_UINT32              i;
++      PVRSRV_DEVICE_NODE  *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++      IMG_HANDLE          hDevInfoOSMemHandle = (IMG_HANDLE)IMG_NULL;
++      PVRSRV_ERROR            eError;
++
++      PDUMPCOMMENT("SGX Initialisation Part 1");
++
++      
++      PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
++#ifdef SGX_CORE_REV
++      PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
++#else
++      PDUMPCOMMENT("SGX Core Revision Information: head rtl");
++#endif        
++
++      
++      if(OSAllocPages(PVRSRV_OS_PAGEABLE_HEAP|PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_CACHED,
++                                      sizeof(PVRSRV_SGXDEV_INFO),
++                                      (IMG_VOID **)&psDevInfo,
++                                      &hDevInfoOSMemHandle) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++      OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
++
++      
++      psDevInfo->eDeviceType          = DEV_DEVICE_TYPE;
++      psDevInfo->eDeviceClass         = DEV_DEVICE_CLASS;
++
++      
++      psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
++      psDeviceNode->hDeviceOSMemHandle = hDevInfoOSMemHandle;
++      
++      
++      psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
++
++      
++      hKernelDevMemContext = BM_CreateContext(psDeviceNode,
++                                                                                      &sPDDevPAddr,
++                                                                                      IMG_TRUE,
++                                                                                      IMG_NULL);
++
++      psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++      
++      for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
++      {
++              IMG_HANDLE hDevMemHeap;
++
++              switch(psDeviceMemoryHeap[i].DevMemHeapType)
++              {
++                      case DEVICE_MEMORY_HEAP_KERNEL:
++                      case DEVICE_MEMORY_HEAP_SHARED:
++                      case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                      {
++                              hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
++                                                                                              &psDeviceMemoryHeap[i]);
++                              
++
++
++                              psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++                              break;
++                      }
++              }
++      }
++      
++      eError = MMU_BIFResetPDAlloc(psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       sizeof(PVRSRV_EVENTOBJECT) , 
++                                       (IMG_VOID **)&psDevInfo->psSGXEventObject, 0) != PVRSRV_OK)    
++      {
++              
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for event object"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      if(OSEventObjectCreate("PVRSRV_EVENTOBJECT_SGX", psDevInfo->psSGXEventObject) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to create event object"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      
++      }
++#endif 
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++      PVRSRV_ERROR            eError;
++      SGX_DEVICE_MAP          *psSGXDeviceMap;
++      SGX_TIMING_INFORMATION* psSGXTimingInfo;
++
++      PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++      psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++      eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, 
++                                                                      (IMG_VOID**)&psSGXDeviceMap);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: Failed to get device memory map!"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      psSGXTimingInfo = & psSGXDeviceMap->sTimingInfo;
++      
++      
++      psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++      
++      
++      psInitInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++#if defined(SUPPORT_HW_RECOVERY)
++      psInitInfo->ui32HWRecoverySampleRate = psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
++#endif 
++
++      psInitInfo->ui32ActivePowManSampleRate =
++              psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
++      
++
++
++
++
++
++
++
++      
++      psInitInfo->ui32ActivePowManSampleRate += 2;
++      
++      return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                IMG_HANDLE hDevHandle,
++                                SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++#if defined(SGX_USING_CMD_PROC_LIST)
++      PFN_CMD_PROC            pfnCmdProcList[SGX_COMMAND_COUNT];
++      IMG_UINT32              ui32SyncCountList[SGX_COMMAND_COUNT][2];
++#endif
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++      PVRSRV_ERROR            eError;
++      SGX_DEVICE_MAP          *psSGXDeviceMap;
++      PVR_POWER_STATE         eDefaultPowerState;
++
++      PDUMPCOMMENT("SGX Initialisation Part 2");
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++      
++
++      eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program"));
++              goto failed_init_dev_info;
++      }
++
++      
++#ifdef SGX_FEATURE_2D_HARDWARE
++      eError = OSCreateResource(&psDevInfo->s2DSlaveportResource);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to create resource !"));
++              return PVRSRV_ERROR_INIT_FAILURE;
++      }
++#endif
++
++      eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++                                                                      (IMG_VOID**)&psSGXDeviceMap);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
++              return PVRSRV_ERROR_INIT_FAILURE;
++      }
++
++      
++      psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++                                                                                 psSGXDeviceMap->ui32RegsSize,
++                                                                                 PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                                                                 IMG_NULL);
++      if (!psDevInfo->pvRegsBaseKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++      psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++      psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      psDevInfo->s2DSlavePortKM.pvData = OSMapPhysToLin (psSGXDeviceMap->sSPCpuPBase,
++                                                                                                              psSGXDeviceMap->ui32SPSize,
++                                                                                                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                                                                                              IMG_NULL);
++
++              
++      if (!psDevInfo->s2DSlavePortKM.pvData)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map 2D Slave port region\n"));
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++      psDevInfo->s2DSlavePortKM.ui32DataRange = psSGXDeviceMap->ui32SPSize;
++      psDevInfo->s2DSlavePortKM.sPhysBase = psSGXDeviceMap->sSPSysPBase;
++#endif
++
++
++#if defined (SYS_USING_INTERRUPTS)
++
++      
++      psDeviceNode->pvISRData = psDeviceNode;
++      
++      PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++#endif 
++
++      
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      psDevInfo->psSGXHostCtl->ui32PowManFlags |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++      eDefaultPowerState = PVRSRV_POWER_STATE_D3;
++#else 
++      eDefaultPowerState = PVRSRV_POWER_STATE_D0;
++#endif 
++      eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                              SGXPrePowerState,
++                                                                              SGXPostPowerState,
++                                                                              (IMG_HANDLE)psDeviceNode,
++                                                                              PVRSRV_POWER_STATE_D3,
++                                                                              eDefaultPowerState);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
++              return eError;
++      }
++
++#if defined(SGX_USING_CMD_PROC_LIST)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      pfnCmdProcList[SGX_2D_BLT_COMMAND] = SGX2DProcessBlit;
++      ui32SyncCountList[SGX_2D_BLT_COMMAND][0] = 1;   
++      ui32SyncCountList[SGX_2D_BLT_COMMAND][1] = PVRSRV_MAX_BLT_SRC_SYNCS;
++#endif
++      eError = PVRSRVRegisterCmdProcListKM(psDeviceNode->sDevId.ui32DeviceIndex, &pfnCmdProcList[0], ui32SyncCountList, SGX_COMMAND_COUNT);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: PVRSRVRegisterCmdProcList failed"));
++              return eError;
++      }
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      eError = SGX2DInit(psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: SGX2DInit failed"));
++              return eError;
++      }
++#endif
++
++
++
++      
++
++      OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
++      OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
++      OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++      PDUMPCOMMENT("Kernel CCB");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++      PDUMPCOMMENT("Kernel CCB Control");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++      PDUMPCOMMENT("Kernel CCB Event Kicker");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++
++      
++      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                               PVRSRV_POWER_Unspecified,
++                                                                               KERNEL_ID, IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed PVRSRVSetDevicePowerStateKM call"));
++              return eError;
++      }
++
++#if defined(SUPPORT_HW_RECOVERY)
++      {
++              SGX_TIMING_INFORMATION* psSGXTimingInfo = & psSGXDeviceMap->sTimingInfo;
++              
++              psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode, 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
++              if(psDevInfo->hTimer == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"OSAddTimer : Failed to register timer callback function"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++#endif
++
++      return PVRSRV_OK;
++
++failed_init_dev_info:
++      return eError;
++}
++
++static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
++{
++      PVRSRV_DEVICE_NODE                      *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++      PVRSRV_SGXDEV_INFO                      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      IMG_HANDLE                                      hDevInfoOSMemHandle = psDeviceNode->hDeviceOSMemHandle;
++      PVRSRV_ERROR                            eError = PVRSRV_ERROR_INVALID_PARAMS;
++      IMG_UINT32                                      ui32Heap;
++      DEVICE_MEMORY_HEAP_INFO         *psDeviceMemoryHeap;
++
++      if (!psDevInfo)
++      {
++              
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
++              return PVRSRV_OK;
++      }
++
++#if defined(SUPPORT_HW_RECOVERY)
++      
++      if(psDevInfo->hTimer)
++      {
++              eError = OSRemoveTimer (psDevInfo->hTimer);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++                      return  eError;
++              }
++      }
++#endif
++
++      MMU_BIFResetPDFree(psDevInfo);
++
++      
++
++
++
++
++
++
++#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      eError = SGXDeinitialise((IMG_HANDLE)psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: SGXDeinitialise failed"));
++              return eError;
++      }
++#endif 
++
++
++
++      
++
++      DeinitDevInfo(psDevInfo);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      eError = SGX2DDeInit(psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: SGX2DDeInit failed"));
++              return eError;
++      }
++#endif
++
++#if defined(SGX_USING_CMD_PROC_LIST)
++      eError = PVRSRVRemoveCmdProcListKM(psDeviceNode->sDevId.ui32DeviceIndex, SGX_COMMAND_COUNT);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: PVRSRVRemoveCmdProcList failed"));
++              return eError;
++      }
++#endif
++      
++      psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
++      for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
++      {
++              switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
++              {
++                      case DEVICE_MEMORY_HEAP_KERNEL:
++                      case DEVICE_MEMORY_HEAP_SHARED:
++                      case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                      {
++                              if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
++                              {
++                                      BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
++                              }
++                              break;
++                      }
++              }
++      }
++
++      
++      eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_TRUE, IMG_FALSE, IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
++              return eError;
++      }
++
++      
++      eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      eError = OSDestroyResource(&psDevInfo->s2DSlaveportResource);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif
++
++      
++      if (psDevInfo->pvRegsBaseKM != IMG_NULL)
++      {
++              OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++                                               psDevInfo->ui32RegSize,
++                                               PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                               IMG_NULL);
++      }
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      if (psDevInfo->s2DSlavePortKM.pvData != IMG_NULL)
++      {
++              OSUnMapPhysToLin(psDevInfo->s2DSlavePortKM.pvData, 
++                                         psDevInfo->s2DSlavePortKM.ui32DataRange,
++                                         PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                         IMG_NULL);
++      }
++#endif 
++
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      
++      if(psDevInfo->psSGXEventObject)
++      {
++              OSEventObjectDestroy(psDevInfo->psSGXEventObject);
++              OSFreeMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                               sizeof(PVRSRV_EVENTOBJECT) , 
++                                               psDevInfo->psSGXEventObject, 0);
++      }
++#endif 
++      
++      
++      OSFreePages(PVRSRV_OS_PAGEABLE_HEAP|PVRSRV_HAP_MULTI_PROCESS,
++                              sizeof(PVRSRV_SGXDEV_INFO),
++                              psDevInfo,
++                              hDevInfoOSMemHandle);
++
++      if (psDeviceMemoryHeap != IMG_NULL)
++      {
++      
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                              sizeof(DEVICE_MEMORY_HEAP_INFO) * psDeviceNode->sDevMemoryInfo.ui32HeapCount, 
++                              psDeviceMemoryHeap, 
++                              0);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++
++
++IMG_VOID HWRecoveryResetSGX (PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                       IMG_UINT32             ui32Component,
++                                                       IMG_UINT32                     ui32CallerID)
++{
++      PVRSRV_ERROR eError;
++
++      PVR_UNREFERENCED_PARAMETER(ui32Component);
++      PVR_UNREFERENCED_PARAMETER(ui32CallerID);
++      
++      
++      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
++      
++      
++      PDUMPSUSPEND();
++
++      
++      ResetPBs(psDevInfo);
++
++      
++      eError = SGXInitialise(psDevInfo, IMG_TRUE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++      }
++
++      
++      PDUMPRESUME();
++}
++
++
++IMG_VOID HWRecoveryResetSGXEDM (PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                                      IMG_UINT32                      ui32Component,
++                                                                      IMG_UINT32                      ui32CallerID)
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      SGX2DHWRecoveryStart(psDevInfo);
++#endif
++      
++
++      eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              
++
++
++              PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGXEDM: Power transition in progress"));
++              return;
++      }
++
++      psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++
++      
++      HWRecoveryResetSGX(psDevInfo, ui32Component, ui32CallerID);
++
++      PVRSRVPowerUnlock(ui32CallerID);
++      
++      
++      SGXScheduleProcessQueues(psDeviceNode);
++      
++      
++      
++      PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      SGX2DHWRecoveryEnd(psDevInfo);
++#endif
++}
++
++#if defined(SUPPORT_HW_RECOVERY)
++IMG_VOID SGXOSTimer(IMG_VOID *pvData)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++      PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++      static IMG_UINT32       ui32EDMTasks = 0;
++      static IMG_UINT32       ui32LockupCounter = 0; 
++      static IMG_UINT32       ui32NumResets = 0;
++      IMG_UINT32              ui32CurrentEDMTasks;
++      IMG_BOOL                bLockup = IMG_FALSE;
++      IMG_BOOL                bPoweredDown;
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      IMG_UINT32              ui322DCompletedBlits = 0;
++      IMG_BOOL                b2DCoreIsBusy;
++#endif
++
++      
++      psDevInfo->ui32TimeStamp++;
++
++      bPoweredDown = (IMG_BOOL)!SGXIsDevicePowered(psDeviceNode);
++
++      
++      
++      if (bPoweredDown)
++      {
++              ui32LockupCounter = 0;
++      }
++      else
++      {
++              
++              ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
++              if (psDevInfo->ui32EDMTaskReg1 != 0)
++              {
++                      ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
++              }
++              if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++                      (psDevInfo->ui32NumResets == ui32NumResets))
++              {
++                      ui32LockupCounter++;
++                      if (ui32LockupCounter == 3)
++                      {
++                              ui32LockupCounter = 0;
++                              PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
++
++                              bLockup = IMG_TRUE;
++                      }
++              }
++              else
++              {
++                      ui32LockupCounter = 0;
++                      ui32EDMTasks = ui32CurrentEDMTasks;
++                      ui32NumResets = psDevInfo->ui32NumResets;
++              }
++      }
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      if (!bPoweredDown)
++      {
++              ui322DCompletedBlits = psDevInfo->ui322DCompletedBlits;
++              psDevInfo->ui322DCompletedBlits = SGX2DCompletedBlits(psDevInfo);
++      }
++
++      if (!bLockup && !bPoweredDown)
++      {
++              b2DCoreIsBusy = SGX2DIsBusy(psDevInfo);
++
++              if (b2DCoreIsBusy && ui322DCompletedBlits == psDevInfo->ui322DCompletedBlits)
++              {
++                      if (psDevInfo->b2DLockupSuspected)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXTimer() detects 2D lockup (%d blits completed)", psDevInfo->ui322DCompletedBlits));
++                              bLockup = IMG_TRUE;
++                              psDevInfo->b2DLockupSuspected = IMG_FALSE;
++                      }
++                      else
++                      {
++                              
++                              psDevInfo->b2DLockupSuspected = IMG_TRUE;
++                      }
++              }
++              else
++              {
++                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
++              }
++      }
++      else
++      {
++                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
++      }
++#endif 
++
++      if (bLockup)
++      {
++              PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++              
++              
++              psSGXHostCtl->ui32HostDetectedLockups ++;
++
++              
++              HWRecoveryResetSGXEDM(psDeviceNode, 0, KERNEL_ID);
++      }
++}
++#endif 
++
++
++#if defined(SYS_USING_INTERRUPTS)
++
++
++IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
++{
++      IMG_BOOL bInterruptProcessed = IMG_FALSE;
++
++      
++      {
++              IMG_UINT32 ui32EventStatus, ui32EventEnable;
++              IMG_UINT32 ui32EventClear = 0;
++              PVRSRV_DEVICE_NODE *psDeviceNode;
++              PVRSRV_SGXDEV_INFO *psDevInfo;
++
++              
++              if(pvData == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));                   
++                      return bInterruptProcessed;
++              }
++
++              psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++              psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++              ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++              ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
++
++              
++
++              gui32EventStatusServicesByISR = ui32EventStatus;
++
++              
++              ui32EventStatus &= ui32EventEnable;
++
++              if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
++              {
++                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++              }
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++              if (ui32EventStatus & EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK)
++              {
++                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK;
++                      SGX2DHandle2DComplete(psDevInfo);
++              }
++#endif
++
++              if (ui32EventClear)
++              {
++                      bInterruptProcessed = IMG_TRUE;
++
++                      
++                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++                      
++                      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++              }
++      }
++
++              return bInterruptProcessed;
++}
++
++
++IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      PVRSRV_DEVICE_NODE      *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++      PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++      
++      if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) &&
++              !(psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR))
++      {
++              HWRecoveryResetSGXEDM(psDeviceNode, 0, ISR_ID);
++      }
++
++      if ((eError == PVRSRV_OK) &&
++              (psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) &&
++              !(psSGXHostCtl->ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST))
++      {
++              
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++              {
++
++                      
++                      PDUMPSUSPEND();
++              
++                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                                               PVRSRV_POWER_STATE_D3,
++                                                                                               ISR_ID, IMG_FALSE);
++                      if (eError == PVRSRV_OK)
++                      {
++                              if ((*(volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags)
++                                      & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++                              {
++                                      
++
++
++                                      psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++                              }
++                      }
++                      else if (eError == PVRSRV_ERROR_RETRY)
++                      {
++                              
++
++                              eError = PVRSRV_OK;
++                      }
++                      
++                      
++                      PDUMPRESUME();
++              }
++#endif 
++      }
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      if (psDevInfo->psSGXEventObject)
++      {
++              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
++              if(psEventObject->hOSEventKM)
++              {
++                      OSEventObjectSignal(psEventObject->hOSEventKM);
++              }
++      }
++
++#endif
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX_MISRHandler error:%lu", eError));
++      }
++}
++#endif 
++
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++      IMG_BOOL bSharedPB = IMG_TRUE;
++
++      
++      psDeviceNode->sDevId.eDeviceType        = DEV_DEVICE_TYPE;
++      psDeviceNode->sDevId.eDeviceClass       = DEV_DEVICE_CLASS;
++
++      psDeviceNode->pfnInitDevice             = DevInitSGXPart1;
++      psDeviceNode->pfnDeInitDevice           = DevDeInitSGX;
++
++      
++
++      psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++      psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++      psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++      psDeviceNode->pfnMMUCreate = MMU_Create;
++      psDeviceNode->pfnMMUDelete = MMU_Delete;
++      psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++      psDeviceNode->pfnMMUFree = MMU_Free;
++      psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++      psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++      psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++      psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++      psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++      psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++#if defined (SYS_USING_INTERRUPTS)
++      
++
++      psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++      psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++#endif
++
++      
++
++      psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++      
++      
++
++      psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++      
++      psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_ADDRESS_SPACE_SIZE;
++
++      
++      psDevMemoryInfo->ui32Flags = 0;
++
++      
++      psDevMemoryInfo->ui32HeapCount = SGX_MAX_HEAP_ID;
++
++      
++      psDevMemoryInfo->ui32SyncHeapID = SGX_SYNCINFO_HEAP_ID;
++
++      
++      psDevMemoryInfo->ui32MappingHeapID = SGX_GENERAL_MAPPING_HEAP_ID;
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       sizeof(DEVICE_MEMORY_HEAP_INFO) * psDevMemoryInfo->ui32HeapCount, 
++                                       (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0) != PVRSRV_OK)    
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++      OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * psDevMemoryInfo->ui32HeapCount);
++      
++      psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++      
++
++
++      
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_GENERAL_HEAP_ID);
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszName = "General";
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszBSName = "General BS";
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_TADATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapSize = SGX_TADATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++#if 0
++                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
++#endif
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszName = "TA Data";
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszBSName = "TA Data BS";
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_KERNEL_CODE_HEAP_ID);
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                      | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++#if 0
++                                                                                                                      | PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                      | PVRSRV_HAP_MULTI_PROCESS;
++#endif
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszName = "Kernel";
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszBSName = "Kernel BS";
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++      
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_VIDEO_CODE_HEAP_ID);
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].sDevVAddrBase.uiAddr = SGX_VIDEO_CODE_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32HeapSize = SGX_VIDEO_CODE_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].pszName = "Video";
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].pszBSName = "Video BS";
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED;
++
++      
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_KERNEL_VIDEO_DATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_KERNEL_VIDEO_DATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32HeapSize = SGX_KERNEL_VIDEO_DATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++#if 0
++                                                                                                                              PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                              PVRSRV_HAP_MULTI_PROCESS;
++#endif
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].pszName = "KernelVideoData";
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].pszBSName = "KernelVideoData BS";
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++      
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_PIXELSHADER_HEAP_ID);
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapSize = SGX_PIXELSHADER_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszName = "PixelShaderUSSE";
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszBSName = "PixelShaderUSSE BS";
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_VERTEXSHADER_HEAP_ID);
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapSize = SGX_VERTEXSHADER_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszName = "VertexShaderUSSE";
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszBSName = "VertexShaderUSSE BS";
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszName = "PDSPixelCodeData";
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszBSName = "PDSPixelCodeData BS";
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszName = "PDSVertexCodeData";
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszBSName = "PDSVertexCodeData BS";
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_SYNCINFO_HEAP_ID);
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszName = "CacheCoherent";
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszBSName = "CacheCoherent BS";
++#if defined(SGX535)
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#else
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif
++
++      
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapID = HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].sDevVAddrBase.uiAddr = SGX_3DPARAMETERS_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszName = "3DParameters";
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszBSName = "3DParameters BS";
++
++
++      if(bSharedPB)
++      {
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++#if 0
++                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
++#endif
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++      }
++      else
++      {
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++      }
++
++      
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_GENERAL_MAPPING_HEAP_ID);
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszName = "GeneralMapping";
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszBSName = "GeneralMapping BS";
++
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      
++
++      
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_2D_HEAP_ID);
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].ui32HeapSize = SGX_2D_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].pszName = "2D";
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].pszBSName = "2D BS";
++      
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif 
++
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                                    hDevCookie,
++                                                              PVR3DIF4_CLIENT_INFO*           psClientInfo)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      
++
++      psDevInfo->ui32ClientRefCount++;
++#ifdef PDUMP
++      if(psDevInfo->ui32ClientRefCount == 1)
++      {
++              psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++      }
++#endif
++      
++
++      psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      psClientInfo->s2DSlavePort = psDevInfo->s2DSlavePortKM;
++#endif
++      psClientInfo->pvRegsBase = psDevInfo->pvRegsBaseKM;
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      if (psDevInfo->psSGXEventObject)
++      {
++              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
++              psClientInfo->hOSEventKM = psEventObject->hOSEventKM;
++      }
++      else
++      {
++              psClientInfo->hOSEventKM = IMG_NULL;
++      }
++#endif
++      
++      
++
++      OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
++
++      
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_MISC_INFO *psMiscInfo)
++{
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      switch(psMiscInfo->eRequest)
++      {
++              default:
++              {
++                      
++                      return PVRSRV_ERROR_INVALID_PARAMS;
++              }
++      }
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,201 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined (PDUMP)
++#include "sgxapi_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++
++#define       CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++      ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
++              (psCCBKick)->offset))
++
++#define       CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, offset) \
++              ((psCCBKick)->offset < (psCCBMemInfo)->ui32AllocSize)
++
++IMG_EXPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, PVR3DIF4_CCB_KICK *psCCBKick)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
++      IMG_UINT32 *pui32DstReadOpsPendingVal;
++      IMG_UINT32 *pui32DstWriteOpsPendingVal;
++      IMG_UINT32 i;
++
++
++#if defined(NO_HARDWARE)
++      pui32DstReadOpsPendingVal = IMG_NULL;
++      pui32DstWriteOpsPendingVal = IMG_NULL;
++#endif
++
++      if (psCCBKick->hDstKernelSyncInfo != IMG_NULL)
++      {
++              
++              if (!CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset) || !CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: ui32DstReadOpsPendingOffset or ui32DstWriteOpsPendingOffset out of range"));
++              }
++              else
++              {
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
++                              pui32DstReadOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset);
++                              pui32DstWriteOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset);
++
++                              *pui32DstReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++                              *pui32DstWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              }
++
++      }
++
++      if (psCCBKick->ui32NumTAStatusVals != 0)
++      {
++              
++              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++              {
++                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
++                      {
++                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++
++                              *pui32TAStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui32TAStatusValueOffset[%d] out of range", i));
++                      }
++              }
++      }
++
++      if (psCCBKick->ui32Num3DStatusVals != 0)
++      {
++              
++              for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++              {
++                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
++                      {
++                              IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                              *pui323DStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui323DStatusValueOffset[%d] out of range", i));
++                      }
++              }
++      }
++
++      eError = SGXScheduleCCBCommandKM(hDevHandle, psCCBKick->eCommand, &psCCBKick->sCommand, KERNEL_ID);
++      if (eError == PVRSRV_ERROR_RETRY)
++      {
++              
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
++              psSyncInfo->psSyncData->ui32WriteOpsPending--;
++              return eError;
++      }
++      else if (PVRSRV_OK != eError)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed.")); 
++              return eError;
++      }
++
++
++#if defined(NO_HARDWARE)
++      if (psCCBKick->ui32NumTAStatusVals != 0)
++      {
++              
++              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++              {
++                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
++                      {
++                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++
++                              psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui32TAStatusValue;
++                      }
++              }
++      }
++      
++      if (psCCBKick->bTerminate)
++      {
++              if (psCCBKick->hUpdateDstKernelSyncInfo != IMG_NULL)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hUpdateDstKernelSyncInfo;
++                      psSyncInfo->psSyncData->ui32WriteOpsComplete = ((pui32DstWriteOpsPendingVal != IMG_NULL) ? *pui32DstWriteOpsPendingVal : psCCBKick->ui32WriteOpsPendingVal) + 1;
++              }
++
++              if (psCCBKick->ui32Num3DStatusVals != 0)
++              {
++                      
++                      for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++                      {
++                              if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
++                              {
++                                      IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
++                                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                                      psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui323DStatusValue;
++                              }
++                      }
++              }
++      }
++#endif
++
++      return eError;
++}
++
++
++IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_SGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL             *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++      IMG_UINT32                              ui32PowManFlags;
++      PVRSRV_SGX_COMMAND              sCommand = {0};
++
++      ui32PowManFlags = psHostCtl->ui32PowManFlags;
++      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++      {
++              
++              return;
++      }
++
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
++      eError = SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, ISR_ID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueues failed to schedule CCB command: %lu", eError));
++      }
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,58 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined(TRANSFER_QUEUE)
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "regpaths.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
++                                                                                      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr)
++                                          
++{
++      PVRSRV_SGX_COMMAND sCommand = {0};
++
++    sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
++    sCommand.ui32Data[1] = sHWRenderContextDevVAddr.uiAddr;
++      
++      return SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);  
++}
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,706 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#ifdef __linux__
++#include <linux/tty.h>                        
++#else
++#include <stdio.h>
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGXAcquireKernelCCBSlot)
++#endif
++static INLINE PVRSRV_SGX_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0;
++
++      
++      do
++      {
++              if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
++              {
++                      return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++              }
++              
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      
++      return IMG_NULL;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                       *psDeviceNode,
++                                                                       PVRSRV_SGX_COMMAND_TYPE        eCommandType,
++                                                                       PVRSRV_SGX_COMMAND                     *psCommandData,
++                                                                       IMG_UINT32                                     ui32CallerID)
++{
++      PVRSRV_SGX_CCB_INFO *psKernelCCB;
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      PVRSRV_SGX_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++      IMG_VOID *pvDumpCommand;
++#endif
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++      psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (ui32CallerID == ISR_ID)
++      {
++              PDUMPSUSPEND();
++      }
++
++      
++      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                               PVRSRV_POWER_STATE_D0,
++                                                                               ui32CallerID,
++                                                                               IMG_TRUE);
++                                                                               
++      if (ui32CallerID == ISR_ID)
++      {
++              PDUMPRESUME();
++      }
++#else
++      
++      eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++#endif 
++       
++      if (eError == PVRSRV_OK)
++      {
++              psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
++      }
++      else
++      {
++              if (eError == PVRSRV_ERROR_RETRY)
++              {
++                      if (ui32CallerID == ISR_ID)
++                      {
++                              
++
++
++                              psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++                              eError = PVRSRV_OK;
++                      }
++                      else
++                      {
++                              
++
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
++                                       "ui32CallerID:%ld eError:%lu", ui32CallerID, eError));
++              }
++
++              return eError;
++      }
++      
++      psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++      
++      if(!psSGXCommand)
++      {
++              eError = PVRSRV_ERROR_TIMEOUT;
++              goto Exit;
++      }
++      
++      
++      psCommandData->ui32Data[2] = psDevInfo->ui32CacheControl;
++      
++#if defined(PDUMP)
++      
++      psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++      
++      psDevInfo->ui32CacheControl = 0;
++      
++      
++      *psSGXCommand = *psCommandData;
++      
++      switch(eCommandType)
++      {
++              case PVRSRV_SGX_COMMAND_EDM_KICK:
++                      psSGXCommand->ui32ServiceAddress = psDevInfo->ui32TAKickAddress;
++                      break;
++              case PVRSRV_SGX_COMMAND_VIDEO_KICK:
++                      psSGXCommand->ui32ServiceAddress = psDevInfo->ui32VideoHandlerAddress;
++                      break;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM: Unknown command type: %d", eCommandType)) ;
++                      eError = PVRSRV_ERROR_GENERIC;
++                      goto Exit;
++      }
++
++#if defined(PDUMP)
++      if (ui32CallerID != ISR_ID)
++      {
++              
++              PDUMPCOMMENTWITHFLAGS(0, "Poll for space in the Kernel CCB\r\n");
++              PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, 0xff, PDUMP_POLL_OPERATOR_NOTEQUAL, IMG_FALSE, IMG_FALSE, MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++              PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB command\r\n");
++              pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(PVRSRV_SGX_COMMAND)));
++
++              PDUMPMEM(pvDumpCommand,
++                                      psKernelCCB->psCCBMemInfo,
++                                      psKernelCCB->ui32CCBDumpWOff * sizeof(PVRSRV_SGX_COMMAND),
++                                      sizeof(PVRSRV_SGX_COMMAND),
++                                      0,
++                                      MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++              
++              PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++                                      psKernelCCB->psCCBMemInfo,
++                                      psKernelCCB->ui32CCBDumpWOff * sizeof(PVRSRV_SGX_COMMAND) +
++                                      offsetof(PVRSRV_SGX_COMMAND, ui32Data[2]),
++                                      sizeof(IMG_UINT32),
++                                      0,
++                                      MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++              if(PDumpIsCaptureFrameKM())
++              {
++                      
++                      psDevInfo->sPDContext.ui32CacheControl = 0;
++              }
++      }
++#endif
++
++      
++
++      *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
++      
++#if defined(PDUMP)
++      if (ui32CallerID != ISR_ID)
++      {
++              if(PDumpIsCaptureFrameKM())
++              {
++                      psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++              }
++
++              PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB write offset\r\n");
++              PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++                               psKernelCCB->psCCBCtlMemInfo,
++                               offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++                               sizeof(IMG_UINT32),
++                               0,
++                               MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++              PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB event kicker\r\n");
++              PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++                               psDevInfo->psKernelCCBEventKickerMemInfo,
++                               0,
++                               sizeof(IMG_UINT32),
++                               0,
++                               MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++              PDUMPCOMMENTWITHFLAGS(0, "Event kick\r\n");
++              PDUMPREGWITHFLAGS(EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK, 0);
++      }
++#endif
++
++      *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK);
++
++#if defined(NO_HARDWARE)
++      
++      *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++      PVRSRVPowerUnlock(ui32CallerID);
++
++      return eError;
++}
++
++
++#if 0 
++PVRSRV_ERROR CreateCCB(PVRSRV_SGXDEV_INFO     *psSGXDevInfo,
++                                         IMG_UINT32                   ui32CCBSize,
++                                         IMG_UINT32                   ui32AllocGran,
++                                         IMG_UINT32                   ui32OverrunSize,
++                                         IMG_HANDLE                   hDevMemHeap,
++                                         PVRSRV_SGX_CCB               **ppsCCB)
++{
++      PVRSRV_SGX_CCB  *psCCB;
++
++      PVR_UNREFERENCED_PARAMETER(psSGXDevInfo);
++
++      psCCB = IMG_NULL;
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                 sizeof(PVRSRV_SGX_CCB),
++                                 (IMG_VOID **)&psCCB,
++                                 IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: psCCB alloc failed"));
++
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psCCB->psCCBMemInfo = IMG_NULL;
++      psCCB->psCCBCtlMemInfo = IMG_NULL;
++      psCCB->pui32CCBLinAddr = IMG_NULL;
++      psCCB->pui32WriteOffset = IMG_NULL;
++      psCCB->pui32ReadOffset = IMG_NULL;
++
++      #ifdef PDUMP
++      psCCB->ui32CCBDumpWOff = 0;
++      #endif
++
++      
++      if ( ui32CCBSize < 0x1000 )
++      {
++              IMG_UINT32      i, ui32PowOfTwo;
++
++              ui32PowOfTwo = 0x1000;
++
++              for (i = 12; i > 0; i--)
++              {
++                      if (ui32CCBSize & ui32PowOfTwo)
++                      {
++                              break;
++                      }
++      
++                      ui32PowOfTwo >>= 1;
++              }
++      
++              if (ui32CCBSize & (ui32PowOfTwo - 1))
++              {
++                      ui32PowOfTwo <<= 1;
++              }
++
++              ui32AllocGran = ui32PowOfTwo;
++      }
++      else
++      {
++              ui32AllocGran = 0x1000;
++      }
++
++      
++      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
++                                                         hDevMemHeap,
++                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
++                                                         ui32CCBSize + ui32OverrunSize,
++                                                         ui32AllocGran,
++                                                         &psCCB->psCCBMemInfo) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBMemInfo alloc failed"));
++
++              goto ErrorExit;
++      }
++
++      psCCB->pui32CCBLinAddr = psCCB->psCCBMemInfo->pvLinAddrKM;
++      psCCB->sCCBDevAddr = psCCB->psCCBMemInfo->sDevVAddr;
++      psCCB->ui32Size = ui32CCBSize;
++      psCCB->ui32AllocGran = ui32AllocGran;
++
++      
++      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
++                                                         hDevMemHeap,
++                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
++                                                         sizeof(PVRSRV_SGX_CCB_CTL),
++                                                         32,
++                                                         &psCCB->psCCBCtlMemInfo) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBCtlMemInfo alloc failed"));
++
++              goto ErrorExit;
++      }
++
++      
++      psCCB->pui32WriteOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32WriteOffset;
++      psCCB->pui32ReadOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32ReadOffset;
++
++      
++      *psCCB->pui32WriteOffset = 0;
++      *psCCB->pui32ReadOffset = 0;
++
++      
++      *ppsCCB = psCCB;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      
++      if (psCCB->psCCBMemInfo)
++      {
++              PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
++
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++;
++}
++
++IMG_VOID DestroyCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32PFlags)
++{
++      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
++
++      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBCtlMemInfo, IMG_FALSE);
++
++      if (!(ui32PFlags & PFLAGS_POWERDOWN))
++      {
++              if (psCCB)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
++              }
++      }
++}
++#endif 
++#if defined (PDUMP)
++IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray,
++                                               IMG_UINT32                                             ui32BufferArrayLength,
++                                               IMG_BOOL                                               bDumpPolls)
++{
++      IMG_UINT32      i;
++
++      for (i=0; i<ui32BufferArrayLength; i++)
++      {
++              PPVR3DIF4_KICKTA_DUMP_BUFFER    psBuffer;
++              PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++              IMG_CHAR * pszName;
++              IMG_HANDLE hUniqueTag;
++              
++              psBuffer = &psBufferArray[i];
++              pszName = psBuffer->pszName;
++              if (!pszName)
++              {
++                      pszName = "Nameless buffer";
++              }
++
++              hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
++              psSyncInfo = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo;
++
++              if (psBuffer->ui32Start <= psBuffer->ui32End)
++              {
++                      if (bDumpPolls)
++                      {
++                              PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++                              PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM,
++                                               offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                               psBuffer->ui32Start,
++                                               psBuffer->ui32SpaceUsed,
++                                               psBuffer->ui32BufferSize,
++                                               0,
++                                               MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++                      }
++
++                      PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++                      PDUMPMEM(NULL,
++                                       (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++                                       psBuffer->ui32Start,
++                                       psBuffer->ui32End - psBuffer->ui32Start,
++                                       0,
++                                       hUniqueTag);
++              }
++              else
++              {
++                      
++
++                      if (bDumpPolls)
++                      {
++                              PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++                              PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM,
++                                               offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                               psBuffer->ui32Start,
++                                               psBuffer->ui32BackEndLength,
++                                               psBuffer->ui32BufferSize,
++                                               0,
++                                               MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++                      }
++                      PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++                      PDUMPMEM(NULL,
++                                       (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++                                       psBuffer->ui32Start,
++                                       psBuffer->ui32BackEndLength,
++                                       0,
++                                       hUniqueTag);
++
++                      if (bDumpPolls)
++                      {
++                              PDUMPMEMPOL(psSyncInfo->psSyncDataMemInfoKM,
++                                                      offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                                      0,
++                                                      0xFFFFFFFF,
++                                                      PDUMP_POLL_OPERATOR_NOTEQUAL,
++                                                      IMG_FALSE,
++                                                      IMG_FALSE,
++                                                      MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++                              PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++                              PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM,
++                                               offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                               0,
++                                               psBuffer->ui32End,
++                                               psBuffer->ui32BufferSize,
++                                               0,
++                                               MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++                      }
++                      PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++                      PDUMPMEM(NULL,
++                                       (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++                                       0,
++                                       psBuffer->ui32End,
++                                       0,
++                                       hUniqueTag);
++              }
++      }
++}
++#endif 
++
++
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, 
++                                                                      PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++      psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
++      psSGXInternalDevInfo->ui32RegFlags = (IMG_BOOL)psDevInfo->ui32RegFlags;
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      if (psDevInfo->psSGXEventObject)
++      {
++              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
++              psSGXInternalDevInfo->hOSEvent = psEventObject->hOSEventKM;
++      }
++      else
++      {
++              psSGXInternalDevInfo->hOSEvent = IMG_NULL;
++      }
++#endif
++
++      
++      psSGXInternalDevInfo->hCtlKernelMemInfoHandle =
++              (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++      return PVRSRV_OK;
++}
++
++static IMG_VOID SGXCleanupRequest(PVRSRV_SGXDEV_INFO  *psSGXDevInfo,
++                                                                IMG_DEV_VIRTADDR              *psHWDataDevVAddr,
++                                                                IMG_BOOL                              bContextCleanup)
++{
++      IMG_UINT32                              ui32ResManRequestFlag = 0;
++      PVRSRV_KERNEL_MEM_INFO  *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++      PVRSRV_SGX_HOST_CTL             *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psSGXHostCtlMemInfo->pvLinAddrKM;
++      IMG_UINT32                              ui32PowManFlags;
++#if defined (PDUMP)
++      IMG_HANDLE hUniqueTag = MAKEUNIQUETAG(psSGXHostCtlMemInfo);
++#endif
++
++      ui32PowManFlags = psSGXHostCtl->ui32PowManFlags;
++      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++      {
++              
++      }
++      else
++      {
++              
++              if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PDCACHE)
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
++                      psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PDCACHE;
++              }
++              if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PTCACHE)
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
++                      psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PTCACHE;
++              }
++              if (bContextCleanup)
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST;
++              }
++              else
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST;
++              }
++              
++              
++              psSGXHostCtl->sResManCleanupData.uiAddr = psHWDataDevVAddr->uiAddr;
++              psSGXHostCtl->ui32ResManFlags |= ui32ResManRequestFlag;
++
++              
++              PDUMPCOMMENT("TA/3D CCB Control - Request clean-up event on uKernel...");
++              PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, sResManCleanupData.uiAddr), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++              PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++
++              
++              #if !defined(NO_HARDWARE)
++              if(PollForValueKM ((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32ResManFlags),
++                                      PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                      PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                      MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                      WAIT_TRY_COUNT) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up render context failed"));
++              }
++              #endif
++
++              #ifdef PDUMP
++              
++              PDUMPCOMMENT("TA/3D CCB Control - Wait for clean-up request to complete...");
++              PDUMPMEMPOL(psSGXHostCtlMemInfo,
++                                         offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags),
++                                         PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                         PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                         PDUMP_POLL_OPERATOR_EQUAL,
++                                         IMG_FALSE, IMG_FALSE,
++                                         hUniqueTag);
++              #endif
++
++              psSGXHostCtl->ui32ResManFlags &= ~(ui32ResManRequestFlag);
++              psSGXHostCtl->ui32ResManFlags &= ~(PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE);
++              PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++      }
++}
++
++typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_DEV_VIRTADDR sHWDataDevVAddr;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResItem;
++} SGX_HW_RENDER_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      SGXCleanupRequest(psCleanup->psDevInfo,
++                                                      &psCleanup->sHWDataDevVAddr, IMG_TRUE);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++                        psCleanup,
++                        psCleanup->hBlockAlloc);
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr)
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hBlockAlloc;
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++      PRESMAN_ITEM psResItem;
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++                                              (IMG_VOID **)&psCleanup,
++                                              &hBlockAlloc);
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
++              return IMG_NULL;
++      }
++
++      psCleanup->hBlockAlloc = hBlockAlloc;
++      psCleanup->psDevInfo = psSGXDevInfo;
++      psCleanup->sHWDataDevVAddr = *psHWRenderContextDevVAddr;
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_HW_RENDER_CONTEXT,
++                                                                (IMG_VOID *)psCleanup,
++                                                                0,
++                                                                &SGXCleanupHWRenderContextCallback,
++                                                                0);
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++                                psCleanup,
++                                psCleanup->hBlockAlloc);
++
++              return IMG_NULL;
++      }
++
++      psCleanup->psResItem = psResItem;
++
++      return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++      PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++
++      SGXCleanupRequest(psDevInfo, &sHWRTDataSetDevVAddr, IMG_FALSE);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++{
++      PVRSRV_ERROR eError;
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++      PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++      eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
++
++      return eError;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,93 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
++      (((ROff - WOff) + (CCBSize - 1)) & (CCBSize - 1))
++
++#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
++      Off = ((Off + PacketSize) & (CCBSize - 1))
++
++static INLINE IMG_UINT32 SGXCalcContextCCBParamSize(IMG_UINT32 ui32ParamSize, IMG_UINT32 ui32AllocGran)
++{
++      return (ui32ParamSize + (ui32AllocGran - 1)) & ~(ui32AllocGran - 1);
++}
++
++static INLINE IMG_PVOID SGXAcquireCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32CmdSize)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0;
++
++      do
++      {
++              if(GET_CCB_SPACE(*psCCB->pui32WriteOffset, *psCCB->pui32ReadOffset, psCCB->ui32Size) > ui32CmdSize)
++              {
++                      return (IMG_PVOID)((IMG_UINT32)psCCB->psCCBMemInfo->pvLinAddrKM + *psCCB->pui32WriteOffset);
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      
++      return IMG_NULL;
++}
++
++PVRSRV_ERROR CreateCCB(PVRSRV_SGXDEV_INFO     *psSGXDevInfo,
++                                         IMG_UINT32                   ui32CCBSize,
++                                         IMG_UINT32                   ui32AllocGran,
++                                         IMG_UINT32                   ui32OverrunSize,
++                                         IMG_HANDLE                   hDevMemHeap,
++                                         PVRSRV_SGX_CCB               **ppsCCB);
++IMG_VOID DestroyCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32PFlags);
++
++#if defined (PDUMP)
++IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray,
++                                               IMG_UINT32                                             ui32BufferArrayLength,
++                                               IMG_BOOL                                               bDumpPolls);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                       *psDeviceNode,
++                                                                       PVRSRV_SGX_COMMAND_TYPE        eCommandType,
++                                                                       PVRSRV_SGX_COMMAND                     *psCommandData,
++                                                                       IMG_UINT32                                     ui32CallerID);
++
++IMG_IMPORT
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,50 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE     0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE    0x1000
++
++
++typedef struct _ENV_DATA_TAG
++{
++      IMG_VOID                *pvBridgeData;
++      struct pm_dev           *psPowerDevice;
++      IMG_BOOL                bLISRInstalled;
++      IMG_BOOL                bMISRInstalled;
++      IMG_UINT32              ui32IRQ;
++      IMG_VOID                *pvISRCookie;
++      struct tasklet_struct   sMISRTasklet;
++      struct pci_dev          *psPCIDev;
++      IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} ENV_DATA;
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,841 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <linux/sched.h>
++#include <asm/current.h>
++#endif
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "mutex.h"
++
++
++
++static PKV_OFFSET_STRUCT FindOffsetStructFromLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++static IMG_UINT32 GetFirstFreePageAlignedNumber(void);
++static PKV_OFFSET_STRUCT FindOffsetStructByKVIndexAddress(IMG_VOID *pvVirtAddress,
++                                                  IMG_UINT32 ui32ByteSize);
++static void DeterminUsersSizeAndByteOffset(IMG_VOID *pvKVIndexAddress,
++                                            LinuxMemArea *psLinuxMemArea,
++                                            IMG_UINT32 *pui32RealByteSize,
++                                            IMG_UINT32 *pui32ByteOffset);
++static PKV_OFFSET_STRUCT FindOffsetStructByMMapOffset(IMG_UINT32 ui32Offset);
++static IMG_BOOL DoMapToUser(LinuxMemArea *psLinuxMemArea,
++                            struct vm_area_struct* ps_vma,
++                            IMG_UINT32 ui32ByteOffset,
++                            IMG_UINT32 ui32Size);
++static IMG_UINT32 MapPageToVMA(struct vm_area_struct *psVma,
++                               unsigned long ulFromCpuVAddr,
++                               struct page *pPage);
++static IMG_UINT32 MapIORangeToVMA(struct vm_area_struct *psVma,
++                                  unsigned long ulFromCpuVAddr,
++                                  unsigned long ulCpuPAddr,
++                                  unsigned long ulBytes);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static off_t PrintMMapRegistrations(char * buffer, size_t size, off_t off);
++#endif
++
++
++static void MMapVOpen(struct vm_area_struct* ps_vma);
++static void MMapVClose(struct vm_area_struct* ps_vma);
++
++static struct vm_operations_struct MMapIOOps =
++{
++      open:           MMapVOpen,
++      close:          MMapVClose
++};
++
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++static PKV_OFFSET_STRUCT g_psKVOffsetTable = 0;
++static LinuxKMemCache *g_psMemmapCache = 0;
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static IMG_UINT32 g_ui32RegisteredAreas = 0;
++static IMG_UINT32 g_ui32TotalByteSize = 0;
++#endif
++
++
++
++IMG_VOID
++PVRMMapInit(IMG_VOID)
++{
++    g_psKVOffsetTable = 0;
++
++    g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
++    if (g_psMemmapCache)
++    {
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++        CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++    }
++}
++
++
++IMG_VOID
++PVRMMapCleanup(void)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++
++    if(!g_psMemmapCache)
++        return;
++    
++    if(g_psKVOffsetTable)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: BUG! g_psMemmapCache isn't empty!",
++                __FUNCTION__));
++        
++        for(psOffsetStruct=g_psKVOffsetTable; psOffsetStruct; psOffsetStruct = psOffsetStruct->psNext)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Un-registering mmapable area: psLinuxMemArea=0x%p, CpuPAddr=0x%08lx\n",
++                    __FUNCTION__,
++                    psOffsetStruct->psLinuxMemArea,
++                    LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0).uiAddr
++                    ));
++                      PVRMMapRemoveRegisteredArea(psOffsetStruct->psLinuxMemArea);
++        }
++    }
++    
++    RemoveProcEntry("mmap");
++    KMemCacheDestroyWrapper(g_psMemmapCache);
++    g_psMemmapCache = NULL;
++    PVR_DPF((PVR_DBG_MESSAGE,"PVRMMapCleanup: KVOffsetTable deallocated"));
++}
++
++
++PVRSRV_ERROR
++PVRMMapRegisterArea(const IMG_CHAR *pszName,
++                    LinuxMemArea *psLinuxMemArea,
++                    IMG_UINT32 ui32AllocFlags)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s(%s, psLinuxMemArea=%p, ui32AllocFlags=0x%8lx)",
++             __FUNCTION__, pszName, psLinuxMemArea, ui32AllocFlags));
++
++    
++    psOffsetStruct = FindOffsetStructFromLinuxMemArea(psLinuxMemArea);
++    if(psOffsetStruct)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "PVRMMapRegisterArea: psLinuxMemArea=%p is already registered",
++                psOffsetStruct->psLinuxMemArea));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++    if(!psOffsetStruct)
++    {
++        PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++    
++    
++    psOffsetStruct->ui32MMapOffset = GetFirstFreePageAlignedNumber();
++    psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++    
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        psOffsetStruct->ui32AllocFlags = ui32AllocFlags;
++    }
++    else
++    {
++        PKV_OFFSET_STRUCT psParentOffsetStruct;
++        psParentOffsetStruct = 
++            FindOffsetStructFromLinuxMemArea(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++        PVR_ASSERT(psParentOffsetStruct);
++        psOffsetStruct->ui32AllocFlags = psParentOffsetStruct->ui32AllocFlags;
++    }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    
++    psOffsetStruct->pszName                                   = pszName;
++    psOffsetStruct->pid                                               = current->pid;
++    psOffsetStruct->ui16Mapped                                = 0;
++    psOffsetStruct->ui16Faults                                = 0;
++
++    g_ui32RegisteredAreas++;
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        g_ui32TotalByteSize+=psLinuxMemArea->ui32ByteSize;
++    }
++#endif
++    
++      
++    psOffsetStruct->psNext                                    = g_psKVOffsetTable;
++    
++    g_psKVOffsetTable                         = psOffsetStruct;
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
++{
++    PKV_OFFSET_STRUCT *ppsOffsetStruct, psOffsetStruct;
++    
++    for(ppsOffsetStruct=&g_psKVOffsetTable;
++        (psOffsetStruct = *ppsOffsetStruct);
++        ppsOffsetStruct=&(*ppsOffsetStruct)->psNext)
++    {
++        if(psOffsetStruct->psLinuxMemArea == psLinuxMemArea)
++        {
++            break;
++        }
++    }
++
++    if(!psOffsetStruct)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: Registration for psLinuxMemArea = 0x%p not found",
++                __FUNCTION__,
++                psLinuxMemArea));
++        return PVRSRV_ERROR_BAD_MAPPING;
++    }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    
++    if(psOffsetStruct->ui16Mapped)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: Unregistering still-mapped area! (psLinuxMemArea=0x%p)\n",
++                __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++        return PVRSRV_ERROR_BAD_MAPPING;
++    }
++
++    g_ui32RegisteredAreas--;
++
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        g_ui32TotalByteSize -= psOffsetStruct->psLinuxMemArea->ui32ByteSize;
++    }
++#endif
++    
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++             "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++             psOffsetStruct->psLinuxMemArea,
++             LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++    
++    *ppsOffsetStruct = psOffsetStruct->psNext;
++ 
++    KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++
++    return PVRSRV_OK;
++}
++
++
++
++static PKV_OFFSET_STRUCT
++FindOffsetStructFromLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct = NULL;
++    
++    for(psOffsetStruct=g_psKVOffsetTable; psOffsetStruct; psOffsetStruct=psOffsetStruct->psNext)
++    {
++        if(psOffsetStruct->psLinuxMemArea == psLinuxMemArea)
++        {
++            return psOffsetStruct;
++        }
++    }
++    return NULL;
++}
++
++
++
++static IMG_UINT32
++GetFirstFreePageAlignedNumber(void)
++{
++    PKV_OFFSET_STRUCT psCurrentRec;
++    IMG_UINT32 ui32CurrentPageOffset;
++    
++    if(!g_psKVOffsetTable)
++    {
++        return 0;
++    }
++
++    psCurrentRec = g_psKVOffsetTable;
++    ui32CurrentPageOffset = (g_psKVOffsetTable->ui32MMapOffset);
++
++    while(psCurrentRec)
++    {
++        if(ui32CurrentPageOffset != (psCurrentRec->ui32MMapOffset))
++        {
++            return ui32CurrentPageOffset;
++        }
++        psCurrentRec = psCurrentRec->psNext;
++        ui32CurrentPageOffset+=PAGE_SIZE;
++    }
++    
++    return g_psKVOffsetTable->ui32MMapOffset + PAGE_SIZE;
++}
++
++
++
++PVRSRV_ERROR
++PVRMMapKVIndexAddressToMMapData(IMG_VOID *pvKVIndexAddress,
++                                IMG_UINT32 ui32Size,
++                                IMG_UINT32 *pui32MMapOffset,
++                                IMG_UINT32 *pui32ByteOffset,
++                                IMG_UINT32 *pui32RealByteSize)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    
++    psOffsetStruct = FindOffsetStructByKVIndexAddress(pvKVIndexAddress, ui32Size);
++    if (!psOffsetStruct)
++    {
++        return PVRSRV_ERROR_BAD_MAPPING;
++    }
++
++    *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++
++    DeterminUsersSizeAndByteOffset(pvKVIndexAddress,
++                                   psOffsetStruct->psLinuxMemArea,
++                                   pui32RealByteSize,
++                                   pui32ByteOffset);
++
++    return PVRSRV_OK;
++}
++
++
++static PKV_OFFSET_STRUCT
++FindOffsetStructByKVIndexAddress(IMG_VOID *pvKVIndexAddress,
++                                 IMG_UINT32 ui32ByteSize)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    IMG_UINT8 *pui8CpuVAddr;
++    IMG_UINT8 *pui8IndexCpuVAddr = (IMG_UINT8 *)pvKVIndexAddress;
++
++    for(psOffsetStruct=g_psKVOffsetTable; psOffsetStruct; psOffsetStruct=psOffsetStruct->psNext)
++    {
++        LinuxMemArea *psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
++        
++              switch(psLinuxMemArea->eAreaType)
++              {
++                      case LINUX_MEM_AREA_IOREMAP:
++                              pui8CpuVAddr = psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++                              break;
++                      case LINUX_MEM_AREA_VMALLOC:
++                              pui8CpuVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++                              break;
++                      case LINUX_MEM_AREA_EXTERNAL_KV:
++                              pui8CpuVAddr = psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++                              break;
++                      default:
++                              pui8CpuVAddr = IMG_NULL;
++                              break;
++              }
++        
++        
++        if(pui8CpuVAddr)
++        {
++            if(pui8IndexCpuVAddr >= pui8CpuVAddr
++               && (pui8IndexCpuVAddr + ui32ByteSize) <= (pui8CpuVAddr + psLinuxMemArea->ui32ByteSize))
++            {
++                return psOffsetStruct;
++            }
++            else
++            {
++                pui8CpuVAddr = NULL;
++            }
++        }
++        
++        if(pvKVIndexAddress == psOffsetStruct->psLinuxMemArea)
++        {
++            if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++            {
++                PVR_ASSERT(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea->eAreaType
++                              != LINUX_MEM_AREA_SUB_ALLOC);
++            }
++            return psOffsetStruct;
++        }
++    }
++    printk(KERN_ERR "%s: Failed to find offset struct (KVAddress=%p)\n", __FUNCTION__, pvKVIndexAddress);
++    return NULL;
++}
++
++
++static void
++DeterminUsersSizeAndByteOffset(IMG_VOID *pvKVIndexAddress,
++                               LinuxMemArea *psLinuxMemArea,
++                               IMG_UINT32 *pui32RealByteSize,
++                               IMG_UINT32 *pui32ByteOffset)
++{
++    IMG_UINT8 *pui8StartVAddr = NULL;
++    IMG_UINT8 *pui8IndexCpuVAddr = (IMG_UINT8 *)pvKVIndexAddress;
++    IMG_UINT32 ui32PageAlignmentOffset=0;
++    IMG_CPU_PHYADDR CpuPAddr;
++    
++    CpuPAddr=LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++    ui32PageAlignmentOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++    
++    if(pvKVIndexAddress != psLinuxMemArea &&
++       (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP
++       || psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC))
++    {
++        pui8StartVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++        *pui32ByteOffset = (pui8IndexCpuVAddr - pui8StartVAddr) + ui32PageAlignmentOffset;
++    }
++    else
++    {
++        *pui32ByteOffset = ui32PageAlignmentOffset;
++    }
++
++    *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++
++int
++PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++      unsigned long ulBytes;
++      PKV_OFFSET_STRUCT psCurrentRec = NULL;
++    int iRetVal=0;
++
++    LinuxLockMutex(&gPVRSRVLock);
++    
++      ulBytes = ps_vma->vm_end - ps_vma->vm_start;
++    
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: Recieved mmap(2) request with a ui32MMapOffset=0x%08lx,"
++                              " and ui32ByteSize=%ld(0x%08lx)\n",
++            __FUNCTION__,
++            (ps_vma->vm_pgoff<<PAGE_SHIFT),
++            ulBytes, ulBytes));
++   
++      
++    if(
++       (ps_vma->vm_flags & VM_WRITE) &&
++       !(ps_vma->vm_flags & VM_SHARED)
++      )
++    {
++        PVR_DPF((PVR_DBG_ERROR,"PVRMMap: Error - Cannot mmap non-shareable writable areas."));
++        iRetVal = -EINVAL;
++        goto unlock_and_return;
++    }
++   
++    psCurrentRec=FindOffsetStructByMMapOffset((ps_vma->vm_pgoff<<PAGE_SHIFT));
++    if (!psCurrentRec)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "PVRMMap: Error - Attempted to mmap unregistered area at vm_pgoff=%ld",
++                 ps_vma->vm_pgoff));
++        iRetVal = -EINVAL;
++        goto unlock_and_return;
++    }
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: > psCurrentRec->psLinuxMemArea=%p\n",
++             __FUNCTION__, psCurrentRec->psLinuxMemArea));
++    
++    ps_vma->vm_flags |= VM_RESERVED;
++    ps_vma->vm_flags |= VM_IO;
++    
++    ps_vma->vm_flags |= VM_DONTEXPAND;
++    
++    ps_vma->vm_private_data = (void *)psCurrentRec;
++    
++    
++    
++    switch(psCurrentRec->ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++    {
++        case PVRSRV_HAP_CACHED:
++            
++            break;
++        case PVRSRV_HAP_WRITECOMBINE:
++#if defined(__arm__) || defined(__sh__)
++            ps_vma->vm_page_prot = pgprot_writecombine(ps_vma->vm_page_prot);
++#else
++#if defined(__i386__)
++            ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++            
++            if(psCurrentRec->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP
++               || psCurrentRec->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO)
++            {
++                ps_vma->vm_page_prot = __pgprot(pgprot_val(ps_vma->vm_page_prot) &= ~_PAGE_PWT);
++            }
++#endif
++
++#else
++            ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
++    #error  Unsupported architecture!
++#endif
++#endif
++            break;
++        case PVRSRV_HAP_UNCACHED:
++            ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
++    }
++    
++    
++    
++    ps_vma->vm_ops = &MMapIOOps;
++    
++    if(!DoMapToUser(psCurrentRec->psLinuxMemArea, ps_vma, 0, ulBytes))
++    {
++        iRetVal = -EAGAIN;
++        goto unlock_and_return;
++    }
++    
++    
++    MMapVOpen(ps_vma);
++    
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++             __FUNCTION__, ps_vma->vm_pgoff));
++    
++unlock_and_return:
++    
++    LinuxUnLockMutex(&gPVRSRVLock);
++    
++    return iRetVal;
++}
++
++
++static PKV_OFFSET_STRUCT
++FindOffsetStructByMMapOffset(IMG_UINT32 ui32MMapOffset)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    
++    for(psOffsetStruct = g_psKVOffsetTable; psOffsetStruct; psOffsetStruct = psOffsetStruct->psNext)
++    {
++        if(psOffsetStruct->ui32MMapOffset == ui32MMapOffset)
++        {
++            return psOffsetStruct;
++        }
++    }
++    return NULL;
++}
++
++
++static IMG_BOOL
++DoMapToUser(LinuxMemArea *psLinuxMemArea,
++            struct vm_area_struct* ps_vma,
++            IMG_UINT32 ui32ByteOffset,
++            IMG_UINT32 ui32ByteSize)
++{
++    IMG_INT32 ui32Status=0;
++    LINUX_MEM_AREA_TYPE eAreaType = psLinuxMemArea->eAreaType;
++
++    PVR_ASSERT((ui32ByteSize & (PAGE_SIZE-1))==0);
++
++      if(eAreaType == LINUX_MEM_AREA_EXTERNAL_KV)
++      {
++              unsigned long phys_addr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr;
++      
++              
++              if (phys_addr > virt_to_phys(high_memory - 1)) 
++              {
++                      eAreaType = LINUX_MEM_AREA_IOREMAP;
++              }
++      }
++
++    switch(eAreaType)
++    {
++        case LINUX_MEM_AREA_IO: 
++        case LINUX_MEM_AREA_IOREMAP: 
++        {
++            unsigned long ulAddr;
++            
++            
++            ulAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr;
++            
++            
++            ulAddr &= ~(PAGE_SIZE-1);
++            
++            ui32Status = MapIORangeToVMA(ps_vma, ps_vma->vm_start, ulAddr, ui32ByteSize);
++            if(ui32Status != 0)
++            {
++                PVR_DPF((PVR_DBG_ERROR, "%s: Error - Failed to map memory.\n", __FUNCTION__));
++                return IMG_FALSE;
++            }
++            break;
++        }
++        case LINUX_MEM_AREA_VMALLOC: 
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++        {
++            IMG_CHAR *pAddr, *pCurrentAddr;
++            unsigned long ulVMAPos;
++
++            
++                      pAddr = (IMG_CHAR *)LinuxMemAreaToCpuVAddr(psLinuxMemArea) + ui32ByteOffset;
++
++                      pAddr = (IMG_CHAR *)((unsigned long)pAddr & ~(PAGE_SIZE-1));
++            pCurrentAddr = pAddr;
++            
++            ulVMAPos=ps_vma->vm_start;
++            
++            while(pCurrentAddr < (pAddr + ui32ByteSize))
++            {
++                struct page *current_page;
++
++                current_page = vmalloc_to_page(pCurrentAddr);
++                ui32Status = MapPageToVMA(ps_vma, ulVMAPos, current_page);
++                if(ui32Status != 0)
++                {
++                    PVR_DPF((PVR_DBG_ERROR,"%s: Error - Failed to map memory.\n", __FUNCTION__));
++                    return IMG_FALSE;
++                }
++                pCurrentAddr += PAGE_SIZE;
++                ulVMAPos += PAGE_SIZE;
++            }
++            break;
++        }
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++        {
++            struct page **pvPageList;
++            IMG_UINT32 ui32PageIndex, ui32PageCount, i;
++            unsigned long ulVMAPos;
++
++            pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++            ui32PageIndex = ui32ByteOffset>>PAGE_SHIFT;
++            ui32PageCount = ui32ByteSize>>PAGE_SHIFT;
++
++            
++            ulVMAPos=ps_vma->vm_start;
++            
++            for(i=ui32PageIndex; i<(ui32PageIndex+ui32PageCount); i++)
++            {
++                ui32Status = MapPageToVMA(ps_vma, ulVMAPos, pvPageList[i]);
++                if(ui32Status != 0)
++                {
++                    PVR_DPF((PVR_DBG_ERROR,"%s: Error - Failed to map memory.\n", __FUNCTION__));
++                    return IMG_FALSE;
++                }
++                ulVMAPos += PAGE_SIZE;
++            }
++            break;
++        }
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            if(!DoMapToUser(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++                            ps_vma,
++                            psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset,
++                            ui32ByteSize))
++            {
++                return IMG_FALSE;
++            }
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR,"pvr_mmap: Unknown mapping type requested! (0x%X)\n",
++                     psLinuxMemArea->eAreaType));
++    }
++
++    return IMG_TRUE;
++}
++
++
++static IMG_UINT32
++MapPageToVMA(struct vm_area_struct *psVma,
++             unsigned long ulFromCpuVAddr,
++             struct page *pPage)
++{
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++    return vm_insert_page(psVma, ulFromCpuVAddr, pPage);
++#else 
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++    return remap_pfn_range(psVma, ulFromCpuVAddr, page_to_pfn(pPage), PAGE_SIZE, psVma->vm_page_prot);
++#else 
++    return remap_page_range(ulFromCpuVAddr, page_to_phys(pPage), PAGE_SIZE, psVma->vm_page_prot);
++#endif
++#endif
++}
++
++
++static IMG_UINT32
++MapIORangeToVMA(struct vm_area_struct *psVma,
++                unsigned long ulFromCpuVAddr,
++                unsigned long ulCpuPAddr,
++                unsigned long ulBytes)
++{
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++    return io_remap_pfn_range(psVma, ulFromCpuVAddr, ulCpuPAddr>>PAGE_SHIFT, ulBytes, psVma->vm_page_prot);
++#else 
++    return io_remap_page_range(ulFromCpuVAddr, ulCpuPAddr, ulBytes, psVma->vm_page_prot);
++#endif
++}
++
++
++static void
++MMapVOpen(struct vm_area_struct* ps_vma)
++{
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++    PVR_ASSERT(psOffsetStruct != IMG_NULL)
++    psOffsetStruct->ui16Mapped++;
++
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s: psLinuxMemArea=%p, KVAddress=%p MMapOffset=%ld, ui16Mapped=%d",
++             __FUNCTION__,
++             psOffsetStruct->psLinuxMemArea,
++             LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++             psOffsetStruct->ui32MMapOffset,
++             psOffsetStruct->ui16Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++    MOD_INC_USE_COUNT;
++#endif
++}
++
++
++static void
++MMapVClose(struct vm_area_struct* ps_vma)
++{
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++    PVR_ASSERT(psOffsetStruct != IMG_NULL)
++    psOffsetStruct->ui16Mapped--;
++
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s: psLinuxMemArea=%p, CpuVAddr=%p ui32MMapOffset=%ld, ui16Mapped=%d",
++             __FUNCTION__,
++             psOffsetStruct->psLinuxMemArea,
++             LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++             psOffsetStruct->ui32MMapOffset,
++             psOffsetStruct->ui16Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++    MOD_DEC_USE_COUNT;
++#endif
++}
++
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static off_t
++PrintMMapRegistrations(char * buffer, size_t size, off_t off)
++{
++      PKV_OFFSET_STRUCT psOffsetStruct;
++    off_t Ret;
++      
++    LinuxLockMutex(&gPVRSRVLock);
++
++      if(!off)
++    {
++              Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                                                "Allocations registered for mmap: %lu\n"
++                          "In total these areas correspond to %lu bytes (excluding SUB areas)\n"
++                          "psLinuxMemArea "
++                                                "CpuVAddr "
++                                                "CpuPAddr "
++                          "MMapOffset "
++                          "ByteLength "
++                          "LinuxMemType             "
++                                                "Pid   Name     Mapped Flags\n",
++#else
++                          "<mmap_header>\n"
++                          "\t<count>%lu</count>\n"
++                          "\t<bytes>%lu</bytes>\n" 
++                          "</mmap_header>\n",
++#endif
++                                                g_ui32RegisteredAreas,
++                          g_ui32TotalByteSize
++                          );
++
++        goto unlock_and_return;
++    }
++
++      if (size < 135) 
++    {
++              Ret = 0;
++        goto unlock_and_return;
++    }
++      
++      for(psOffsetStruct=g_psKVOffsetTable; --off && psOffsetStruct; psOffsetStruct=psOffsetStruct->psNext)
++              ;
++      if(!psOffsetStruct)
++    {
++              Ret = END_OF_FILE;
++        goto unlock_and_return;
++    }
++
++      Ret =  printAppend (buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                                              "%-8p       %-8p %08lx %08lx   %-8ld   %-24s %-5d %-8s %-5u  %08lx(%s)\n",
++#else
++                        "<mmap_record>\n"
++                                              "\t<pointer>%-8p</pointer>\n"
++                        "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++                        "\t<cpu_physical>%08lx</cpu_physical>\n"
++                        "\t<mmap_offset>%08lx</mmap_offset>\n"
++                        "\t<bytes>%-8ld</bytes>\n"
++                        "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++                        "\t<pid>%-5d</pid>\n"
++                        "\t<name>%-8s</name>\n"
++                        "\t<mapping_count>%-5u</mapping_count>\n"
++                        "\t<flags>%08lx</flags>\n"
++                        "\t<flags_string>%s</flags_string>\n"
++                        "</mmap_record>\n",
++#endif
++                        psOffsetStruct->psLinuxMemArea,
++                                              LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++                        LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea,0).uiAddr,
++                                              psOffsetStruct->ui32MMapOffset,
++                                              psOffsetStruct->psLinuxMemArea->ui32ByteSize,
++                        LinuxMemAreaTypeToString(psOffsetStruct->psLinuxMemArea->eAreaType),
++                                              psOffsetStruct->pid,
++                                              psOffsetStruct->pszName,
++                                              psOffsetStruct->ui16Mapped,
++                                              psOffsetStruct->ui32AllocFlags,
++                        HAPFlagsToString(psOffsetStruct->ui32AllocFlags));
++
++unlock_and_return:
++
++    LinuxUnLockMutex(&gPVRSRVLock);
++    return Ret;
++}
++#endif
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,84 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++    
++    IMG_UINT32                  ui32MMapOffset;
++    
++    
++    LinuxMemArea                *psLinuxMemArea;
++    
++    
++    IMG_UINT32                  ui32AllocFlags;
++    
++    
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    pid_t                                             pid;
++    const IMG_CHAR                            *pszName;
++    IMG_UINT16                                        ui16Mapped;
++    IMG_UINT16                                        ui16Faults;
++#endif
++    
++    
++    struct KV_OFFSET_STRUCT_TAG       *psNext;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++IMG_VOID PVRMMapInit(void);
++
++
++IMG_VOID PVRMMapCleanup(void);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(const IMG_CHAR *pszName,
++                                 LinuxMemArea *psLinuxMemArea,
++                                 IMG_UINT32 ui32AllocFlags);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapKVIndexAddressToMMapData(IMG_VOID *pvKVIndexAddress,
++                                             IMG_UINT32 ui32Size,
++                                             IMG_UINT32 *pui32MMapOffset,
++                                             IMG_UINT32 *pui32ByteOffset,
++                                             IMG_UINT32 *pui32RealByteSize);
++
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1870 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <linux/highmem.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++
++#if defined(CONFIG_ARCH_OMAP)
++#define       PVR_FLUSH_CACHE_BEFORE_KMAP
++#endif
++
++#if defined(PVR_FLUSH_CACHE_BEFORE_KMAP)
++#include <asm/cacheflush.h>
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++typedef enum {
++    DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++    DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++    DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++    DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++    DEBUG_MEM_ALLOC_TYPE_IO,
++    DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++    DEBUG_MEM_ALLOC_TYPE_KMAP,
++    DEBUG_MEM_ALLOC_TYPE_COUNT
++}DEBUG_MEM_ALLOC_TYPE;
++
++typedef struct _DEBUG_MEM_ALLOC_REC
++{
++    DEBUG_MEM_ALLOC_TYPE    eAllocType;
++      IMG_VOID                                *pvKey; 
++    IMG_VOID                *pvCpuVAddr;
++    unsigned long           ulCpuPAddr;
++    IMG_VOID                *pvPrivateData;
++      IMG_UINT32                              ui32Bytes;
++      pid_t                                   pid;
++    IMG_CHAR                *pszFileName;
++    IMG_UINT32              ui32Line;
++    
++    struct _DEBUG_MEM_ALLOC_REC   *psNext;
++}DEBUG_MEM_ALLOC_REC;
++
++static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static IMG_UINT32 g_SysRAMWaterMark;
++static IMG_UINT32 g_SysRAMHighWaterMark;
++
++static IMG_UINT32 g_IOMemWaterMark;
++static IMG_UINT32 g_IOMemHighWaterMark;
++
++static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++                                       IMG_VOID *pvKey,
++                                       IMG_VOID *pvCpuVAddr,
++                                       unsigned long ulCpuPAddr,
++                                       IMG_VOID *pvPrivateData,
++                                       IMG_UINT32 ui32Bytes,
++                                       IMG_CHAR *pszFileName,
++                                       IMG_UINT32 ui32Line);
++static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey);
++static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++static off_t printMemoryRecords(char * buffer, size_t size, off_t off);
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++typedef struct _DEBUG_LINUX_MEM_AREA_REC
++{
++      LinuxMemArea                *psLinuxMemArea;
++    IMG_UINT32                  ui32Flags;
++      pid_t                                       pid;
++
++      struct _DEBUG_LINUX_MEM_AREA_REC  *psNext;
++}DEBUG_LINUX_MEM_AREA_REC;
++
++static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static IMG_UINT32 g_LinuxMemAreaCount;
++static IMG_UINT32 g_LinuxMemAreaWaterMark;
++static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
++
++static off_t printLinuxMemAreaRecords(char * buffer, size_t size, off_t off);
++#endif
++
++static LinuxKMemCache *psLinuxMemAreaCache;
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++#endif
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
++static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
++static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
++#endif
++
++PVRSRV_ERROR
++LinuxMMInit(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    {
++        int iStatus;
++        iStatus = CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++        if(iStatus!=0)
++        {
++            return PVRSRV_ERROR_OUT_OF_MEMORY;
++        }
++    }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    {
++        int iStatus;
++        iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++        if(iStatus!=0)
++        {
++            return PVRSRV_ERROR_OUT_OF_MEMORY;
++        }
++    }
++#endif
++
++    psLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
++    if(!psLinuxMemAreaCache)
++    {
++        PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++
++    return PVRSRV_OK;
++}
++
++
++IMG_VOID
++LinuxMMCleanup(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    {
++        DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord = g_LinuxMemAreaRecords, *psNextRecord;
++
++        if(g_LinuxMemAreaCount)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)",
++                    __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
++        }
++
++        while(psCurrentRecord)
++        {
++            LinuxMemArea *psLinuxMemArea;
++
++            psNextRecord = psCurrentRecord->psNext;
++            psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes",
++                        __FUNCTION__,
++                        psCurrentRecord->psLinuxMemArea,
++                        LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
++                        psCurrentRecord->psLinuxMemArea->ui32ByteSize));
++            
++            LinuxMemAreaDeepFree(psLinuxMemArea);
++
++            psCurrentRecord = psNextRecord;
++        }
++        RemoveProcEntry("mem_areas");
++    }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    {
++        DEBUG_MEM_ALLOC_REC *psCurrentRecord = g_MemoryRecords, *psNextRecord;
++        
++        
++        while(psCurrentRecord)
++        {
++            psNextRecord = psCurrentRecord->psNext;
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++                                    "type=%s "
++                                    "CpuVAddr=%p "
++                                    "CpuPAddr=0x%08lx, "
++                                    "allocated @ file=%s,line=%d",
++                    __FUNCTION__,
++                    DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
++                    psCurrentRecord->pvCpuVAddr,
++                    psCurrentRecord->ulCpuPAddr,
++                    psCurrentRecord->pszFileName,
++                    psCurrentRecord->ui32Line));
++            switch(psCurrentRecord->eAllocType)
++            {
++                case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++                    KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++                    IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_IO:
++                    
++                    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++                    VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++                    
++                    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++                    KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_KMAP:
++                    KUnMapWrapper(psCurrentRecord->pvKey);
++                    break;
++                default:
++                    PVR_ASSERT(0);
++            }
++            psCurrentRecord = psNextRecord;
++        }
++        RemoveProcEntry("meminfo");
++    }
++#endif
++
++    if(psLinuxMemAreaCache)
++    {
++        KMemCacheDestroyWrapper(psLinuxMemAreaCache); 
++        psLinuxMemAreaCache=NULL;
++    }
++}
++
++
++IMG_VOID *
++_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++    IMG_VOID *pvRet;
++    pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvRet)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++                               pvRet,
++                               pvRet,
++                               0,
++                               NULL,
++                               ui32ByteSize,
++                               pszFileName,
++                               ui32Line
++                               );
++    }
++#endif
++    return pvRet;
++}
++
++
++IMG_VOID
++KFreeWrapper(IMG_VOID *pvCpuVAddr)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr);
++#endif
++    kfree(pvCpuVAddr);
++}
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static IMG_VOID
++DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++                       IMG_VOID *pvKey,
++                       IMG_VOID *pvCpuVAddr,
++                       unsigned long ulCpuPAddr,
++                       IMG_VOID *pvPrivateData,
++                       IMG_UINT32 ui32Bytes,
++                       IMG_CHAR *pszFileName,
++                       IMG_UINT32 ui32Line)
++{
++    DEBUG_MEM_ALLOC_REC *psRecord;
++
++    psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++
++    psRecord->eAllocType = eAllocType;
++    psRecord->pvKey = pvKey;
++    psRecord->pvCpuVAddr = pvCpuVAddr;
++    psRecord->ulCpuPAddr = ulCpuPAddr;
++    psRecord->pvPrivateData = pvPrivateData;
++    psRecord->pid = current->pid;
++    psRecord->ui32Bytes = ui32Bytes;
++    psRecord->pszFileName = pszFileName;
++    psRecord->ui32Line = ui32Line;
++    
++    psRecord->psNext = g_MemoryRecords;
++    g_MemoryRecords = psRecord;
++    
++    g_WaterMarkData[eAllocType] += ui32Bytes;
++    if(g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
++    {
++        g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++    }
++
++    if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++       || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++       || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++       || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++    {
++        g_SysRAMWaterMark += ui32Bytes;
++        if(g_SysRAMWaterMark > g_SysRAMHighWaterMark)
++        {
++            g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++        }
++    }
++    else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++            || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++    {
++        g_IOMemWaterMark += ui32Bytes;
++        if(g_IOMemWaterMark > g_IOMemHighWaterMark)
++        {
++            g_IOMemHighWaterMark = g_IOMemWaterMark;
++        }
++    }
++}
++
++
++
++static IMG_VOID
++DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey)
++{
++    DEBUG_MEM_ALLOC_REC **ppsCurrentRecord;
++
++    
++    for(ppsCurrentRecord = &g_MemoryRecords;
++        *ppsCurrentRecord;
++        ppsCurrentRecord = &((*ppsCurrentRecord)->psNext))
++    {
++        if((*ppsCurrentRecord)->eAllocType == eAllocType
++           && (*ppsCurrentRecord)->pvKey == pvKey)
++        {
++            DEBUG_MEM_ALLOC_REC *psNextRecord;
++            DEBUG_MEM_ALLOC_TYPE eAllocType;
++
++            psNextRecord = (*ppsCurrentRecord)->psNext;
++            eAllocType = (*ppsCurrentRecord)->eAllocType;
++            g_WaterMarkData[eAllocType] -= (*ppsCurrentRecord)->ui32Bytes;
++            
++            if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++               || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++               || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++               || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++            {
++                g_SysRAMWaterMark -= (*ppsCurrentRecord)->ui32Bytes;
++            }
++            else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++                    || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++            {
++                g_IOMemWaterMark -= (*ppsCurrentRecord)->ui32Bytes;
++            }
++            
++            kfree(*ppsCurrentRecord);
++            *ppsCurrentRecord = psNextRecord;
++            return;
++        }
++    }
++    
++    PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p\n",
++             __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey));
++}
++
++
++static IMG_CHAR *
++DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++    char *apszDebugMemoryRecordTypes[] = {
++        "KMALLOC",
++        "VMALLOC",
++        "ALLOC_PAGES",
++        "IOREMAP",
++        "IO",
++        "KMEM_CACHE_ALLOC",
++        "KMAP"
++    };
++    return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++
++
++IMG_VOID *
++_VMallocWrapper(IMG_UINT32 ui32Bytes,
++                IMG_UINT32 ui32AllocFlags,
++                IMG_CHAR *pszFileName,
++                IMG_UINT32 ui32Line)
++{
++    pgprot_t PGProtFlags;
++    IMG_VOID *pvRet;
++    
++    switch(ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++    {
++        case PVRSRV_HAP_CACHED:
++            PGProtFlags = PAGE_KERNEL;
++            break;
++        case PVRSRV_HAP_WRITECOMBINE:
++#if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++            PGProtFlags = pgprot_writecombine(PAGE_KERNEL);
++#else
++            PGProtFlags = pgprot_noncached(PAGE_KERNEL);
++#endif
++            break;
++        case PVRSRV_HAP_UNCACHED:
++            PGProtFlags = pgprot_noncached(PAGE_KERNEL);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR,
++                     "VMAllocWrapper: unknown mapping flags=0x%08lx",
++                     ui32AllocFlags));
++            dump_stack();
++            return NULL;
++    }
++
++      
++    pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvRet)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++                               pvRet,
++                               pvRet,
++                               0,
++                               NULL,
++                               PAGE_ALIGN(ui32Bytes),
++                               pszFileName,
++                               ui32Line
++                               );
++    }
++#endif
++    
++    return pvRet;
++}
++
++
++IMG_VOID
++VFreeWrapper(IMG_VOID *pvCpuVAddr)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr);
++#endif
++    vfree(pvCpuVAddr);
++}
++
++
++LinuxMemArea *
++NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++    LinuxMemArea *psLinuxMemArea;
++    IMG_VOID *pvCpuVAddr;
++
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        goto failed;
++    }
++
++    pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++    if(!pvCpuVAddr)
++    {
++        goto failed;
++    }
++    
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++    
++    ReservePages(pvCpuVAddr, ui32Bytes);
++#endif
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++    psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++
++failed:
++    PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
++    if(psLinuxMemArea)
++        LinuxMemAreaStructFree(psLinuxMemArea);
++    return NULL;
++}
++
++
++IMG_VOID
++FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PVR_ASSERT(psLinuxMemArea);
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++    PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
++                    psLinuxMemArea->ui32ByteSize);
++#endif
++
++    PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
++             __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
++    VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID
++ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++      IMG_VOID *pvPage;
++      IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++      for(pvPage = pvAddress; pvPage < pvEnd;  pvPage += PAGE_SIZE)
++      {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++              SetPageReserved(ConvertKVToPage(pvPage));
++#else
++              mem_map_reserve(ConvertKVToPage(pvPage));
++#endif
++      }
++}
++
++
++static IMG_VOID
++UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++      IMG_VOID *pvPage;
++      IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++      for(pvPage = pvAddress; pvPage < pvEnd;  pvPage += PAGE_SIZE)
++      {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++              ClearPageReserved(ConvertKVToPage(pvPage));
++#else
++              mem_map_unreserve(ConvertKVToPage(pvPage));
++#endif
++      }
++}
++#endif 
++
++
++IMG_VOID *
++_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++               IMG_UINT32 ui32Bytes,
++               IMG_UINT32 ui32MappingFlags,  /* cache type selected via PVRSRV_HAP_CACHETYPE_MASK bits */
++               IMG_CHAR *pszFileName,
++               IMG_UINT32 ui32Line)  /* pszFileName/ui32Line: caller site, recorded only in debug-allocation builds */
++{
++    IMG_VOID *pvIORemapCookie = IMG_NULL;
++    
++    switch(ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)  /* pick the ioremap variant matching the requested cacheability */
++    {
++        case PVRSRV_HAP_CACHED:
++#if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++            pvIORemapCookie = (IMG_VOID *)ioremap_cached(BasePAddr.uiAddr, ui32Bytes);
++#else
++                  pvIORemapCookie = (IMG_VOID *)ioremap(BasePAddr.uiAddr, ui32Bytes);
++#endif
++            break;
++        case PVRSRV_HAP_WRITECOMBINE:  /* write-combining spelling differs per arch and kernel version */
++#if defined(__arm__)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++                      pvIORemapCookie = (IMG_VOID *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
++#else
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
++                      pvIORemapCookie = (IMG_VOID *)__ioremap(BasePAddr.uiAddr, ui32Bytes, L_PTE_BUFFERABLE);
++#else
++                      pvIORemapCookie = (IMG_VOID *)__ioremap(BasePAddr.uiAddr, ui32Bytes, L_PTE_BUFFERABLE, 1);  /* 2.6.0..2.6.16 __ioremap took an extra 'align' argument */
++#endif
++#endif
++#else
++#if defined(__i386__) && defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++                      pvIORemapCookie = (IMG_VOID *)__ioremap(BasePAddr.uiAddr, ui32Bytes, _PAGE_PCD);
++#else
++                      pvIORemapCookie = (IMG_VOID *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
++#endif
++#endif
++            break;
++        case PVRSRV_HAP_UNCACHED:
++            pvIORemapCookie = (IMG_VOID *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
++            return NULL;
++    }
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvIORemapCookie)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++                               pvIORemapCookie,
++                               pvIORemapCookie,
++                               BasePAddr.uiAddr,
++                               NULL,
++                               ui32Bytes,
++                               pszFileName,
++                               ui32Line
++                               );
++    }
++#endif
++
++    return pvIORemapCookie;  /* NULL on ioremap failure */
++}
++
++
++IMG_VOID
++IOUnmapWrapper(IMG_VOID *pvIORemapCookie)  /* Undo an IORemapWrapper mapping; drops the debug record first */
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie);
++#endif
++    iounmap(pvIORemapCookie);
++}
++
++
++LinuxMemArea *
++NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++                       IMG_UINT32 ui32Bytes,
++                       IMG_UINT32 ui32AreaFlags)  /* Allocate a LINUX_MEM_AREA_IOREMAP area backed by a fresh ioremap mapping; NULL on failure */
++{
++    LinuxMemArea *psLinuxMemArea;
++    IMG_VOID *pvIORemapCookie;
++
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++
++    pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);  /* presumably a macro onto _IORemapWrapper supplying __FILE__/__LINE__ — defined elsewhere */
++    if(!pvIORemapCookie)
++    {
++        LinuxMemAreaStructFree(psLinuxMemArea);  /* don't leak the struct if the mapping fails */
++        return NULL;
++    }
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++    psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++    psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)  /* Unmap and release an area created by NewIORemapLinuxMemArea */
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_VOID *pvCPUVAddr, 
++                                                                              IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)  /* Wrap an externally-owned kernel mapping; no memory is allocated or mapped here */
++{
++    LinuxMemArea *psLinuxMemArea;
++
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++    psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;  /* caller retains ownership of the mapping itself */
++    psLinuxMemArea->uData.sExternalKV.CPUPhysAddr = BasePAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)  /* Release only the bookkeeping struct — the external mapping is not ours to tear down */
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *
++NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++                  IMG_UINT32 ui32Bytes,
++                  IMG_UINT32 ui32AreaFlags)  /* Record a raw IO physical range; no CPU mapping is created for LINUX_MEM_AREA_IO */
++{
++    LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++
++    
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++    psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++                           (IMG_VOID *)BasePAddr.uiAddr,  /* keyed by physical address — see FreeIOLinuxMemArea's matching remove */
++                           0,
++                           BasePAddr.uiAddr,
++                           NULL,
++                           ui32Bytes,
++                           "unknown",
++                           0
++                           );
++#endif
++   
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)  /* Drop the bookkeeping for an IO area; nothing to unmap since none was created */
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++    
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++                              (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr);
++#endif
++
++    
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++struct page *
++ConvertKVToPage(IMG_VOID *pvCpuVAddr)  /* Resolve a vmalloc-space kernel virtual address to its struct page (NULL/0 on failure) */
++{
++      struct page *psPage = 0;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20))
++
++      psPage = vmalloc_to_page(pvCpuVAddr);  /* modern kernels provide the lookup directly */
++
++#else 
++
++      IMG_UINT32 ui32Addr = (IMG_UINT32)pvCpuVAddr;
++      pgd_t *ppgd;
++      pmd_t *ppmd;
++      pte_t *ppte, pte;
++
++      {
++              
++              ui32Addr = VMALLOC_VMADDR(ui32Addr);  /* fix: original assigned to undeclared 'pvAddr'; the table walk below uses ui32Addr */
++
++              
++              ppgd = pgd_offset_k(ui32Addr);  /* manual 3-level walk: pgd -> pmd -> pte */
++
++              
++              if (!pgd_none(*ppgd))
++              {
++                      
++                      ppmd = pmd_offset(ppgd, ui32Addr);
++
++                      
++                      if (!pmd_none(*ppmd))
++                      {
++                              
++#ifndef PVR_ATOMIC_PTE
++                              ppte = pte_offset(ppmd, ui32Addr);
++                              pte = *ppte;
++#else
++                              ppte = pte_offset_atomic(ppmd, ui32Addr);
++                              pte = *ppte;
++                              pte_kunmap(ppte);  /* atomic variant must unmap the pte after copying it */
++#endif
++                              
++                              if (pte_present(pte))
++                              {
++                                      
++                                      psPage = pte_page(pte);
++                              }
++                              else
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR,"ConvertKVToPage: Failed to find a valid page table entry"));
++                              }
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"ConvertKVToPage: Failed to find a valid mid-level page directory"));
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"ConvertKVToPage: Failed to find a valid page directory"));
++              }
++      }
++#endif
++      return psPage;
++}
++
++
++
++LinuxMemArea *
++NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)  /* Build a LINUX_MEM_AREA_ALLOC_PAGES area from ui32Bytes worth of order-0 pages; NULL on failure */
++{
++    LinuxMemArea *psLinuxMemArea;
++    IMG_UINT32 ui32PageCount;
++    struct page **pvPageList;
++    IMG_UINT32 i;
++    
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        goto failed_area_alloc;
++    }
++    
++    ui32PageCount = PAGE_ALIGN(ui32Bytes)>>PAGE_SHIFT;
++    pvPageList = VMallocWrapper(sizeof(void *) * ui32PageCount, PVRSRV_HAP_CACHED);  /* page-pointer array lives in vmalloc space */
++    if(!pvPageList)
++    {
++        goto failed_vmalloc;
++    }
++    
++    for(i=0; i<ui32PageCount; i++)
++    {
++        pvPageList[i] = alloc_pages(GFP_KERNEL, 0);
++        if(!pvPageList[i])
++        {
++            goto failed_alloc_pages;  /* pages 0..i-1 were allocated and must be returned */
++        }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))              
++      SetPageReserved(pvPageList[i]);
++#else
++              mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++
++    }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++                           pvPageList,
++                           0,
++                           0,
++                           NULL,
++                           PAGE_ALIGN(ui32Bytes),
++                           "unknown",
++                           0
++                           );
++#endif
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++    psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++    
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++    
++failed_alloc_pages:
++    while(i > 0)  /* fix: i is IMG_UINT32, so the original "for(i--;i>=0;i--)" underflowed and never terminated */
++    {
++        __free_pages(pvPageList[--i], 0);  /* NOTE(review): on <2.6.15 these pages are still marked reserved here — TODO confirm whether ClearPageReserved is needed before free */
++    }
++    VFreeWrapper(pvPageList);
++failed_vmalloc:
++    LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++    PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
++    
++    return NULL;
++}
++
++
++IMG_VOID
++FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)  /* Return every page of an ALLOC_PAGES area and free its page-list array and struct */
++{
++    IMG_UINT32 ui32PageCount;
++    struct page **pvPageList;
++    IMG_UINT32 i;
++
++    PVR_ASSERT(psLinuxMemArea);
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    ui32PageCount = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize)>>PAGE_SHIFT;
++    pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList);
++#endif
++
++    for(i=0;i<ui32PageCount;i++)
++    {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))             
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))              
++              ClearPageReserved(pvPageList[i]);
++#else
++              mem_map_unreserve(pvPageList[i]);  /* fix: free path must unreserve — original called mem_map_reserve here */
++#endif                
++#endif        
++        __free_pages(pvPageList[i], 0);
++    }
++    VFreeWrapper(psLinuxMemArea->uData.sPageList.pvPageList);
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++struct page*
++LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
++                         IMG_UINT32 ui32ByteOffset)  /* Map a byte offset within an area to the backing struct page; NULL for unsupported area types */
++{
++    IMG_UINT32 ui32PageIndex;
++    IMG_CHAR *pui8Addr;
++
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++            ui32PageIndex = ui32ByteOffset>>PAGE_SHIFT;
++            return psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++            break;
++        case LINUX_MEM_AREA_VMALLOC:
++            pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++            pui8Addr += ui32ByteOffset;
++            return vmalloc_to_page(pui8Addr);
++            break;
++        case LINUX_MEM_AREA_SUB_ALLOC:  /* recurse into the parent with the combined offset */
++            return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++                                            psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++                                             + ui32ByteOffset);
++        default:
++            PVR_DPF((PVR_DBG_ERROR,
++                    "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++                    __FUNCTION__, LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));  /* fix: format has two %s but only one argument was supplied */
++            return NULL;
++    }
++}
++
++
++IMG_VOID *
++_KMapWrapper(struct page *psPage, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)  /* kmap a page into kernel VA space; caller site recorded in debug builds */
++{
++    IMG_VOID *pvRet;
++
++#if defined(PVR_FLUSH_CACHE_BEFORE_KMAP)
++    
++    flush_cache_all();  /* build-time option: flush before mapping to avoid stale cache lines */
++#endif
++
++    pvRet = kmap(psPage);
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvRet)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMAP,
++                               psPage,  /* keyed by page, matching KUnMapWrapper's remove */
++                               pvRet,
++                               0,
++                               NULL,
++                               PAGE_SIZE,
++                               "unknown",
++                               0
++                               );
++    }
++#endif
++    
++    return pvRet;
++}
++
++
++IMG_VOID
++KUnMapWrapper(struct page *psPage)  /* Undo _KMapWrapper for this page */
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMAP, psPage);
++#endif
++
++    kunmap(psPage);
++}
++
++
++LinuxKMemCache *
++KMemCacheCreateWrapper(IMG_CHAR *pszName,
++                       size_t Size,
++                       size_t Align,
++                       IMG_UINT32 ui32Flags)  /* Thin wrapper over kmem_cache_create; debug-slab builds add poisoning/red-zoning */
++{
++#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
++    ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
++#endif
++    return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL);
++}
++
++
++IMG_VOID
++KMemCacheDestroyWrapper(LinuxKMemCache *psCache)  /* Destroy a cache created by KMemCacheCreateWrapper */
++{
++    kmem_cache_destroy(psCache);
++}
++
++
++IMG_VOID *
++_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
++                      gfp_t Flags,
++                      IMG_CHAR *pszFileName,
++                      IMG_UINT32 ui32Line)  /* Allocate one object from the cache, recording the caller site in debug builds */
++{
++    IMG_VOID *pvRet;
++    
++    pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++                           pvRet,  /* NOTE(review): recorded even when pvRet is NULL, unlike _KMapWrapper — TODO confirm intended */
++                           pvRet,
++                           0,
++                           psCache,
++                           kmem_cache_size(psCache),
++                           pszFileName,
++                           ui32Line
++                           );
++#endif
++    
++    return pvRet;
++}
++
++
++IMG_VOID
++KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject)  /* Return an object to its cache, dropping the debug record first */
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject);
++#endif
++
++    kmem_cache_free(psCache, pvObject);
++}
++
++
++const IMG_CHAR *
++KMemCacheNameWrapper(LinuxKMemCache *psCache)  /* Always returns "" — presumably the cache name field is not portably accessible across kernel versions; psCache is unused */
++{
++    
++    return "";
++}
++
++
++LinuxMemArea *
++NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++                   IMG_UINT32 ui32ByteOffset,
++                   IMG_UINT32 ui32Bytes)  /* Create a SUB_ALLOC view [ui32ByteOffset, +ui32Bytes) into an existing parent area */
++{
++    LinuxMemArea *psLinuxMemArea;
++    
++    PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);  /* sub-range must fit inside the parent */
++    
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++    
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++    psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
++    psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++    
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    {
++        DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++        psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++        DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags);  /* NOTE(review): NULL deref if the parent has no record — TODO confirm parents are always recorded */
++    }
++#endif
++    
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)  /* Release a SUB_ALLOC view; the parent area is left untouched */
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++static LinuxMemArea *
++LinuxMemAreaStructAlloc(IMG_VOID)  /* Allocate a LinuxMemArea header from the dedicated slab cache */
++{
++#if 0
++    LinuxMemArea *psLinuxMemArea;  /* disabled debug variant: logs each allocation with a stack trace */
++    psLinuxMemArea = kmem_cache_alloc(psLinuxMemAreaCache, GFP_KERNEL);
++    printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea);
++    dump_stack();
++    return psLinuxMemArea;
++#else
++    return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++#endif
++}
++
++
++static IMG_VOID
++LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)  /* Return a LinuxMemArea header to the slab cache */
++{
++    KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++    
++    
++}
++
++
++IMG_VOID
++LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)  /* Dispatch to the type-specific free routine for any area kind */
++{
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_VMALLOC:
++            FreeVMallocLinuxMemArea(psLinuxMemArea);
++            break;
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++            FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++            break;
++        case LINUX_MEM_AREA_IOREMAP:
++            FreeIORemapLinuxMemArea(psLinuxMemArea);
++            break;
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++                      FreeExternalKVLinuxMemArea(psLinuxMemArea);
++                      break;
++        case LINUX_MEM_AREA_IO:
++            FreeIOLinuxMemArea(psLinuxMemArea);
++            break;
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            FreeSubLinuxMemArea(psLinuxMemArea);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: Unknown area type (%d)\n",
++                     __FUNCTION__, psLinuxMemArea->eAreaType));  /* fix: message typo "are type" -> "area type" */
++    }
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID
++DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)  /* Track a new area: update water marks and push a record onto the global list */
++{
++    DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++    const char *pi8FlagsString;
++    
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)  /* SUB areas alias their parent's bytes, so they are excluded from water marks */
++    {
++        g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++        if(g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
++        {
++            g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++        }
++    }
++    g_LinuxMemAreaCount++;
++    
++    
++    psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
++    if(psNewRecord)
++    {
++        
++        psNewRecord->psLinuxMemArea = psLinuxMemArea;
++        psNewRecord->ui32Flags = ui32Flags;
++        psNewRecord->pid = current->pid;
++        psNewRecord->psNext = g_LinuxMemAreaRecords;  /* push onto the head of the singly-linked list */
++        g_LinuxMemAreaRecords = psNewRecord;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: failed to allocate linux memory area record.",
++                 __FUNCTION__));
++    }
++    
++    
++    pi8FlagsString = HAPFlagsToString(ui32Flags);
++    if(strstr(pi8FlagsString, "UNKNOWN"))  /* warn on flags HAPFlagsToString could not decode */
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++                 __FUNCTION__,
++                 ui32Flags,
++                 psLinuxMemArea));  /* NOTE(review): pointer passed for %08lx — TODO confirm this is safe on the target ABI or switch to %p */
++        
++    }
++}
++
++
++static DEBUG_LINUX_MEM_AREA_REC *
++DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)  /* Linear search of the global record list; NULL if the area was never recorded */
++{
++    DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++    for(psCurrentRecord = g_LinuxMemAreaRecords;
++        psCurrentRecord;
++        psCurrentRecord = psCurrentRecord->psNext)
++    {
++        if(psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
++        {
++            return psCurrentRecord;
++        }
++    }
++    return NULL;
++}
++
++
++static IMG_VOID
++DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)  /* Undo DebugLinuxMemAreaRecordAdd: adjust water marks and unlink the record */
++{
++    DEBUG_LINUX_MEM_AREA_REC **ppsCurrentRecord;
++
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)  /* mirror the SUB exclusion used when adding */
++    {
++        g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++    }
++    g_LinuxMemAreaCount--;
++
++    
++    for(ppsCurrentRecord = &g_LinuxMemAreaRecords;  /* pointer-to-pointer walk so unlinking needs no prev node */
++        *ppsCurrentRecord;
++        ppsCurrentRecord = &((*ppsCurrentRecord)->psNext))
++    {
++        if((*ppsCurrentRecord)->psLinuxMemArea == psLinuxMemArea)
++        {
++            DEBUG_LINUX_MEM_AREA_REC *psNextRecord;
++            
++            psNextRecord = (*ppsCurrentRecord)->psNext;
++            kfree(*ppsCurrentRecord);
++            *ppsCurrentRecord = psNextRecord;
++            return;
++        }
++    }
++
++    PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
++             __FUNCTION__, psLinuxMemArea));
++}
++#endif
++
++
++IMG_VOID *
++LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)  /* CPU kernel virtual address of an area, or NULL for types without one (IO, ALLOC_PAGES) */
++{
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_VMALLOC:
++            return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++        case LINUX_MEM_AREA_IOREMAP:
++            return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++                      return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++        case LINUX_MEM_AREA_SUB_ALLOC:  /* parent's address plus this view's offset */
++        {
++            IMG_CHAR *pAddr =
++                LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++            if(!pAddr)
++            {
++                return NULL;
++            }
++            return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++        }
++        default:
++            return NULL;
++    }
++}
++
++
++IMG_CPU_PHYADDR
++LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)  /* CPU physical address at ui32ByteOffset within the area; asserts non-zero on return */
++{
++    IMG_CPU_PHYADDR CpuPAddr;
++    
++    CpuPAddr.uiAddr = 0;
++
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_IOREMAP:  /* contiguous types: base physical address plus offset */
++        {
++            CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++            CpuPAddr.uiAddr += ui32ByteOffset;
++            break;
++        }
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++              {
++            CpuPAddr = psLinuxMemArea->uData.sExternalKV.CPUPhysAddr;
++            CpuPAddr.uiAddr += ui32ByteOffset;
++            break;
++              }
++        case LINUX_MEM_AREA_IO:
++        {
++            CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++            CpuPAddr.uiAddr += ui32ByteOffset;
++            break;
++        }
++        case LINUX_MEM_AREA_VMALLOC:  /* page-granular types: resolve the page, then add the intra-page offset */
++        {
++            IMG_CHAR *pCpuVAddr;
++            struct page *page;
++            pCpuVAddr =
++                (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++            pCpuVAddr += ui32ByteOffset;
++            page = ConvertKVToPage(pCpuVAddr);
++            CpuPAddr.uiAddr = page_to_phys(page);
++            CpuPAddr.uiAddr += ui32ByteOffset & (PAGE_SIZE - 1);
++            break;
++        }
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++        {
++            struct page *page;
++            IMG_UINT32 ui32PageIndex = ui32ByteOffset >> PAGE_SHIFT;
++            page = psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++            CpuPAddr.uiAddr = page_to_phys(page);
++            CpuPAddr.uiAddr += ui32ByteOffset & (PAGE_SIZE - 1);
++            break;
++        }
++        case LINUX_MEM_AREA_SUB_ALLOC:  /* delegate to the parent with the combined offset */
++        {
++            CpuPAddr =
++                OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++                                      psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++                                        + ui32ByteOffset);
++            break;
++        }
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++                     __FUNCTION__, psLinuxMemArea->eAreaType));
++    }
++    
++    PVR_ASSERT(CpuPAddr.uiAddr);  /* physical address 0 is treated as "lookup failed" */
++    return CpuPAddr;
++}
++
++
++LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)  /* Follow SUB_ALLOC parents until a non-SUB area type is found */
++{
++    if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        return LinuxMemAreaRootType(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++    }
++    else
++    {
++        return psLinuxMemArea->eAreaType;
++    }
++}
++
++
++const IMG_CHAR *
++LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)  /* Human-readable name for an area type; "" (after an assert) for anything unknown */
++{
++    PVR_ASSERT(LINUX_MEM_AREA_TYPE_COUNT == 6);  /* fix: six types are handled below (EXTERNAL_KV was added without bumping the assert from 5) */
++    PVR_ASSERT(eMemAreaType < LINUX_MEM_AREA_TYPE_COUNT);
++    
++    
++    switch(eMemAreaType)
++    {
++        case LINUX_MEM_AREA_IOREMAP:
++            return "LINUX_MEM_AREA_IOREMAP";
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++                      return "LINUX_MEM_AREA_EXTERNAL_KV";
++        case LINUX_MEM_AREA_IO:
++            return "LINUX_MEM_AREA_IO";
++        case LINUX_MEM_AREA_VMALLOC:
++            return "LINUX_MEM_AREA_VMALLOC";
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            return "LINUX_MEM_AREA_SUB_ALLOC";
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++            return "LINUX_MEM_AREA_ALLOC_PAGES";
++        default:
++            PVR_ASSERT(0);
++    }
++
++    return "";
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static off_t
++printLinuxMemAreaRecords(char * buffer, size_t count, off_t off)  /* /proc read handler: off==0 prints the header, off==n prints the n-th record, END_OF_FILE past the end */
++{
++    DEBUG_LINUX_MEM_AREA_REC *psRecord;
++    off_t Ret;
++
++    LinuxLockMutex(&gPVRSRVLock);  /* records list is shared driver state */
++
++    if(!off)
++    {
++        if(count < 500)  /* refuse tiny buffers rather than truncate the header */
++        {
++            Ret = 0;
++            goto unlock_and_return;
++        }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++        Ret = printAppend(buffer, count, 0,
++                          "Number of Linux Memory Areas: %lu\n"
++                          "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++                          "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++                          "\nDetails for all Linux Memory Areas:\n"
++                          "%s %-24s %s %s %-8s %-5s %s\n",
++                          g_LinuxMemAreaCount,
++                          g_LinuxMemAreaWaterMark,
++                          g_LinuxMemAreaHighWaterMark,
++                          "psLinuxMemArea",
++                          "LinuxMemType",
++                          "CpuVAddr",
++                          "CpuPAddr",
++                          "Bytes",
++                          "Pid",
++                          "Flags"
++                         );
++#else
++        Ret = printAppend(buffer, count, 0,
++                          "<mem_areas_header>\n"
++                          "\t<count>%lu</count>\n"
++                          "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n" 
++                          "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n" 
++                          "</mem_areas_header>\n",
++                          g_LinuxMemAreaCount,
++                          g_LinuxMemAreaWaterMark,
++                          g_LinuxMemAreaHighWaterMark
++                         );
++#endif
++        goto unlock_and_return;
++    }
++
++    for(psRecord=g_LinuxMemAreaRecords; --off && psRecord; psRecord=psRecord->psNext)  /* skip to the (off-1)-th record; records after the header are 1-based */
++        ;
++    if(!psRecord)
++    {
++        Ret = END_OF_FILE;
++        goto unlock_and_return;
++    }
++
++    if(count < 500)
++    {
++        Ret = 0;
++        goto unlock_and_return;
++    }
++
++    Ret =  printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                       "%8p       %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++                       "<linux_mem_area>\n"
++                       "\t<pointer>%8p</pointer>\n"
++                       "\t<type>%s</type>\n"
++                       "\t<cpu_virtual>%8p</cpu_virtual>\n"
++                       "\t<cpu_physical>%08lx</cpu_physical>\n"
++                       "\t<bytes>%ld</bytes>\n"
++                       "\t<pid>%u</pid>\n"
++                       "\t<flags>%08lx</flags>\n"
++                       "\t<flags_string>%s</flags_string>\n"
++                       "</linux_mem_area>\n",
++#endif
++                       psRecord->psLinuxMemArea,
++                       LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++                       LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++                       LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++                       psRecord->psLinuxMemArea->ui32ByteSize,
++                       psRecord->pid,
++                       psRecord->ui32Flags,
++                       HAPFlagsToString(psRecord->ui32Flags)
++                      );
++
++unlock_and_return:
++
++    LinuxUnLockMutex(&gPVRSRVLock);
++    return Ret;
++}
++#endif 
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static off_t
++printMemoryRecords(char * buffer, size_t count, off_t off)
++{
++    DEBUG_MEM_ALLOC_REC *psRecord;
++    off_t Ret;
++
++    LinuxLockMutex(&gPVRSRVLock);
++
++
++    if(!off)
++    {
++        if(count < 1000)
++        {
++            Ret = 0;
++            goto unlock_and_return;
++        }
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++        
++        Ret =  printAppend(buffer, count, 0, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via kmalloc",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via kmalloc",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via vmalloc",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via vmalloc",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via alloc_pages",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via alloc_pages",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via ioremap",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via ioremap",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes reserved for \"IO\" memory areas",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via kmem_cache_alloc",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes mapped via kmap",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes mapped via kmap",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++
++        Ret =  printAppend(buffer, count, Ret, "\n");
++
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Current Water Mark for memory allocated from system RAM",
++                           g_SysRAMWaterMark);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Highest Water Mark for memory allocated from system RAM",
++                           g_SysRAMHighWaterMark);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Current Water Mark for memory allocated from IO memory",
++                           g_IOMemWaterMark);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Highest Water Mark for memory allocated from IO memory",
++                           g_IOMemHighWaterMark);
++
++        Ret =  printAppend(buffer, count, Ret, "\n");
++
++        Ret =  printAppend(buffer, count, Ret, "Details for all known allocations:\n"
++                           "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++                           "Type",
++                           "CpuVAddr",
++                           "CpuPAddr",
++                           "Bytes",
++                           "PID",
++                           "PrivateData",
++                           "Filename:Line");
++
++#else 
++              
++              
++        Ret =  printAppend(buffer, count, 0, "<meminfo>\n<meminfo_header>\n");
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr12\" description=\"kmap_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr13\" description=\"kmap_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++
++        Ret =  printAppend(buffer, count, Ret, "\n");
++
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++                           g_SysRAMWaterMark);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++                           g_SysRAMHighWaterMark);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++                           g_IOMemWaterMark);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++                           g_IOMemHighWaterMark);
++
++        Ret =  printAppend(buffer, count, Ret, "</meminfo_header>\n");
++
++#endif 
++
++        goto unlock_and_return;
++    }
++
++    if(count < 1000)
++    {
++        Ret = 0;
++        goto unlock_and_return;
++    }
++
++    for(psRecord=g_MemoryRecords; --off && psRecord; psRecord=psRecord->psNext)
++        ;
++    if(!psRecord)
++    {
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++              if(off == 0)
++              {
++                      Ret =  printAppend(buffer, count, 0, "</meminfo>\n");
++                      goto unlock_and_return;
++              }
++#endif
++        Ret = END_OF_FILE;
++        goto unlock_and_return;
++    }
++
++    if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++    {
++        Ret =  printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                           "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++                           "<allocation>\n"
++                           "\t<type>%s</type>\n"
++                           "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++                           "\t<cpu_physical>%08lx</cpu_physical>\n"
++                           "\t<bytes>%ld</bytes>\n"
++                           "\t<pid>%d</pid>\n"
++                           "\t<private>%s</private>\n"
++                           "\t<filename>%s</filename>\n"
++                           "\t<line>%ld</line>\n"
++                           "</allocation>\n",
++#endif
++                           DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++                           psRecord->pvCpuVAddr,
++                           psRecord->ulCpuPAddr,
++                           psRecord->ui32Bytes,
++                           psRecord->pid,
++                           "NULL",
++                           psRecord->pszFileName,
++                           psRecord->ui32Line);
++    }
++    else
++    {
++        Ret =  printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                           "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++                           "<allocation>\n"
++                           "\t<type>%s</type>\n"
++                           "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++                           "\t<cpu_physical>%08lx</cpu_physical>\n"
++                           "\t<bytes>%ld</bytes>\n"
++                           "\t<pid>%d</pid>\n"
++                           "\t<private>%s</private>\n"
++                           "\t<filename>%s</filename>\n"
++                           "\t<line>%ld</line>\n"
++                           "</allocation>\n",
++#endif
++                           DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++                           psRecord->pvCpuVAddr,
++                           psRecord->ulCpuPAddr,
++                           psRecord->ui32Bytes,
++                           psRecord->pid,
++                           KMemCacheNameWrapper(psRecord->pvPrivateData),
++                           psRecord->pszFileName,
++                           psRecord->ui32Line);
++    }
++
++unlock_and_return:
++
++    LinuxUnLockMutex(&gPVRSRVLock);
++    return Ret; 
++}
++#endif 
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const IMG_CHAR *
++HAPFlagsToString(IMG_UINT32 ui32Flags)
++{
++    static IMG_CHAR szFlags[50];
++    IMG_UINT32 ui32Pos = 0;
++    IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
++    IMG_CHAR *apszCacheTypes[] = {
++        "UNCACHED",
++        "CACHED",
++        "WRITECOMBINE",
++        "UNKNOWN"
++    };
++    IMG_CHAR *apszMapType[] = {
++        "KERNEL_ONLY",
++        "SINGLE_PROCESS",
++        "MULTI_PROCESS",
++        "FROM_EXISTING_PROCESS",
++        "NO_CPU_VIRTUAL",
++        "UNKNOWN"
++    };
++    
++    
++    if(ui32Flags & PVRSRV_HAP_UNCACHED){
++        ui32CacheTypeIndex=0;
++    }else if(ui32Flags & PVRSRV_HAP_CACHED){
++        ui32CacheTypeIndex=1;
++    }else if(ui32Flags & PVRSRV_HAP_WRITECOMBINE){
++        ui32CacheTypeIndex=2;
++    }else{
++        ui32CacheTypeIndex=3;
++        PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%d)",
++                 __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
++    }
++
++    
++    if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY){
++        ui32MapTypeIndex = 0;
++    }else if(ui32Flags & PVRSRV_HAP_SINGLE_PROCESS){
++        ui32MapTypeIndex = 1;
++    }else if(ui32Flags & PVRSRV_HAP_MULTI_PROCESS){
++        ui32MapTypeIndex = 2;
++    }else if(ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS){
++        ui32MapTypeIndex = 3;
++    }else if(ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL){
++        ui32MapTypeIndex = 4;
++    }else{
++        ui32MapTypeIndex = 5;
++        PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%d)",
++                 __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
++    }
++
++    ui32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++    sprintf(szFlags + ui32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++
++    return szFlags;
++}
++#endif
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,243 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/slab.h>
++
++typedef enum {
++    LINUX_MEM_AREA_IOREMAP,
++      LINUX_MEM_AREA_EXTERNAL_KV,
++    LINUX_MEM_AREA_IO,
++    LINUX_MEM_AREA_VMALLOC,
++    LINUX_MEM_AREA_ALLOC_PAGES,
++    LINUX_MEM_AREA_SUB_ALLOC,
++    LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++    LINUX_MEM_AREA_TYPE eAreaType;
++    union _uData
++    {
++        struct _sIORemap
++        {
++            
++            IMG_CPU_PHYADDR CPUPhysAddr;
++            IMG_VOID *pvIORemapCookie;
++        }sIORemap;
++        struct _sExternalKV
++        {
++            
++            IMG_CPU_PHYADDR CPUPhysAddr;
++            IMG_VOID *pvExternalKV;
++        }sExternalKV;
++        struct _sIO
++        {
++            
++            IMG_CPU_PHYADDR CPUPhysAddr;
++        }sIO;
++        struct _sVmalloc
++        {
++            
++            IMG_VOID *pvVmallocAddress;
++        }sVmalloc;
++        struct _sPageList
++        {
++            
++            struct page **pvPageList;
++        }sPageList;
++        struct _sSubAlloc
++        {
++            
++            LinuxMemArea *psParentLinuxMemArea;
++            IMG_UINT32 ui32ByteOffset;
++        }sSubAlloc;
++    }uData;
++
++    IMG_UINT32 ui32ByteSize;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
++typedef kmem_cache_t LinuxKMemCache;
++#else
++typedef struct kmem_cache LinuxKMemCache;
++#endif
++
++
++PVRSRV_ERROR LinuxMMInit(IMG_VOID);
++
++
++IMG_VOID LinuxMMCleanup(IMG_VOID);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *szFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID KFreeWrapper(IMG_VOID *pvCpuVAddr);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID VFreeWrapper(IMG_VOID *pvCpuVAddr);
++
++
++LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++    _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++    _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++                          IMG_UINT32 ui32Bytes,
++                          IMG_UINT32 ui32MappingFlags,
++                          IMG_CHAR *pszFileName,
++                          IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, 
++                                                                              IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_VOID IOUnmapWrapper(IMG_VOID *pvIORemapCookie);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMapWrapper(psPage) _KMapWrapper(psPage, __FILE__, __LINE__)
++#else
++#define KMapWrapper(psPage) _KMapWrapper(psPage, NULL, 0)
++#endif
++IMG_VOID *_KMapWrapper(struct page *psPage, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID KUnMapWrapper(struct page *psPage);
++
++
++LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags);
++
++
++IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject);
++
++
++const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache);
++
++
++struct page *ConvertKVToPage(IMG_VOID *pvAddr);
++
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++                                 IMG_UINT32 ui32ByteOffset,
++                                 IMG_UINT32 ui32Bytes);
++
++
++IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++
++IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++LINUX_MEM_AREA_TYPE LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea);
++
++
++const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags);
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/module.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/module.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,407 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++// #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#if defined(LDM_PLATFORM)
++#include <linux/platform_device.h>
++#endif 
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++
++
++#define CLASSNAME     "powervr"
++#define DRVNAME               "pvrsrvkm"
++#define DEVNAME               "pvrsrvkm"
++
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static int debug = DBGPRIV_WARNING;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#else
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Sets the level of debug output (default=0x4)");
++#endif
++#endif
++
++
++void PVRDebugSetLevel(IMG_UINT32 uDebugLevel);
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++EXPORT_SYMBOL(PVRGetDisplayClassJTable);
++EXPORT_SYMBOL(PVRGetBufferClassJTable);
++
++
++static int AssignedMajorNumber;
++
++
++extern int PVRSRV_BridgeDispatchKM(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
++static int PVRSRVOpen(struct inode* pInode, struct file* pFile);
++static int PVRSRVRelease(struct inode* pInode, struct file* pFile);
++
++PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++static struct file_operations pvrsrv_fops = {
++      owner:THIS_MODULE,
++      ioctl:PVRSRV_BridgeDispatchKM,
++      open:PVRSRVOpen,
++      release:PVRSRVRelease,
++      mmap:PVRMMap,
++};
++
++
++#if defined(LDM_PLATFORM)
++static int PVRSRVDriverRemove(struct platform_device *device);
++static int PVRSRVDriverProbe(struct platform_device *device);
++static int PVRSRVDriverSuspend(struct platform_device *device, pm_message_t state);
++static void PVRSRVDriverShutdown(struct platform_device *device);
++static int PVRSRVDriverResume(struct platform_device *device);
++
++static struct platform_driver powervr_driver = {
++      .driver = {
++              .name           = DEVNAME,
++      },
++      .probe          = PVRSRVDriverProbe,
++      .remove         = PVRSRVDriverRemove,
++      .suspend        = PVRSRVDriverSuspend,
++      .resume         = PVRSRVDriverResume,
++      .shutdown       = PVRSRVDriverShutdown,
++};
++
++static void PVRSRVDeviceRelease(struct device *device);
++
++static struct platform_device powervr_device = {
++      .name                   = DEVNAME,
++      .id                             = -1,
++      .dev                    = {
++              .release                = PVRSRVDeviceRelease
++      }
++};
++
++
++
++static int PVRSRVDriverProbe(struct platform_device *pDevice)
++{
++      SYS_DATA *psSysData;
++      PVRSRV_ERROR eError;
++      int error;
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++      pDevice->dev.driver_data = NULL;
++
++#if 0
++      
++      if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++#endif        
++      
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              if (SysInitialise() != PVRSRV_OK)
++              {
++                      return -ENODEV;
++              }
++
++              eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVDriverProbe: Failed to connect to resource manager"));
++                      error = -ENODEV;
++              }
++      }
++
++      return 0;
++}
++
++
++static int PVRSRVDriverRemove(struct platform_device *pDevice)
++{
++      SYS_DATA *psSysData;
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++      if(PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++      
++      if (SysAcquireData(&psSysData) == PVRSRV_OK)
++      {
++              SysDeinitialise(psSysData);
++      }
++
++#if 0
++      if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++#endif
++
++
++      return 0;
++}
++
++
++static void PVRSRVDriverShutdown(struct platform_device *pDevice)
++{
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++      (void) PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3);
++}
++
++
++static int PVRSRVDriverSuspend(struct platform_device *pDevice, pm_message_t state)
++{
++
++      PVR_DPF((PVR_DBG_WARNING,
++                      "PVRSRVDriverSuspend(pDevice=%p)",
++                      pDevice));
++
++      if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++
++static int PVRSRVDriverResume(struct platform_device *pDevice)
++{
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverResume(pDevice=%p)", pDevice));
++
++      if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++
++static void PVRSRVDeviceRelease(struct device *pDevice)
++{
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDeviceRelease(pDevice=%p)", pDevice));
++}
++#endif 
++
++static int PVRSRVOpen(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++      int Ret = 0;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVOpen"));
++
++    LinuxLockMutex(&gPVRSRVLock);
++
++      if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_TRUE) != PVRSRV_OK)
++      {
++              Ret = -ENOMEM;
++      }
++      
++    LinuxUnLockMutex(&gPVRSRVLock);
++
++      return Ret;
++}
++
++
++static int PVRSRVRelease(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++      int Ret = 0;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRelease"));
++
++      if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_FALSE) != PVRSRV_OK)
++      {
++              Ret = -ENOMEM;
++      }
++
++      return Ret;
++}
++
++
++static int __init PVRCore_Init(void)
++{
++      int error;
++#if !defined(LDM_PLATFORM)
++      PVRSRV_ERROR eError;
++#endif 
++      
++      AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++      if (AssignedMajorNumber <= 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
++
++              return -EBUSY;
++      }
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRCore_Init: major device %d", AssignedMajorNumber));
++
++      
++      if (CreateProcEntries ())
++      {
++              unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++              return -ENOMEM;
++      }
++
++    LinuxInitMutex(&gPVRSRVLock);
++
++#ifdef DEBUG
++      PVRDebugSetLevel(debug);
++#endif
++
++      if(LinuxMMInit() != PVRSRV_OK)
++    {
++        error = -ENOMEM;
++        goto init_failed;
++    }
++
++      LinuxBridgeInit();
++
++      PVRMMapInit();
++
++#if defined(LDM_PLATFORM)
++      if ((error = platform_driver_register(&powervr_driver)) != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
++
++              goto init_failed;
++      }
++
++      if ((error = platform_device_register(&powervr_device)) != 0)
++      {
++              platform_driver_unregister(&powervr_driver);
++
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
++
++              goto init_failed;
++      }
++#else 
++      
++      if ((eError = SysInitialise()) != PVRSRV_OK)
++      {
++              error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++              if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
++              {
++                      printk("\nAtlas wrapper (FPGA image) version mismatch");
++                      error = -ENODEV;
++              }
++#endif
++              goto init_failed;
++      }
++
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRCore_Init: Failed to connect to resource manager"));
++              error = -ENODEV;
++              goto init_failed;
++      }
++#endif 
++      return 0;
++
++init_failed:
++
++      (void) PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
++      PVRMMapCleanup();
++      LinuxMMCleanup();
++      RemoveProcEntries();
++      unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++      return error;
++
++} 
++
++
++static void __exit PVRCore_Cleanup(void)
++{
++      SYS_DATA *psSysData;
++#if !defined(LDM_PLATFORM)
++      PVRSRV_ERROR eError;
++#endif 
++
++      SysAcquireData(&psSysData);
++      unregister_chrdev(AssignedMajorNumber, DRVNAME);
++      
++#if defined (LDM_PLATFORM)
++      platform_device_unregister(&powervr_device);
++      platform_driver_unregister(&powervr_driver);
++#else 
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"KernelResManDisconnect: Failed to disconnect"));
++      }
++
++      
++      SysDeinitialise(psSysData);
++#endif 
++
++      PVRMMapCleanup();
++
++      LinuxMMCleanup();
++
++      LinuxBridgeDeInit();
++
++      RemoveProcEntries();
++
++      PVR_DPF((PVR_DBG_WARNING,"unloading"));
++}
++
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,134 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/module.h>
++
++#include <img_defs.h>
++#include <services.h>
++
++#include "mutex.h"
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    mutex_init(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    mutex_lock(psPVRSRVMutex);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
++    {
++        return PVRSRV_ERROR_GENERIC;
++    }else{
++        return PVRSRV_OK;
++    }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    return mutex_trylock(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    mutex_unlock(psPVRSRVMutex);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    return mutex_is_locked(psPVRSRVMutex);
++}
++
++
++#else 
++
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    init_MUTEX(&psPVRSRVMutex->sSemaphore);
++    atomic_set(&psPVRSRVMutex->Count, 0);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    down(&psPVRSRVMutex->sSemaphore);
++    atomic_dec(&psPVRSRVMutex->Count);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
++    {
++        
++        return PVRSRV_ERROR_GENERIC;
++    }else{
++        atomic_dec(&psPVRSRVMutex->Count);
++        return PVRSRV_OK;
++    }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
++    if(Status == 0)
++    {
++        atomic_dec(&psPVRSRVMutex->Count);
++    }
++
++    return Status;
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    atomic_inc(&psPVRSRVMutex->Count);
++    up(&psPVRSRVMutex->sSemaphore);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    IMG_INT32 iCount;
++    
++    iCount = atomic_read(&psPVRSRVMutex->Count);
++
++    return (IMG_BOOL)iCount;
++}
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,70 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_LINUX_MUTEX_H_
++#define __INCLUDED_LINUX_MUTEX_H_
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++typedef struct mutex PVRSRV_LINUX_MUTEX;
++
++#else 
++
++
++typedef struct {
++    struct semaphore sSemaphore;
++    
++    atomic_t Count;
++}PVRSRV_LINUX_MUTEX;
++
++#endif
++
++
++extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1617 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
++#else
++PVRSRV_ERROR _OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *szFilename, IMG_UINT32 ui32Line)
++#endif
++{
++    PVR_UNREFERENCED_PARAMETER(phBlockAlloc);
++    PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    *ppvCpuVAddr = _KMallocWrapper(ui32Size, szFilename, ui32Line);
++#else
++    *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++    if(*ppvCpuVAddr)
++    {
++        return PVRSRV_OK;
++    }
++    else
++    {
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++}
++
++
++PVRSRV_ERROR OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
++{     
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      PVR_UNREFERENCED_PARAMETER(ui32Size);
++      PVR_UNREFERENCED_PARAMETER(hBlockAlloc);
++
++    KFreeWrapper(pvCpuVAddr);
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSAllocPages(IMG_UINT32 ui32AllocFlags,
++               IMG_UINT32 ui32Size,
++               IMG_VOID **ppvCpuVAddr,
++               IMG_HANDLE *phOSMemHandle)
++{
++      LinuxMemArea *psLinuxMemArea;
++
++#if 0
++    
++    if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
++    {
++        ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++        ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++    }
++#endif
++
++    switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++      {
++        case PVRSRV_HAP_KERNEL_ONLY:
++        {
++            psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_OUT_OF_MEMORY;
++            }
++            break;
++        }
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        {
++            
++            
++            psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_OUT_OF_MEMORY;
++            }
++                      PVRMMapRegisterArea("Import Arena", psLinuxMemArea, ui32AllocFlags);
++            break;
++        }
++
++              case PVRSRV_HAP_MULTI_PROCESS:
++              {
++            
++#if defined(VIVT_CACHE) || defined(__sh__)
++            
++            ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++            psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_OUT_OF_MEMORY;
++            }
++                      PVRMMapRegisterArea("Import Arena", psLinuxMemArea, ui32AllocFlags);
++            break;
++        }
++        default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
++            *ppvCpuVAddr = NULL;
++            *phOSMemHandle = (IMG_HANDLE)0;
++                      return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++    *phOSMemHandle = psLinuxMemArea;
++    
++    LinuxMemAreaRegister(psLinuxMemArea);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
++{   
++    LinuxMemArea *psLinuxMemArea;
++    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++    
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++    switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++            break;
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        case PVRSRV_HAP_MULTI_PROCESS:
++            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++            {
++                PVR_DPF((PVR_DBG_ERROR,
++                         "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++                                        "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++                         ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        default:
++                      PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
++                    __FUNCTION__, ui32AllocFlags));
++            return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++                  IMG_UINT32 ui32ByteOffset,
++                  IMG_UINT32 ui32Bytes,
++                  IMG_UINT32 ui32Flags,
++                  IMG_HANDLE *phOSMemHandleRet)
++{
++    LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++    PVRSRV_ERROR eError = PVRSRV_OK;
++
++    psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++    
++    psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++    if(!psLinuxMemArea)
++    {
++        *phOSMemHandleRet = NULL;
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++    *phOSMemHandleRet = psLinuxMemArea;
++
++    
++    if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++    {
++        return PVRSRV_OK;
++    }
++
++    
++    if(psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO)
++    {
++        eError = PVRMMapRegisterArea("Physical",
++                                     psLinuxMemArea,
++                                     0); 
++        if(eError != PVRSRV_OK)
++        {
++            goto failed_register_area;
++        }
++    }
++    else if(psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES)
++    {
++        eError = PVRMMapRegisterArea("Import Arena",
++                                     psLinuxMemArea,
++                                     0); 
++        if(eError != PVRSRV_OK)
++        {
++            goto failed_register_area;
++        }
++    }
++
++    return PVRSRV_OK;
++
++failed_register_area:
++    *phOSMemHandleRet = NULL;
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++    return eError;
++}
++
++PVRSRV_ERROR
++OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++    LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++    PVRSRV_ERROR eError;
++    
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++    
++    psParentLinuxMemArea = psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++    
++    if(!(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++       && (psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO
++           || psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES)
++      )
++    {
++        eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++        if(eError != PVRSRV_OK)
++        {
++            return eError;
++        }
++    }
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++IMG_CPU_PHYADDR
++OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++    PVR_ASSERT(hOSMemHandle);
++
++    return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++
++
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++    unsigned char *Src,*Dst;
++    int i;
++
++    Src=(unsigned char *)pvSrc;
++    Dst=(unsigned char *)pvDst;
++    for(i=0;i<ui32Size;i++)
++    {
++        Dst[i]=Src[i];
++    }
++#else
++      memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMSET)
++    unsigned char *Buff;
++    int i;
++
++    Buff=(unsigned char *)pvDest;
++    for(i=0;i<ui32Size;i++)
++    {
++        Buff[i]=ui8Value;
++    }
++#else
++      memset(pvDest, (int) ui8Value, (size_t) ui32Size);
++#endif
++}
++
++
++IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
++{
++      return (strcpy(pszDest, pszSrc));
++}
++
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
++{
++    va_list argList;
++    IMG_INT32 iCount;
++
++    va_start(argList, pszFormat);
++    iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
++    va_end(argList);
++
++    return iCount;
++}
++
++IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++      volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++      if(*pui32Access)
++      {
++              if(psResource->ui32ID == ui32ID)
++              {
++                      psResource->ui32ID = 0;
++                      *pui32Access = 0;
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process.")); 
++              }
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
++      }
++}
++
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
++{
++      psResource->ui32ID = 0;
++      psResource->ui32Lock = 0;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
++{
++      OSBreakResourceLock (psResource, psResource->ui32ID);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
++{
++      ENV_DATA                *psEnvData;
++      
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID *)&psEnvData, IMG_NULL) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, 
++                                      &psEnvData->pvBridgeData, IMG_NULL) != PVRSRV_OK)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++
++      
++      psEnvData->bMISRInstalled = IMG_FALSE;
++      psEnvData->bLISRInstalled = IMG_FALSE;
++
++      
++      psEnvData->psPCIDev = NULL;
++
++      
++      *ppvEnvSpecificData = psEnvData;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
++{
++      ENV_DATA                *psEnvData = (ENV_DATA*)pvEnvSpecificData;
++
++      PVR_ASSERT(!psEnvData->bMISRInstalled);
++      PVR_ASSERT(!psEnvData->bLISRInstalled);
++      PVR_ASSERT(psEnvData->psPCIDev == NULL);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0x1000, psEnvData->pvBridgeData, IMG_NULL);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg)
++{
++        struct pci_dev *dev;
++        IMG_UINT32 ui32Value;
++
++        dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++        if (dev)
++        {
++                pci_read_config_dword(dev, (int) ui32Reg, (u32 *) & ui32Value);
++                return (ui32Value);
++        }
++        else
++        {
++                return (0);
++        }
++}
++
++
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value)
++{
++        struct pci_dev *dev;
++
++        dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++        if (dev)
++        {
++                pci_write_config_dword(dev, (int) ui32Reg, (u32) ui32Value);
++        }
++}
++
++ 
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
++{
++      schedule();
++}
++
++
++ 
++IMG_UINT32 OSClockus(IMG_VOID)
++{
++      unsigned long time, j = jiffies;
++
++      time = j * (1000000 / HZ);
++
++      return time;
++}
++
++
++ 
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
++{
++      udelay(ui32Timeus);
++}
++
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
++{
++      if (in_interrupt())
++      {
++              return KERNEL_ID;
++      }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++      return current->pgrp;
++#else
++      return current->signal->pgrp;
++#endif
++}
++
++
++IMG_UINT32 OSGetPageSize(IMG_VOID)
++{
++#if defined(__sh__)
++      IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
++
++      return (ui32ReturnValue);
++#else
++      return PAGE_SIZE;
++#endif
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++              , struct pt_regs *regs
++#endif
++              )
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_BOOL bStatus = IMG_FALSE;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++      PVR_UNREFERENCED_PARAMETER(regs);
++#endif        
++      psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
++      if(!psDeviceNode)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++              goto out;
++      }
++
++      bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++      if (bStatus)
++      {
++              SYS_DATA *psSysData = psDeviceNode->psSysData;
++              ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++              tasklet_schedule(&psEnvData->sMISRTasklet);
++      }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++      return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++              , struct pt_regs *regs
++#endif
++              )
++{
++      SYS_DATA *psSysData;
++      IMG_BOOL bStatus = IMG_FALSE;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++      PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++      psSysData = (SYS_DATA *)dev_id;
++      if(!psSysData)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++              goto out;
++      }
++
++      bStatus = PVRSRVSystemLISR(psSysData);
++
++      if (bStatus)
++      {
++              ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++              tasklet_schedule(&psEnvData->sMISRTasklet);
++      }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++      return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++                                                                      IMG_UINT32 ui32Irq,
++                                                                      IMG_CHAR *pszISRName,
++                                                                      IMG_VOID *pvDeviceNode)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x", pszISRName, ui32Irq, pvDeviceNode));
++
++      if(request_irq(ui32Irq, DeviceISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++              SA_SHIRQ
++#else
++              IRQF_SHARED
++#endif
++              , pszISRName, pvDeviceNode))
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psEnvData->ui32IRQ = ui32Irq;
++      psEnvData->pvISRCookie = pvDeviceNode;
++      psEnvData->bLISRInstalled = IMG_TRUE;
++
++      return PVRSRV_OK;       
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (!psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++              
++      PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ,  psEnvData->pvISRCookie));
++
++      free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++      psEnvData->bLISRInstalled = IMG_FALSE;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq, pvSysData));
++
++      if(request_irq(ui32Irq, SystemISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++              SA_SHIRQ
++#else
++              IRQF_SHARED
++#endif
++              , "PowerVR", pvSysData))
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psEnvData->ui32IRQ = ui32Irq;
++      psEnvData->pvISRCookie = pvSysData;
++      psEnvData->bLISRInstalled = IMG_TRUE;
++
++      return PVRSRV_OK;       
++}
++
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (!psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++      free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++      psEnvData->bLISRInstalled = IMG_FALSE;
++
++      return PVRSRV_OK;
++}
++
++
++static void MISRWrapper(unsigned long data)
++{
++      SYS_DATA *psSysData;
++
++      psSysData = (SYS_DATA *)data;
++      
++      PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bMISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++      tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
++
++      psEnvData->bMISRInstalled = IMG_TRUE;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (!psEnvData->bMISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Uninstalling MISR"));
++
++      tasklet_kill(&psEnvData->sMISRTasklet);
++
++      psEnvData->bMISRInstalled = IMG_FALSE;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bMISRInstalled)
++      {
++              tasklet_schedule(&psEnvData->sMISRTasklet);
++      }
++
++      return PVRSRV_OK;       
++}
++
++
++#endif 
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#define       OS_TAS(p)       xchg((p), 1)
++#else
++#define       OS_TAS(p)       tas(p)
++#endif
++PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE         *psResource,
++                                                              IMG_UINT32                      ui32ID)
++
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(!OS_TAS(&psResource->ui32Lock))
++              psResource->ui32ID = ui32ID;
++      else
++              eError = PVRSRV_ERROR_GENERIC;
++
++      return eError;
++}
++
++
++PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++      volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(*pui32Access)
++      {
++              if(psResource->ui32ID == ui32ID)
++              {
++                      psResource->ui32ID = 0;
++                      *pui32Access = 0;
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource)); 
++                      PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
++                      eError = PVRSRV_ERROR_GENERIC;
++              }
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
++              eError = PVRSRV_ERROR_GENERIC;
++      }
++      
++      return eError;
++}
++
++
++IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++      volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++      return  (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
++                      ?       IMG_TRUE
++                      :       IMG_FALSE;
++}
++
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID *pvLinAddr)
++{
++    struct page *page;
++    IMG_CPU_PHYADDR CpuPAddr;
++      IMG_UINT32 ui32PageOffset = (IMG_UINT32)pvLinAddr & (PAGE_SIZE - 1);
++
++      page = ConvertKVToPage(pvLinAddr);
++    CpuPAddr.uiAddr = (IMG_UINTPTR_T) page_to_phys(page) + ui32PageOffset;
++
++    return CpuPAddr;
++}
++
++
++IMG_VOID *
++OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
++               IMG_UINT32 ui32Bytes,
++               IMG_UINT32 ui32MappingFlags,
++               IMG_HANDLE *phOSMemHandle)
++{
++    if(phOSMemHandle)
++    {
++        *phOSMemHandle = (IMG_HANDLE)0;
++    }
++
++    if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++    {
++        IMG_VOID *pvIORemapCookie;
++        pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
++        if(pvIORemapCookie == IMG_NULL)
++        {
++            return NULL;
++        }
++        return pvIORemapCookie;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++                 " (Use OSReservePhys otherwise)"));
++        *phOSMemHandle = (IMG_HANDLE)0;
++        return NULL;
++    }
++
++    PVR_ASSERT(0);
++    return NULL;
++}
++
++IMG_BOOL
++OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc)
++{
++    PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, ui32Bytes, pvLinAddr));
++
++    PVR_UNREFERENCED_PARAMETER(hPageAlloc);   
++
++    if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++    {
++        IOUnmapWrapper(pvLinAddr);
++        return IMG_TRUE;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                     "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++                     " (Use OSUnReservePhys otherwise)"));
++        return IMG_FALSE;
++    }
++
++    PVR_ASSERT(0);
++    return IMG_FALSE;
++}
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++                        IMG_VOID *pvCPUVAddr,
++              IMG_UINT32 ui32Bytes,
++              IMG_UINT32 ui32MappingFlags,
++              IMG_HANDLE *phOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++
++#if 0
++    
++    if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++    {
++        ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++        ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++    }
++#endif
++
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++        {
++                      psLinuxMemArea = NewExternalKVLinuxMemArea(BasePAddr, pvCPUVAddr, ui32Bytes, ui32MappingFlags);
++              
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        }
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        {
++                      psLinuxMemArea = NewExternalKVLinuxMemArea(BasePAddr, pvCPUVAddr, ui32Bytes, ui32MappingFlags);
++
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            
++#if defined(VIVT_CACHE) || defined(__sh__)
++            
++            ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++                      psLinuxMemArea = NewExternalKVLinuxMemArea(BasePAddr, pvCPUVAddr, ui32Bytes, ui32MappingFlags);
++
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        default:
++            PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
++            *phOSMemHandle = (IMG_HANDLE)0;
++            return PVRSRV_ERROR_GENERIC;
++    }
++    
++    *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++
++    LinuxMemAreaRegister(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
++                IMG_UINT32 ui32Bytes,
++                IMG_UINT32 ui32MappingFlags,
++                IMG_HANDLE hOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++            break;
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++            {
++                 PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++                          __FUNCTION__, pvCpuVAddr, ui32Bytes,
++                          ui32MappingFlags, hOSMemHandle));
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        }
++        default:
++        {
++            PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++    }
++
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
++              IMG_UINT32 ui32Bytes,
++              IMG_UINT32 ui32MappingFlags,
++              IMG_VOID **ppvCpuVAddr,
++              IMG_HANDLE *phOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++
++#if 0
++    
++    if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++    {
++        ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++        ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++    }
++#endif
++
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++        {
++            
++            psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++                      }
++            break;
++        }
++        case PVRSRV_HAP_SINGLE_PROCESS:
++              {
++            
++            psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++                      }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            
++#if defined(VIVT_CACHE) || defined(__sh__)
++            
++            ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++            psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        default:
++            PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags));
++            *ppvCpuVAddr = NULL;
++            *phOSMemHandle = (IMG_HANDLE)0;
++            return PVRSRV_ERROR_GENERIC;
++      }
++
++    *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++    *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++    LinuxMemAreaRegister(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSUnReservePhys(IMG_VOID *pvCpuVAddr,
++                IMG_UINT32 ui32Bytes,
++                IMG_UINT32 ui32MappingFlags,
++                IMG_HANDLE hOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++    
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++            break;
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++            {
++                 PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++                          __FUNCTION__, pvCpuVAddr, ui32Bytes,
++                          ui32MappingFlags, hOSMemHandle));
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        }
++        default:
++        {
++            PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags));
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++    }
++    
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr)
++{
++    PVR_DPF((PVR_DBG_ERROR, "%s: Not available on Linux\n"));
++    return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++    
++#if 0
++    unsigned long ulOrder;
++    struct page *psPage;
++    
++    ui32Size = PAGE_ALIGN(ui32Size);
++    
++    ulOrder = get_order(ui32Size);
++    
++    psPage = alloc_pages(GFP_KERNEL, ulOrder);
++    if(!psPage)
++    {
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++    *pLinAddr = page_address(psPage);
++    pPhysAddr->uiAddr = page_to_phys(psPage);
++    
++    
++    while(ui32Size > 0)
++    {
++        SetPageReserved(psPage);
++        psPage++;
++        ui32Size -= PAGE_SIZE;
++    }
++    
++    return PVRSRV_OK;
++#endif
++}
++
++
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr)
++{
++    return PVRSRV_OK;
++#if 0
++    unsigned long ulOrder;
++    struct page *psPage;
++    
++    ui32Size = PAGE_ALIGN(ui32Size);
++    ulOrder = get_order(ui32Size);
++
++    psPage = virt_to_page((IMG_VOID *)LinAddr);
++
++    while(ui32Size > 0)
++    {
++        ClearPageReserved(psPage);
++        psPage++;
++        ui32Size -= PAGE_SIZE;
++    }
++
++    __free_pages(psPage, ulOrder);
++
++    return PVRSRV_OK;
++#endif
++}
++
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++      return (IMG_UINT32) readl(pvLinRegBaseAddr+ui32Offset);
++}
++
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++      writel(ui32Value, pvLinRegBaseAddr+ui32Offset);
++}
++
++#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      int err;
++      IMG_UINT32 i;
++
++      if (psEnvData->psPCIDev != NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: A device has already been acquired"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psEnvData->psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, psEnvData->psPCIDev);
++      if (psEnvData->psPCIDev == NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      err = pci_enable_device(psEnvData->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't enable device (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (eFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++              pci_set_master(psEnvData->psPCIDev);
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++      {
++              psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->psPCIDev == NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIIRQ: Device hasn't been acquired"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      *pui32IRQ = psEnvData->psPCIDev->irq;
++
++      return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++      HOST_PCI_ADDR_RANGE_FUNC_LEN,
++      HOST_PCI_ADDR_RANGE_FUNC_START,
++      HOST_PCI_ADDR_RANGE_FUNC_END,
++      HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++      HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
++static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++                                                                       IMG_VOID *pvSysData,
++                                                                       IMG_UINT32 ui32Index
++                                                                       
++)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->psPCIDev == NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Device hasn't been acquired"));
++              return 0;
++      }
++
++      if (ui32Index >= DEVICE_COUNT_RESOURCE)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
++              return 0;
++
++      }
++
++      switch (eFunc)
++      {
++              case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++                      return pci_resource_len(psEnvData->psPCIDev, ui32Index);
++              case HOST_PCI_ADDR_RANGE_FUNC_START:
++                      return pci_resource_start(psEnvData->psPCIDev, ui32Index);
++              case HOST_PCI_ADDR_RANGE_FUNC_END:
++                      return pci_resource_end(psEnvData->psPCIDev, ui32Index);
++              case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++              {
++
++                      
++#ifdef FIXME
++                      int err;
++                      err = pci_request_region(psEnvData->psPCIDev, ui32Index, "PowerVR");
++                      if (err != 0)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
++                              return 0;
++                      }
++#endif
++                      psEnvData->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++                      return 1;
++              }
++              case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++                      if (psEnvData->abPCIResourceInUse[ui32Index])
++                      {
++                              pci_release_region(psEnvData->psPCIDev, ui32Index);
++                              psEnvData->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++                      }
++                      return 1;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
++                      break;
++      }
++
++      return 0;
++}
++
++IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, pvSysData, ui32Index); 
++}
++
++IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, pvSysData, ui32Index); 
++}
++
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, pvSysData, ui32Index); 
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData,
++                                                                 IMG_UINT32 ui32Index
++                                                                 
++)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      int i;
++
++      if (psEnvData->psPCIDev == NULL)
++      {
++              return PVRSRV_OK;
++      }
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++      {
++              if (psEnvData->abPCIResourceInUse[i])
++              {
++                      PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++                      pci_release_region(psEnvData->psPCIDev, i);
++                      psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++              }
++      }
++
++      pci_disable_device(psEnvData->psPCIDev);
++
++      psEnvData->psPCIDev = NULL;
++
++      return PVRSRV_OK;
++}
++#endif 
++
++typedef struct TIMER_CALLBACK_DATA_TAG
++{
++      PFN_TIMER_FUNC          pfnTimerFunc;
++      IMG_VOID                *pvData;        
++      struct timer_list       sTimer;
++      IMG_UINT32              ui32Delay;
++      IMG_BOOL                bActive;
++}TIMER_CALLBACK_DATA;
++
++static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
++      
++      if (!psTimerCBData->bActive)
++              return;
++
++      
++      psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++      
++      
++      mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData;
++      
++      
++      if(!pfnTimerFunc)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));                
++              return IMG_NULL;                
++      }
++      
++      
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                      sizeof(TIMER_CALLBACK_DATA), 
++                                      (IMG_VOID **)&psTimerCBData, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: failed to allocate memory for TIMER_CALLBACK_DATA"));              
++              return IMG_NULL;        
++      }
++
++      psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++      psTimerCBData->pvData = pvData;
++      psTimerCBData->bActive = IMG_TRUE;
++      
++      
++
++
++      psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++                                                              ?       1
++                                                              :       ((HZ * ui32MsTimeout) / 1000);
++      
++      init_timer(&psTimerCBData->sTimer);
++      
++      
++      psTimerCBData->sTimer.function = OSTimerCallbackWrapper;
++      psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
++      psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++      
++      
++      add_timer(&psTimerCBData->sTimer);
++      
++      return (IMG_HANDLE)psTimerCBData;
++}
++
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
++      
++      
++      psTimerCBData->bActive = IMG_FALSE;
++
++      
++      del_timer_sync(&psTimerCBData->sTimer); 
++      
++      
++      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), psTimerCBData, IMG_NULL);
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(psEventObject)
++      {
++              struct completion *psCompletion;
++
++              if(pszName)
++              {
++                      
++                      strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
++              }
++              else
++              {
++                              
++                      static IMG_UINT16 ui16NameIndex = 0;                    
++                      snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++              }
++              
++              
++              if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                      sizeof(struct completion), 
++                                      (IMG_VOID **)&psCompletion, IMG_NULL) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: failed to allocate memory for completion variable"));             
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;      
++              }
++
++              init_completion(psCompletion);
++      
++              psEventObject->hOSEventKM = (IMG_HANDLE) psCompletion;
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(psEventObject)
++      {
++              if(psEventObject->hOSEventKM)
++              {
++                      struct completion *psCompletion = (struct completion *) psEventObject->hOSEventKM;
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(struct completion), psCompletion, IMG_NULL);
++              }
++              else
++              {
++          PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++              eError = PVRSRV_ERROR_INVALID_PARAMS;
++              }
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(hOSEventKM)
++      {
++              LinuxUnLockMutex(&gPVRSRVLock);         
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))             
++              wait_for_completion_timeout((struct completion *)hOSEventKM, msecs_to_jiffies(ui32MSTimeout));
++#else
++              wait_for_completion((struct completion *)hOSEventKM);
++#endif        
++              LinuxLockMutex(&gPVRSRVLock);
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: hOSEventKM is not a valid handle"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(hOSEventKM)
++      {
++              complete_all((struct completion *) hOSEventKM);         
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
++{
++      return capable(CAP_SYS_MODULE) != 0;
++}
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, 
++                          IMG_VOID *pvDest, 
++                          IMG_VOID *pvSrc, 
++                          IMG_UINT32 ui32Bytes)
++{
++      PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++      if(copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
++              return PVRSRV_OK;
++      else
++              return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess, 
++                             IMG_VOID *pvDest, 
++                             IMG_VOID *pvSrc, 
++                             IMG_UINT32 ui32Bytes)
++{
++      PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++      if(copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
++              return PVRSRV_OK;
++      else
++              return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
++{
++      int linuxType;
++
++      if(eVerification == PVR_VERIFY_READ)
++              linuxType = VERIFY_READ;
++      else if(eVerification == PVR_VERIFY_WRITE)
++              linuxType = VERIFY_WRITE;
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: Unknown eVerification", __FUNCTION__));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      return (IMG_BOOL)access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1388 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined (PDUMP)
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>                        
++
++static IMG_BOOL PDumpWriteString2             (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
++static IMG_BOOL PDumpWriteILock                       (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
++static IMG_VOID DbgSetFrame                           (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++static IMG_UINT32 DbgGetFrame                 (PDBG_STREAM psStream);
++static IMG_VOID DbgSetMarker                  (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++static IMG_UINT32 DbgWrite                            (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL                (1)
++
++#define MIN(a,b)       (a > b ? b : a)
++
++#define MAX_FILE_SIZE 0x40000000
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
++
++#define PDUMP_STREAM_PARAM2                   0
++#define PDUMP_STREAM_SCRIPT2          1
++#define PDUMP_STREAM_DRIVERINFO               2
++#define PDUMP_NUM_STREAMS                     3
++
++
++
++IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = {        "ParamStream2",
++                                                                                              "ScriptStream2",
++                                                                                              "DriverInfoStream"};
++
++#define __PDBG_PDUMP_STATE_GET_MSG_STRING(ERROR) \
++      IMG_CHAR *pszMsg = gsDBGPdumpState.pszMsg; \
++      if(!pszMsg) return ERROR
++
++#define __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(ERROR) \
++      IMG_CHAR *pszScript = gsDBGPdumpState.pszScript; \
++      if(!pszScript) return ERROR
++
++#define __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(ERROR) \
++      IMG_CHAR *pszScript = gsDBGPdumpState.pszScript; \
++      IMG_CHAR *pszFile = gsDBGPdumpState.pszFile; \
++      if(!pszScript || !pszFile) return ERROR
++
++typedef struct PDBG_PDUMP_STATE_TAG 
++{
++      PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++      IMG_UINT32 ui32ParamFileNum;
++
++      IMG_CHAR *pszMsg;
++      IMG_CHAR *pszScript;
++      IMG_CHAR *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
++
++#define SZ_MSG_SIZE_MAX                       PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX            PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX  PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++
++
++
++void DBGDrvGetServiceTable(IMG_VOID **fn_table);
++
++IMG_VOID PDumpInit(IMG_VOID)
++{     
++      IMG_UINT32 i=0;
++
++      
++      if (!gpfnDbgDrv)
++      {
++              DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
++
++              
++
++              
++              if (gpfnDbgDrv == IMG_NULL)
++              {       
++                      return;
++              }
++      
++              if(!gsDBGPdumpState.pszFile)
++              {
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0) != PVRSRV_OK)
++                      {
++                              goto init_failed;
++                      }
++              }       
++              
++              if(!gsDBGPdumpState.pszMsg)
++              {
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0) != PVRSRV_OK)
++                      {
++                              goto init_failed;
++                      }
++              }
++              
++              if(!gsDBGPdumpState.pszScript)
++              {
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0) != PVRSRV_OK)
++                      {
++                              goto init_failed;               
++                      }
++              }
++              
++              for(i=0; i < PDUMP_NUM_STREAMS; i++)
++              {
++                      gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i], 
++                                                                                                              DEBUG_CAPMODE_FRAMED, 
++                                                                                                              DEBUG_OUTMODE_STREAMENABLE, 
++                                                                                                              0,
++                                                                                                              10);
++                      
++                      gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
++                      gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
++              }
++
++              PDUMPCOMMENT("Start of Init Phase");
++      }
++
++      return;
++
++init_failed:  
++
++      if(gsDBGPdumpState.pszFile)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++              gsDBGPdumpState.pszFile = IMG_NULL;
++      }
++      
++      if(gsDBGPdumpState.pszScript)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++              gsDBGPdumpState.pszScript = IMG_NULL;
++      }
++
++      if(gsDBGPdumpState.pszMsg)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++              gsDBGPdumpState.pszMsg = IMG_NULL;
++      }
++
++      gpfnDbgDrv = IMG_NULL;
++}
++
++
++IMG_VOID PDumpDeInit(IMG_VOID)
++{     
++      IMG_UINT32 i=0;
++
++      for(i=0; i < PDUMP_NUM_STREAMS; i++)
++      {
++              gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++      }
++
++      if(gsDBGPdumpState.pszFile)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++              gsDBGPdumpState.pszFile = IMG_NULL;
++      }
++      
++      if(gsDBGPdumpState.pszScript)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++              gsDBGPdumpState.pszScript = IMG_NULL;
++      }
++
++      if(gsDBGPdumpState.pszMsg)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++              gsDBGPdumpState.pszMsg = IMG_NULL;
++      }
++
++      gpfnDbgDrv = IMG_NULL;
++}
++
++IMG_VOID PDumpEndInitPhase(IMG_VOID)
++{
++      IMG_UINT32 i;
++      
++      PDUMPCOMMENT("End of Init Phase");
++
++      for(i=0; i < PDUMP_NUM_STREAMS; i++)
++      {
++              gpfnDbgDrv->pfnEndInitPhase(gsDBGPdumpState.psStream[i]);
++      }
++}
++
++void PDumpComment(IMG_CHAR *pszFormat, ...)
++{
++      __PDBG_PDUMP_STATE_GET_MSG_STRING();    
++
++      
++      vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, (IMG_CHAR *) (&pszFormat + 1));
++
++      PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++void PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
++{     
++      __PDBG_PDUMP_STATE_GET_MSG_STRING();
++
++      
++      vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, (IMG_CHAR *) (&pszFormat + 1));
++
++      PDumpCommentKM(pszMsg, ui32Flags);
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
++{
++      return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++
++IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
++{
++      return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
++}
++
++PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data, IMG_UINT32 ui32Flags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++void PDumpReg(IMG_UINT32 ui32Reg,IMG_UINT32 ui32Data)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Flags)
++{
++      #define POLL_DELAY                      1000
++      #define POLL_COUNT_LONG         (2000000000 / POLL_DELAY)
++      #define POLL_COUNT_SHORT        (1000000 / POLL_DELAY)
++
++      IMG_UINT32      ui32PollCount;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++      if (((ui32RegAddr == EUR_CR_EVENT_STATUS) && 
++              (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK)) ||
++              ((ui32RegAddr == EUR_CR_EVENT_STATUS) && 
++              (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK)) ||
++              ((ui32RegAddr == EUR_CR_EVENT_STATUS) && 
++              (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK)))
++      {
++              ui32PollCount = POLL_COUNT_LONG;
++      }
++      else
++      {
++              ui32PollCount = POLL_COUNT_SHORT;
++      }
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n", ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount, POLL_DELAY);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask)
++{
++      return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS);
++}
++
++IMG_VOID PDumpMallocPages (PVRSRV_DEVICE_TYPE eDeviceType,
++                           IMG_UINT32         ui32DevVAddr,
++                           IMG_CPU_VIRTADDR   pvLinAddr,
++                           IMG_HANDLE         hOSMemHandle,
++                           IMG_UINT32         ui32NumBytes,
++                           IMG_HANDLE         hUniqueTag)
++{
++    IMG_UINT32      ui32Offset;
++      IMG_UINT32              ui32NumPages;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_UINT32              ui32Page;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++      PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++
++
++      PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(hOSMemHandle);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %d\r\n", ui32DevVAddr, ui32NumBytes, SGX_MMU_PAGE_SIZE);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      
++
++      ui32Offset = 0;
++      ui32NumPages    = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      while (ui32NumPages--)
++      {
++              sCpuPAddr   = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++              PVR_ASSERT((sCpuPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++              ui32Offset  += SGX_MMU_PAGE_SIZE;
++              sDevPAddr       = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++              ui32Page        = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++              snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "MALLOC :SGXMEM:PA_%p%8.8lX %d %d 0x%8.8lX\r\n", hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, ui32Page * SGX_MMU_PAGE_SIZE);
++              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++      }
++}
++
++IMG_VOID PDumpMallocPageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++                               IMG_CPU_VIRTADDR   pvLinAddr,
++                               IMG_UINT32         ui32NumBytes,
++                               IMG_HANDLE         hUniqueTag)
++{
++      IMG_PUINT8              pui8LinAddr;
++      IMG_UINT32              ui32NumPages;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_UINT32              ui32Page;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %d\r\n", ui32NumBytes, SGX_MMU_PAGE_SIZE);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      
++
++      pui8LinAddr             = (IMG_PUINT8) pvLinAddr;
++      ui32NumPages    = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      while (ui32NumPages--)
++      {
++              sCpuPAddr       = OSMapLinToCPUPhys(pui8LinAddr);
++              sDevPAddr       = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++              ui32Page        = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++              pui8LinAddr     += SGX_MMU_PAGE_SIZE;
++
++              snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "MALLOC :SGXMEM:PA_%p%8.8lX 0x%x %d 0x%8.8lX\r\n", hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, ui32Page * SGX_MMU_PAGE_SIZE);
++              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++      }
++}
++
++IMG_VOID PDumpFreePages       (BM_HEAP                        *psBMHeap,
++                         IMG_DEV_VIRTADDR  sDevVAddr,
++                         IMG_UINT32        ui32NumBytes,
++                         IMG_HANDLE        hUniqueTag,
++                                               IMG_BOOL                  bInterleaved)
++{
++      IMG_UINT32 ui32NumPages, ui32PageCounter;
++      IMG_DEV_PHYADDR sDevPAddr;
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- FREE :SGXMEM:VA_%8.8lX\r\n", sDevVAddr.uiAddr);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      
++
++      ui32NumPages = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      psDeviceNode = psBMHeap->pBMContext->psDeviceNode;      
++      for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++)
++      {
++              if (!bInterleaved || (ui32PageCounter % 2) == 0)
++              {
++                      sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr);
++
++                      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "FREE :SGXMEM:PA_%p%8.8lX\r\n", hUniqueTag, sDevPAddr.uiAddr);
++                      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++              }
++              else
++              {
++                      
++              }
++
++              sDevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
++      }
++}
++
++IMG_VOID PDumpFreePageTable   (PVRSRV_DEVICE_TYPE eDeviceType,
++                                                       IMG_CPU_VIRTADDR   pvLinAddr,
++                                                       IMG_UINT32         ui32NumBytes,
++                                                       IMG_HANDLE         hUniqueTag)
++{
++      IMG_PUINT8              pui8LinAddr;
++      IMG_UINT32              ui32NumPages;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_UINT32              ui32Page;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      
++
++      pui8LinAddr             = (IMG_PUINT8) pvLinAddr;
++      ui32NumPages    = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      while (ui32NumPages--)
++      {
++              sCpuPAddr       = OSMapLinToCPUPhys(pui8LinAddr);
++              sDevPAddr       = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++              ui32Page        = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++              pui8LinAddr     += SGX_MMU_PAGE_SIZE;
++
++              snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "FREE :SGXMEM:PA_%p%8.8lX\r\n", hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE);
++              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++      }
++}
++
++IMG_VOID PDumpPDReg   (IMG_UINT32 ui32Reg,
++                                       IMG_UINT32 ui32Data,
++                                       IMG_HANDLE hUniqueTag)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      
++
++      snprintf        (pszScript,
++                              SZ_SCRIPT_SIZE_MAX,
++                              "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                              ui32Reg,
++                              hUniqueTag,
++                              ui32Data & ~(SGX_MMU_PAGE_SIZE - 1),
++                              ui32Data & (SGX_MMU_PAGE_SIZE - 1));
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++}
++
++IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++                                                       IMG_UINT32 ui32Data,
++                                                       IMG_UINT32     ui32Flags,
++                                                       IMG_HANDLE hUniqueTag)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      
++
++      snprintf        (pszScript,
++                      SZ_SCRIPT_SIZE_MAX,
++                       "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                       ui32Reg,
++                       hUniqueTag,
++                       ui32Data & ~(SGX_MMU_PAGE_SIZE - 1),
++                       ui32Data & (SGX_MMU_PAGE_SIZE - 1));
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
++PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO             *psMemInfo,
++                                                 IMG_UINT32                   ui32Offset,
++                                                 IMG_UINT32                   ui32Value,
++                                                 IMG_UINT32                   ui32Mask,
++                                                 PDUMP_POLL_OPERATOR  eOperator,
++                                                 IMG_BOOL                             bLastFrame,
++                                                 IMG_BOOL                             bOverwrite,
++                                                 IMG_HANDLE                   hUniqueTag)
++{
++      #define MEMPOLL_DELAY           (1000)
++      #define MEMPOLL_COUNT           (2000000000 / MEMPOLL_DELAY)
++      
++      IMG_UINT32                      ui32PageOffset;
++      IMG_DEV_PHYADDR         sDevPAddr;
++      IMG_DEV_VIRTADDR        sDevVPageAddr;
++    IMG_CPU_PHYADDR     CpuPAddr;
++      IMG_UINT32                      ui32Flags;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++      
++      
++      PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->ui32AllocSize);
++      
++      if (gsDBGPdumpState.ui32ParamFileNum == 0)
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++      }
++      else
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++      }
++
++      
++      ui32Flags = 0;
++      
++      if (bLastFrame)
++      {
++              ui32Flags |= PDUMP_FLAGS_LASTFRAME;
++      }
++
++      if (bOverwrite)
++      {
++              ui32Flags |= PDUMP_FLAGS_RESETLFBUFFER;
++      }
++
++      
++
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++    ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++      
++      
++      sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++      
++      
++      BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++      
++      
++      sDevPAddr.uiAddr += ui32PageOffset;
++      
++      snprintf(pszScript,
++                       SZ_SCRIPT_SIZE_MAX,
++                       "POL :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++                       hUniqueTag,
++                       sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                       sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                       ui32Value,
++                       ui32Mask,
++                       eOperator,
++                       MEMPOLL_COUNT,
++                       MEMPOLL_DELAY);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++                                              PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                              IMG_UINT32 ui32Offset,
++                                              IMG_UINT32 ui32Bytes,
++                                              IMG_UINT32 ui32Flags,
++                                              IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32 ui32PageByteOffset;
++      IMG_UINT8* pui8DataLinAddr;
++      IMG_DEV_VIRTADDR sDevVPageAddr;
++      IMG_DEV_VIRTADDR sDevVAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_CPU_PHYADDR CpuPAddr;
++      IMG_UINT32 ui32ParamOutPos;
++      IMG_UINT32 ui32CurrentOffset;
++      IMG_UINT32 ui32BytesRemaining;
++      LinuxMemArea *psLinuxMemArea;
++      LINUX_MEM_AREA_TYPE eRootAreaType;
++      IMG_CHAR *pui8TransientCpuVAddr;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++      
++
++      PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++      
++      if(pvAltLinAddr)
++      {
++              pui8DataLinAddr = pvAltLinAddr;
++      }
++    else if(psMemInfo->pvLinAddrKM)
++    {
++        pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset;
++    }
++    else
++    {
++        pui8DataLinAddr = 0;
++        psLinuxMemArea = (LinuxMemArea *)psMemInfo->sMemBlk.hOSMemHandle;
++        eRootAreaType = LinuxMemAreaRootType(psLinuxMemArea);
++    }
++    
++      ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++      
++
++    if(pui8DataLinAddr)
++    {
++        if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                            pui8DataLinAddr,
++                            ui32Bytes,
++                            ui32Flags))
++        {
++            return PVRSRV_ERROR_GENERIC;
++        }
++    }
++    
++    else if(eRootAreaType == LINUX_MEM_AREA_IO)
++    {
++        
++        CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++        pui8TransientCpuVAddr = IORemapWrapper(CpuPAddr, ui32Bytes, PVRSRV_HAP_CACHED);
++        if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                            pui8TransientCpuVAddr,
++                            ui32Bytes,
++                            ui32Flags))
++        {
++            IOUnmapWrapper(pui8TransientCpuVAddr);
++            return PVRSRV_ERROR_GENERIC;
++        }
++        IOUnmapWrapper(pui8TransientCpuVAddr);
++    }
++    else
++    {
++        
++        PVR_ASSERT(eRootAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++        
++        ui32BytesRemaining = ui32Bytes;
++        ui32CurrentOffset = ui32Offset;
++
++        while(ui32BytesRemaining > 0)
++        {
++            IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++            struct page *psCurrentPage=NULL;
++
++            CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++                                             ui32CurrentOffset);
++            
++            if(CpuPAddr.uiAddr & (PAGE_SIZE -1))
++            {
++                ui32BlockBytes =
++                    MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++            }
++            
++            psCurrentPage = LinuxMemAreaOffsetToPage(psLinuxMemArea, ui32CurrentOffset);
++            pui8TransientCpuVAddr = KMapWrapper(psCurrentPage);
++            pui8TransientCpuVAddr += (CpuPAddr.uiAddr & ~PAGE_MASK);
++            if(!pui8TransientCpuVAddr)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++
++            if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                                pui8TransientCpuVAddr,
++                                ui32BlockBytes,
++                                ui32Flags))
++            {
++                KUnMapWrapper(psCurrentPage);
++                return PVRSRV_ERROR_GENERIC;
++            }
++
++            KUnMapWrapper(psCurrentPage);
++
++            ui32BytesRemaining -= ui32BlockBytes;
++            ui32CurrentOffset += ui32BlockBytes;
++        }
++        PVR_ASSERT(ui32BytesRemaining == 0);
++
++    }
++
++      if (gsDBGPdumpState.ui32ParamFileNum == 0)
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++      }
++      else
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++      }
++
++      
++
++      snprintf(pszScript,
++                       SZ_SCRIPT_SIZE_MAX,
++                       "-- LDB :SGXMEM:VA_%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++                       psMemInfo->sDevVAddr.uiAddr,
++                       ui32Offset,
++                       ui32Bytes,
++                       ui32ParamOutPos,
++                       pszFile);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      
++
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++      ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++    
++    
++      sDevVAddr = psMemInfo->sDevVAddr;
++      sDevVAddr.uiAddr += ui32Offset;
++
++    ui32BytesRemaining = ui32Bytes;
++    ui32CurrentOffset = ui32Offset;
++
++    while(ui32BytesRemaining > 0)
++    {
++        IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++        CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++                                         ui32CurrentOffset);
++
++        sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32CurrentOffset - ui32PageByteOffset;
++        
++        BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++              
++        sDevPAddr.uiAddr += ui32PageByteOffset;
++
++        if(ui32PageByteOffset)
++        {
++            ui32BlockBytes =
++                MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++            
++            ui32PageByteOffset = 0;
++        }
++
++        snprintf(pszScript,
++                               SZ_SCRIPT_SIZE_MAX,
++                               "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++                               hUniqueTag,
++                               sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                               sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                               ui32BlockBytes,
++                               ui32ParamOutPos,
++                               pszFile);
++              PDumpWriteString2(pszScript, ui32Flags);
++
++        ui32BytesRemaining -= ui32BlockBytes;
++        ui32CurrentOffset += ui32BlockBytes;
++        ui32ParamOutPos += ui32BlockBytes;
++    }
++    PVR_ASSERT(ui32BytesRemaining == 0);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++                                               IMG_CPU_VIRTADDR pvLinAddr,
++                                               IMG_UINT32 ui32Bytes,
++                                               IMG_UINT32 ui32Flags,
++                                               IMG_BOOL bInitialisePages,
++                                               IMG_HANDLE hUniqueTag1,
++                                               IMG_HANDLE hUniqueTag2)
++{
++      IMG_UINT32 ui32NumPages;
++      IMG_UINT32 ui32PageOffset;
++      IMG_UINT32 ui32BlockBytes;
++      IMG_UINT8* pui8LinAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32ParamOutPos;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++      if (ui32Flags);
++
++      if (!pvLinAddr)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++    
++      if (bInitialisePages)
++      {
++              
++
++
++              if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                                                      pvLinAddr,
++                                                      ui32Bytes,
++                                                      PDUMP_FLAGS_CONTINUOUS))
++              {               
++                      return PVRSRV_ERROR_GENERIC;    
++              }
++      
++              if (gsDBGPdumpState.ui32ParamFileNum == 0)
++              {
++                      snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++              }
++              else
++              {
++                      snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++              }
++      }
++
++      
++
++      
++      ui32PageOffset  = (IMG_UINT32) pvLinAddr & (HOST_PAGESIZE() - 1);
++      ui32NumPages    = (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++      pui8LinAddr             = (IMG_UINT8*) pvLinAddr;
++      
++      while (ui32NumPages--)
++      {
++      sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr); 
++      sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++              
++              if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())
++              {
++                      
++                      ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++              }
++              else
++              {
++                      
++                      ui32BlockBytes = ui32Bytes;
++              }
++
++              
++
++              if (bInitialisePages)
++              {
++                      snprintf(pszScript,
++                                       SZ_SCRIPT_SIZE_MAX,
++                                       "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++                                       hUniqueTag1,
++                                       sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                                       sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                                       ui32BlockBytes,
++                                       ui32ParamOutPos,
++                                       pszFile);
++                      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++              }
++              else
++              {
++                      for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32))
++                      {
++                              IMG_UINT32              ui32PTE = *((IMG_UINT32 *) (pui8LinAddr + ui32Offset));
++
++                              if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0)
++                              {                               
++                                      snprintf(pszScript,
++                                                      SZ_SCRIPT_SIZE_MAX,
++                                                       "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                                                       hUniqueTag1,
++                                                       (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_SIZE - 1),
++                                                       (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_SIZE - 1),
++                                                       hUniqueTag2,
++                                                       ui32PTE & SGX_MMU_PDE_ADDR_MASK,
++                                                       ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++                              }
++                              else
++                              {
++                                      PVR_ASSERT(!(ui32PTE & SGX_MMU_PTE_VALID));
++                                      snprintf(pszScript,
++                                                       SZ_SCRIPT_SIZE_MAX,
++                                                       "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX%p\r\n",
++                                                       hUniqueTag1,
++                                                       (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_SIZE - 1),
++                                                       (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_SIZE - 1),
++                                                       ui32PTE,
++                                                       hUniqueTag2);
++                              }
++                              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++                      }
++              }
++
++              
++
++              
++              ui32PageOffset = 0;
++              
++              ui32Bytes -= ui32BlockBytes;
++              
++              pui8LinAddr += ui32BlockBytes;
++              
++              ui32ParamOutPos += ui32BlockBytes;
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                         IMG_UINT32 ui32Offset,
++                                                         IMG_DEV_PHYADDR sPDDevPAddr,
++                                                         IMG_HANDLE hUniqueTag1,
++                                                         IMG_HANDLE hUniqueTag2)
++{
++      IMG_UINT32 ui32ParamOutPos;
++    IMG_CPU_PHYADDR CpuPAddr;
++      IMG_UINT32 ui32PageByteOffset;
++      IMG_DEV_VIRTADDR sDevVAddr;
++      IMG_DEV_VIRTADDR sDevVPageAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++      ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++      if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                                              (IMG_UINT8 *)&sPDDevPAddr,
++                                              sizeof(IMG_DEV_PHYADDR),
++                                              PDUMP_FLAGS_CONTINUOUS))
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++        
++      if (gsDBGPdumpState.ui32ParamFileNum == 0)
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++      }
++      else
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++      }
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++      ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++    
++      sDevVAddr = psMemInfo->sDevVAddr;
++      sDevVAddr.uiAddr += ui32Offset;
++
++      sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++      BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++      sDevPAddr.uiAddr += ui32PageByteOffset;
++
++      if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0)
++      {
++              snprintf(pszScript,
++                               SZ_SCRIPT_SIZE_MAX,
++                               "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                               hUniqueTag1,
++                               sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                               sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                               hUniqueTag2,
++                               sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK,
++                               sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++      }
++      else
++      {
++              PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++              snprintf(pszScript,
++                               SZ_SCRIPT_SIZE_MAX,
++                               "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX\r\n",
++                               hUniqueTag1,
++                               sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                               sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                               sPDDevPAddr.uiAddr);
++      }
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
++{
++      IMG_UINT32      ui32Stream;
++
++      for     (ui32Stream = 0; ui32Stream < PDUMP_NUM_STREAMS; ui32Stream++)
++      {
++              if      (gsDBGPdumpState.psStream[ui32Stream])
++              {
++                      DbgSetFrame(gsDBGPdumpState.psStream[ui32Stream], ui32Frame);
++              }
++      }
++              
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame)
++{
++      *pui32Frame = DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32 ui32Count = 0;
++      PVRSRV_ERROR eError;
++      __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC);
++
++      if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++      {
++              eError = PVRSRV_ERROR_GENERIC;
++      }
++      else
++      {
++              eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++      }
++      
++      if (!PDumpWriteString2("-- ", ui32Flags))
++      {
++              return eError;
++      }
++
++      
++      snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s",pszComment);
++
++      
++      while ((pszMsg[ui32Count]!=0) && (ui32Count<SZ_MSG_SIZE_MAX) )
++      {
++              ui32Count++;
++      }
++      
++      
++      if ( (pszMsg[ui32Count-1] != '\n') && (ui32Count<SZ_MSG_SIZE_MAX))
++      {
++              pszMsg[ui32Count] = '\n';
++              ui32Count++;
++              pszMsg[ui32Count] = '\0';
++      }
++      if ( (pszMsg[ui32Count-2] != '\r') && (ui32Count<SZ_MSG_SIZE_MAX) )
++      {
++              pszMsg[ui32Count-1] = '\r';
++              pszMsg[ui32Count] = '\n';
++              ui32Count++;
++              pszMsg[ui32Count] = '\0';
++      }
++
++      PDumpWriteString2(pszMsg, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32 ui32Count = 0;
++      __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC);
++
++      
++      snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszString);
++
++      
++      while ((pszMsg[ui32Count]!=0) && (ui32Count<SZ_MSG_SIZE_MAX) )
++      {
++              ui32Count++;
++      }
++      
++      
++      if ( (pszMsg[ui32Count-1] != '\n') && (ui32Count<SZ_MSG_SIZE_MAX))
++      {
++              pszMsg[ui32Count] = '\n';
++              ui32Count++;
++              pszMsg[ui32Count] = '\0';
++      }
++      if ( (pszMsg[ui32Count-2] != '\r') && (ui32Count<SZ_MSG_SIZE_MAX) )
++      {
++              pszMsg[ui32Count-1] = '\r';
++              pszMsg[ui32Count] = '\n';
++              ui32Count++;
++              pszMsg[ui32Count] = '\0';
++      }
++
++      if      (!PDumpWriteILock(gsDBGPdumpState.
++                                                psStream[PDUMP_STREAM_DRIVERINFO],
++                                                (IMG_UINT8 *)pszMsg,
++                                                ui32Count,
++                                                ui32Flags))
++      {
++              if      (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              else
++              {
++                      return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpBitmapKM(   IMG_CHAR *pszFileName,
++                                                      IMG_UINT32 ui32FileOffset,
++                                                      IMG_UINT32 ui32Width,
++                                                      IMG_UINT32 ui32Height,
++                                                      IMG_UINT32 ui32StrideInBytes,
++                                                      IMG_DEV_VIRTADDR sDevBaseAddr,
++                                                      IMG_UINT32 ui32Size,
++                                                      PDUMP_PIXEL_FORMAT ePixelFormat,
++                                                      PDUMP_MEM_FORMAT eMemFormat,
++                                                      IMG_UINT32 ui32PDumpFlags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++      PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      snprintf(pszScript,
++                              SZ_SCRIPT_SIZE_MAX,
++                              "SII %s %s.bin :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++                              pszFileName,
++                              pszFileName,
++                              PDUMP_DATAMASTER_PIXEL,
++                              sDevBaseAddr.uiAddr,
++                              ui32Size,
++                              ui32FileOffset,
++                              ePixelFormat,
++                              ui32Width,
++                              ui32Height,
++                              ui32StrideInBytes,
++                              eMemFormat);
++#else
++      snprintf(pszScript,
++                              SZ_SCRIPT_SIZE_MAX,
++                              "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++                              pszFileName,
++                              pszFileName,
++                              sDevBaseAddr.uiAddr,
++                              ui32Size,
++                              ui32FileOffset,
++                              ePixelFormat,
++                              ui32Width,
++                              ui32Height,
++                              ui32StrideInBytes,
++                              eMemFormat);
++#endif
++
++      PDumpWriteString2( pszScript, ui32PDumpFlags);
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PDumpReadRegKM           (       IMG_CHAR *pszFileName,
++                                                                      IMG_UINT32 ui32FileOffset,
++                                                                      IMG_UINT32 ui32Address,
++                                                                      IMG_UINT32 ui32Size,
++                                                                      IMG_UINT32 ui32PDumpFlags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++      snprintf(pszScript,
++                      SZ_SCRIPT_SIZE_MAX,
++                      "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n",
++                      ui32Address,
++                      ui32FileOffset,
++                      pszFileName);
++
++      PDumpWriteString2( pszScript, ui32PDumpFlags);
++
++      return PVRSRV_OK;
++}
++
++
++static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
++{
++      return PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], (IMG_UINT8 *) pszString, strlen(pszString), ui32Flags);
++}
++
++
++static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32 ui32Written = 0;
++      IMG_UINT32 ui32Off = 0;
++
++      if (!psStream)
++      {
++              return IMG_TRUE;
++      }
++      
++
++      
++
++      if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
++      {
++              IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++              if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++              {
++                      if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
++                      {
++                              DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
++                              gsDBGPdumpState.ui32ParamFileNum++;
++                      }
++              }
++      }
++      
++
++      while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF))
++      {
++              ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
++
++#if 0
++              
++
++
++              if (ui32Written == 0)
++              {
++                      ZwYieldExecution();
++              }
++#endif
++              if (ui32Written != 0xFFFFFFFF)
++              {
++                      ui32Off += ui32Written;
++                      ui32Count -= ui32Written;
++              }
++      }
++
++      if (ui32Written == 0xFFFFFFFF)
++      {
++              return IMG_FALSE;
++      }
++
++      return IMG_TRUE;
++}
++
++static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
++{     
++      gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++
++static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream)
++{     
++      return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{     
++      gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32      ui32BytesWritten;
++
++      if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++      {
++              
++
++              if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) && 
++                      (psStream->ui32Start == 0xFFFFFFFF) &&
++                      (psStream->ui32End == 0xFFFFFFFF) &&
++                      psStream->bInitPhaseComplete)
++              {
++                      ui32BytesWritten = ui32BCount;
++              }
++              else
++              {
++                      ui32BytesWritten = gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, 1);
++              }
++      }
++      else
++      {
++              if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
++              {
++                      IMG_UINT32      ui32DbgFlags;
++
++                      ui32DbgFlags = 0;
++                      if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++                      {
++                              ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++                      }
++
++                      ui32BytesWritten = gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, 1, ui32DbgFlags);
++              }
++              else
++              {
++                      ui32BytesWritten = gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, 1);
++              }
++      }
++
++      return ui32BytesWritten;
++}
++
++IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
++{
++      IMG_BOOL        bFrameDumped;
++
++      
++
++      bFrameDumped = IMG_FALSE;
++      PDumpSetFrameKM(ui32CurrentFrame + 1);
++      bFrameDumped = PDumpIsCaptureFrameKM();
++      PDumpSetFrameKM(ui32CurrentFrame);
++
++      return bFrameDumped;
++}
++
++IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++      PDumpWriteString2(pszScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++}
++
++void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO         psROffMemInfo,
++                        IMG_UINT32                                    ui32ROffOffset,
++                        IMG_UINT32                                    ui32WPosVal,
++                        IMG_UINT32                                    ui32PacketSize,
++                        IMG_UINT32                                    ui32BufferSize,
++                        IMG_UINT32                                    ui32Flags,
++                        IMG_HANDLE                                    hUniqueTag)
++{
++      IMG_UINT32                      ui32PageOffset;
++      IMG_DEV_VIRTADDR        sDevVAddr;
++      IMG_DEV_PHYADDR         sDevPAddr;
++      IMG_DEV_VIRTADDR        sDevVPageAddr;
++    IMG_CPU_PHYADDR     CpuPAddr;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      
++      PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->ui32AllocSize);
++      
++      sDevVAddr = psROffMemInfo->sDevVAddr;
++      
++      
++      sDevVAddr.uiAddr += ui32ROffOffset;
++
++      
++
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psROffMemInfo->sMemBlk.hOSMemHandle, ui32ROffOffset);
++    ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++
++      
++      sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++      
++      
++      BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++      
++      
++      sDevPAddr.uiAddr += ui32PageOffset;
++      
++      snprintf(pszScript,
++                       SZ_SCRIPT_SIZE_MAX,
++                       "CBP :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n",
++                       hUniqueTag,
++                       sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                       sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                       ui32WPosVal,
++                       ui32PacketSize,
++                       ui32BufferSize);
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
++
++IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      sprintf(pszScript, "IDL %lu\r\n", ui32Clocks);
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
++
++IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks)
++{
++      PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++
++
++IMG_VOID PDumpSuspendKM(IMG_VOID)
++{
++}
++
++IMG_VOID PDumpResumeKM(IMG_VOID)
++{
++}
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,369 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++
++#ifdef DEBUG
++int PVRDebugProcSetLevel(struct file *file, const char *buffer, unsigned long count, void *data);
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
++
++static struct proc_dir_entry * dir;
++
++static off_t procDumpSysNodes(char *buf, size_t size, off_t off);
++static off_t procDumpVersion(char *buf, size_t size, off_t off);
++
++off_t printAppend(char * buffer, size_t size, off_t off, const char * format, ...)
++{
++    int n;
++    int space = size - off;
++    va_list ap;
++
++    va_start (ap, format);
++
++    n = vsnprintf (buffer+off, space, format, ap);
++
++    va_end (ap);
++    
++
++    if (n > space || n < 0)
++    {
++        return size;
++    }
++    else
++    {
++        return off+n;
++    }
++}
++
++
++static int pvr_read_proc(char *page, char **start, off_t off,
++                         int count, int *eof, void *data)
++{
++      pvr_read_proc_t *pprn = data;
++
++    off_t len = pprn (page, count, off);
++
++    if (len == END_OF_FILE)
++    {
++        len  = 0;
++        *eof = 1;
++    }
++    else if (!len)             
++    {
++        *start = (char *) 0;   
++    }
++    else
++    {
++        *start = (char *) 1;
++    }
++
++    return len;
++}
++
++
++int CreateProcEntry(const char * name, read_proc_t rhandler, write_proc_t whandler, void *data)
++{
++    struct proc_dir_entry * file;
++      mode_t mode;
++
++      if (!dir)
++      {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot make proc entry /proc/pvr/%s: no parent", name));
++        return -ENOMEM;
++      }
++
++      mode = S_IFREG;
++
++      if (rhandler)
++      {
++              mode |= S_IRUGO;
++      }
++
++      if (whandler)
++      {
++              mode |= S_IWUSR;
++      }
++
++      file = create_proc_entry(name, mode, dir);
++
++    if (file)
++    {
++        file->owner = THIS_MODULE;
++              file->read_proc = rhandler;
++              file->write_proc = whandler;
++              file->data = data;
++
++              PVR_DPF((PVR_DBG_MESSAGE, "Created /proc/pvr/%s", name));
++
++        return 0;
++    }
++
++    PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot make proc entry /proc/pvr/%s: no memory", name));
++
++    return -ENOMEM;
++}
++
++
++int CreateProcReadEntry(const char * name, pvr_read_proc_t handler)
++{
++    struct proc_dir_entry * file;
++
++      if (!dir)
++      {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/pvr/%s: no parent", name));
++
++        return -ENOMEM;
++      }
++
++      file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (void *)handler);
++
++    if (file)
++    {
++        file->owner = THIS_MODULE;
++
++        return 0;
++    }
++
++    PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/pvr/%s: no memory", name));
++
++    return -ENOMEM;
++}
++
++
++int CreateProcEntries(void)
++{
++    dir = proc_mkdir ("pvr", NULL);
++
++    if (!dir)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/pvr directory"));
++
++        return -ENOMEM;
++    }
++
++    if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++              CreateProcReadEntry("version", procDumpVersion) ||
++              CreateProcReadEntry("nodes", procDumpSysNodes))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/pvr files"));
++
++        return -ENOMEM;
++    }
++
++#ifdef DEBUG
++      if (CreateProcEntry ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/pvr/debug_level"));
++
++        return -ENOMEM;
++    }
++#endif
++
++    return 0;
++}
++
++
++void RemoveProcEntry(const char *name)
++{
++      if (dir)
++      {
++      remove_proc_entry(name, dir);
++      }
++
++      PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/pvr/%s", name));
++}
++
++
++void RemoveProcEntries(void)
++{
++#ifdef DEBUG
++    RemoveProcEntry("debug_level");
++#endif
++    RemoveProcEntry("queue");
++    RemoveProcEntry("nodes");
++    RemoveProcEntry("version");
++
++      while (dir->subdir)
++      {
++              PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/pvr/%s", dir->subdir->name));
++
++              RemoveProcEntry(dir->subdir->name);
++      }
++
++    remove_proc_entry("pvr", NULL);
++}
++
++
++static off_t procDumpVersion(char *buf, size_t size, off_t off)
++{
++    SYS_DATA *psSysData;
++    
++      if (off == 0)
++      {
++              return printAppend(buf, size, 0,
++                                                      "Version %s (%s) %s\n",
++                                                      PVRVERSION_STRING,
++                                                      PVR_BUILD_TYPE, PVR_BUILD_DIR);
++      }
++
++    if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++    
++    if (off == 1)
++    {
++        IMG_CHAR *pszSystemVersionString = "None";
++
++        if(psSysData->pszVersionString)
++        {
++            pszSystemVersionString = psSysData->pszVersionString;
++        }
++            
++        if(strlen(pszSystemVersionString) 
++            + strlen("System Version String: \n") 
++            + 1 > size)
++        {
++            return 0;
++        }
++        return printAppend(buf, size, 0,
++                            "System Version String: %s\n",
++                            pszSystemVersionString);
++    }
++    
++      return END_OF_FILE;
++}
++
++
++static const char *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++      switch (deviceType)
++      {
++              default:
++              {
++                      static char text[10];
++
++                      sprintf(text, "?%x", deviceType);
++
++                      return text;
++              }
++      }
++}
++
++
++static const char *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++      switch (deviceClass) 
++      {
++              case PVRSRV_DEVICE_CLASS_3D:
++              {
++                      return "3D";
++              }
++              case PVRSRV_DEVICE_CLASS_DISPLAY:
++              {
++                      return "display";
++              }
++              case PVRSRV_DEVICE_CLASS_BUFFER:
++              {
++                      return "buffer";
++              }
++              default:
++              {
++                      static char text[10];
++
++                      sprintf(text, "?%x", deviceClass);
++                      return text;
++              }
++      }
++}
++
++static
++off_t procDumpSysNodes(char *buf, size_t size, off_t off)
++{
++      SYS_DATA                        *psSysData;
++      PVRSRV_DEVICE_NODE      *psDevNode;
++      off_t                           len;
++      
++      
++      if (size < 80)
++      {
++              return 0;
++      }
++
++      if (off == 0)
++      {
++              return printAppend(buf, size, 0, 
++                                                      "Registered nodes\n"
++                                                      "Addr     Type     Class    Index Ref pvDev     Size Res\n");
++      }
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      for(psDevNode = psSysData->psDeviceNodeList;
++                      --off && psDevNode;
++                      psDevNode = psDevNode->psNext)
++              ;
++
++      if (!psDevNode)
++      {
++              return END_OF_FILE;
++      }
++
++      len = printAppend(buf, size, 0,
++                                        "%p %-8s %-8s %4d  %2lu  %p  %3lu  %p\n",
++                                        psDevNode,
++                                        deviceTypeToString(psDevNode->sDevId.eDeviceType),
++                                        deviceClassToString(psDevNode->sDevId.eDeviceClass),
++                                        psDevNode->sDevId.eDeviceClass,
++                                        psDevNode->ui32RefCount,
++                                        psDevNode->pvDevice,
++                                        psDevNode->ui32pvDeviceSize,
++                                        psDevNode->psResItem);
++      return (len);
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,50 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>               
++#include <linux/proc_fs.h>    
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(char *, size_t, off_t);
++
++off_t printAppend(char * buffer, size_t size, off_t off, const char * format, ...)
++      __attribute__((format(printf, 4, 5)));
++
++int CreateProcEntries(void);
++
++int CreateProcReadEntry (const char * name, pvr_read_proc_t handler);
++
++int CreateProcEntry(const char * name, read_proc_t rhandler, write_proc_t whandler, void *data);
++
++void RemoveProcEntry(const char * name);
++
++void RemoveProcEntries(void);
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,215 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++
++#include "sgx_bridge.h"
++
++#include "bridged_pvr_bridge.h"
++
++
++#if defined(DEBUG_BRIDGE_KM)
++static off_t printLinuxBridgeStats(char * buffer, size_t size, off_t off);
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++
++PVRSRV_ERROR
++LinuxBridgeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++      {
++              int iStatus;
++              iStatus = CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++              if(iStatus!=0)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++      }
++#endif
++      return CommonBridgeInit();
++}
++
++IMG_VOID
++LinuxBridgeDeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++      RemoveProcEntry("bridge_stats");
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++static off_t
++printLinuxBridgeStats(char * buffer, size_t count, off_t off)
++{
++      PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++      off_t Ret;
++
++      LinuxLockMutex(&gPVRSRVLock);
++
++      if(!off)
++      {
++              if(count < 500)
++              {
++                      Ret = 0;
++                      goto unlock_and_return;
++              }
++              Ret = printAppend(buffer, count, 0,
++                                                "Total ioctl call count = %lu\n"
++                                                "Total number of bytes copied via copy_from_user = %lu\n"
++                                                "Total number of bytes copied via copy_to_user = %lu\n"
++                                                "Total number of bytes copied via copy_*_user = %lu\n\n"
++                                                "%-45s | %-40s | %10s | %20s | %10s\n",
++                                                g_BridgeGlobalStats.ui32IOCTLCount,
++                                                g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++                                                g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++                                                g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++                                                "Bridge Name",
++                                                "Wrapper Function",
++                                                "Call Count",
++                                                "copy_from_user Bytes",
++                                                "copy_to_user Bytes"
++                                               );
++              goto unlock_and_return;
++      }
++
++      if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++      {
++              Ret = END_OF_FILE;
++              goto unlock_and_return;
++      }
++
++      if(count < 300)
++      {
++              Ret = 0;
++              goto unlock_and_return;
++      }
++
++      psEntry = &g_BridgeDispatchTable[off-1];
++      Ret =  printAppend(buffer, count, 0,
++                                         "%-45s   %-40s   %-10lu   %-20lu   %-10lu\n",
++                                         psEntry->pszIOCName,
++                                         psEntry->pszFunctionName,
++                                         psEntry->ui32CallCount,
++                                         psEntry->ui32CopyFromUserTotalBytes,
++                                         psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++      LinuxUnLockMutex(&gPVRSRVLock);
++      return Ret;
++}
++#endif 
++
++
++
++int
++PVRSRV_BridgeDispatchKM(struct inode *inode,
++                                              struct file *file,
++                                              unsigned int cmd,
++                                              unsigned long arg)
++{
++      IMG_UINT32 ui32BridgeID = PVRSRV_GET_BRIDGE_ID(cmd);
++      PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++      PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++      IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++      PVRSRV_PER_PROCESS_DATA *psPerProc;
++      int err = -EFAULT;
++
++      LinuxLockMutex(&gPVRSRVLock);
++
++
++      if(!OSAccessOK(PVR_VERIFY_WRITE,
++                                 psBridgePackageUM,
++                                 sizeof(PVRSRV_BRIDGE_PACKAGE)))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
++                               __FUNCTION__));
++
++              goto unlock_and_return;
++      }
++      
++      
++      if(OSCopyFromUser(IMG_NULL,
++                                        &sBridgePackageKM,
++                                        psBridgePackageUM,
++                                        sizeof(PVRSRV_BRIDGE_PACKAGE))
++        != PVRSRV_OK)
++      {
++              goto unlock_and_return;
++      }
++      
++      if(ui32BridgeID != PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES))
++      {
++              PVRSRV_ERROR eError;
++
++              eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++                                                                      (IMG_PVOID *)&psPerProc,
++                                                                      sBridgePackageKM.hKernelServices,
++                                                                      PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
++                                       __FUNCTION__, eError));
++                      goto unlock_and_return;
++              }
++
++              if(psPerProc->ui32PID != ui32PID)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
++                                       "belonging to process %d", __FUNCTION__, ui32PID,
++                                       psPerProc->ui32PID));
++                      goto unlock_and_return;
++              }
++      }
++      else
++      {
++              
++              psPerProc = PVRSRVPerProcessData(ui32PID, IMG_TRUE);
++              if(psPerProc == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++                                       "Couldn't create per-process data area"));
++                      goto unlock_and_return;
++              }
++      }
++
++      sBridgePackageKM.ui32BridgeID = PVRSRV_GET_BRIDGE_ID(sBridgePackageKM.ui32BridgeID);
++      
++      err = BridgedDispatchKM(psPerProc, &sBridgePackageKM);
++      
++unlock_and_return:
++      LinuxUnLockMutex(&gPVRSRVLock);
++      return err;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,199 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++  
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/tty.h>                        
++#include <stdarg.h>
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "proc.h"
++
++#if defined(DEBUG) || defined(TIMING)
++
++IMG_UINT32    gPVRDebugLevel = DBGPRIV_WARNING;
++
++#define PVR_STRING_TERMINATOR         '\0'
++#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
++
++void PVRSRVDebugPrintf        (
++                                              IMG_UINT32      ui32DebugLevel,
++                                              const IMG_CHAR* pszFileName,
++                                              IMG_UINT32      ui32Line,
++                                              const IMG_CHAR* pszFormat,
++                                              ...
++                                      )
++{
++      IMG_BOOL bTrace, bDebug;
++#if !defined(__sh__)
++      IMG_CHAR *pszLeafName;
++      
++      pszLeafName = (char *)strrchr (pszFileName, '\\');
++      
++      if (pszLeafName)
++      {
++              pszFileName = pszLeafName;
++      }
++#endif 
++              
++      bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++      bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++      if (bTrace || bDebug)
++      {
++              va_list vaArgs;
++              static char szBuffer[256];
++
++              va_start (vaArgs, pszFormat);
++
++              
++              if (bDebug)
++              {
++                      switch(ui32DebugLevel)
++                      {
++                              case DBGPRIV_FATAL:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Fatal): ");
++                                      break;
++                              }
++                              case DBGPRIV_ERROR:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Error): ");
++                                      break;
++                              }
++                              case DBGPRIV_WARNING:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Warning): ");
++                                      break;
++                              }
++                              case DBGPRIV_MESSAGE:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Message): ");
++                                      break;
++                              }
++                              case DBGPRIV_VERBOSE:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Verbose): ");
++                                      break;
++                              }
++                              default:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Unknown message level)");
++                                      break;
++                              }
++                      }
++              }
++              else
++              {
++                      strcpy (szBuffer, "PVR_K: ");
++              }
++
++              vsprintf (&szBuffer[strlen(szBuffer)], pszFormat, vaArgs);
++
++              
++
++              if (!bTrace)
++              {
++                      sprintf (&szBuffer[strlen(szBuffer)], " [%d, %s]", (int)ui32Line, pszFileName);
++              }
++
++              printk(KERN_INFO "%s\r\n", szBuffer);
++
++              va_end (vaArgs);
++      }
++}
++
++void PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
++{
++      PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
++      BUG();
++}
++
++void PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
++{
++      static IMG_CHAR szMessage[PVR_MAX_DEBUG_MESSAGE_LEN+1];
++      IMG_CHAR* pszEndOfMessage = IMG_NULL;
++      va_list ArgList;
++
++      strncpy(szMessage, "PVR: ", PVR_MAX_DEBUG_MESSAGE_LEN);
++
++      pszEndOfMessage = &szMessage[strlen(szMessage)];
++
++      va_start(ArgList, pszFormat);
++      vsprintf(pszEndOfMessage, pszFormat, ArgList);
++      va_end(ArgList);
++
++      strcat(szMessage,"\r\n");
++
++      printk(KERN_INFO "%s", szMessage);
++}
++
++
++void PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
++{
++      printk(KERN_INFO "PVR: Setting Debug Level = 0x%x",(unsigned int)uDebugLevel);
++
++      gPVRDebugLevel = uDebugLevel;
++}
++
++int PVRDebugProcSetLevel(struct file *file, const char *buffer, unsigned long count, void *data)
++{
++#define       _PROC_SET_BUFFER_SZ             2
++      char data_buffer[_PROC_SET_BUFFER_SZ];
++
++      if (count != _PROC_SET_BUFFER_SZ)
++      {
++              return -EINVAL;
++      }
++      else
++      {
++              if (copy_from_user(data_buffer, buffer, count))
++                      return -EINVAL;
++              if (data_buffer[count - 1] != '\n')
++                      return -EINVAL;
++              PVRDebugSetLevel(data_buffer[0] - '0');
++      }
++      return (count);
++}
++
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      if (off == 0) {
++              *start = (char *)1;
++              return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++      }
++      *eof = 1;
++      return 0;
++}
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,423 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGX530DEFS_KM_H_
++#define _SGX530DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL                   0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK      0x00000003
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT     0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK     0x00000030
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT    4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK     0x00000300
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT    8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK      0x00003000
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT     12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK     0x00030000
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT    16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK     0x00300000
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT    20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATESTATUS                0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK   0x00000001
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT  0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK  0x00000010
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK  0x00000100
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK   0x00001000
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT  12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK  0x00010000
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK  0x00100000
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++#define EUR_CR_CLKGATECTLOVR                0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK   0x00000003
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT  0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK  0x00000030
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK  0x00000300
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK   0x00003000
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT  12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK  0x00030000
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK  0x00300000
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++#define EUR_CR_CORE_ID                      0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK          0x0000FFFF
++#define EUR_CR_CORE_ID_CONFIG_SHIFT         0
++#define EUR_CR_CORE_ID_ID_MASK              0xFFFF0000
++#define EUR_CR_CORE_ID_ID_SHIFT             16
++#define EUR_CR_CORE_REVISION                0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FF
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK     0x0000FF00
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT    8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK     0x00FF0000
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT    16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK  0xFF000000
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++#define EUR_CR_DESIGNER_REV_FIELD1          0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++#define EUR_CR_DESIGNER_REV_FIELD2          0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++#define EUR_CR_SOFT_RESET                   0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK    0x00000001
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT   0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK   0x00000002
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT  1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK    0x00000004
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT   2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK     0x00000008
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT    3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK    0x00000010
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT   4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK    0x00000020
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT   5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK    0x00000040
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT   6
++#define EUR_CR_EVENT_HOST_ENABLE2           0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR2            0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS2                0x0118
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS                 0x012C
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK      0x20000000
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT     29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK   0x00100000
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT  20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK   0x00020000
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT  17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK   0x00004000
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT  14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK  0x00000800
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK  0x00000400
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_ENABLE            0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR             0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK  0x20000000
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_PDS                          0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++#define EUR_CR_PDS_EXEC_BASE                0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK      0x0FF00000
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT     20
++#define EUR_CR_EVENT_KICKER                 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK    0x0FFFFFF0
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT   4
++#define EUR_CR_EVENT_KICK                   0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK          0x00000001
++#define EUR_CR_EVENT_KICK_NOW_SHIFT         0
++#define EUR_CR_PDS_INV0                     0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV0_DSC_SHIFT           0
++#define EUR_CR_PDS_INV1                     0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV1_DSC_SHIFT           0
++#define EUR_CR_PDS_INV2                     0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV2_DSC_SHIFT           0
++#define EUR_CR_PDS_INV3                     0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV3_DSC_SHIFT           0
++#define EUR_CR_PDS_INV_CSC                  0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK        0x00000001
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT       0
++#define EUR_CR_PDS_PC_BASE                  0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK     0x3FFFFFFF
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT    0
++#define EUR_CR_BIF_CTRL                     0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK      0x00000001
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT     0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK          0x00000002
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT         1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK          0x00000004
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT         2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK        0x00000008
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT       3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK    0x00000010
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT   4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK  0x00000400
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++#define EUR_CR_BIF_INT_STAT                 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK      0x00003FFF
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT     0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK    0x00004000
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT   14
++#define EUR_CR_BIF_FAULT                    0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK          0x0FFFF000
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT         12
++#define EUR_CR_BIF_DIR_LIST_BASE0           0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++#define EUR_CR_BIF_TWOD_REQ_BASE            0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK  0x0FF00000
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_TA_REQ_BASE              0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK    0x0FF00000
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT   20
++#define EUR_CR_BIF_MEM_REQ_STAT             0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK  0x000000FF
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++#define EUR_CR_BIF_3D_REQ_BASE              0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK    0x0FF00000
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT   20
++#define EUR_CR_BIF_ZLS_REQ_BASE             0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK   0x0FF00000
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT  20
++#define EUR_CR_2D_BLIT_STATUS               0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFF
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK     0x01000000
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT    24
++#define EUR_CR_2D_VIRTUAL_FIFO_0            0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000E
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1            0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFF
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++#define EUR_CR_USE_CODE_BASE(X)     (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK      0x00FFFFFF
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT     0
++#define EUR_CR_USE_CODE_BASE_DM_MASK        0x03000000
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT       24
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,44 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGXCORETYPES_KM_H_
++#define _SGXCORETYPES_KM_H_
++
++typedef enum
++{
++      SGX_CORE_ID_INVALID = 0,
++      SGX_CORE_ID_530 = 2,
++      SGX_CORE_ID_535 = 3,
++} SGX_CORE_ID_TYPE;
++
++typedef struct _SGX_CORE_INFO
++{
++      SGX_CORE_ID_TYPE        eID;
++      IMG_UINT32                      uiRev;
++} SGX_CORE_INFO, *PSGX_CORE_INFO;
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,47 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define       _SGXDEFS_H_
++
++#if defined(SGX530)
++#include "sgx530defs.h"
++#else
++#if defined(SGX535)
++#include "sgx535defs.h"
++#else
++#if defined(SGX535_V1_1)
++#include "sgx535defs.h"
++#else
++#endif
++#endif
++#endif
++
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,108 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
++      
++      #define SGX_CORE_REV_HEAD       0
++      #if defined(USE_SGX_CORE_REV_HEAD)
++              
++              #define SGX_CORE_REV    SGX_CORE_REV_HEAD
++      #endif
++
++      #if SGX_CORE_REV == 103
++      #else
++      #if SGX_CORE_REV == 110
++      #else
++      #if SGX_CORE_REV == 111
++      #else
++      #if SGX_CORE_REV == 120
++      #else
++      #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++              
++      #else
++              #error "sgxerrata.h: SGX530 Core Revision unspecified"
++      #endif
++      #endif
++      #endif
++      #endif
++        #endif
++      
++      #define SGX_CORE_DEFINED
++#endif
++
++
++#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
++      
++      #define SGX_CORE_REV_HEAD       0
++      #if defined(USE_SGX_CORE_REV_HEAD)
++              
++              #define SGX_CORE_REV    SGX_CORE_REV_HEAD
++      #endif
++
++      #if SGX_CORE_REV == 111
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23410
++              #define FIX_HW_BRN_22693
++      #else
++      #if SGX_CORE_REV == 1111
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23410
++              #define FIX_HW_BRN_22693
++      #else
++      #if SGX_CORE_REV == 112
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23410
++              #define FIX_HW_BRN_22693
++      #else
++      #if SGX_CORE_REV == 113
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23944
++              #define FIX_HW_BRN_23410
++      #else
++      #if SGX_CORE_REV == 121
++              #define FIX_HW_BRN_23944
++              #define FIX_HW_BRN_23410
++      #else
++      #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++              
++      #else
++              #error "sgxerrata.h: SGX535 Core Revision unspecified"
++
++      #endif
++      #endif
++      #endif
++      #endif
++      #endif
++      #endif
++      
++      #define SGX_CORE_DEFINED
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,55 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined(SGX530)
++      #define SGX_CORE_FRIENDLY_NAME                                                  "SGX530"
++      #define SGX_CORE_ID                                                                             SGX_CORE_ID_530
++      #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (28)
++      #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX535)
++      #define SGX_CORE_FRIENDLY_NAME                                                  "SGX535"
++      #define SGX_CORE_ID                                                                             SGX_CORE_ID_535
++      #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (32)
++      #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++      #define SGX_FEATURE_2D_HARDWARE
++              #define SGX_FEATURE_AUTOCLOCKGATING
++
++#endif
++#endif
++
++#if !defined(SGX_DONT_SWITCH_OFF_FEATURES)
++
++#if defined(FIX_HW_BRN_22693) 
++#undef SGX_FEATURE_AUTOCLOCKGATING
++#endif
++
++#endif 
++
++#include "img_types.h"
++
++#include "sgxcoretypes.h"
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT                            (12)
++#define SGX_MMU_PAGE_SIZE                             (1<<SGX_MMU_PAGE_SHIFT)
++
++#define SGX_MMU_PD_SHIFT                              (10)
++#define SGX_MMU_PD_SIZE                                       (1<<SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK                                       (0xFFC00000)
++
++#define SGX_MMU_PDE_ADDR_MASK                 (0xFFFFF000)
++#define SGX_MMU_PDE_VALID                             (0x00000001)
++#define SGX_MMU_PDE_WRITEONLY                 (0x00000002)
++#define SGX_MMU_PDE_READONLY                  (0x00000004)
++#define SGX_MMU_PDE_CACHECONSISTENT           (0x00000008)
++#define SGX_MMU_PDE_EDMPROTECT                        (0x00000010)
++
++#define SGX_MMU_PT_SHIFT                              (10)
++#define SGX_MMU_PT_SIZE                                       (1<<SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK                                       (0x003FF000)
++
++#define SGX_MMU_PTE_ADDR_MASK                 (0xFFFFF000)
++#define SGX_MMU_PTE_VALID                             (0x00000001)
++#define SGX_MMU_PTE_WRITEONLY                 (0x00000002)
++#define SGX_MMU_PTE_READONLY                  (0x00000004)
++#define SGX_MMU_PTE_CACHECONSISTENT           (0x00000008)
++#define SGX_MMU_PTE_EDMPROTECT                        (0x00000010)
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,210 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++
++#if defined(__cplusplus)
++extern "C"{
++#endif        
++      
++typedef struct _BM_HEAP_ BM_HEAP;
++
++struct _BM_MAPPING_
++{
++      enum
++      {
++              hm_wrapped = 1,         
++              hm_wrapped_scatter,     
++              hm_wrapped_virtaddr, 
++              hm_env,                         
++              hm_contiguous           
++      } eCpuMemoryOrigin;
++
++      BM_HEAP                         *pBMHeap;       
++      RA_ARENA                        *pArena;        
++
++      IMG_CPU_VIRTADDR        CpuVAddr;
++      IMG_CPU_PHYADDR         CpuPAddr;
++      IMG_DEV_VIRTADDR        DevVAddr;
++      IMG_SYS_PHYADDR         *psSysAddr;
++      IMG_SIZE_T                      uSize;
++    IMG_HANDLE          hOSMemHandle;
++      IMG_UINT32                      ui32Flags;
++};
++
++typedef struct _BM_BUF_
++{
++      IMG_CPU_VIRTADDR        *CpuVAddr;
++    IMG_VOID            *hOSMemHandle;
++      IMG_CPU_PHYADDR         CpuPAddr;
++      IMG_DEV_VIRTADDR        DevVAddr;
++
++      BM_MAPPING                      *pMapping;
++      IMG_UINT32                      ui32RefCount;
++} BM_BUF;
++
++struct _BM_HEAP_
++{
++      IMG_UINT32                              ui32Attribs;
++      BM_CONTEXT                              *pBMContext;
++      RA_ARENA                                *pImportArena;
++      RA_ARENA                                *pLocalDevMemArena;
++      RA_ARENA                                *pVMArena;
++      DEV_ARENA_DESCRIPTOR    sDevArena;
++      MMU_HEAP                                *pMMUHeap;
++      
++      struct _BM_HEAP_                *psNext;
++};
++
++struct _BM_CONTEXT_
++{
++      MMU_CONTEXT     *psMMUContext;
++
++      
++       BM_HEAP *psBMHeap;
++       
++      
++       BM_HEAP *psBMSharedHeap;
++
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      
++      HASH_TABLE *pBufferHash;
++
++      
++      IMG_HANDLE hResItem;
++
++      IMG_UINT32 ui32RefCount;
++
++      
++
++      struct _BM_CONTEXT_ *psNext;
++};
++
++
++
++typedef void *BM_HANDLE;
++
++#define BP_POOL_MASK         0x7 
++
++#define BP_CONTIGUOUS                 (1 << 3)
++#define BP_PARAMBUFFER                        (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS  2
++
++IMG_HANDLE
++BM_CreateContext (PVRSRV_DEVICE_NODE *psDeviceNode,
++                                      IMG_DEV_PHYADDR *psPDDevPAddr,
++                                      IMG_BOOL bKernelContext,
++                                      IMG_BOOL *pbCreated);
++
++PVRSRV_ERROR
++BM_DestroyContext (IMG_HANDLE hBMContext,
++                                      IMG_BOOL bKernelContext,
++                                      IMG_BOOL bResManCallback,
++                                      IMG_BOOL *pbCreated);
++
++
++IMG_HANDLE 
++BM_CreateHeap (IMG_HANDLE hBMContext,
++                              DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++
++IMG_VOID 
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap);
++
++
++IMG_BOOL 
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL
++BM_Alloc (IMG_HANDLE                  hDevMemHeap,
++                      IMG_DEV_VIRTADDR        *psDevVAddr,
++                      IMG_SIZE_T                      uSize,
++                      IMG_UINT32                      *pui32Flags,
++                      IMG_UINT32                      uDevVAddrAlignment,
++                      BM_HANDLE                       *phBuf);
++
++IMG_BOOL
++BM_Wrap (     IMG_HANDLE hDevMemHeap,
++                  IMG_UINT32 ui32Size,
++                      IMG_UINT32 ui32Offset,
++                      IMG_BOOL bPhysContig,
++                      IMG_SYS_PHYADDR *psSysAddr,
++                      IMG_VOID *pvCPUVAddr,
++                      IMG_UINT32 *pui32Flags,
++                      BM_HANDLE *phBuf);
++
++void
++BM_Free (BM_HANDLE hBuf, 
++              IMG_UINT32 ui32Flags);
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf);
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf);
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf);
++
++IMG_HANDLE
++BM_HandleToOSMemHandle (BM_HANDLE hBuf);
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++                         IMG_UINT32 *pTotalBytes,
++                         IMG_UINT32 *pAvailableBytes);
++
++
++PVRSRV_ERROR BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo, 
++                                                              IMG_DEV_VIRTADDR sDevVPageAddr,  
++                                                              IMG_DEV_PHYADDR *psDevPAddr);
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, 
++                                                      PVRSRV_HEAP_INFO *psHeapInfo);
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap);
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext);
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap);
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext);
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/device.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/device.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/device.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/device.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,267 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++      
++#include "ra.h"               
++#include "resman.h"           
++
++typedef struct _BM_CONTEXT_ BM_CONTEXT;
++
++typedef struct _MMU_HEAP_ MMU_HEAP;
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG             (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG  (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG           (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG        (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE;
++#define DEVICE_MEMORY_HEAP_PERCONTEXT         0
++#define DEVICE_MEMORY_HEAP_KERNEL                     1
++#define DEVICE_MEMORY_HEAP_SHARED                     2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED    3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY        1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV  2       
++
++typedef struct _DEVICE_MEMORY_HEAP_INFO_
++{
++      
++      IMG_UINT32                              ui32HeapID;
++
++      
++      IMG_CHAR                                *pszName;
++
++      
++      IMG_CHAR                                *pszBSName;
++      
++      
++      IMG_DEV_VIRTADDR                sDevVAddrBase;
++
++      
++      IMG_UINT32                              ui32HeapSize;
++
++      
++      IMG_UINT32                              ui32Attribs;
++
++      
++      DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++      
++      
++      IMG_HANDLE                              hDevMemHeap;
++      
++      
++      RA_ARENA                                *psLocalDevMemArena;
++
++} DEVICE_MEMORY_HEAP_INFO;
++
++typedef struct _DEVICE_MEMORY_INFO_
++{
++      
++      IMG_UINT32                              ui32AddressSpaceSizeLog2;
++
++      
++
++
++      IMG_UINT32                              ui32Flags;
++
++      
++      IMG_UINT32                              ui32HeapCount;
++      
++      
++      IMG_UINT32                              ui32SyncHeapID;
++      
++      
++      IMG_UINT32                              ui32MappingHeapID;
++
++      
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++      
++    BM_CONTEXT                                *pBMKernelContext;
++
++      
++    BM_CONTEXT                                *pBMContext;
++
++} DEVICE_MEMORY_INFO;
++
++
++typedef struct DEV_ARENA_DESCRIPTOR_TAG
++{
++      IMG_UINT32                              ui32HeapID;             
++
++      IMG_CHAR                                *pszName;               
++
++      IMG_DEV_VIRTADDR                BaseDevVAddr;   
++
++      IMG_UINT32                              ui32Size;               
++
++      DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++
++} DEV_ARENA_DESCRIPTOR;
++
++typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
++
++typedef struct _PVRSRV_DEVICE_NODE_
++{
++      PVRSRV_DEVICE_IDENTIFIER        sDevId;
++      IMG_UINT32                                      ui32RefCount;
++
++      
++
++      
++      PVRSRV_ERROR                    (*pfnInitDevice) (IMG_VOID*);
++      
++      PVRSRV_ERROR                    (*pfnDeInitDevice) (IMG_VOID*);
++
++      
++      PVRSRV_ERROR                    (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
++      IMG_VOID                                (*pfnMMUFinalise)(MMU_CONTEXT*);
++      IMG_VOID                                (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
++      MMU_HEAP*                               (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**);
++      IMG_VOID                                (*pfnMMUDelete)(MMU_HEAP*);
++      IMG_BOOL                                (*pfnMMUAlloc)(MMU_HEAP*pMMU,
++                                                                                 IMG_SIZE_T uSize,
++                                                                                 IMG_SIZE_T *pActualSize,
++                                                                                 IMG_UINT32 uFlags,
++                                                                                 IMG_UINT32 uDevVAddrAlignment,
++                                                                                 IMG_DEV_VIRTADDR *pDevVAddr);
++      IMG_VOID                                (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32);
++      IMG_VOID                                (*pfnMMUEnable)(MMU_HEAP*);
++      IMG_VOID                                (*pfnMMUDisable)(MMU_HEAP*);
++      IMG_VOID                                (*pfnMMUMapPages)(MMU_HEAP *pMMU,
++                                                                                        IMG_DEV_VIRTADDR devVAddr,
++                                                                                        IMG_SYS_PHYADDR SysPAddr,
++                                                                                        IMG_SIZE_T uSize,
++                                                                                        IMG_UINT32 ui32MemFlags,
++                                                                                        IMG_HANDLE hUniqueTag);
++      IMG_VOID                                (*pfnMMUMapShadow)(MMU_HEAP            *pMMU,
++                                                                                         IMG_DEV_VIRTADDR    MapBaseDevVAddr,
++                                                                                         IMG_SIZE_T          uSize, 
++                                                                                         IMG_CPU_VIRTADDR    CpuVAddr,
++                                                                                         IMG_HANDLE          hOSMemHandle,
++                                                                                         IMG_DEV_VIRTADDR    *pDevVAddr,
++                                                                                         IMG_UINT32 ui32MemFlags,
++                                                                                         IMG_HANDLE hUniqueTag);
++      IMG_VOID                                (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
++                                                                                              IMG_DEV_VIRTADDR dev_vaddr,
++                                                                                              IMG_UINT32 ui32PageCount,
++                                                                                              IMG_HANDLE hUniqueTag);
++
++      IMG_VOID                                (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
++                                                                                              IMG_DEV_VIRTADDR DevVAddr,
++                                                                                              IMG_SYS_PHYADDR *psSysAddr,
++                                                                                              IMG_SIZE_T uSize,
++                                                                                              IMG_UINT32 ui32MemFlags,
++                                                                                              IMG_HANDLE hUniqueTag);
++
++      IMG_DEV_PHYADDR                 (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++      IMG_DEV_PHYADDR                 (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
++
++      
++      IMG_BOOL                                (*pfnDeviceISR)(IMG_VOID*);
++      
++      IMG_VOID                                *pvISRData;
++      
++      IMG_UINT32                              ui32SOCInterruptBit;
++      
++      IMG_VOID                                (*pfnDeviceMISR)(IMG_VOID*);
++
++      
++      IMG_VOID                                (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
++      
++      IMG_BOOL                                bReProcessDeviceCommandComplete;
++      
++      
++      DEVICE_MEMORY_INFO              sDevMemoryInfo;
++
++      
++      IMG_VOID                                *pvDevice;
++      IMG_UINT32                              ui32pvDeviceSize; 
++      IMG_VOID                                *hDeviceOSMemHandle;
++              
++      
++      PRESMAN_ITEM                    psResItem;
++      
++      
++      PSYS_DATA                               psSysData;
++      
++      
++      RA_ARENA                                *psLocalDevMemArena;
++      
++      IMG_UINT32                              ui32Flags;
++      
++      struct _PVRSRV_DEVICE_NODE_     *psNext;
++} PVRSRV_DEVICE_NODE;
++
++PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,
++                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                IMG_UINT32 ui32SOCInterruptBit,
++                                                                IMG_UINT32 *pui32DeviceIndex );
++
++PVRSRV_ERROR PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT PVRSRV_ERROR PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++                                                                         IMG_UINT32 ui32Value,
++                                                                         IMG_UINT32 ui32Mask,
++                                                                         IMG_UINT32 ui32Waitus,
++                                                                         IMG_UINT32 ui32Tries);
++
++#endif 
++
++
++#if defined (USING_ISR_INTERRUPTS)
++PVRSRV_ERROR PollForInterruptKM(IMG_UINT32 ui32Value,
++                                                              IMG_UINT32 ui32Mask,
++                                                              IMG_UINT32 ui32Waitus,
++                                                              IMG_UINT32 ui32Tries);
++#endif 
++
++
++PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData);
++
++#if defined(__cplusplus)
++}
++#endif
++      
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/handle.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/handle.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/handle.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/handle.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,339 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++typedef enum
++{
++      PVRSRV_HANDLE_TYPE_NONE = 0,
++      PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++      PVRSRV_HANDLE_TYPE_DEV_NODE,
++      PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++      PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++      PVRSRV_HANDLE_TYPE_MEM_INFO,
++      PVRSRV_HANDLE_TYPE_SYNC_INFO,
++      PVRSRV_HANDLE_TYPE_DISP_INFO,
++      PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++      PVRSRV_HANDLE_TYPE_BUF_INFO,
++      PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++      PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++      PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++      PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++      PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT
++} PVRSRV_HANDLE_TYPE;
++
++typedef enum
++{
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 1,
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 2,
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 4
++} PVRSRV_HANDLE_ALLOC_FLAG;
++
++struct sHandleList
++{
++      IMG_UINT32 ui32Prev;
++      IMG_UINT32 ui32Next;
++      IMG_HANDLE hParent;
++};
++
++struct sHandle
++{
++      
++      PVRSRV_HANDLE_TYPE eType;
++      
++      IMG_VOID *pvData;
++      
++      IMG_UINT32 ui32NextIndexPlusOne;
++      
++      PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++      
++      IMG_UINT32 ui32PID;
++      
++      IMG_UINT32 ui32Index;
++      
++      struct sHandleList sChildren;
++      
++      struct sHandleList sSiblings;
++};
++
++typedef struct _PVRSRV_HANDLE_BASE_
++{
++      
++      IMG_HANDLE hBaseBlockAlloc;
++
++      
++      IMG_UINT32 ui32PID;
++
++      
++      IMG_HANDLE hHandBlockAlloc;
++
++      
++      PRESMAN_ITEM psResManItem;
++
++      
++      struct sHandle *psHandleArray;
++
++      
++      HASH_TABLE *psHashTab;
++
++      
++      IMG_UINT32 ui32FreeHandCount;
++
++      
++      IMG_UINT32 ui32FirstFreeIndex;
++
++      
++      IMG_UINT32 ui32TotalHandCount;
++
++      
++      IMG_UINT32 ui32LastFreeIndexPlusOne;
++} PVRSRV_HANDLE_BASE;
++
++#ifdef        PVR_SECURE_HANDLES
++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define       KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle);
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID);
++
++#else 
++
++#define KERNEL_HANDLE_BASE IMG_NULL
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(eFlag);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *phHandle = pvData;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(eFlag);
++      PVR_UNREFERENCED_PARAMETER(hParent);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *phHandle = pvData;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *phHandle = pvData;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandleAnyType)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      
++      *peType = PVRSRV_HANDLE_TYPE_NONE;
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      PVR_UNREFERENCED_PARAMETER(eType);
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(hAncestor);
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetParentHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(hHandle);
++
++      *phParent = IMG_NULL;
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupAndReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(hHandle);
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, IMG_UINT32 ui32PID)
++{
++      PVR_UNREFERENCED_PARAMETER(ui32PID);
++
++      *ppsBase = IMG_NULL;
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFreeHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleDeInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++      return PVRSRV_OK;
++}
++
++#endif        
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/hash.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/hash.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/hash.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/hash.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,73 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++typedef struct _HASH_TABLE_ HASH_TABLE;
++
++IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++
++IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
++
++IMG_VOID HASH_Delete (HASH_TABLE *pHash);
++
++IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
++
++IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
++
++IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++#ifdef HASH_TRACE
++void HASH_Dump (HASH_TABLE *pHash);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/metrics.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/metrics.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/metrics.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/metrics.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _METRICS_
++#define _METRICS_
++
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#if defined(DEBUG) || defined(TIMING)
++
++
++typedef struct 
++{
++      IMG_UINT32 ui32Start;
++      IMG_UINT32 ui32Stop;
++      IMG_UINT32 ui32Total;
++      IMG_UINT32 ui32Count;
++} Temporal_Data;
++
++extern Temporal_Data asTimers[]; 
++
++extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID);
++extern IMG_VOID   PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo);
++extern IMG_VOID   PVRSRVOutputMetricTotals(IMG_VOID);
++
++
++#define PVRSRV_TIMER_DUMMY                            0
++
++#define PVRSRV_TIMER_EXAMPLE_1                        1
++#define PVRSRV_TIMER_EXAMPLE_2                        2
++
++
++#define PVRSRV_NUM_TIMERS             (PVRSRV_TIMER_EXAMPLE_2 + 1)
++
++#define PVRSRV_TIME_START(X)  { \
++                                                                      asTimers[X].ui32Count += 1; \
++                                                                      asTimers[X].ui32Count |= 0x80000000L; \
++                                                                      asTimers[X].ui32Start = PVRSRVTimeNow(); \
++                                                                      asTimers[X].ui32Stop  = 0; \
++                                                              }
++
++#define PVRSRV_TIME_SUSPEND(X)        { \
++                                                                      asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++                                                              }
++
++#define PVRSRV_TIME_RESUME(X) { \
++                                                                      asTimers[X].ui32Start = PVRSRVTimeNow(); \
++                                                              }
++
++#define PVRSRV_TIME_STOP(X)           { \
++                                                                      asTimers[X].ui32Stop  += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++                                                                      asTimers[X].ui32Total += asTimers[X].ui32Stop; \
++                                                                      asTimers[X].ui32Count &= 0x7FFFFFFFL; \
++                                                              }
++
++#define PVRSRV_TIME_RESET(X)  { \
++                                                                      asTimers[X].ui32Start = 0; \
++                                                                      asTimers[X].ui32Stop  = 0; \
++                                                                      asTimers[X].ui32Total = 0; \
++                                                                      asTimers[X].ui32Count = 0; \
++                                                              }
++
++
++#if defined(__sh__)
++
++#define TST_REG   ((volatile unsigned char *) (psDevInfo->pvSOCRegsBaseKM))   
++
++#define TCOR_2    ((volatile unsigned int *)  (psDevInfo->pvSOCRegsBaseKM+28))        
++#define TCNT_2    ((volatile unsigned int *)  (psDevInfo->pvSOCRegsBaseKM+32))        
++#define TCR_2     ((volatile unsigned short *)(psDevInfo->pvSOCRegsBaseKM+36))        
++
++#define TIMER_DIVISOR  4
++
++#endif 
++
++
++
++
++
++#else 
++
++
++
++#define PVRSRV_TIME_START(X)
++#define PVRSRV_TIME_SUSPEND(X)
++#define PVRSRV_TIME_RESUME(X)
++#define PVRSRV_TIME_STOP(X)
++#define PVRSRV_TIME_RESET(X)
++
++#define PVRSRVSetupMetricTimers(X)
++#define PVRSRVOutputMetricTotals()
++
++
++
++#endif 
++
++#if defined(__cplusplus)
++}
++#endif
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/osfunc.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/osfunc.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,246 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG         1
++#endif
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#ifdef        __linux__
++#ifdef        __KERNEL__
++#include <linux/string.h>
++#endif
++#endif
++
++
++#define KERNEL_ID                     0xffffffffL
++#define POWER_MANAGER_ID      0xfffffffeL
++#define ISR_ID                                0xfffffffdL
++#define TIMER_ID                      0xfffffffcL
++
++
++#define HOST_PAGESIZE                 OSGetPageSize
++#define HOST_PAGEMASK                 (~(HOST_PAGESIZE()-1))
++#define HOST_PAGEALIGN(addr)  (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK                   0xf 
++#define PVRSRV_OS_PAGEABLE_HEAP               0x1 
++#define PVRSRV_OS_NON_PAGEABLE_HEAP   0x2 
++
++
++IMG_UINT32 OSClockus(IMG_VOID);
++IMG_UINT32 OSGetPageSize(IMG_VOID);
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++                                                               IMG_UINT32 ui32Irq,
++                                                               IMG_CHAR *pszISRName,
++                                                               IMG_VOID *pvDeviceNode);
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq);
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData);
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID* pvLinAddr);
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, 
++                                                 IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++#if defined(__linux__)
++PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++                                                         IMG_UINT32 ui32ByteOffset,
++                                                         IMG_UINT32 ui32Bytes,
++                                                         IMG_UINT32 ui32Flags,
++                                                         IMG_HANDLE *phOSMemHandleRet);
++PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSGetSubMemHandle)
++#endif
++static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++                                                                                       IMG_UINT32 ui32ByteOffset,
++                                                                                       IMG_UINT32 ui32Bytes,
++                                                                                       IMG_UINT32 ui32Flags,
++                                                                                       IMG_HANDLE *phOSMemHandleRet)
++{
++      PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++      PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++      *phOSMemHandleRet = hOSMemHandle;
++      return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++      PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      return PVRSRV_OK;
++}
++#endif
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID);
++IMG_UINT32 OSGetCurrentThreadID( IMG_VOID );
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++
++
++#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR _OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *szFilename, IMG_UINT32 ui32Line);
++#define OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc) _OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc, __FILE__, __LINE__)
++#else
++PVRSRV_ERROR OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc);
++#endif
++PVRSRV_ERROR OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc);
++PVRSRV_ERROR OSAllocPages(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc);
++PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc);
++#if defined(__linux__)
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSMemHandleToCpuPAddr)
++#endif
++static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++      IMG_CPU_PHYADDR sCpuPAddr;
++      PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++      PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++      sCpuPAddr.uiAddr = 0;
++      return sCpuPAddr;
++}
++#endif
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData);
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData);
++IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...);
++#define OSStringLength(pszString) strlen(pszString)
++PVRSRV_ERROR OSPowerManagerConnect(IMG_VOID);
++PVRSRV_ERROR OSPowerManagerDisconnect(IMG_VOID);
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
++                                                               PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout);
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
++
++IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_UINT32 ui32Size,IMG_HANDLE *phMemBlock);
++IMG_VOID  UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_UINT32 ui32Size, IMG_HANDLE hMemBlock);
++
++IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess);
++IMG_VOID  OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess);
++
++IMG_SYS_PHYADDR OSMapLinToPhys(IMG_PVOID pvLinAddr);
++
++
++PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie,
++                                                                IMG_SYS_PHYADDR sCPUPhysAddr,
++                                                                IMG_UINT32 uiSizeInBytes,
++                                                                IMG_UINT32 ui32CacheFlags,
++                                                                IMG_PVOID *ppvUserAddr,
++                                                                IMG_UINT32 *puiActualSize,
++                                                                IMG_HANDLE hMappingHandle);
++
++PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie,
++                                                                      IMG_PVOID pvUserAddr,
++                                                                      IMG_PVOID pvProcess);
++
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
++IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
++
++#ifndef OSReadHWReg
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++#endif
++#ifndef OSWriteHWReg
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++#endif
++
++typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++
++PVRSRV_ERROR OSGetSysMemSize(IMG_UINT32 *pui32Bytes);
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++      HOST_PCI_INIT_FLAG_BUS_MASTER = 0x1,
++      HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCISetDev(IMG_VOID *pvSysData, IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSPCIResumeDev(IMG_VOID *pvSysData);
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
++
++typedef enum _img_verify_test
++{
++      PVR_VERIFY_WRITE = 0,
++      PVR_VERIFY_READ
++} IMG_VERIFY_TEST;
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes);
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Bytes);
++PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Bytes);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define PDUMP_FLAGS_NEVER                     0x08000000
++#define PDUMP_FLAGS_TOOUT2MEM         0x10000000
++#define PDUMP_FLAGS_LASTFRAME         0x20000000
++#define PDUMP_FLAGS_RESETLFBUFFER     0x40000000
++#define PDUMP_FLAGS_CONTINUOUS                0x80000000
++
++#define PDUMP_PD_UNIQUETAG                    (IMG_HANDLE)0
++#define PDUMP_PT_UNIQUETAG                    (IMG_HANDLE)0
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo)       (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++      #define PDUMP_REG_FUNC_NAME PDumpReg
++
++      IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                IMG_UINT32                    ui32Offset,
++                                                                                IMG_UINT32                    ui32Value,
++                                                                                IMG_UINT32                    ui32Mask,
++                                                                                PDUMP_POLL_OPERATOR   eOperator,
++                                                                                IMG_BOOL                              bLastFrame,
++                                                                                IMG_BOOL                              bOverwrite,
++                                                                                IMG_HANDLE                    hUniqueTag);
++
++      IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID                    pvAltLinAddr,
++                                                                         PVRSRV_KERNEL_MEM_INFO       *psMemInfo,
++                                                                         IMG_UINT32                   ui32Offset,
++                                                                         IMG_UINT32                   ui32Bytes,
++                                                                         IMG_UINT32                   ui32Flags,
++                                                                         IMG_HANDLE                   hUniqueTag);
++      PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType,
++                                                               IMG_DEV_PHYADDR                *pPages,
++                                                               IMG_UINT32                     ui32NumPages,
++                                                               IMG_DEV_VIRTADDR       sDevAddr,
++                                                               IMG_UINT32                     ui32Start,
++                                                               IMG_UINT32                     ui32Length,
++                                                               IMG_UINT32                     ui32Flags);
++
++      PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE     eDeviceType,
++                                                       IMG_CPU_VIRTADDR       pvLinAddr,
++                                                       IMG_UINT32                     ui32Bytes,
++                                                       IMG_UINT32                     ui32Flags,
++                                                       IMG_BOOL                       bInitialisePages,
++                                                       IMG_HANDLE                     hUniqueTag1,
++                                                       IMG_HANDLE                     hUniqueTag2);
++      IMG_VOID PDumpInit(IMG_VOID);
++      IMG_VOID PDumpDeInit(IMG_VOID);
++      IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame);
++      IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
++      IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags);
++      PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32RegAddr,
++                                                                       IMG_UINT32 ui32RegValue,
++                                                                       IMG_UINT32 ui32Flags);
++      IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR *pszFileName,
++                                                                                IMG_UINT32 ui32FileOffset,
++                                                                                IMG_UINT32 ui32Width,
++                                                                                IMG_UINT32 ui32Height,
++                                                                                IMG_UINT32 ui32StrideInBytes,
++                                                                                IMG_DEV_VIRTADDR sDevBaseAddr,
++                                                                                IMG_UINT32 ui32Size,
++                                                                                PDUMP_PIXEL_FORMAT ePixelFormat,
++                                                                                PDUMP_MEM_FORMAT eMemFormat,
++                                                                                IMG_UINT32 ui32PDumpFlags);
++      IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszFileName,
++                                                                                 IMG_UINT32 ui32FileOffset,
++                                                                                 IMG_UINT32 ui32Address,
++                                                                                 IMG_UINT32 ui32Size,
++                                                                                 IMG_UINT32 ui32PDumpFlags);
++      IMG_VOID PDUMP_REG_FUNC_NAME(IMG_UINT32         dwReg,
++                                                               IMG_UINT32             dwData);
++
++      IMG_VOID PDumpMsvdxRegRead(const IMG_CHAR* const        pRegRegion,
++                                                         const IMG_UINT32             dwRegOffset);
++
++      IMG_VOID PDumpMsvdxRegWrite(const IMG_CHAR* const       pRegRegion,
++                                                              const IMG_UINT32                dwRegOffset,
++                                                              const IMG_UINT32                dwData);
++
++      PVRSRV_ERROR PDumpMsvdxRegPol(const IMG_CHAR* const     pRegRegion,
++                                                                const IMG_UINT32              ui32Offset,
++                                                                const IMG_UINT32              ui32CheckFuncIdExt,
++                                                                const IMG_UINT32              ui32RequValue,
++                                                                const IMG_UINT32              ui32Enable,
++                                                                const IMG_UINT32              ui32PollCount,
++                                                                const IMG_UINT32              ui32TimeOut);
++
++      PVRSRV_ERROR  PDumpMsvdxWriteRef(const IMG_CHAR* const  pRegRegion,
++                                                                       const IMG_UINT32               ui32VLROffset,
++                                                                       const IMG_UINT32               ui32Physical );
++
++      IMG_VOID PDumpComment(IMG_CHAR* pszFormat, ...);
++      IMG_VOID PDumpCommentWithFlags(IMG_UINT32       ui32Flags,
++                                                                 IMG_CHAR*    pszFormat,
++                                                                 ...);
++      PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr,
++                                                         IMG_UINT32 ui32RegValue,
++                                                         IMG_UINT32 ui32Mask);
++      PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr,
++                                                                              IMG_UINT32 ui32RegValue,
++                                                                              IMG_UINT32 ui32Mask,
++                                                                              IMG_UINT32 ui32Flags);
++      IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
++      IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID);
++
++      IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE    eDeviceType,
++                                                        IMG_UINT32                    ui32DevVAddr,
++                                                        IMG_CPU_VIRTADDR              pvLinAddr,
++                                                        IMG_HANDLE                    hOSMemHandle,
++                                                        IMG_UINT32                    ui32NumBytes,
++                                                        IMG_HANDLE                    hUniqueTag);
++      IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE        eDeviceType,
++                                                                IMG_UINT32                    ui32DevVAddr,
++                                                                IMG_PUINT32                   pui32PhysPages,
++                                                                IMG_UINT32                    ui32NumPages);
++      IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE        eDeviceType,
++                                                                IMG_CPU_VIRTADDR              pvLinAddr,
++                                                                IMG_UINT32                    ui32NumBytes,
++                                                                IMG_HANDLE                    hUniqueTag);
++      IMG_VOID PDumpFreePages(struct _BM_HEAP_        *psBMHeap,
++                                                      IMG_DEV_VIRTADDR        sDevVAddr,
++                                                      IMG_UINT32                      ui32NumBytes,
++                                                      IMG_HANDLE              hUniqueTag,
++                                                      IMG_BOOL                        bInterleaved);
++      IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE  eDeviceType,
++                                                              IMG_CPU_VIRTADDR        pvLinAddr,
++                                                              IMG_UINT32                      ui32NumBytes,
++                                                              IMG_HANDLE                      hUniqueTag);
++      IMG_VOID PDumpPDReg(IMG_UINT32  ui32Reg,
++                                              IMG_UINT32      ui32dwData,
++                                              IMG_HANDLE      hUniqueTag);
++      IMG_VOID PDumpPDRegWithFlags(IMG_UINT32         ui32Reg,
++                                                               IMG_UINT32             ui32Data,
++                                                               IMG_UINT32             ui32Flags,
++                                                               IMG_HANDLE             hUniqueTag);
++
++      PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                 IMG_UINT32 ui32Offset,
++                                                                 IMG_DEV_PHYADDR sPDDevPAddr,
++                                                                 IMG_HANDLE hUniqueTag1,
++                                                                 IMG_HANDLE hUniqueTag2);
++
++      IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame);
++
++      void PDumpTASignatureRegisters(IMG_UINT32       ui32DumpFrameNum,
++                                                                 IMG_UINT32   ui32TAKickCount,
++                                                                 IMG_BOOL             bLastFrame);
++      void PDump3DSignatureRegisters(IMG_UINT32       ui32DumpFrameNum,
++                                                                 IMG_BOOL             bLastFrame);
++
++      IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++      void PDumpPerformanceCounterRegisters(IMG_UINT32        ui32DumpFrameNum,
++                                                                                IMG_BOOL              bLastFrame);
++
++      IMG_VOID PDumpEndInitPhase(IMG_VOID);
++
++      void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO   psROffMemInfo,
++                                IMG_UINT32                            ui32ROffOffset,
++                                IMG_UINT32                            ui32WPosVal,
++                                IMG_UINT32                            ui32PacketSize,
++                                IMG_UINT32                            ui32BufferSize,
++                                IMG_UINT32                            ui32Flags,
++                                IMG_HANDLE                            hUniqueTag);
++
++      IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++      IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks);
++
++      IMG_VOID PDumpSuspendKM(IMG_VOID);
++      IMG_VOID PDumpResumeKM(IMG_VOID);
++
++      #define PDUMPMEMPOL                             PDumpMemPolKM
++      #define PDUMPMEM                                PDumpMemKM
++      #define PDUMPMEM2                               PDumpMem2KM
++      #define PDUMPINIT                               PDumpInit
++      #define PDUMPDEINIT                             PDumpDeInit
++      #define PDUMPISLASTFRAME                PDumpIsLastCaptureFrameKM
++      #define PDUMPTESTFRAME                  PDumpIsCaptureFrameKM
++      #define PDUMPTESTNEXTFRAME              PDumpTestNextFrame
++      #define PDUMPREGWITHFLAGS               PDumpRegWithFlagsKM
++      #define PDUMPREG                                PDUMP_REG_FUNC_NAME
++      #define PDUMPCOMMENT                    PDumpComment
++      #define PDUMPCOMMENTWITHFLAGS   PDumpCommentWithFlags
++      #define PDUMPREGPOL                             PDumpRegPolKM
++      #define PDUMPREGPOLWITHFLAGS    PDumpRegPolWithFlagsKM
++      #define PDUMPMALLOCPAGES                PDumpMallocPages
++      #define PDUMPMALLOCPAGETABLE    PDumpMallocPageTable
++      #define PDUMPFREEPAGES                  PDumpFreePages
++      #define PDUMPFREEPAGETABLE              PDumpFreePageTable
++      #define PDUMPPDREG                              PDumpPDReg
++      #define PDUMPPDREGWITHFLAGS             PDumpPDRegWithFlags
++      #define PDUMPCBP                                PDumpCBP
++      #define PDUMPMALLOCPAGESPHYS    PDumpMallocPagesPhys
++      #define PDUMPENDINITPHASE               PDumpEndInitPhase
++      #define PDUMPMSVDXREGWRITE              PDumpMsvdxRegWrite
++      #define PDUMPMSVDXREGREAD               PDumpMsvdxRegRead
++      #define PDUMPMSVDXPOL                   PDumpMsvdxRegPol
++      #define PDUMPMSVDXWRITEREF              PDumpMsvdxWriteRef
++      #define PDUMPBITMAPKM                   PDumpBitmapKM
++      #define PDUMPDRIVERINFO                 PDumpDriverInfoKM
++      #define PDUMPIDLWITHFLAGS               PDumpIDLWithFlags
++      #define PDUMPIDL                                PDumpIDL
++      #define PDUMPSUSPEND                    PDumpSuspendKM
++      #define PDUMPRESUME                             PDumpResumeKM
++
++#else
++              #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
++                      #define PDUMPMEMPOL(args...)
++                      #define PDUMPMEM(args...)
++                      #define PDUMPMEM2(args...)
++                      #define PDUMPINIT(args...)
++                      #define PDUMPDEINIT(args...)
++                      #define PDUMPISLASTFRAME(args...)
++                      #define PDUMPTESTFRAME(args...)
++                      #define PDUMPTESTNEXTFRAME(args...)
++                      #define PDUMPREGWITHFLAGS(args...)
++                      #define PDUMPREG(args...)
++                      #define PDUMPCOMMENT(args...)
++                      #define PDUMPREGPOL(args...)
++                      #define PDUMPREGPOLWITHFLAGS(args...)
++                      #define PDUMPMALLOCPAGES(args...)
++                      #define PDUMPMALLOCPAGETABLE(args...)
++                      #define PDUMPFREEPAGES(args...)
++                      #define PDUMPFREEPAGETABLE(args...)
++                      #define PDUMPPDREG(args...)
++                      #define PDUMPPDREGWITHFLAGS(args...)
++                      #define PDUMPSYNC(args...)
++                      #define PDUMPCOPYTOMEM(args...)
++                      #define PDUMPWRITE(args...)
++                      #define PDUMPCBP(args...)
++                      #define PDUMPCOMMENTWITHFLAGS(args...)
++                      #define PDUMPMALLOCPAGESPHYS(args...)
++                      #define PDUMPENDINITPHASE(args...)
++                      #define PDUMPMSVDXREG(args...)
++                      #define PDUMPMSVDXREGWRITE(args...)
++                      #define PDUMPMSVDXREGREAD(args...)
++                      #define PDUMPMSVDXPOLEQ(args...)
++                      #define PDUMPMSVDXPOL(args...)
++                      #define PDUMPBITMAPKM(args...)
++                      #define PDUMPDRIVERINFO(args...)
++                      #define PDUMPIDLWITHFLAGS(args...)
++                      #define PDUMPIDL(args...)
++                      #define PDUMPSUSPEND(args...)
++                      #define PDUMPRESUME(args...)
++                      #define PDUMPMSVDXWRITEREF(args...)
++              #else
++                      #error Compiler not specified
++              #endif
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/perproc.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/perproc.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/perproc.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/perproc.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,65 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++typedef struct _PVRSRV_PER_PROCESS_DATA_
++{
++      IMG_UINT32 ui32PID;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResManItem;
++      IMG_HANDLE hPerProcData;
++      PVRSRV_HANDLE_BASE *psHandleBase;
++
++      
++      IMG_BOOL bInitProcess;
++
++      
++      IMG_HANDLE hOsPrivateData;
++} PVRSRV_PER_PROCESS_DATA;
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID, IMG_BOOL bAlloc);
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/power.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/power.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/power.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/power.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,90 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++ 
++typedef struct _PVRSRV_POWER_DEV_TAG_
++{
++      PFN_PRE_POWER                                   pfnPrePower;
++      PFN_POST_POWER                                  pfnPostPower;
++      IMG_HANDLE                                              hDevCookie;
++      IMG_UINT32                                              ui32DeviceIndex;
++      PVR_POWER_STATE                                 eDefaultPowerState;
++      PVR_POWER_STATE                                 eCurrentPowerState;
++      struct _PVRSRV_POWER_DEV_TAG_   *psNext;
++
++} PVRSRV_POWER_DEV;
++
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32       ui32CallerID,
++                                                       IMG_BOOL       bSystemPowerEvent);
++IMG_IMPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32                   ui32DeviceIndex,
++                                                                               PVR_POWER_STATE        eNewPowerState,
++                                                                               IMG_UINT32                     ui32CallerID,
++                                                                               IMG_BOOL                       bRetainMutex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVR_POWER_STATE eNewPowerState);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVR_POWER_STATE eNewPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM (PVR_POWER_STATE ePVRState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerControlKM(PVR_POWER_CONTROL   ePowerControl,
++                                                                PVR_POWER_STATE       *pePVRPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32             ui32DeviceIndex,
++                                                                         PFN_PRE_POWER        pfnPrePower,
++                                                                         PFN_POST_POWER       pfnPostPower,
++                                                                         IMG_HANDLE           hDevCookie,
++                                                                         PVR_POWER_STATE      eCurrentPowerState,
++                                                                         PVR_POWER_STATE      eDefaultPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
++
++#if defined (__cplusplus)
++}
++#endif
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/queue.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/queue.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/queue.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/queue.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,110 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size)                                          \
++      psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size)  \
++      & (psQueue->ui32QueueSize - 1);
++
++ typedef struct _COMMAND_COMPLETE_DATA_
++ {
++      IMG_BOOL                        bInUse;
++              
++      IMG_UINT32                      ui32DstSyncCount;       
++      IMG_UINT32                      ui32SrcSyncCount;       
++      PVRSRV_SYNC_OBJECT      *psDstSync;                     
++      PVRSRV_SYNC_OBJECT      *psSrcSync;                     
++ }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVProcessQueues (IMG_UINT32  ui32CallerID,
++                                                                IMG_BOOL              bFlush);
++
++#if defined(__linux__) && defined(__KERNEL__) 
++#include <linux/types.h>
++off_t
++QueuePrintQueues (char * buffer, size_t size, off_t off);
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 ui32QueueSize,
++                                                                                                       PVRSRV_QUEUE_INFO **ppsQueueInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO     *psQueue,
++                                                                                              PVRSRV_COMMAND          **ppsCommand,
++                                                                                              IMG_UINT32                      ui32DevIndex,
++                                                                                              IMG_UINT16                      CommandType,
++                                                                                              IMG_UINT32                      ui32DstSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++                                                                                              IMG_UINT32                      ui32SrcSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                                                              IMG_UINT32                      ui32DataByteSize );
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              IMG_UINT32 ui32ParamSize,
++                                                                                              IMG_VOID **ppvSpace);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              PVRSRV_COMMAND *psCommand);
++
++IMG_IMPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR);
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32           ui32DevIndex,
++                                                                               PFN_CMD_PROC   *ppfnCmdProcList,
++                                                                               IMG_UINT32             ui32MaxSyncsPerCmd[][2],
++                                                                               IMG_UINT32             ui32CmdCount);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32     ui32DevIndex,
++                                                                         IMG_UINT32   ui32CmdCount);
++
++#endif 
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/ra.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/ra.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/ra.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/ra.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++typedef struct _RA_ARENA_ RA_ARENA;
++typedef struct _BM_MAPPING_ BM_MAPPING;
++
++
++
++#define RA_STATS 
++
++
++struct _RA_STATISTICS_
++{
++    
++    IMG_UINT32 uSpanCount;
++
++    
++    IMG_UINT32 uLiveSegmentCount;
++
++    
++    IMG_UINT32 uFreeSegmentCount;
++
++    
++    IMG_UINT32 uTotalResourceCount;
++    
++    
++    IMG_UINT32 uFreeResourceCount;
++
++    
++    IMG_UINT32 uCumulativeAllocs;
++
++    
++    IMG_UINT32 uCumulativeFrees;
++
++    
++    IMG_UINT32 uImportCount;
++
++    
++    IMG_UINT32 uExportCount;
++};
++typedef struct _RA_STATISTICS_ RA_STATISTICS;
++
++struct _RA_SEGMENT_DETAILS_
++{
++      IMG_UINT32      uiSize;
++      IMG_CPU_PHYADDR sCpuPhyAddr;
++      IMG_HANDLE      hSegment;
++};
++typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++           IMG_UINTPTR_T base,
++           IMG_SIZE_T uSize,
++           BM_MAPPING *psMapping,
++           IMG_SIZE_T uQuantum, 
++           IMG_BOOL (*alloc)(IMG_VOID *_h,
++                             IMG_SIZE_T uSize,
++                             IMG_SIZE_T *pActualSize,
++                             BM_MAPPING **ppsMapping,
++                             IMG_UINT32 uFlags,
++                                                       IMG_UINTPTR_T *pBase),
++           IMG_VOID (*free) (IMG_VOID *,
++                                                      IMG_UINTPTR_T,
++                                                      BM_MAPPING *psMapping),
++                 IMG_VOID (*backingstore_free) (IMG_VOID *,
++                                                                                IMG_UINT32,
++                                          IMG_UINT32,
++                                          IMG_HANDLE),
++           IMG_VOID *import_handle);
++
++void
++RA_Delete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize);
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena, 
++          IMG_SIZE_T uSize,
++          IMG_SIZE_T *pActualSize,
++          BM_MAPPING **ppsMapping, 
++          IMG_UINT32 uFlags,
++          IMG_UINT32 uAlignment,
++                IMG_UINT32 uAlignmentOffset,
++          IMG_UINTPTR_T *pBase);
++
++void 
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore);
++
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total)                                    \
++{                                                                                     \
++      if(total<100)                                                   \
++              return PVRSRV_ERROR_INVALID_PARAMS;     \
++}
++
++#define UPDATE_SPACE(str, count, total)               \
++{                                                                                     \
++      if(count == -1)                                                 \
++              return PVRSRV_ERROR_INVALID_PARAMS;     \
++      else                                                                    \
++      {                                                                               \
++              str += count;                                           \
++              total -= count;                                         \
++      }                                                                               \
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails);
++
++
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++                                                      IMG_CHAR **ppszStr, 
++                                                      IMG_UINT32 *pui32StrLen);
++
++#endif 
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/resman.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/resman.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/resman.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/resman.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++enum {
++      
++      RESMAN_TYPE_SHARED_PB_DESC = 1,                                 
++      RESMAN_TYPE_HW_RENDER_CONTEXT,                                          
++      RESMAN_TYPE_TRANSFER_CONTEXT,                                   
++
++      
++      
++      
++      
++      RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN,                             
++      RESMAN_TYPE_DISPLAYCLASS_DEVICE,                                
++
++      
++      RESMAN_TYPE_BUFFERCLASS_DEVICE,                                 
++      
++      
++      RESMAN_TYPE_OS_USERMODE_MAPPING,                                
++      
++      
++      RESMAN_TYPE_DEVICEMEM_CONTEXT,                                  
++      RESMAN_TYPE_DEVICECLASSMEM_MAPPING,                             
++      RESMAN_TYPE_DEVICEMEM_MAPPING,                                  
++      RESMAN_TYPE_DEVICEMEM_WRAP,                                             
++      RESMAN_TYPE_DEVICEMEM_ALLOCATION,                               
++      RESMAN_TYPE_RESOURCE_PERPROC_DATA,                              
++    RESMAN_TYPE_SHARED_MEM_INFO,                    
++      
++      
++      RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION                 
++};
++
++#define RESMAN_TYPE_USE_PROCESSID             0x80000000      
++
++#define RESMAN_CRITERIA_ALL                           0x00000000      
++#define RESMAN_CRITERIA_RESTYPE                       0x00000001      
++#define RESMAN_CRITERIA_PVOID_PARAM           0x00000002      
++#define RESMAN_CRITERIA_UI32_PARAM            0x00000004      
++
++#define RESMAN_PROCESSID_FIND                 0xffffffff      
++
++#define RESMAN_SRVINIT_PROCESSID              0xfffffff1      
++
++#define RESMAN_KERNEL_PROCESSID                       0                       
++
++typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param); 
++                                                                                                                                              
++
++typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID);
++IMG_VOID ResManDeInit(IMG_VOID);
++
++PRESMAN_ITEM ResManRegisterRes(IMG_UINT32             ui32ResType, 
++                                                         IMG_PVOID            pvParam, 
++                                                         IMG_UINT32           ui32Param, 
++                                                         RESMAN_FREE_FN       pfnFreeResource, 
++                                                         IMG_UINT32           ui32ProcessID);
++PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM  psResItem,
++                                                              IMG_BOOL                bExecuteCallback);
++PVRSRV_ERROR ResManFreeResByCriteria(IMG_UINT32       ui32SearchCriteria, 
++                                                                       IMG_UINT32     ui32ResType, 
++                                                                       IMG_PVOID      pvParam, 
++                                                                       IMG_UINT32     ui32Param, 
++                                                                       IMG_BOOL       bExecuteCallback);
++
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_ITEM psItem);
++
++PVRSRV_ERROR ResManPrePower(PVR_POWER_STATE eNewPowerState,
++                                                      PVR_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR ResManPostPower(PVR_POWER_STATE eNewPowerState, 
++                                                      PVR_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_UINT32 ui32ProcID, IMG_BOOL bConnect);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/services_headers.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/services_headers.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/services_headers.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/services_headers.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,49 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG         1
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "metrics.h"
++#include "osfunc.h"
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/srvkm.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/srvkm.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,44 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State);
++
++PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/Makefile git-nokia/drivers/gpu/pvr/services4/srvkm/Makefile
+--- git/drivers/gpu/pvr/services4/srvkm/Makefile       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/Makefile 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,68 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++
++obj-y +=      env/linux/osfunc.o              \
++              env/linux/mmap.o                \
++              env/linux/mod.o                 \
++              env/linux/pdump.o               \
++              env/linux/proc.o                \
++              env/linux/pvr_bridge_k.o        \
++              env/linux/pvr_debug.o           \
++              env/linux/mm.o                  \
++              env/linux/mutex.o
++
++obj-y +=      common/buffer_manager.o         \
++              common/devicemem.o              \
++              common/deviceclass.o            \
++              common/handle.o                 \
++              common/hash.o                   \
++              common/metrics.o                \
++              common/pvrsrv.o                 \
++              common/queue.o                  \
++              common/ra.o                     \
++              common/resman.o                 \
++              common/power.o                  \
++              common/mem.o                    \
++              bridged/bridged_pvr_bridge.o    \
++              devices/sgx/sgxinit.o           \
++              devices/sgx/sgxutils.o          \
++              devices/sgx/sgxkick.o           \
++              devices/sgx/sgxtransfer.o       \
++              devices/sgx/mmu.o               \
++              devices/sgx/pb.o                \
++              common/perproc.o                \
++              ../system/$(CONFIG_PVR_SYSTEM)/sysconfig.o      \
++              ../system/$(CONFIG_PVR_SYSTEM)/sysutils.o       \
++              devices/sgx/sgx2dcore.o
++
++INCLUDES =    -I$(src)/env/linux      \
++              -I$(src)/include        \
++              -I$(src)/bridged        \
++              -I$(src)/devices/sgx    \
++              -I$(src)/include        \
++              -I$(src)/hwdefs
++
++ccflags-y += $(CONFIG_PVR_OPTS) $(INCLUDES)
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/include/syscommon.h git-nokia/drivers/gpu/pvr/services4/system/include/syscommon.h
+--- git/drivers/gpu/pvr/services4/system/include/syscommon.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/include/syscommon.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,189 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"      
++#include "sysinfo.h"          
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++ 
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef struct _SYS_DEVICE_ID_TAG
++{
++      IMG_UINT32      uiID;
++      IMG_BOOL        bInUse;
++
++} SYS_DEVICE_ID;
++
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS   4
++
++typedef struct _SYS_DATA_TAG_
++{
++    IMG_UINT32                  ui32NumDevices;               
++      SYS_DEVICE_ID                           sDeviceID[SYS_DEVICE_COUNT];
++    PVRSRV_DEVICE_NODE                        *psDeviceNodeList;                      
++    PVRSRV_POWER_DEV                  *psPowerDeviceList;                     
++      PVRSRV_RESOURCE                         sPowerStateChangeResource;      
++      PVR_POWER_STATE                         eCurrentPowerState;                     
++      PVR_POWER_STATE                         eFailedPowerState;                      
++      IMG_UINT32                                      ui32CurrentOSPowerState;        
++    PVRSRV_QUEUE_INFO           *psQueueList;                 
++      PVRSRV_KERNEL_SYNC_INFO         *psSharedSyncInfoList;          
++    IMG_PVOID                   pvEnvSpecificData;            
++    IMG_PVOID                   pvSysSpecificData;                    
++      PVRSRV_RESOURCE                         sQProcessResource;                      
++      IMG_VOID                                        *pvSOCRegsBase;                         
++    IMG_HANDLE                  hSOCTimerRegisterOSMemHandle; 
++      IMG_UINT32                                      *pvSOCTimerRegisterKM;          
++      IMG_VOID                                        *pvSOCClockGateRegsBase;        
++      IMG_UINT32                                      ui32SOCClockGateRegsSize;
++      PFN_CMD_PROC                            *ppfnCmdProcList[SYS_DEVICE_COUNT];
++                                                                                                                      
++
++
++      PCOMMAND_COMPLETE_DATA          *ppsCmdCompleteData[SYS_DEVICE_COUNT];
++                                                                                                                      
++
++      IMG_BOOL                    bReProcessQueues;                   
++
++      RA_ARENA                                        *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS]; 
++
++    IMG_CHAR                    *pszVersionString;          
++} SYS_DATA;
++
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID);
++
++IMG_UINT32 GetCPUTranslatedAddress(IMG_VOID);
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++                                                                      IMG_VOID **ppvDeviceMap);
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA                     *psSysData,
++                                                               PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits);
++
++PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex);
++
++PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++                                                                      PVR_POWER_STATE eNewPowerState,
++                                                                      PVR_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++                                                                       PVR_POWER_STATE eNewPowerState,
++                                                                       PVR_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32      ui32ID, 
++                                                              IMG_VOID        *pvIn,
++                                                              IMG_UINT32  ulInSize,
++                                                              IMG_VOID        *pvOut,
++                                                              IMG_UINT32      ulOutSize);
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
++
++
++
++extern SYS_DATA* gpsSysData;
++
++#if !defined(USE_CODE)
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysAcquireData)
++#endif
++static INLINE PVRSRV_ERROR SysAcquireData(SYS_DATA **ppsSysData)
++{
++      
++      *ppsSysData = gpsSysData;
++
++      
++
++
++
++      if (!gpsSysData)
++      {
++              return PVRSRV_ERROR_GENERIC;    
++      }
++              
++      return PVRSRV_OK;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysInitialiseCommon)
++#endif
++static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
++{
++      PVRSRV_ERROR    eError;
++
++      
++      eError = PVRSRVInit(psSysData);
++
++      return eError;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysDeinitialiseCommon)
++#endif
++static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData)
++{
++      
++      PVRSRVDeInit(psSysData);
++
++      OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++#endif 
++
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32   (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32  Ioctl,
++                                                                                              IMG_BYTE   *pInBuf,
++                                                                                              IMG_UINT32  InBufLen, 
++                                                                                          IMG_BYTE   *pOutBuf,
++                                                                                              IMG_UINT32  OutBufLen,
++                                                                                              IMG_UINT32 *pdwBytesTransferred);
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++      PFN_SRV_BRIDGEDISPATCH                  pfnOEMBridgeDispatch;
++      IMG_PVOID                                               pvDummy1;
++      IMG_PVOID                                               pvDummy2;
++      IMG_PVOID                                               pvDummy3;
++
++} PVRSRV_DC_OEM_JTABLE;
++
++#define OEM_GET_EXT_FUNCS                     (1<<1)
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,764 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "pdump_km.h"
++#include "sgxinfokm.h"
++#include "syslocal.h"
++#include "sysconfig.h"
++
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ               (100) 
++#define SYS_SGX_PDS_TIMER_FREQ                                (1000) 
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS               (500)
++
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA  gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++static IMG_UINT32     gui32SGXDeviceID;
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++
++#define DEVICE_SGX_INTERRUPT (1 << 0)
++
++#if defined(NO_HARDWARE)
++
++#if defined(__linux__)
++#include "mm.h"
++#else
++IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
++#endif
++#endif
++
++IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
++                                                                 IMG_BYTE             *pInBuf,
++                                                                 IMG_UINT32   InBufLen,
++                                                                 IMG_BYTE             *pOutBuf,
++                                                                 IMG_UINT32   OutBufLen,
++                                                                 IMG_UINT32   *pdwBytesTransferred);
++
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++#if !defined(__linux__) && defined(NO_HARDWARE)
++      PVRSRV_ERROR eError;
++      IMG_CPU_PHYADDR sCpuPAddr;
++#endif
++
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++
++      
++      gsSGXDeviceMap.ui32Flags = 0x0;
++      
++#if defined(NO_HARDWARE)
++      
++
++
++      
++#if defined(__linux__)
++      gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_SGX_DUMMY_REGS_SYS_PHYS_BASE;
++      gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++      gsSGXDeviceMap.sRegsDevPBase.uiAddr = 0;
++      gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++      
++      {
++              IMG_VOID *pvRegisters;
++              pvRegisters = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++                              gsSGXDeviceMap.ui32RegsSize,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              IMG_NULL);
++              if (!pvRegisters)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++              OSMemSet(pvRegisters, 0, gsSGXDeviceMap.ui32RegsSize);
++              OSUnMapPhysToLin(pvRegisters,
++                              gsSGXDeviceMap.ui32RegsSize,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              IMG_NULL);
++      }
++#else
++      eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, 
++                                                                       &gsSGXRegsCPUVAddr,
++                                                                       &sCpuPAddr);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++      gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
++      gsSGXDeviceMap.sRegsDevPBase = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, gsSGXDeviceMap.sRegsCpuPBase);
++      gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);;
++      gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++      OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
++#endif
++
++      
++
++
++      gsSGXDeviceMap.ui32IRQ = 0;
++
++#else 
++
++      gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
++      gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++      gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++
++      gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
++
++#endif 
++
++
++      
++
++
++      return PVRSRV_OK;
++}
++
++
++IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
++{
++      static IMG_CHAR aszVersionString[100];
++      IMG_VOID        *pvRegsLinAddr;
++      SYS_DATA        *psSysData;
++      IMG_UINT32      ui32SGXRevision;
++      IMG_INT32       i32Count;
++
++      pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
++                                                                 SYS_OMAP3430_SGX_REGS_SIZE,
++                                                                 PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
++                                                                 IMG_NULL);
++      if(!pvRegsLinAddr)
++      {
++              return IMG_NULL;
++      }
++
++      ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),
++                                                                EUR_CR_CORE_REVISION);
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      i32Count = OSSNPrintf(aszVersionString, 100,
++                                                "SGX revision = %u.%u.%u",
++                                                (unsigned int)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++                                                      >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++                                                (unsigned int)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++                                                      >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++                                                (unsigned int)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++                                                      >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++                                               );
++
++      OSUnMapPhysToLin(pvRegsLinAddr,
++                                       SYS_OMAP3430_SGX_REGS_SIZE,
++                                       PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
++                                       IMG_NULL);
++
++      if(i32Count == -1)
++      {
++              return IMG_NULL;
++      }
++
++      return aszVersionString;
++}
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++      IMG_UINT32                      i;
++      PVRSRV_ERROR            eError;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      IMG_CPU_PHYADDR         TimerRegPhysBase;
++      SGX_TIMING_INFORMATION* psTimingInfo;
++
++      gpsSysData = &gsSysData;
++      OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++      gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++      gsSysSpecificData.ui32SysSpecificData = 0;
++
++      eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_ENVDATA;
++
++      gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++      
++      for(i=0; i<SYS_DEVICE_COUNT; i++)
++      {
++              gpsSysData->sDeviceID[i].uiID = i;
++              gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++      }
++
++      gpsSysData->psDeviceNodeList = IMG_NULL;
++      gpsSysData->psQueueList = IMG_NULL;
++
++      eError = SysInitialiseCommon(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
++      gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
++      gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
++      OSReservePhys(TimerRegPhysBase,
++                                4,
++                                PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
++                                (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
++                                &gpsSysData->hSOCTimerRegisterOSMemHandle);
++
++      
++      psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++      psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++      psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; 
++      psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; 
++      psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; 
++
++      
++
++
++
++      eError = SysLocateDevices(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV;
++
++      
++
++
++      eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++                                                                DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_REGDEV;
++
++      
++
++
++      
++      psDeviceNode = gpsSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              
++              switch(psDeviceNode->sDevId.eDeviceType)
++              {
++                      case PVRSRV_DEVICE_TYPE_SGX:
++                      {
++                              DEVICE_MEMORY_INFO *psDevMemoryInfo;
++                              DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++                              
++
++
++                              psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++                              
++                              psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++                              psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++                              
++                              for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++                              {
++                                      psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++                              }
++
++                              gpsSGXDevNode = psDeviceNode;
++
++                              break;
++                      }
++                      default:
++                              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++                              return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      PDUMPINIT();
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT;
++
++      
++
++
++      eError = EnableSystemClocks(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      eError = EnableSGXClocks(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++#endif        
++
++      
++
++      eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_INITDEV;
++
++
++#if defined(SYS_USING_INTERRUPTS)
++      eError = OSInstallMISR(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallMISR: Failed to install MISR"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_MISR;
++
++      
++      eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Failed to install ISR"));
++              OSUninstallMISR(gpsSysData);
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LISR;
++#endif 
++
++      
++      gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
++      if (!gpsSysData->pszVersionString)
++      { 
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_WARNING, "SysInitialise: Version string: %s", gpsSysData->pszVersionString));
++      }
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      DisableSGXClocks(gpsSysData);
++#endif        
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++      PVRSRV_ERROR eError;
++      
++      SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++
++#if defined(SYS_USING_INTERRUPTS)
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR)
++      {
++              eError = OSUninstallDeviceLISR(psSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
++                      return eError;
++              }
++      }
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_MISR)
++      {
++              eError = OSUninstallMISR(psSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
++                      return eError;
++              }
++      }
++#endif 
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_INITDEV)      
++      {
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++              PVR_ASSERT(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
++              
++              eError = EnableSGXClocks(gpsSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
++                      return eError;
++              }
++#endif        
++
++              
++              eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++                      return eError;
++              }
++      }
++      
++      
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)
++      {
++              DisableSystemClocks(gpsSysData);
++      }
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_ENVDATA)      
++      {       
++              eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++                      return eError;
++              }
++      }
++
++      if(gpsSysData->pvSOCTimerRegisterKM)
++      {
++              OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
++                                              4,
++                                              PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
++                                              gpsSysData->hSOCTimerRegisterOSMemHandle);
++      }
++
++      SysDeinitialiseCommon(gpsSysData);
++
++#if defined(NO_HARDWARE)
++
++#if !defined(__linux__)
++      if(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV)
++      {
++              
++              OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
++      }
++#endif
++#endif
++
++      
++      if(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT)
++      {
++              PDUMPDEINIT();
++      }
++
++      psSysSpecData->ui32SysSpecificData = 0;
++      gpsSysData = IMG_NULL;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++                                                                 IMG_VOID                             **ppvDeviceMap)
++{
++
++      switch(eDeviceType)
++      {
++              case PVRSRV_DEVICE_TYPE_SGX:
++              {
++                      
++                      *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++
++                      break;
++              }
++              default:
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++              }
++      }
++      return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE      eDeviceType,
++                                                                        IMG_CPU_PHYADDR               CpuPAddr)
++{
++      IMG_DEV_PHYADDR DevPAddr;
++
++      PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++      
++      DevPAddr.uiAddr = CpuPAddr.uiAddr;
++      
++      return DevPAddr;
++}
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++      IMG_CPU_PHYADDR cpu_paddr;
++
++      
++      cpu_paddr.uiAddr = sys_paddr.uiAddr;
++      return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++      IMG_SYS_PHYADDR sys_paddr;
++
++      
++      sys_paddr.uiAddr = cpu_paddr.uiAddr;
++      return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++      IMG_DEV_PHYADDR DevPAddr;
++
++      PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++      
++      DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++      return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++      IMG_SYS_PHYADDR SysPAddr;
++
++      PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++      
++      SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++      return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA                     *psSysData,
++                                                               PVRSRV_DEVICE_NODE     *psDeviceNode)
++{
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++#if defined(NO_HARDWARE)
++      
++      return 0xFFFFFFFF;
++#else
++      
++      return psDeviceNode->ui32SOCInterruptBit;
++#endif
++}
++
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++      PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++
++      
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (eNewPowerState == PVRSRV_POWER_STATE_D3)
++      {
++              PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));
++
++#if defined(SYS_USING_INTERRUPTS)
++              if (gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR)
++              {
++                      eError = OSUninstallDeviceLISR(gpsSysData);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));
++                              return eError;
++                      }
++                      gsSysSpecificData.ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_LISR;
++              }
++#endif        
++              if (gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)
++              {
++                      DisableSystemClocks(gpsSysData);
++                      gsSysSpecificData.ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS;
++              }
++      }
++      return eError;
++}
++
++
++PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (eNewPowerState == PVRSRV_POWER_STATE_D0)
++      {
++              PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));
++
++              if (!(gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
++              {
++                      eError = EnableSystemClocks(gpsSysData);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocks failed (%d)", eError));
++                              return eError;
++                      }
++                      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS;
++              }
++
++#if defined(SYS_USING_INTERRUPTS)
++              if (!(gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR))
++              {
++                      eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));
++                              return eError;
++                      }
++                      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LISR;
++              }
++#endif        
++      }
++      return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32                        ui32DeviceIndex,
++                                                                      PVR_POWER_STATE         eNewPowerState,
++                                                                      PVR_POWER_STATE         eCurrentPowerState)
++{
++      PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++
++      if (ui32DeviceIndex != gui32SGXDeviceID)
++      {
++              return PVRSRV_OK;
++      }
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (eNewPowerState == PVRSRV_POWER_STATE_D3)
++      {
++              PVR_TRACE(("SysDevicePrePowerState: SGX Entering state D3"));
++              DisableSGXClocks(gpsSysData);
++      }
++#else 
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++#endif 
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32                       ui32DeviceIndex,
++                                                                       PVR_POWER_STATE        eNewPowerState,
++                                                                       PVR_POWER_STATE        eCurrentPowerState)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (ui32DeviceIndex != gui32SGXDeviceID)
++      {
++              return eError;
++      }
++
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (eCurrentPowerState == PVRSRV_POWER_STATE_D3)
++      {
++              PVR_TRACE(("SysDevicePostPowerState: SGX Leaving state D3"));
++              eError = EnableSGXClocks(gpsSysData);
++      }
++#else 
++      PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++#endif        
++      
++      return eError;
++}
++
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32      ui32ID,
++                                                              IMG_VOID        *pvIn,
++                                                              IMG_UINT32      ulInSize,
++                                                              IMG_VOID        *pvOut,
++                                                              IMG_UINT32      ulOutSize)
++{
++      PVR_UNREFERENCED_PARAMETER(ui32ID);
++      PVR_UNREFERENCED_PARAMETER(pvIn);
++      PVR_UNREFERENCED_PARAMETER(ulInSize);
++      PVR_UNREFERENCED_PARAMETER(pvOut);
++      PVR_UNREFERENCED_PARAMETER(ulOutSize);
++
++      if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++              (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++      {
++              
++              PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;
++              psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
++              return PVRSRV_OK;
++      }
++
++      return PVRSRV_ERROR_INVALID_PARAMS;
++}
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME       "OMAP3430"
++
++#define SYS_SGX_CLOCK_SPEED                                   (110000000)
++
++#define SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE  0x50000000
++#define SYS_OMAP3430_SGX_REGS_SIZE           0x4000
++
++#define SYS_OMAP3430_SGX_IRQ                           21
++
++#define SYS_OMAP3430_PM_REGS_SYS_PHYS_BASE     0x48306000
++#define SYS_OMAP3430_PM_REGS_SIZE                      0x1000
++
++#define SYS_OMAP3430_CM_REGS_SYS_PHYS_BASE     0x48004000
++#define SYS_OMAP3430_CM_REGS_SIZE                      0x1000
++
++
++#define SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE  0x48088024
++#define SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE      0x48088028
++#define SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE     0x48088040
++
++#if defined(NO_HARDWARE)
++
++#if defined(__linux__)
++#define SYS_SGX_DUMMY_REGS_SYS_PHYS_BASE  ((127*1024*1024) + 0x80000000)
++#endif
++#endif
++
++ 
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US                                (500000)
++#define WAIT_TRY_COUNT                                (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++      SYS_DEVICE_SGX                                          = 0,
++
++      SYS_DEVICE_FORCE_I16                            = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 3 
++
++#define PRM_REG32(offset)       (offset)
++#define CM_REG32(offset)        (offset)
++
++#define CM_FCLKEN_SGX         CM_REG32(0xB00)
++#define               CM_FCLKEN_SGX_EN_3D                                     0x00000002
++
++#define CM_ICLKEN_SGX         CM_REG32(0xB10)
++#define               CM_ICLKEN_SGX_EN_SGX                            0x00000001
++
++#define CM_IDLEST_SGX         CM_REG32(0xB20)
++#define               CM_IDLEST_SGX_ST_SGX                            0x00000001
++
++#define CM_CLKSEL_SGX         CM_REG32(0xB40)
++#define               CM_CLKSEL_SGX_MASK                                      0x0000000f
++#define               CM_CLKSEL_SGX_L3DIV3                            0x00000000
++#define               CM_CLKSEL_SGX_L3DIV4                            0x00000001
++#define               CM_CLKSEL_SGX_L3DIV6                            0x00000002
++#define               CM_CLKSEL_SGX_96M                                       0x00000003
++
++#define CM_SLEEPDEP_SGX               CM_REG32(0xB44)
++#define CM_CLKSTCTRL_SGX      CM_REG32(0xB48)
++#define       CM_CLKSTCTRL_SGX_AUTOSTATE                      0x00008001
++
++#define CM_CLKSTST_SGX                CM_REG32(0xB4C)
++#define       CM_CLKSTST_SGX_STATUS_VALID                     0x00000001
++
++#define RM_RSTST_SGX          PRM_REG32(0xB58)
++#define       RM_RSTST_SGX_RST_MASK                           0x0000000F
++#define       RM_RSTST_SGX_COREDOMAINWKUP_RST         0x00000008
++#define       RM_RSTST_SGX_DOMAINWKUP_RST                     0x00000004
++#define       RM_RSTST_SGX_GLOBALWARM_RST                     0x00000002
++#define       RM_RSTST_SGX_GLOBALCOLD_RST                     0x00000001
++
++#define PM_WKDEP_SGX          PRM_REG32(0xBC8)
++#define       PM_WKDEP_SGX_EN_WAKEUP                          0x00000010
++#define       PM_WKDEP_SGX_EN_MPU                                     0x00000002
++#define       PM_WKDEP_SGX_EN_CORE                            0x00000001
++
++#define PM_PWSTCTRL_SGX               PRM_REG32(0xBE0)
++#define               PM_PWSTCTRL_SGX_POWERSTATE_MASK         0x00000003
++#define                       PM_PWSTCTRL_SGX_OFF                             0x00000000
++#define                       PM_PWSTCTRL_SGX_RETENTION               0x00000001
++#define                       PM_PWSTCTRL_SGX_ON                              0x00000003
++
++#define PM_PWSTST_SGX         PRM_REG32(0xBE4)
++#define               PM_PWSTST_SGX_INTRANSITION                      0x00100000
++#define               PM_PWSTST_SGX_CLKACTIVITY                       0x00080000
++#define               PM_PWSTST_SGX_POWERSTATE_MASK           0x00000003
++#define                       PM_PWSTST_SGX_OFF                               0x00000003
++#define                       PM_PWSTST_SGX_RETENTION                 0x00000001
++#define                       PM_PWSTST_SGX_ON                                0x00000000
++
++#define PM_PREPWSTST_SGX      PRM_REG32(0xBE8)
++
++
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/syslocal.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/syslocal.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/syslocal.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/syslocal.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,63 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++ 
++ 
++IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
++
++IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
++PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
++
++IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);
++PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);
++
++#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS    0x00000001
++#define SYS_SPECIFIC_DATA_ENABLE_LISR         0x00000002
++#define SYS_SPECIFIC_DATA_ENABLE_MISR         0x00000004
++#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA      0x00000008
++#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV               0x00000010
++#define SYS_SPECIFIC_DATA_ENABLE_REGDEV               0x00000020
++#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT    0x00000040
++#define SYS_SPECIFIC_DATA_ENABLE_INITDEV      0x00000080
++#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV    0x00000100
++#define SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS    0x00000200
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++      IMG_UINT32 ui32SysSpecificData;
++#if defined(__linux__)
++      struct clk *psSGX_FCK;
++      struct clk *psSGX_ICK;
++      struct clk *psMPU_CK;
++#endif
++} SYS_SPECIFIC_DATA;
++
++#endif        
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysutils.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysutils.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,377 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined(__linux__)
++#include <linux/clk.h>
++#include <linux/err.h>
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++#include "sgxapi_km.h"
++
++
++#if defined(__linux__)
++PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
++{
++      SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++#if defined(__linux__)
++      int rate;
++      int res;
++      struct clk *psCLK;
++#endif
++
++      
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS)
++      {
++              return PVRSRV_OK;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Enabling SGX Clocks"));
++
++#if defined(__linux__)
++      if (psSysSpecData->psSGX_FCK == IMG_NULL)
++      {
++              psCLK = clk_get(NULL, "sgx_fck");
++              if (IS_ERR(psCLK))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't get SGX functional clock"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              psSysSpecData->psSGX_FCK = psCLK;
++      }
++
++      if (psSysSpecData->psSGX_ICK == IMG_NULL)
++      {
++              psCLK = clk_get(NULL, "sgx_ick");
++              if (IS_ERR(psCLK))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't get SGX interface clock"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              psSysSpecData->psSGX_ICK = psCLK;
++      }
++
++      if (psSysSpecData->psMPU_CK == IMG_NULL)
++      {
++              psCLK = clk_get(NULL, "mpu_ck");
++              if (IS_ERR(psCLK))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't get MPU clock"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              psSysSpecData->psMPU_CK = psCLK;
++      }
++
++      rate = clk_get_rate(psSysSpecData->psMPU_CK);
++      PVR_TRACE(("CPU Clock is %dMhz", rate/1000000));
++
++      res = clk_enable(psSysSpecData->psSGX_FCK);
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      res = clk_enable(psSysSpecData->psSGX_ICK); 
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      rate = clk_get_rate(psSysSpecData->psSGX_FCK);
++      if(rate < 110666666)
++      {
++              PVR_TRACE(("SGX FClock is %dMhz. Setting to 110Mhz now", rate/1000000));
++              clk_set_rate(psSysSpecData->psSGX_FCK, 110666666);
++              rate = clk_get_rate(psSysSpecData->psSGX_FCK);
++              if (rate < 0)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't set SGX functional clock speed (%d)", rate));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      PVR_TRACE(("SGX FClock is %dMhz", rate/1000000));
++
++#else 
++#error "SGX dynamic clock control not supported for this environment"
++#endif        
++
++      
++      psSysSpecData->ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
++{
++      SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++      
++      if (!(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS))
++      {
++              return;
++      }
++
++      PVR_TRACE(("DisableSGXClocks: Disabling SGX Clocks"));
++
++#if defined(__linux__)
++      if (psSysSpecData->psSGX_ICK)
++      {
++              clk_disable(psSysSpecData->psSGX_ICK); 
++      }
++
++      if (psSysSpecData->psSGX_FCK)
++      {
++              clk_disable(psSysSpecData->psSGX_FCK);
++      }
++
++#else 
++#error "SGX dynamic clock control not supported for this environment"
++#endif        
++
++      
++      psSysSpecData->ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS;
++
++      return;
++}
++#else 
++#endif 
++
++PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
++{
++#if defined(__linux__)
++      int res;
++      int rate;
++#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      PVRSRV_ERROR eError;
++#endif
++#if defined(DEBUG) || defined(TIMING)
++      struct clk *pGpt11_fck;
++      struct clk *pGpt11_ick;
++      struct clk *sys_ck;
++      IMG_CPU_PHYADDR     TimerRegPhysBase;
++      IMG_HANDLE hTimerEnable;
++      IMG_UINT32 *pui32TimerEnable;
++#endif        
++
++      PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
++
++#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      eError = EnableSGXClocks(psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif
++
++#if defined(DEBUG) || defined(TIMING)
++      
++      pGpt11_fck = clk_get(NULL, "gpt11_fck");
++      if (IS_ERR(pGpt11_fck))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      pGpt11_ick = clk_get(NULL, "gpt11_ick");
++      if (IS_ERR(pGpt11_ick))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      sys_ck = clk_get(NULL, "sys_ck");
++      if (IS_ERR(sys_ck))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if(clk_get_parent(pGpt11_fck) != sys_ck)
++      {
++              PVR_TRACE(("Setting GPTIMER11 parent to System Clock (13Mhz)"));
++              res = clk_set_parent(pGpt11_fck, sys_ck);
++              if (res < 0)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      rate = clk_get_rate(pGpt11_fck);
++      PVR_TRACE(("GPTIMER11 clock is %dHz", rate));
++      
++      res = clk_enable(pGpt11_fck);
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      res = clk_enable(pGpt11_ick);
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
++
++      
++      pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
++                  4,
++                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                  &hTimerEnable);
++
++      if (pui32TimerEnable == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      else
++      {
++              rate = *pui32TimerEnable;
++              if(!(rate & 4))
++              {
++                      PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
++                      
++                      
++                      *pui32TimerEnable = rate | 4;
++              }
++
++              OSUnMapPhysToLin(pui32TimerEnable,
++                          4,
++                          PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                          hTimerEnable);
++      }
++
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
++
++      
++      pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
++                  4,
++                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                  &hTimerEnable);
++
++      if (pui32TimerEnable == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      else
++      {
++              
++              *pui32TimerEnable = 3;
++
++              OSUnMapPhysToLin(pui32TimerEnable,
++                          4,
++                          PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                          hTimerEnable);
++
++      }
++#endif 
++
++#else 
++#error "OMAP graphics clock initialisation not supported for this environment"
++#endif        
++
++      return PVRSRV_OK;
++}
++
++
++IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
++{
++#if defined(__linux__)
++#if defined(DEBUG) || defined(TIMING)
++      struct clk *pGpt11_fck;
++      struct clk *pGpt11_ick;
++      IMG_CPU_PHYADDR     TimerRegPhysBase;
++      IMG_HANDLE hTimerDisable;
++      IMG_UINT32 *pui32TimerDisable;
++#endif        
++
++      PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
++
++#if defined(DEBUG) || defined(TIMING)
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
++
++      
++      pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
++                              4,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              &hTimerDisable);
++      
++      if (pui32TimerDisable == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
++      }
++      else
++      {
++              *pui32TimerDisable = 0;
++              
++              OSUnMapPhysToLin(pui32TimerDisable,
++                              4,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              hTimerDisable);
++      }
++
++      
++      pGpt11_ick = clk_get(NULL, "gpt11_ick");
++      if (IS_ERR(pGpt11_ick))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: Couldn't get GPTIMER11 interface clock"));
++      }
++      else
++      {
++              clk_disable(pGpt11_ick);
++      }
++
++      pGpt11_fck = clk_get(NULL, "gpt11_fck");
++      if (IS_ERR(pGpt11_fck))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: Couldn't get GPTIMER11 functional clock"));
++      }
++      else
++      {
++              clk_disable(pGpt11_fck);
++      }
++
++#endif 
++
++      
++      DisableSGXClocks(psSysData);
++
++#else 
++#error "Disabling of OMAP graphics clock not supported for this environment"
++#endif 
++}
+--- /tmp/Makefile      2008-12-09 15:25:43.000000000 +0100
++++ git/drivers/gpu/Makefile   2008-12-09 15:25:53.000000000 +0100
+@@ -1 +1 @@
+-obj-y                 += drm/
++obj-y                 += drm-tungsten/ pvr/
diff --git a/packages/linux/omap3-pandora-kernel-wifi/read_die_ids.patch b/packages/linux/omap3-pandora-kernel-wifi/read_die_ids.patch
new file mode 100755 (executable)
index 0000000..3f6c930
--- /dev/null
@@ -0,0 +1,23 @@
+OMAP2/3 TAP: enable debug messages
+
+From: Paul Walmsley <paul@pwsan.com>
+
+This patch causes the OMAP2/3 chip ID code to display the full DIE_ID registers at boot.
+
+---
+
+ arch/arm/mach-omap2/id.c |    1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
+index c7f9ab7..a154b5e 100644
+--- a/arch/arm/mach-omap2/id.c
++++ b/arch/arm/mach-omap2/id.c
+@@ -10,6 +10,7 @@
+  * it under the terms of the GNU General Public License version 2 as
+  * published by the Free Software Foundation.
+  */
++#define DEBUG
+ #include <linux/module.h>
+ #include <linux/kernel.h>
diff --git a/packages/linux/omap3-pandora-kernel-wifi/sitecomwl168-support.diff b/packages/linux/omap3-pandora-kernel-wifi/sitecomwl168-support.diff
new file mode 100755 (executable)
index 0000000..8a9a2f5
--- /dev/null
@@ -0,0 +1,10 @@
+--- /tmp/rtl8187_dev.c 2008-12-20 19:41:30.000000000 +0100
++++ git/drivers/net/wireless/rtl8187_dev.c     2008-12-20 19:42:01.000000000 +0100
+@@ -45,6 +45,7 @@
+       {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
+       /* Sitecom */
+       {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
++      {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+       {}
+ };
diff --git a/packages/linux/omap3-pandora-kernel-wifi/strongly-ordered-memory.diff b/packages/linux/omap3-pandora-kernel-wifi/strongly-ordered-memory.diff
new file mode 100755 (executable)
index 0000000..b60e4f4
--- /dev/null
@@ -0,0 +1,18 @@
+--- /tmp/irq.c 2008-09-16 10:43:30.000000000 +0200
++++ git/arch/arm/mach-omap2/irq.c      2008-09-16 10:46:18.463198000 +0200
+@@ -64,6 +64,7 @@
+ static void omap_ack_irq(unsigned int irq)
+ {
+       intc_bank_write_reg(0x1, &irq_banks[0], INTC_CONTROL);
++      intc_bank_read_reg(&irq_banks[0],INTC_REVISION);
+ }
+ static void omap_mask_irq(unsigned int irq)
+@@ -73,6 +74,7 @@
+       irq &= (IRQ_BITS_PER_REG - 1);
+       intc_bank_write_reg(1 << irq, &irq_banks[0], INTC_MIR_SET0 + offset);
++      intc_bank_read_reg(&irq_banks[0],INTC_REVISION);
+ }
+ static void omap_unmask_irq(unsigned int irq)
diff --git a/packages/linux/omap3-pandora-kernel-wifi/timer-suppression.patch b/packages/linux/omap3-pandora-kernel-wifi/timer-suppression.patch
new file mode 100755 (executable)
index 0000000..04362c9
--- /dev/null
@@ -0,0 +1,43 @@
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index b854a89..26f5569 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -253,6 +253,16 @@ void tick_nohz_stop_sched_tick(void)
+       /* Schedule the tick, if we are at least one jiffie off */
+       if ((long)delta_jiffies >= 1) {
++              /*
++               * calculate the expiry time for the next timer wheel
++               * timer
++               */
++              expires = ktime_add_ns(last_update, tick_period.tv64 *
++                                      delta_jiffies);
++
++              /* Skip reprogram of event if its not changed */
++              if(ts->tick_stopped && ktime_equal(expires, dev->next_event))
++              goto out2;
+               if (delta_jiffies > 1)
+                       cpu_set(cpu, nohz_cpu_mask);
+@@ -304,12 +314,7 @@ void tick_nohz_stop_sched_tick(void)
+                       goto out;
+               }
+-              /*
+-               * calculate the expiry time for the next timer wheel
+-               * timer
+-               */
+-              expires = ktime_add_ns(last_update, tick_period.tv64 *
+-                                     delta_jiffies);
++              /* Mark expiries */
+               ts->idle_expires = expires;
+               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+@@ -328,6 +333,7 @@ void tick_nohz_stop_sched_tick(void)
+               tick_do_update_jiffies64(ktime_get());
+               cpu_clear(cpu, nohz_cpu_mask);
+       }
++out2: 
+       raise_softirq_irqoff(TIMER_SOFTIRQ);
+ out:
+       ts->next_jiffies = next_jiffies;
diff --git a/packages/linux/omap3-pandora-kernel-wifi/touchscreen.patch b/packages/linux/omap3-pandora-kernel-wifi/touchscreen.patch
new file mode 100755 (executable)
index 0000000..2325c40
--- /dev/null
@@ -0,0 +1,22 @@
+diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
+index d8109ae..f8ce669 100644
+--- a/arch/arm/mach-omap2/board-omap3evm.c
++++ b/arch/arm/mach-omap2/board-omap3evm.c
+@@ -128,8 +128,16 @@ static int ads7846_get_pendown_state(void)
+ }
+ struct ads7846_platform_data ads7846_config = {
++      .x_max                  = 0x0fff,
++      .y_max                  = 0x0fff,
++      .x_plate_ohms           = 180,
++      .pressure_max           = 255,
++      .debounce_max           = 10,
++      .debounce_tol           = 3,
++      .debounce_rep           = 1,
+       .get_pendown_state      = ads7846_get_pendown_state,
+       .keep_vref_on           = 1,
++      .settle_delay_usecs     = 150,
+ };
+ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+
diff --git a/packages/linux/omap3-pandora-kernel-wifi_2.6.27-pandora.bb b/packages/linux/omap3-pandora-kernel-wifi_2.6.27-pandora.bb
new file mode 100755 (executable)
index 0000000..afd6812
--- /dev/null
@@ -0,0 +1,46 @@
+require linux.inc
+
+DESCRIPTION = "Test WiFi 2.6.27 Linux kernel for the Pandora handheld console"
+KERNEL_IMAGETYPE = "uImage"
+
+COMPATIBLE_MACHINE = "omap3-pandora"
+
+SRCREV = "5f34ff5fc9e4acd56344552dd15ca6aa4c689fc8"
+
+#PV = "2.6.27-pandora+git${SRCREV}"
+PR = "r1"
+
+SRC_URI = " \
+       git://openpandora.org/pandora-kernel.git;protocol=git;branch=test_wifi \
+       file://defconfig \
+       file://no-empty-flash-warnings.patch;patch=1 \
+       file://oprofile-0.9.3.armv7.diff;patch=1 \
+       file://no-cortex-deadlock.patch;patch=1 \
+       file://read_die_ids.patch;patch=1 \
+       file://fix-install.patch;patch=1 \
+       file://musb-dma-iso-in.eml;patch=1 \
+       file://musb-support-high-bandwidth.patch.eml;patch=1 \
+       file://mru-fix-timings.diff;patch=1 \
+       file://mru-fix-display-panning.diff;patch=1 \
+       file://mru-make-dpll4-m4-ck-programmable.diff;patch=1 \
+       file://mru-add-clk-get-parent.diff;patch=1 \
+       file://mru-improve-pixclock-config.diff;patch=1 \
+       file://mru-make-video-timings-selectable.diff;patch=1 \
+       file://mru-enable-overlay-optimalization.diff;patch=1 \
+       file://musb-fix-ISO-in-unlink.diff;patch=1 \
+       file://musb-fix-multiple-bulk-transfers.diff;patch=1 \
+       file://musb-fix-endpoints.diff;patch=1 \
+       file://dvb-fix-dma.diff;patch=1 \
+       file://0001-Removed-resolution-check-that-prevents-scaling-when.patch;patch=1 \
+       file://0001-Implement-downsampling-with-debugs.patch;patch=1 \
+       file://sitecomwl168-support.diff;patch=1 \
+#      file://pvr/pvr-add.patch;patch=1 \
+       file://pvr/dispc.patch;patch=1 \
+#      file://pvr/nokia-TI.diff;patch=1 \
+"
+       
+S = "${WORKDIR}/git"
+
+#do_configure_prepend() {
+#      install -m 0644 ${S}/arch/arm/configs/omap3_pandora_defconfig ${WORKDIR}/defconfig
+#}
diff --git a/packages/linux/omap3-pandora-kernel/0001-Implement-downsampling-with-debugs.patch b/packages/linux/omap3-pandora-kernel/0001-Implement-downsampling-with-debugs.patch
new file mode 100755 (executable)
index 0000000..d3608df
--- /dev/null
@@ -0,0 +1,138 @@
+From 1ef94095e9399a9a387b7b457b48f6c5de7013d8 Mon Sep 17 00:00:00 2001
+From: Tuomas Kulve <tuomas.kulve@movial.com>
+Date: Fri, 31 Oct 2008 14:23:57 +0200
+Subject: [PATCH] Implement downsampling (with debugs).
+
+---
+ drivers/video/omap/dispc.c |   75 +++++++++++++++++++++++++++++++++++++-------
+ 1 files changed, 63 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 68bc887..3640dbe 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -18,6 +18,8 @@
+  * with this program; if not, write to the Free Software Foundation, Inc.,
+  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  */
++#define DEBUG
++#define VERBOSE_DEBUG
+ #include <linux/kernel.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/mm.h>
+@@ -545,6 +547,17 @@ static void write_firhv_reg(int plane, int reg, u32 value)
+       dispc_write_reg(base + reg * 8, value);
+ }
++static void write_firv_reg(int plane, int reg, u32 value)
++{
++      u32 base;
++
++      if (plane == 1)
++              base = 0x1E0;
++      else
++              base = 0x1E0 + 0x20;
++      dispc_write_reg(base + reg * 4, value);
++}
++
+ static void set_upsampling_coef_table(int plane)
+ {
+       const u32 coef[][2] = {
+@@ -565,6 +578,27 @@ static void set_upsampling_coef_table(int plane)
+       }
+ }
++static void set_downsampling_coef_table(int plane)
++{
++      const u32 coef[][3] = {
++                { 0x24382400, 0x24382400, 0x00000000 },
++                { 0x28371FFE, 0x28391F04, 0x000004FE },
++                { 0x2C361BFB, 0x2D381B08, 0x000008FB },
++                { 0x303516F9, 0x3237170C, 0x00000CF9 },
++                { 0x11343311, 0x123737F7, 0x0000F711 },
++                { 0x1635300C, 0x173732F9, 0x0000F90C },
++                { 0x1B362C08, 0x1B382DFB, 0x0000FB08 },
++                { 0x1F372804, 0x1F3928FE, 0x0000FE04 },
++      };
++      int i;
++
++      for (i = 0; i < 8; i++) {
++              write_firh_reg(plane, i, coef[i][0]);
++              write_firhv_reg(plane, i, coef[i][1]);
++              write_firv_reg(plane, i, coef[i][2]);
++      }
++}
++
+ static int omap_dispc_set_scale(int plane,
+                               int orig_width, int orig_height,
+                               int out_width, int out_height)
+@@ -592,25 +626,47 @@ static int omap_dispc_set_scale(int plane,
+               if (orig_height > out_height ||
+                   orig_width * 8 < out_width ||
+                   orig_height * 8 < out_height) {
++                        dev_dbg(dispc.fbdev->dev, 
++                                "Max upsampling is 8x, "
++                                "tried: %dx%d -> %dx%d\n",
++                                orig_width, orig_height,
++                                out_width, out_height);
+                       enable_lcd_clocks(0);
+                       return -EINVAL;
+               }
+               set_upsampling_coef_table(plane);
+       } else if (orig_width > out_width) {
+-              /* Downsampling not yet supported
+-              */
+-
+-              enable_lcd_clocks(0);
+-              return -EINVAL;
++              /*
++               * Downsampling.
++               * Currently you can only scale both dimensions in one way.
++               */
++              if (orig_height < out_height ||
++                  orig_width > out_width * 4||
++                  orig_height > out_height * 4) {
++                        dev_dbg(dispc.fbdev->dev, 
++                                "Max downsampling is 4x, "
++                                "tried: %dx%d -> %dx%d\n",
++                                orig_width, orig_height,
++                                out_width, out_height);
++                      enable_lcd_clocks(0);
++                      return -EINVAL;
++              }
++              set_downsampling_coef_table(plane);
+       }
+       if (!orig_width || orig_width == out_width)
+               fir_hinc = 0;
+       else
+-              fir_hinc = 1024 * orig_width / out_width;
++              fir_hinc = 1024 * (orig_width -1)/ (out_width -1);
+       if (!orig_height || orig_height == out_height)
+               fir_vinc = 0;
+       else
+-              fir_vinc = 1024 * orig_height / out_height;
++              fir_vinc = 1024 * (orig_height-1) / (out_height -1 );
++
++      dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
++              "orig_height %d fir_hinc  %d fir_vinc %d\n",
++              out_width, out_height, orig_width, orig_height,
++              fir_hinc, fir_vinc);
++
+       dispc.fir_hinc[plane] = fir_hinc;
+       dispc.fir_vinc[plane] = fir_vinc;
+@@ -619,11 +675,6 @@ static int omap_dispc_set_scale(int plane,
+                   ((fir_vinc & 4095) << 16) |
+                   (fir_hinc & 4095));
+-      dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
+-              "orig_height %d fir_hinc  %d fir_vinc %d\n",
+-              out_width, out_height, orig_width, orig_height,
+-              fir_hinc, fir_vinc);
+-
+       MOD_REG_FLD(vs_reg[plane],
+                   FLD_MASK(16, 11) | FLD_MASK(0, 11),
+                   ((out_height - 1) << 16) | (out_width - 1));
+-- 
+1.5.6.5
+
diff --git a/packages/linux/omap3-pandora-kernel/0001-Removed-resolution-check-that-prevents-scaling-when.patch b/packages/linux/omap3-pandora-kernel/0001-Removed-resolution-check-that-prevents-scaling-when.patch
new file mode 100755 (executable)
index 0000000..636203e
--- /dev/null
@@ -0,0 +1,26 @@
+From 3227bd5c412e7eb0d4370b2834e71723f6b4be48 Mon Sep 17 00:00:00 2001
+From: Tuomas Kulve <tuomas.kulve@movial.fi>
+Date: Mon, 27 Oct 2008 18:55:59 +0200
+Subject: [PATCH] Removed resolution check that prevents scaling when output resolution doesn't match the original resolution.
+
+---
+ drivers/video/omap/dispc.c |    3 ---
+ 1 files changed, 0 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 0f0b2e5..1df0c1e 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -579,9 +579,6 @@ static int omap_dispc_set_scale(int plane,
+       if ((unsigned)plane > OMAPFB_PLANE_NUM)
+               return -ENODEV;
+-      if (out_width != orig_width || out_height != orig_height)
+-              return -EINVAL;
+-
+       enable_lcd_clocks(1);
+       if (orig_width < out_width) {
+               /*
+-- 
+1.5.6.5
+
diff --git a/packages/linux/omap3-pandora-kernel/cache-display-fix.patch b/packages/linux/omap3-pandora-kernel/cache-display-fix.patch
new file mode 100755 (executable)
index 0000000..019fd5a
--- /dev/null
@@ -0,0 +1,238 @@
+On Tue, 2008-07-01 at 06:23 +0100, Dirk Behme wrote:
+> Catalin Marinas wrote:
+> > But, anyway, if you want a patch, Harry is updating it to a recent
+> > kernel.
+> 
+> Any news on this? I think there are some people wanting a patch ;)
+
+See below for a preliminary patch updated to 2.6.26-rc8. Note that I
+don't plan to submit it in its current form but clean it up a bit first.
+
+
+Show the cache type of ARMv7 CPUs
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+---
+
+ arch/arm/kernel/setup.c  |  137 +++++++++++++++++++++++++++++++++++++++++++++-
+ include/asm-arm/system.h |   18 ++++++
+ 2 files changed, 153 insertions(+), 2 deletions(-)
+
+
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 5ae0eb2..0cd238d 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -256,6 +256,24 @@ static const char *proc_arch[] = {
+       "?(17)",
+ };
++static const char *v7_cache_policy[4] = {
++      "reserved",
++      "AVIVT",
++      "VIPT",
++      "PIPT",
++};
++
++static const char *v7_cache_type[8] = {
++      "none",
++      "instruction only",
++      "data only",
++      "separate instruction and data",
++      "unified",
++      "unknown type",
++      "unknown type",
++      "unknown type",
++};
++
+ #define CACHE_TYPE(x) (((x) >> 25) & 15)
+ #define CACHE_S(x)    ((x) & (1 << 24))
+ #define CACHE_DSIZE(x)        (((x) >> 12) & 4095)    /* only if S=1 */
+@@ -266,6 +284,22 @@ static const char *proc_arch[] = {
+ #define CACHE_M(y)    ((y) & (1 << 2))
+ #define CACHE_LINE(y) ((y) & 3)
++#define CACHE_TYPE_V7(x)      (((x) >> 14) & 3)
++#define CACHE_UNIFIED(x)      ((((x) >> 27) & 7)+1)
++#define CACHE_COHERENT(x)     ((((x) >> 24) & 7)+1)
++
++#define CACHE_ID_LEVEL_MASK   7
++#define CACHE_ID_LEVEL_BITS   3
++
++#define CACHE_LINE_V7(v)      ((1 << (((v) & 7)+4)))
++#define CACHE_ASSOC_V7(v)     ((((v) >> 3) & ((1<<10)-1))+1)
++#define CACHE_SETS_V7(v)      ((((v) >> 13) & ((1<<15)-1))+1)
++#define CACHE_SIZE_V7(v)      (CACHE_LINE_V7(v)*CACHE_ASSOC_V7(v)*CACHE_SETS_V7(v))
++#define CACHE_WA_V7(v)                (((v) & (1<<28)) != 0)
++#define CACHE_RA_V7(v)                (((v) & (1<<29)) != 0)
++#define CACHE_WB_V7(v)                (((v) & (1<<30)) != 0)
++#define CACHE_WT_V7(v)                (((v) & (1<<31)) != 0)
++
+ static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
+ {
+       unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);
+@@ -279,11 +313,57 @@ static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
+                       CACHE_LINE(cache)));
+ }
++static void dump_v7_cache(const char *type, int cpu, unsigned int level)
++{
++      unsigned int cachesize;
++                    
++      write_extended_cpuid(2,0,0,0,level);  /* Set the cache size selection register */
++      write_extended_cpuid(0,7,5,4,0);      /* Prefetch flush to wait for above */
++      cachesize = read_extended_cpuid(1,0,0,0);
++
++      printk("CPU%u: %s cache: %d bytes, associativity %d, %d byte lines, %d sets,\n      supports%s%s%s%s\n",
++             cpu, type,
++             CACHE_SIZE_V7(cachesize),CACHE_ASSOC_V7(cachesize),
++             CACHE_LINE_V7(cachesize),CACHE_SETS_V7(cachesize),
++             CACHE_WA_V7(cachesize) ? " WA" : "",
++             CACHE_RA_V7(cachesize) ? " RA" : "",
++             CACHE_WB_V7(cachesize) ? " WB" : "",
++             CACHE_WT_V7(cachesize) ? " WT" : "");
++}
++
+ static void __init dump_cpu_info(int cpu)
+ {
+       unsigned int info = read_cpuid(CPUID_CACHETYPE);
+-      if (info != processor_id) {
++      if (info != processor_id && (info & (1 << 31))) {
++              /* ARMv7 style of cache info register */
++              unsigned int id = read_extended_cpuid(1,0,0,1);
++              unsigned int level = 0;
++              printk("CPU%u: L1 I %s cache. Caches unified at level %u, coherent at level %u\n",
++                     cpu,
++                     v7_cache_policy[CACHE_TYPE_V7(info)],
++                     CACHE_UNIFIED(id),
++                     CACHE_COHERENT(id));
++
++              while (id & CACHE_ID_LEVEL_MASK) {
++                      printk("CPU%u: Level %u cache is %s\n",
++                             cpu, (level >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);
++
++                      if (id & 1) {
++                              /* Dump I at this level */
++                              dump_v7_cache("I", cpu, level | 1);
++                      }
++
++                      if (id & (4 | 2)) {
++                              /* Dump D or unified at this level */
++                              dump_v7_cache((id & 4) ? "unified" : "D", cpu, level);
++                      }
++
++                      /* Next level out */
++                      level += 2;
++                      id >>= CACHE_ID_LEVEL_BITS;
++              }
++      } else if (info != processor_id) {
+               printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
+                      cache_types[CACHE_TYPE(info)]);
+               if (CACHE_S(info)) {
+@@ -916,6 +996,30 @@ c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
+                           CACHE_LINE(cache)));
+ }
++static void c_show_v7_cache(struct seq_file *m, const char *type, unsigned int levelselect)
++{
++      unsigned int cachesize;
++      unsigned int level = (levelselect >> 1) + 1;
++                    
++      write_extended_cpuid(2,0,0,0,levelselect);  /* Set the cache size selection register */
++      write_extended_cpuid(0,7,5,4,0);      /* Prefetch flush to wait for above */
++      cachesize = read_extended_cpuid(1,0,0,0);
++
++      seq_printf(m, "L%u %s size\t\t: %d bytes\n"
++                 "L%u %s assoc\t\t: %d\n"
++                 "L%u %s line length\t: %d\n"
++                 "L%u %s sets\t\t: %d\n"
++                 "L%u %s supports\t\t:%s%s%s%s\n",
++                 level, type, CACHE_SIZE_V7(cachesize),
++                 level, type, CACHE_ASSOC_V7(cachesize),
++                 level, type, CACHE_LINE_V7(cachesize),
++                 level, type, CACHE_SETS_V7(cachesize),
++                 level, type, CACHE_WA_V7(cachesize) ? " WA" : "",
++                 CACHE_RA_V7(cachesize) ? " RA" : "",
++                 CACHE_WB_V7(cachesize) ? " WB" : "",
++                 CACHE_WT_V7(cachesize) ? " WT" : "");
++}
++
+ static int c_show(struct seq_file *m, void *v)
+ {
+       int i;
+@@ -971,7 +1075,36 @@ static int c_show(struct seq_file *m, void *v)
+       {
+               unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
+-              if (cache_info != processor_id) {
++              if (cache_info != processor_id && (cache_info & (1<<31))) {
++                      /* V7 style of cache info register */
++                      unsigned int id = read_extended_cpuid(1,0,0,1);
++                      unsigned int levelselect = 0;
++                      seq_printf(m, "L1 I cache\t:%s\n"
++                                 "Cache unification level\t: %u\n"
++                                 "Cache coherency level\t: %u\n",
++                                 v7_cache_policy[CACHE_TYPE_V7(cache_info)],
++                                 CACHE_UNIFIED(id),
++                                 CACHE_COHERENT(id));
++
++                      while (id & CACHE_ID_LEVEL_MASK) {
++                              seq_printf(m, "Level %u cache\t\t: %s\n",
++                                         (levelselect >> 1)+1, v7_cache_type[id & CACHE_ID_LEVEL_MASK]);
++
++                              if (id & 1) {
++                                      /* Dump I at this level */
++                                      c_show_v7_cache(m, "I", levelselect | 1);
++                              }
++
++                              if (id & (4 | 2)) {
++                                      /* Dump D or unified at this level */
++                                      c_show_v7_cache(m, (id & 4) ? "cache" : "D", levelselect);
++                              }
++
++                              /* Next level out */
++                              levelselect += 2;
++                              id >>= CACHE_ID_LEVEL_BITS;
++                      }
++              } else if (cache_info != processor_id) {
+                       seq_printf(m, "Cache type\t: %s\n"
+                                     "Cache clean\t: %s\n"
+                                     "Cache lockdown\t: %s\n"
+diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
+index 514af79..704738e 100644
+--- a/arch/arm/include/asm/system.h
++++ b/arch/arm/include/asm/system.h
+@@ -74,6 +74,24 @@
+                   : "cc");                                            \
+               __val;                                                  \
+       })
++#define read_extended_cpuid(op1,op2,op3,op4)          \
++      ({                                                              \
++              unsigned int __val;                                     \
++              asm("mrc p15," __stringify(op1) ",%0,c" __stringify(op2)",c" __stringify(op3)"," __stringify(op4)       \
++                  : "=r" (__val)                                      \
++                  :                                                   \
++                  : "cc");                                            \
++              __val;                                                  \
++      })
++
++#define write_extended_cpuid(op1,op2,op3,op4,v)               \
++      ({                                                              \
++              unsigned int __val = v;                                 \
++              asm("mcr p15," __stringify(op1) ",%0,c" __stringify(op2)",c" __stringify(op3)"," __stringify(op4)       \
++                  :                                                   \
++                  : "r" (__val)                                       \
++                  : "cc");                                            \
++      })
+ #else
+ extern unsigned int processor_id;
+ #define read_cpuid(reg) (processor_id)
+
+
+-- 
+Catalin
+
+
diff --git a/packages/linux/omap3-pandora-kernel/defconfig b/packages/linux/omap3-pandora-kernel/defconfig
new file mode 100755 (executable)
index 0000000..9fa00d8
--- /dev/null
@@ -0,0 +1,1971 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-omap1
+# Mon Jan 12 18:36:10 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV7=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+CONFIG_HAVE_CLK=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_LSF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_MSM7X00A is not set
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set
+# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set
+CONFIG_OMAP_SMARTREFLEX=y
+# CONFIG_OMAP_SMARTREFLEX_TESTING is not set
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_BOOT_TAG=y
+CONFIG_OMAP_BOOT_REASON=y
+# CONFIG_OMAP_COMPONENT_VERSION is not set
+# CONFIG_OMAP_GPIO_SWITCH is not set
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MMU_FWK is not set
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_LL_DEBUG_UART1 is not set
+# CONFIG_OMAP_LL_DEBUG_UART2 is not set
+CONFIG_OMAP_LL_DEBUG_UART3=y
+CONFIG_ARCH_OMAP34XX=y
+CONFIG_ARCH_OMAP3430=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_OVERO is not set
+CONFIG_MACH_OMAP3_PANDORA=y
+CONFIG_OMAP_TICK_GPTIMER=12
+
+#
+# Boot options
+#
+
+#
+# Power management
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=" debug "
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=y
+CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+CONFIG_BT_HCIBTSDIO=y
+# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIBRF6150 is not set
+CONFIG_BT_HCIH4P=y
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+CONFIG_CFG80211=y
+CONFIG_NL80211=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_MAC80211=y
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_IEEE80211=y
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=y
+CONFIG_IEEE80211_CRYPT_CCMP=y
+CONFIG_IEEE80211_CRYPT_TKIP=y
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_EEPROM_93CX6=y
+# CONFIG_OMAP_STI is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_PANDORA_GAME_CONSOLE=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+# CONFIG_LIBERTAS_SDIO is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_USB_ZD1201=y
+CONFIG_USB_NET_RNDIS_WLAN=y
+CONFIG_RTL8187=y
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_P54_COMMON=y
+CONFIG_P54_USB=y
+# CONFIG_IWLWIFI_LEDS is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=y
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_RT2X00=y
+CONFIG_RT2X00_LIB=y
+CONFIG_RT2X00_LIB_USB=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2500USB=y
+CONFIG_RT2500USB_LEDS=y
+CONFIG_RT73USB=y
+CONFIG_RT73USB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_CDCETHER=y
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=y
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2102 is not set
+# CONFIG_TOUCHSCREEN_TSC210X is not set
+# CONFIG_TOUCHSCREEN_UCB1400 is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_UINPUT is not set
+CONFIG_INPUT_VSENSE=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_SENSORS_TLV320AIC23 is not set
+CONFIG_TWL4030_MADC=m
+CONFIG_TWL4030_USB=y
+CONFIG_TWL4030_PWRBUTTON=y
+CONFIG_TWL4030_POWEROFF=y
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_LP5521 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+CONFIG_SPI_OMAP24XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_TSC2101 is not set
+# CONFIG_SPI_TSC2102 is not set
+# CONFIG_SPI_TSC210X is not set
+# CONFIG_SPI_TSC2301 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_POWER=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_CX25840=m
+CONFIG_VIDEO_CX2341X=m
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_TUNER_3036 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_EM28XX is not set
+CONFIG_VIDEO_USBVISION=m
+CONFIG_VIDEO_USBVIDEO=m
+CONFIG_USB_VICAM=m
+CONFIG_USB_IBMCAM=m
+CONFIG_USB_KONICAWC=m
+CONFIG_USB_QUICKCAM_MESSENGER=m
+# CONFIG_USB_ET61X251 is not set
+CONFIG_VIDEO_OVCAMCHIP=m
+CONFIG_USB_W9968CF=m
+CONFIG_USB_OV511=m
+CONFIG_USB_SE401=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_STV680=m
+# CONFIG_USB_ZC0301 is not set
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_ZR364XX=m
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_RADIO_TEA5761 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_ANYSEE is not set
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_DVB_CINERGYT2=m
+# CONFIG_DVB_CINERGYT2_TUNING is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+# CONFIG_DVB_DRX397XD is not set
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_NXT200X=m
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_S5H1411=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_LNBP21=m
+# CONFIG_DVB_ISL6405 is not set
+CONFIG_DVB_ISL6421=m
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=4
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GPIO=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PANDORA_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+# CONFIG_SND_OMAP_AIC23 is not set
+# CONFIG_SND_OMAP_TSC2101 is not set
+# CONFIG_SND_SX1 is not set
+# CONFIG_SND_OMAP_TSC2102 is not set
+# CONFIG_SND_OMAP24XX_EAC is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_OMAP_EHCI_PHY_MODE=y
+# CONFIG_OMAP_EHCI_TLL_MODE is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_LED=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+
+#
+# MMC/SD Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_OMAP_HS=y
+CONFIG_MMC_SPI=m
+CONFIG_OMAP_HS_MMC1=y
+CONFIG_OMAP_HS_MMC2=y
+CONFIG_OMAP_HS_MMC3=y
+CONFIG_TIWLAN_SDIO=y
+CONFIG_TIWLAN_MMC_CONTROLLER=3
+# CONFIG_MMC_SPI is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_OMAP_DEBUG is not set
+# CONFIG_LEDS_OMAP is not set
+# CONFIG_LEDS_OMAP_PWM is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+
+#
+# Voltage and Current regulators
+#
+# CONFIG_REGULATOR is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_UIO is not set
+
+#
+# CBUS support
+#
+# CONFIG_CBUS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+# CONFIG_CRYPTO_AUTHENC is not set
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/packages/linux/omap3-pandora-kernel/defconfig.bak b/packages/linux/omap3-pandora-kernel/defconfig.bak
new file mode 100755 (executable)
index 0000000..92f3c08
--- /dev/null
@@ -0,0 +1,1970 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.27-omap1
+# Mon Jan 12 18:36:10 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV7=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+CONFIG_CGROUP_DEVICE=y
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
+# CONFIG_HAVE_IOREMAP_PROT is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_ARCH_TRACEHOOK is not set
+# CONFIG_HAVE_DMA_ATTRS is not set
+# CONFIG_USE_GENERIC_SMP_HELPERS is not set
+CONFIG_HAVE_CLK=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_LSF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_MSM7X00A is not set
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set
+# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set
+CONFIG_OMAP_SMARTREFLEX=y
+# CONFIG_OMAP_SMARTREFLEX_TESTING is not set
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_BOOT_TAG=y
+CONFIG_OMAP_BOOT_REASON=y
+# CONFIG_OMAP_COMPONENT_VERSION is not set
+# CONFIG_OMAP_GPIO_SWITCH is not set
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MMU_FWK is not set
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_LL_DEBUG_UART1 is not set
+# CONFIG_OMAP_LL_DEBUG_UART2 is not set
+CONFIG_OMAP_LL_DEBUG_UART3=y
+CONFIG_ARCH_OMAP34XX=y
+CONFIG_ARCH_OMAP3430=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_OVERO is not set
+CONFIG_MACH_OMAP3_PANDORA=y
+CONFIG_OMAP_TICK_GPTIMER=12
+
+#
+# Boot options
+#
+
+#
+# Power management
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=" debug "
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=y
+CONFIG_BT_HCIUSB_SCO=y
+# CONFIG_BT_HCIBTUSB is not set
+CONFIG_BT_HCIBTSDIO=y
+# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIBRF6150 is not set
+CONFIG_BT_HCIH4P=y
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+CONFIG_CFG80211=y
+CONFIG_NL80211=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_MAC80211=y
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_IEEE80211=y
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=y
+CONFIG_IEEE80211_CRYPT_CCMP=y
+CONFIG_IEEE80211_CRYPT_TKIP=y
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_EEPROM_93CX6=y
+# CONFIG_OMAP_STI is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_PANDORA_GAME_CONSOLE=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+# CONFIG_LIBERTAS_SDIO is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_USB_ZD1201=y
+CONFIG_USB_NET_RNDIS_WLAN=y
+CONFIG_RTL8187=y
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_P54_COMMON=y
+CONFIG_P54_USB=y
+# CONFIG_IWLWIFI_LEDS is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=y
+# CONFIG_ZD1211RW_DEBUG is not set
+CONFIG_RT2X00=y
+CONFIG_RT2X00_LIB=y
+CONFIG_RT2X00_LIB_USB=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2500USB=y
+CONFIG_RT2500USB_LEDS=y
+CONFIG_RT73USB=y
+CONFIG_RT73USB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_CDCETHER=y
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=y
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_LM8323 is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2102 is not set
+# CONFIG_TOUCHSCREEN_TSC210X is not set
+# CONFIG_TOUCHSCREEN_UCB1400 is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_UINPUT is not set
+CONFIG_INPUT_VSENSE=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_AT24 is not set
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_SENSORS_TLV320AIC23 is not set
+CONFIG_TWL4030_MADC=m
+CONFIG_TWL4030_USB=y
+CONFIG_TWL4030_PWRBUTTON=y
+CONFIG_TWL4030_POWEROFF=y
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_LP5521 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+CONFIG_SPI_OMAP24XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_AT25 is not set
+# CONFIG_SPI_TSC2101 is not set
+# CONFIG_SPI_TSC2102 is not set
+# CONFIG_SPI_TSC210X is not set
+# CONFIG_SPI_TSC2301 is not set
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_POWER=y
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMIZE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_VIDEO_TUNER=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+CONFIG_VIDEO_MSP3400=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_WM8775=m
+CONFIG_VIDEO_SAA711X=m
+CONFIG_VIDEO_CX25840=m
+CONFIG_VIDEO_CX2341X=m
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_TUNER_3036 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_EM28XX is not set
+CONFIG_VIDEO_USBVISION=m
+CONFIG_VIDEO_USBVIDEO=m
+CONFIG_USB_VICAM=m
+CONFIG_USB_IBMCAM=m
+CONFIG_USB_KONICAWC=m
+CONFIG_USB_QUICKCAM_MESSENGER=m
+# CONFIG_USB_ET61X251 is not set
+CONFIG_VIDEO_OVCAMCHIP=m
+CONFIG_USB_W9968CF=m
+CONFIG_USB_OV511=m
+CONFIG_USB_SE401=m
+CONFIG_USB_SN9C102=m
+CONFIG_USB_STV680=m
+# CONFIG_USB_ZC0301 is not set
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_ZR364XX=m
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_SOC_CAMERA is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_RADIO_TEA5761 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_CXUSB=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_ANYSEE is not set
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+CONFIG_DVB_CINERGYT2=m
+# CONFIG_DVB_CINERGYT2_TUNING is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+
+#
+# Customise DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TUA6100=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_SP8870=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+# CONFIG_DVB_DRX397XD is not set
+CONFIG_DVB_L64781=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_TDA10048=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_VES1820=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_STV0297=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_NXT200X=m
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_S5H1411=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_LNBP21=m
+# CONFIG_DVB_ISL6405 is not set
+CONFIG_DVB_ISL6421=m
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+CONFIG_FB_OMAP=y
+# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=4
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GPIO=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_PANDORA_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+# CONFIG_SND_OMAP_AIC23 is not set
+# CONFIG_SND_OMAP_TSC2101 is not set
+# CONFIG_SND_SX1 is not set
+# CONFIG_SND_OMAP_TSC2102 is not set
+# CONFIG_SND_OMAP24XX_EAC is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_DEBUG=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_OMAP_EHCI_PHY_MODE=y
+# CONFIG_OMAP_EHCI_TLL_MODE is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP2101=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_RIO500 is not set
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_BERRY_CHARGE is not set
+CONFIG_USB_LED=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+
+#
+# MMC/SD Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+CONFIG_OMAP_HS_MMC1=y
+CONFIG_OMAP_HS_MMC2=y
+CONFIG_OMAP_HS_MMC3=y
+CONFIG_TIWLAN_SDIO=y
+CONFIG_TIWLAN_MMC_CONTROLLER=3
+# CONFIG_MMC_SPI is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_OMAP_DEBUG is not set
+# CONFIG_LEDS_OMAP is not set
+# CONFIG_LEDS_OMAP_PWM is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+
+#
+# Voltage and Current regulators
+#
+# CONFIG_REGULATOR is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_UIO is not set
+
+#
+# CBUS support
+#
+# CONFIG_CBUS is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_HAVE_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+# CONFIG_CRYPTO_AUTHENC is not set
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/packages/linux/omap3-pandora-kernel/dvb-fix-dma.diff b/packages/linux/omap3-pandora-kernel/dvb-fix-dma.diff
new file mode 100755 (executable)
index 0000000..e05473f
--- /dev/null
@@ -0,0 +1,60 @@
+Hi,
+I post this patch that fixes a kernel crash that happens when using a dvb
+usb stick on a mips platform and I think even on other platforms on which
+the dma access in not cache-coherent.
+
+The problem's origin is that, inside the method usb_bulk_urb_init of file
+drivers/media/dvb/dvb-usb/usb-urb.c, stream->urb_list[i]->transfer_buffer
+points to a memory area that has been allocated to be dma-coherent but
+stream->urb_list[i]->transfer_flags doesn't include the
+URB_NO_TRANSFER_DMA_MAP flag and stream->urb_list[i]->transfer_dma is not
+set.
+When later on the stream->urb_list[i]->transfer_buffer pointer is used
+inside function usb_hcd_submit_urb of file drivers/usb/core/hcd.c since the
+flag URB_NO_TRANSFER_DMA_MAP is not set the urb->transfer_buffer pointer is
+passed to the dma_map_single function that since the address is dma-coherent
+returns a wrong tranfer_dma address that later on leads to the kernel crash.
+
+The problem is solved by setting the URB_NO_TRANSFER_DMA_MAP flag and the
+stream->urb_list[i]->transfer_dma address.
+
+Perhaps to be more safe the URB_NO_TRANSFER_DMA_MAP flag can be set only
+if stream->urb_list[i]->transfer_dma != 0.
+
+I don't know if half of the fault can be of the dma_map_single function that
+should anyway returns a valid address both for a not dma-coherent and a
+dma-coherent address.
+
+Just to be clear:
+I've done this patch to solve my problem and I tested it only on a mips 
+platform
+but I think it should not cause any problems on other platforms.
+I posted it here to help someone else that can have my same problem and to 
+point it out
+to the mantainer of this part of code.
+You can use it at your own risk and I'm not resposible in any way for any 
+problem or
+damage that it can cause.
+I'm available to discuss about it
+
+Bye
+
+Michele Scorcia
+
+--------------------
+
+
+
+
+--- /tmp/usb-urb.c     2008-10-08 09:53:23.000000000 +0200
++++ git/drivers/media/dvb/dvb-usb/usb-urb.c    2008-10-08 09:54:16.000000000 +0200
+@@ -152,7 +152,8 @@
+                               stream->props.u.bulk.buffersize,
+                               usb_urb_complete, stream);
+-              stream->urb_list[i]->transfer_flags = 0;
++              stream->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
++              stream->urb_list[i]->transfer_dma = stream->dma_addr[i];        
+               stream->urbs_initialized++;
+       }
+       return 0;
diff --git a/packages/linux/omap3-pandora-kernel/evm-mcspi-ts.diff b/packages/linux/omap3-pandora-kernel/evm-mcspi-ts.diff
new file mode 100755 (executable)
index 0000000..64d797c
--- /dev/null
@@ -0,0 +1,132 @@
+From linux-omap-owner@vger.kernel.org Sun Nov 02 21:08:25 2008
+Received: from localhost
+       ([127.0.0.1] helo=dominion ident=koen)
+       by dominion.dominion.void with esmtp (Exim 4.69)
+       (envelope-from <linux-omap-owner@vger.kernel.org>)
+       id 1KwjFJ-0008Hg-0T
+       for koen@localhost; Sun, 02 Nov 2008 21:08:25 +0100
+Received: from xs.service.utwente.nl [130.89.5.250]
+       by dominion with POP3 (fetchmail-6.3.9-rc2)
+       for <koen@localhost> (single-drop); Sun, 02 Nov 2008 21:08:25 +0100 (CET)
+Received: from mail.service.utwente.nl ([130.89.5.253]) by exchange.service.utwente.nl with Microsoft SMTPSVC(6.0.3790.3959);
+        Sun, 2 Nov 2008 20:57:16 +0100
+Received: from mx.utwente.nl ([130.89.2.13]) by mail.service.utwente.nl with Microsoft SMTPSVC(6.0.3790.3959);
+        Sun, 2 Nov 2008 20:57:16 +0100
+Received: from vger.kernel.org (vger.kernel.org [209.132.176.167])
+          by mx.utwente.nl (8.12.10/SuSE Linux 0.7) with ESMTP id mA2JudEK010968
+          for <k.kooi@student.utwente.nl>; Sun, 2 Nov 2008 20:56:40 +0100
+Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
+       id S1752819AbYKBT4i (ORCPT <rfc822;k.kooi@student.utwente.nl>);
+       Sun, 2 Nov 2008 14:56:38 -0500
+Received: (majordomo@vger.kernel.org) by vger.kernel.org id S1752829AbYKBT4i
+       (ORCPT <rfc822;linux-omap-outgoing>); Sun, 2 Nov 2008 14:56:38 -0500
+Received: from fg-out-1718.google.com ([72.14.220.153]:32481 "EHLO
+       fg-out-1718.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
+       with ESMTP id S1752819AbYKBT4h (ORCPT
+       <rfc822;linux-omap@vger.kernel.org>); Sun, 2 Nov 2008 14:56:37 -0500
+Received: by fg-out-1718.google.com with SMTP id 19so1869080fgg.17
+        for <linux-omap@vger.kernel.org>; Sun, 02 Nov 2008 11:56:33 -0800 (PST)
+DKIM-Signature:        v=1; a=rsa-sha256; c=relaxed/relaxed;
+        d=gmail.com; s=gamma;
+        h=domainkey-signature:received:received:from:to:cc:subject:date
+         :message-id:x-mailer:in-reply-to:references;
+        bh=Ftvoq8kE3ciPRy7pNy5VLkNnZD8o0HYWIrO1LMS/lAY=;
+        b=HpEcngDUbAObGNJuQmBIG3SoNHesUL57GluZGlYO7kxFxfH6N8zeHjKuRSk86+mT5s
+         gMhyCC07wjVp75HnqCtKbOJzNw/8F4ZGbL2lY1LC99+zxHW1JBQv5c3ZaoCVqTw6TuH0
+         bQ8Ew2BwHknT3wGA+QcGoMJJs5aw62AhPiyHY=
+DomainKey-Signature: a=rsa-sha1; c=nofws;
+        d=gmail.com; s=gamma;
+        h=from:to:cc:subject:date:message-id:x-mailer:in-reply-to:references;
+        b=aio1APZhCIcYIrMY844QkdaQzKw0/yiuaVjqfv52fnft1kafGT2qAS3KfXAc61a9If
+         sXHbi2fr/r1a7YZJJVGqkJX0WmWTY0OqdhS1lmugP/dXEMHeqaArKATbvxrq9/svb1bV
+         Vzpkm6sOzLrr54uo+BcZNoxHWqb8W2UrRxuTk=
+Received: by 10.103.131.18 with SMTP id i18mr6668205mun.126.1225655793072;
+        Sun, 02 Nov 2008 11:56:33 -0800 (PST)
+Received: from localhost.localdomain ([78.59.134.74])
+        by mx.google.com with ESMTPS id g1sm23199635muf.8.2008.11.02.11.56.31
+        (version=TLSv1/SSLv3 cipher=RC4-MD5);
+        Sun, 02 Nov 2008 11:56:31 -0800 (PST)
+From: Grazvydas Ignotas <notasas@gmail.com>
+To: linux-omap@vger.kernel.org
+Cc: Grazvydas Ignotas <notasas@gmail.com>
+Subject: Re: omap3evm LCD red-tint workaround
+Date:  Sun,  2 Nov 2008 21:56:19 +0200
+Message-Id: <1225655779-18934-1-git-send-email-notasas@gmail.com>
+X-Mailer: git-send-email 1.5.4.3
+In-Reply-To: <57322719-1A5A-45DC-9846-5C0A3B6EF346@student.utwente.nl>
+References: <57322719-1A5A-45DC-9846-5C0A3B6EF346@student.utwente.nl>
+Sender: linux-omap-owner@vger.kernel.org
+Precedence: bulk
+List-ID: <linux-omap.vger.kernel.org>
+X-Mailing-List:        linux-omap@vger.kernel.org
+X-UTwente-MailScanner-Information: Scanned by MailScanner. Contact servicedesk@icts.utwente.nl for more information.
+X-UTwente-MailScanner: Found to be clean
+X-UTwente-MailScanner-From: linux-omap-owner@vger.kernel.org
+X-Spam-Status: No
+X-OriginalArrivalTime: 02 Nov 2008 19:57:16.0876 (UTC) FILETIME=[34FBA0C0:01C93D25]
+
+> PS: TS is still unusable with the 16x16 pixel resolution
+This is also the case for Pandora. The patch below fixes the problem,
+but as I have no other boards to test this on, I haven't sent it.
+See if it helps you.
+
+
+From 91f3af26bbf751b846e6265d86387e81be7c1364 Mon Sep 17 00:00:00 2001
+From: Grazvydas Ignotas <notasas@gmail.com>
+Date: Tue, 28 Oct 2008 22:01:42 +0200
+Subject: [PATCH] OMAP3: fix McSPI transfers
+
+Currently on OMAP3 if both write and read is set up for a transfer,
+the first byte returned on read is corrupted. Work around this by
+disabling channel between reads and writes, instead of transfers.
+---
+ drivers/spi/omap2_mcspi.c |    7 ++++---
+ 1 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
+index 454a271..4890b6c 100644
+--- a/drivers/spi/omap2_mcspi.c
++++ b/drivers/spi/omap2_mcspi.c
+@@ -710,7 +710,6 @@ static void omap2_mcspi_work(struct work_struct *work)
+               spi = m->spi;
+               cs = spi->controller_state;
+-              omap2_mcspi_set_enable(spi, 1);
+               list_for_each_entry(t, &m->transfers, transfer_list) {
+                       if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+                               status = -EINVAL;
+@@ -741,6 +740,8 @@ static void omap2_mcspi_work(struct work_struct *work)
+                       if (t->len) {
+                               unsigned        count;
++                              omap2_mcspi_set_enable(spi, 1);
++
+                               /* RX_ONLY mode needs dummy data in TX reg */
+                               if (t->tx_buf == NULL)
+                                       __raw_writel(0, cs->base
+@@ -752,6 +753,8 @@ static void omap2_mcspi_work(struct work_struct *work)
+                                       count = omap2_mcspi_txrx_pio(spi, t);
+                               m->actual_length += count;
++                              omap2_mcspi_set_enable(spi, 0);
++
+                               if (count != t->len) {
+                                       status = -EIO;
+                                       break;
+@@ -777,8 +780,6 @@ static void omap2_mcspi_work(struct work_struct *work)
+               if (cs_active)
+                       omap2_mcspi_force_cs(spi, 0);
+-              omap2_mcspi_set_enable(spi, 0);
+-
+               m->status = status;
+               m->complete(m->context);
+-- 
+1.5.4.3
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel/fix-install.patch b/packages/linux/omap3-pandora-kernel/fix-install.patch
new file mode 100755 (executable)
index 0000000..46bc25a
--- /dev/null
@@ -0,0 +1,23 @@
+From: Steve Sakoman <steve@sakoman.com>
+Date: Mon, 18 Aug 2008 16:07:31 +0000 (-0700)
+Subject: scripts/Makefile.fwinst: add missing space when setting mode in cmd_install
+X-Git-Url: http://www.sakoman.net/cgi-bin/gitweb.cgi?p=linux-omap-2.6.git;a=commitdiff_plain;h=f039944bdd491cde7327133e9976881d3133ae70
+
+scripts/Makefile.fwinst: add missing space when setting mode in cmd_install
+
+This was causing build failures on some machines
+---
+
+diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
+index 6bf8e87..fb20532 100644
+--- a/scripts/Makefile.fwinst
++++ b/scripts/Makefile.fwinst
+@@ -37,7 +37,7 @@ install-all-dirs: $(installed-fw-dirs)
+       @true
+ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
+-      cmd_install = $(INSTALL) -m0644 $< $@
++      cmd_install = $(INSTALL) -m 0644 $< $@
+ $(installed-fw-dirs):
+       $(call cmd,mkdir)
diff --git a/packages/linux/omap3-pandora-kernel/fix-irq33.diff b/packages/linux/omap3-pandora-kernel/fix-irq33.diff
new file mode 100755 (executable)
index 0000000..709f042
--- /dev/null
@@ -0,0 +1,111 @@
+From: "Nathan Monson" <nmonson@gmail.com>
+To: "linux-omap@vger.kernel.org List" <linux-omap@vger.kernel.org>
+Subject: Re: omapfb: help from userspace
+Cc: "TK, Pratheesh Gangadhar" <pratheesh@ti.com>
+
+On Wed, Oct 8, 2008 at 11:36 AM, Nathan Monson <nmonson@gmail.com> wrote:
+> "Felipe Contreras" <felipe.contreras@gmail.com> writes:
+>> irq -33, desc: c0335cf8, depth: 0, count: 0, unhandled: 0
+>
+> On the BeagleBoard list, Pratheesh Gangadhar said that mapping I/O
+> regions as Strongly Ordered suppresses this problem:
+> http://groups.google.com/group/beagleboard/browse_thread/thread/23e1c95b4bfb09b5/70d12dca569ca503?show_docid=70d12dca569ca503
+
+Pratheesh helped me make a patch against the latest linux-omap git to
+try this.
+
+With this patch, my IRQ -33 problems with the DSP have disappeared.
+Before, I would end up in IRQ -33 loop after 10 invocations of the DSP
+Bridge 'ping.out' utility.  I just finished running it 50,000 times
+without error.
+
+As stated before, this patch is just a workaround for testing
+purposes, not a fix.  Who knows what performance side effects it
+has...
+
+---
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9eb936e..5cb4f5f 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -25,6 +25,7 @@ struct map_desc {
+ #define MT_HIGH_VECTORS               8
+ #define MT_MEMORY             9
+ #define MT_ROM                        10
++#define MT_MEMORY_SO          11
+
+ #define MT_NONSHARED_DEVICE   MT_DEVICE_NONSHARED
+ #define MT_IXP2000_DEVICE     MT_DEVICE_IXP2000
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index adbe21f..c11c0e8 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -119,13 +119,13 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+               .virtual        = L3_34XX_VIRT,
+               .pfn            = __phys_to_pfn(L3_34XX_PHYS),
+               .length         = L3_34XX_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = L4_34XX_VIRT,
+               .pfn            = __phys_to_pfn(L4_34XX_PHYS),
+               .length         = L4_34XX_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = L4_WK_34XX_VIRT,
+@@ -137,19 +137,19 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+               .virtual        = OMAP34XX_GPMC_VIRT,
+               .pfn            = __phys_to_pfn(OMAP34XX_GPMC_PHYS),
+               .length         = OMAP34XX_GPMC_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = OMAP343X_SMS_VIRT,
+               .pfn            = __phys_to_pfn(OMAP343X_SMS_PHYS),
+               .length         = OMAP343X_SMS_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = OMAP343X_SDRC_VIRT,
+               .pfn            = __phys_to_pfn(OMAP343X_SDRC_PHYS),
+               .length         = OMAP343X_SDRC_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+       {
+               .virtual        = L4_PER_34XX_VIRT,
+@@ -161,7 +161,7 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
+               .virtual        = L4_EMU_34XX_VIRT,
+               .pfn            = __phys_to_pfn(L4_EMU_34XX_PHYS),
+               .length         = L4_EMU_34XX_SIZE,
+-              .type           = MT_DEVICE
++              .type           = MT_MEMORY_SO
+       },
+ };
+ #endif
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index a713e40..d5f25ad 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -245,6 +245,10 @@ static struct mem_type mem_types[] = {
+               .prot_sect = PMD_TYPE_SECT,
+               .domain    = DOMAIN_KERNEL,
+       },
++      [MT_MEMORY_SO] = {
++              .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_UNCACHED,
++              .domain    = DOMAIN_KERNEL,
++      },
+ };
+
+ const struct mem_type *get_mem_type(unsigned int type)
+--
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel/mru-256MB.diff b/packages/linux/omap3-pandora-kernel/mru-256MB.diff
new file mode 100755 (executable)
index 0000000..0492ca2
--- /dev/null
@@ -0,0 +1,24 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Thu, 2 Oct 2008 00:05:33 +0000 (+0100)
+Subject: OMAP: Increase VMALLOC_END to allow 256MB RAM
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=355a0ce968e4a7b0c8d8203f4517296e932e373d
+
+OMAP: Increase VMALLOC_END to allow 256MB RAM
+
+This increases VMALLOC_END to 0x18000000, making room for 256MB
+RAM with the default 128MB vmalloc region.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/plat-omap/include/mach/vmalloc.h b/arch/arm/plat-omap/include/mach/vmalloc.h
+index d8515cb..b97dfaf 100644
+--- a/arch/arm/plat-omap/include/mach/vmalloc.h
++++ b/arch/arm/plat-omap/include/mach/vmalloc.h
+@@ -17,5 +17,5 @@
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+  */
+-#define VMALLOC_END     (PAGE_OFFSET + 0x17000000)
++#define VMALLOC_END     (PAGE_OFFSET + 0x18000000)
diff --git a/packages/linux/omap3-pandora-kernel/mru-add-clk-get-parent.diff b/packages/linux/omap3-pandora-kernel/mru-add-clk-get-parent.diff
new file mode 100755 (executable)
index 0000000..64944a5
--- /dev/null
@@ -0,0 +1,64 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:51:36 +0000 (+0100)
+Subject: OMAP: Add clk_get_parent() for OMAP2/3
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=08d1f1947a5a970b2fe6e4dfeeb70286b9379056
+
+OMAP: Add clk_get_parent() for OMAP2/3
+
+This makes clk_get_parent() work on OMAP2/3.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
+index 5f48e14..aae0d2e 100644
+--- a/arch/arm/mach-omap2/clock.c
++++ b/arch/arm/mach-omap2/clock.c
+@@ -831,6 +831,11 @@ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
+       return 0;
+ }
++struct clk *omap2_clk_get_parent(struct clk *clk)
++{
++      return clk->parent;
++}
++
+ /* DPLL rate rounding code */
+ /**
+diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
+index 3fa2e26..2916879 100644
+--- a/arch/arm/mach-omap2/clock.h
++++ b/arch/arm/mach-omap2/clock.h
+@@ -29,6 +29,7 @@ int omap2_clk_set_rate(struct clk *clk, unsigned long rate);
+ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent);
+ int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance);
+ long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate);
++struct clk *omap2_clk_get_parent(struct clk *clk);
+ #ifdef CONFIG_OMAP_RESET_CLOCKS
+ void omap2_clk_disable_unused(struct clk *clk);
+diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
+index c26d9d8..f91bd57 100644
+--- a/arch/arm/mach-omap2/clock24xx.c
++++ b/arch/arm/mach-omap2/clock24xx.c
+@@ -423,6 +423,7 @@ static struct clk_functions omap2_clk_functions = {
+       .clk_round_rate         = omap2_clk_round_rate,
+       .clk_set_rate           = omap2_clk_set_rate,
+       .clk_set_parent         = omap2_clk_set_parent,
++      .clk_get_parent         = omap2_clk_get_parent,
+       .clk_disable_unused     = omap2_clk_disable_unused,
+ #ifdef        CONFIG_CPU_FREQ
+       .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
+diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
+index 152d095..2196edd 100644
+--- a/arch/arm/mach-omap2/clock34xx.c
++++ b/arch/arm/mach-omap2/clock34xx.c
+@@ -606,6 +606,7 @@ static struct clk_functions omap2_clk_functions = {
+       .clk_round_rate         = omap2_clk_round_rate,
+       .clk_set_rate           = omap2_clk_set_rate,
+       .clk_set_parent         = omap2_clk_set_parent,
++      .clk_get_parent         = omap2_clk_get_parent,
+       .clk_disable_unused     = omap2_clk_disable_unused,
+ };
diff --git a/packages/linux/omap3-pandora-kernel/mru-enable-overlay-optimalization.diff b/packages/linux/omap3-pandora-kernel/mru-enable-overlay-optimalization.diff
new file mode 100755 (executable)
index 0000000..d027c53
--- /dev/null
@@ -0,0 +1,117 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:45:26 +0000 (+0100)
+Subject: OMAP: Enable overlay optimisation
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=7e052af7e4c73dc450412486ad37eb529e725dc7
+
+OMAP: Enable overlay optimisation
+
+This enables the overlay optimisation feature when the video
+overlay is active. This reduces memory bandwidth used by the
+display subsystem, improving overall performance.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 888d2c2..0f0b2e5 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -315,6 +315,60 @@ void omap_dispc_enable_digit_out(int enable)
+ }
+ EXPORT_SYMBOL(omap_dispc_enable_digit_out);
++#define MIN(a, b) ((a)<(b)?(a):(b))
++#define MAX(a, b) ((a)>(b)?(a):(b))
++
++static void setup_overlay_opt(void)
++{
++      struct fb_info **fbi = dispc.fbdev->fb_info;
++      struct omapfb_plane_struct *gfx, *vid;
++      struct fb_var_screeninfo *gvar;
++      unsigned gx, gx2, gy, gy2, gw, gh;
++      unsigned vx, vx2, vy, vy2, vw, vh;
++      unsigned bpp, skip;
++      static unsigned last_skip;
++
++      if (!fbi[0] || !fbi[1])
++              return;
++
++      gfx = fbi[0]->par;
++      vid = fbi[1]->par;
++      gvar = &fbi[0]->var;
++
++      gx = gfx->info.pos_x;
++      gy = gfx->info.pos_y;
++      gw = gfx->info.out_width;
++      gh = gfx->info.out_height;
++      vx = vid->info.pos_x;
++      vy = vid->info.pos_y;
++      vw = vid->info.out_width;
++      vh = vid->info.out_height;
++      gx2 = gx + gw;
++      gy2 = gy + gh;
++      vx2 = vx + vw;
++      vy2 = vy + vh;
++      bpp = gvar->bits_per_pixel / 8;
++
++      if (!gfx->info.enabled || !vid->info.enabled ||
++          dispc.color_key.key_type != OMAPFB_COLOR_KEY_DISABLED) {
++              skip = 0;
++      } else if (vx <= gx && vx2 >= gx2) {
++              unsigned y = MIN(gy2, vy2) - MAX(gy, vy);
++              skip = y * gvar->xres_virtual * bpp;
++      } else if (vx <= gx || vx2 >= gx2) {
++              unsigned x = MIN(gx2, vx2) - MAX(gx, vx);
++              skip = x * bpp;
++      } else {
++              skip = vw * bpp + 1;
++      }
++
++      if (skip != last_skip) {
++              last_skip = skip;
++              dispc_write_reg(DISPC_GFX_WINDOW_SKIP, skip);
++              MOD_REG_FLD(DISPC_CONTROL, 1<<12, !!skip<<12);
++      }
++}
++
+ static inline int _setup_plane(int plane, int channel_out,
+                                 u32 paddr, int screen_width,
+                                 int pos_x, int pos_y, int width, int height,
+@@ -437,6 +491,9 @@ static inline int _setup_plane(int plane, int channel_out,
+       dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
++      if (plane < 2)
++              setup_overlay_opt();
++
+       MOD_REG_FLD(DISPC_CONTROL, 1<<5, 1<<5);
+       return height * screen_width * bpp / 8;
+@@ -585,11 +642,19 @@ static int omap_dispc_enable_plane(int plane, int enable)
+       const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
+                               DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
+                               DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
++      struct omapfb_plane_struct *pi;
++
+       if ((unsigned int)plane > dispc.mem_desc.region_cnt)
+               return -EINVAL;
++      pi = dispc.fbdev->fb_info[plane]->par;
++      pi->info.enabled = enable;
++
+       enable_lcd_clocks(1);
+       MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
++      if (plane < 2)
++              setup_overlay_opt();
++      MOD_REG_FLD(DISPC_CONTROL, 1<<5, 1<<5);
+       enable_lcd_clocks(0);
+       return 0;
+@@ -633,6 +698,7 @@ static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
+       if (val != 0)
+               dispc_write_reg(tr_reg, ck->trans_key);
+       dispc_write_reg(df_reg, ck->background);
++      setup_overlay_opt();
+       enable_lcd_clocks(0);
+       dispc.color_key = *ck;
diff --git a/packages/linux/omap3-pandora-kernel/mru-fix-display-panning.diff b/packages/linux/omap3-pandora-kernel/mru-fix-display-panning.diff
new file mode 100755 (executable)
index 0000000..a4ba3d2
--- /dev/null
@@ -0,0 +1,49 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:18:48 +0000 (+0100)
+Subject: OMAP: Fix omapfb display panning
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=2ea46e9f28ff57a32d87bc380457a584c913fe78
+
+OMAP: Fix omapfb display panning
+
+This makes the FBIOPAN_DISPLAY ioctl work with omapfb.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index ce4c4de..64bf333 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -436,6 +436,8 @@ static inline int _setup_plane(int plane, int channel_out,
+       dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);
++      MOD_REG_FLD(DISPC_CONTROL, 1<<5, 1<<5);
++
+       return height * screen_width * bpp / 8;
+ }
+diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
+index e7f3462..e9ffb92 100644
+--- a/drivers/video/omap/omapfb_main.c
++++ b/drivers/video/omap/omapfb_main.c
+@@ -207,8 +207,8 @@ static int ctrl_change_mode(struct fb_info *fbi)
+       struct omapfb_device *fbdev = plane->fbdev;
+       struct fb_var_screeninfo *var = &fbi->var;
+-      offset = var->yoffset * fbi->fix.line_length +
+-               var->xoffset * var->bits_per_pixel / 8;
++      offset = (var->yoffset * var->xres_virtual + var->xoffset) *
++              var->bits_per_pixel / 8;
+       if (fbdev->ctrl->sync)
+               fbdev->ctrl->sync();
+@@ -426,6 +426,8 @@ static void set_fb_fix(struct fb_info *fbi)
+       }
+       fix->accel              = FB_ACCEL_OMAP1610;
+       fix->line_length        = var->xres_virtual * bpp / 8;
++      fix->xpanstep           = 1;
++      fix->ypanstep           = 1;
+ }
+ static int set_color_mode(struct omapfb_plane_struct *plane,
diff --git a/packages/linux/omap3-pandora-kernel/mru-fix-timings.diff b/packages/linux/omap3-pandora-kernel/mru-fix-timings.diff
new file mode 100755 (executable)
index 0000000..37ca7d3
--- /dev/null
@@ -0,0 +1,26 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:16:14 +0000 (+0100)
+Subject: OMAP: Fix video timings info message
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=3a8bdf0967ae2c4eb3cebb97118ef0392f709c1c
+
+OMAP: Fix video timings info message
+
+This fixes the hsync frequency value printed on startup.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
+index d176a2c..e7f3462 100644
+--- a/drivers/video/omap/omapfb_main.c
++++ b/drivers/video/omap/omapfb_main.c
+@@ -1792,7 +1792,8 @@ static int omapfb_do_probe(struct platform_device *pdev,
+                       vram, fbdev->mem_desc.region_cnt);
+       pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
+                       "vfreq %lu.%lu Hz\n",
+-                      phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);
++                      phz / 1000, hhz / 10000, hhz % 10000,
++                      vhz / 10, vhz % 10);
+       return 0;
diff --git a/packages/linux/omap3-pandora-kernel/mru-improve-pixclock-config.diff b/packages/linux/omap3-pandora-kernel/mru-improve-pixclock-config.diff
new file mode 100755 (executable)
index 0000000..5a70212
--- /dev/null
@@ -0,0 +1,93 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:34:39 +0000 (+0100)
+Subject: OMAP: Improve pixel clock configuration
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=01c2d720e59c291de9eb21eb65225f2f215fef84
+
+OMAP: Improve pixel clock configuration
+
+This sets the DSS1_ALWON_FCLK clock as close as possible to a
+multiple of the requested pixel clock, while keeping it below
+the 173MHz limit.
+
+Due to of the structure of the clock tree, dss1_alwon_fck cannot
+be set directly, and we must use dpll4_m4_ck instead.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
+index 64bf333..888d2c2 100644
+--- a/drivers/video/omap/dispc.c
++++ b/drivers/video/omap/dispc.c
+@@ -177,6 +177,7 @@ static struct {
+       struct clk      *dss_ick, *dss1_fck;
+       struct clk      *dss_54m_fck;
++      struct clk      *dpll4_m4_ck;
+       enum omapfb_update_mode update_mode;
+       struct omapfb_device    *fbdev;
+@@ -736,19 +737,34 @@ static void setup_color_conv_coef(void)
+       MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
+ }
++#define MAX_FCK 173000000
++
+ static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
+ {
++      unsigned long prate = clk_get_rate(clk_get_parent(dispc.dpll4_m4_ck));
++      unsigned long pcd_min = is_tft? 2: 3;
++      unsigned long fck_div;
+       unsigned long fck, lck;
+-      *lck_div = 1;
+       pck = max(1, pck);
++
++      if (pck * pcd_min > MAX_FCK) {
++              dev_warn(dispc.fbdev->dev, "pixclock %d kHz too high.\n",
++                       pck / 1000);
++              pck = MAX_FCK / pcd_min;
++      }
++
++      fck = pck * 2;
++      fck_div = (prate + pck) / fck;
++      if (fck_div > 16)
++              fck_div /= (fck_div + 15) / 16;
++      if (fck_div < 1)
++              fck_div = 1;
++      clk_set_rate(dispc.dpll4_m4_ck, prate / fck_div);
+       fck = clk_get_rate(dispc.dss1_fck);
+-      lck = fck;
+-      *pck_div = (lck + pck - 1) / pck;
+-      if (is_tft)
+-              *pck_div = max(2, *pck_div);
+-      else
+-              *pck_div = max(3, *pck_div);
++
++      *lck_div = 1;
++      *pck_div = (fck + pck - 1) / pck;
+       if (*pck_div > 255) {
+               *pck_div = 255;
+               lck = pck * *pck_div;
+@@ -909,11 +925,21 @@ static int get_dss_clocks(void)
+               return PTR_ERR(dispc.dss_54m_fck);
+       }
++      if (IS_ERR((dispc.dpll4_m4_ck =
++                              clk_get(dispc.fbdev->dev, "dpll4_m4_ck")))) {
++              dev_err(dispc.fbdev->dev, "can't get dpll4_m4_ck");
++              clk_put(dispc.dss_ick);
++              clk_put(dispc.dss1_fck);
++              clk_put(dispc.dss_54m_fck);
++              return PTR_ERR(dispc.dss_54m_fck);
++      }
++
+       return 0;
+ }
+ static void put_dss_clocks(void)
+ {
++      clk_put(dispc.dpll4_m4_ck);
+       clk_put(dispc.dss_54m_fck);
+       clk_put(dispc.dss1_fck);
+       clk_put(dispc.dss_ick);
diff --git a/packages/linux/omap3-pandora-kernel/mru-make-dpll4-m4-ck-programmable.diff b/packages/linux/omap3-pandora-kernel/mru-make-dpll4-m4-ck-programmable.diff
new file mode 100755 (executable)
index 0000000..0a535c5
--- /dev/null
@@ -0,0 +1,27 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:52:42 +0000 (+0100)
+Subject: OMAP: Make dpll4_m4_ck programmable with clk_set_rate()
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=feab5b628c06619196044c15d9d2a113df173eee
+
+OMAP: Make dpll4_m4_ck programmable with clk_set_rate()
+
+Filling the set_rate and round_rate fields of dpll4_m4_ck makes
+this clock programmable through clk_set_rate().  This is needed
+to give omapfb control over the dss1_alwon_fck rate.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h
+index 41f91f8..9c8e0c8 100644
+--- a/arch/arm/mach-omap2/clock34xx.h
++++ b/arch/arm/mach-omap2/clock34xx.h
+@@ -877,6 +877,8 @@ static struct clk dpll4_m4_ck = {
+                               PARENT_CONTROLS_CLOCK,
+       .clkdm          = { .name = "dpll4_clkdm" },
+       .recalc         = &omap2_clksel_recalc,
++      .set_rate       = &omap2_clksel_set_rate,
++      .round_rate     = &omap2_clksel_round_rate,
+ };
+ /* The PWRDN bit is apparently only available on 3430ES2 and above */
diff --git a/packages/linux/omap3-pandora-kernel/mru-make-video-timings-selectable.diff b/packages/linux/omap3-pandora-kernel/mru-make-video-timings-selectable.diff
new file mode 100755 (executable)
index 0000000..bba3ef7
--- /dev/null
@@ -0,0 +1,312 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Fri, 29 Aug 2008 01:42:12 +0000 (+0100)
+Subject: OMAP: Make video mode selectable from pre-defined list
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=7a9e55d7156dae6bc930c77620a88a669d2ed1c9
+
+OMAP: Make video mode selectable from pre-defined list
+
+This adds a list of common video modes and allows one to be
+selected with video=omapfb:mode:name on the command line,
+overriding the defaults from lcd_*.c. A default named mode
+can also be specified in the kernel configuration.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
+index 5ebd591..9977e80 100644
+--- a/drivers/video/omap/Kconfig
++++ b/drivers/video/omap/Kconfig
+@@ -7,26 +7,13 @@ config FB_OMAP
+       help
+           Frame buffer driver for OMAP based boards.
+-choice
+-      depends on FB_OMAP && MACH_OVERO
+-      prompt "Screen resolution"
+-      default FB_OMAP_079M3R
++config FB_OMAP_VIDEO_MODE
++      string "Default video mode"
++      depends on FB_OMAP
+       help
+-        Selected desired screen resolution
+-
+-config FB_OMAP_031M3R
+-      boolean "640 x 480 @ 60 Hz Reduced blanking"
+-
+-config FB_OMAP_048M3R
+-      boolean "800 x 600 @ 60 Hz Reduced blanking"
+-
+-config FB_OMAP_079M3R
+-      boolean "1024 x 768 @ 60 Hz Reduced blanking"
+-
+-config FB_OMAP_092M9R
+-      boolean "1280 x 720 @ 60 Hz Reduced blanking"
+-
+-endchoice
++        Enter video mode name to use if none is specified on the kernel
++        command line. If left blank, board-specific default timings
++        will be used. See omapfb_main.c for a list of valid mode names.
+ config FB_OMAP_LCDC_EXTERNAL
+       bool "External LCD controller support"
+diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
+index e9ffb92..c4c4049 100644
+--- a/drivers/video/omap/omapfb_main.c
++++ b/drivers/video/omap/omapfb_main.c
+@@ -36,6 +36,20 @@
+ #define MODULE_NAME   "omapfb"
++struct video_mode {
++      const char      *name;
++      int             x_res, y_res;
++      int             pixel_clock;    /* In kHz */
++      int             hsw;            /* Horizontal synchronization
++                                         pulse width */
++      int             hfp;            /* Horizontal front porch */
++      int             hbp;            /* Horizontal back porch */
++      int             vsw;            /* Vertical synchronization
++                                         pulse width */
++      int             vfp;            /* Vertical front porch */
++      int             vbp;            /* Vertical back porch */
++};
++
+ static unsigned int   def_accel;
+ static unsigned long  def_vram[OMAPFB_PLANE_NUM];
+ static unsigned int   def_vram_cnt;
+@@ -43,6 +57,7 @@ static unsigned long def_vxres;
+ static unsigned long  def_vyres;
+ static unsigned int   def_rotate;
+ static unsigned int   def_mirror;
++static char           def_mode[16] = CONFIG_FB_OMAP_VIDEO_MODE;
+ #ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
+ static int            manual_update = 1;
+@@ -53,6 +68,7 @@ static int           manual_update;
+ static struct platform_device *fbdev_pdev;
+ static struct lcd_panel               *fbdev_panel;
+ static struct omapfb_device   *omapfb_dev;
++static struct video_mode      video_mode;
+ struct caps_table_struct {
+       unsigned long flag;
+@@ -83,6 +99,152 @@ static struct caps_table_struct color_caps[] = {
+       { 1 << OMAPFB_COLOR_YUY422,     "YUY422", },
+ };
++static struct video_mode video_modes[] __initdata = {
++      {
++              /* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
++              .name           = "640x480@60",
++              .x_res          = 640,
++              .y_res          = 480,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 4,
++              .vbp            = 7,
++              .pixel_clock    = 23500,
++      },
++      {
++              /* 800 x 600 @ 60 Hz  Reduced blanking VESA CVT 0.48M3-R */
++              .name           = "800x600@60",
++              .x_res          = 800,
++              .y_res          = 600,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 4,
++              .vbp            = 11,
++              .pixel_clock    = 35500,
++      },
++      {
++              /* 1024 x 768 @ 60 Hz  Reduced blanking VESA CVT 0.79M3-R */
++              .name           = "1024x768@60",
++              .x_res          = 1024,
++              .y_res          = 768,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 4,
++              .vbp            = 15,
++              .pixel_clock    = 56000,
++      },
++      {
++              /* 1280 x 720 @ 60 Hz  Reduced blanking VESA CVT 0.92M9-R */
++              .name           = "1280x720@60",
++              .x_res          = 1280,
++              .y_res          = 720,
++              .hfp            = 48,
++              .hsw            = 32,
++              .hbp            = 80,
++              .vfp            = 3,
++              .vsw            = 5,
++              .vbp            = 13,
++              .pixel_clock    = 64000,
++      },
++      {
++              /* 720 x 480 @ 60 Hz  CEA-861 Format 3 */
++              .name           = "480p60",
++              .x_res          = 720,
++              .y_res          = 480,
++              .hfp            = 16,
++              .hsw            = 62,
++              .hbp            = 60,
++              .vfp            = 9,
++              .vsw            = 6,
++              .vbp            = 30,
++              .pixel_clock    = 27027,
++      },
++      {
++              /* 720 x 576 @ 60 Hz  CEA-861 Format 18 */
++              .name           = "576p50",
++              .x_res          = 720,
++              .y_res          = 576,
++              .hfp            = 12,
++              .hsw            = 64,
++              .hbp            = 68,
++              .vfp            = 5,
++              .vsw            = 5,
++              .vbp            = 39,
++              .pixel_clock    = 27000,
++      },
++      {
++              /* 1280 x 720 @ 50 Hz  CEA-861B Format 19 */
++              .name           = "720p50",
++              .x_res          = 1280,
++              .y_res          = 720,
++              .hfp            = 440,
++              .hsw            = 40,
++              .hbp            = 220,
++              .vfp            = 20,
++              .vsw            = 5,
++              .vbp            = 5,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1280 x 720 @ 60 Hz  CEA-861B Format 4 */
++              .name           = "720p60",
++              .x_res          = 1280,
++              .y_res          = 720,
++              .hfp            = 110,
++              .hsw            = 40,
++              .hbp            = 220,
++              .vfp            = 20,
++              .vsw            = 5,
++              .vbp            = 5,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1920 x 1080 @ 24 Hz  CEA-861B Format 32 */
++              .name           = "1080p24",
++              .x_res          = 1920,
++              .y_res          = 1080,
++              .hfp            = 148,
++              .hsw            = 44,
++              .hbp            = 638,
++              .vfp            = 36,
++              .vsw            = 5,
++              .vbp            = 4,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1920 x 1080 @ 25 Hz  CEA-861B Format 33 */
++              .name           = "1080p25",
++              .x_res          = 1920,
++              .y_res          = 1080,
++              .hfp            = 148,
++              .hsw            = 44,
++              .hbp            = 528,
++              .vfp            = 36,
++              .vsw            = 5,
++              .vbp            = 4,
++              .pixel_clock    = 74250,
++      },
++      {
++              /* 1920 x 1080 @ 30 Hz  CEA-861B Format 34 */
++              .name           = "1080p30",
++              .x_res          = 1920,
++              .y_res          = 1080,
++              .hfp            = 148,
++              .hsw            = 44,
++              .hbp            = 88,
++              .vfp            = 36,
++              .vsw            = 5,
++              .vbp            = 4,
++              .pixel_clock    = 74250,
++      },
++};
++
+ /*
+  * ---------------------------------------------------------------------------
+  * LCD panel
+@@ -1714,6 +1876,20 @@ static int omapfb_do_probe(struct platform_device *pdev,
+               goto cleanup;
+       }
++      if (video_mode.name) {
++              pr_info("omapfb: using mode %s\n", video_mode.name);
++
++              fbdev->panel->x_res     = video_mode.x_res;
++              fbdev->panel->y_res     = video_mode.y_res;
++              fbdev->panel->pixel_clock = video_mode.pixel_clock;
++              fbdev->panel->hsw       = video_mode.hsw;
++              fbdev->panel->hfp       = video_mode.hfp;
++              fbdev->panel->hbp       = video_mode.hbp;
++              fbdev->panel->vsw       = video_mode.vsw;
++              fbdev->panel->vfp       = video_mode.vfp;
++              fbdev->panel->vbp       = video_mode.vbp;
++      }
++
+       r = fbdev->panel->init(fbdev->panel, fbdev);
+       if (r)
+               goto cleanup;
+@@ -1870,6 +2046,17 @@ static struct platform_driver omapfb_driver = {
+       },
+ };
++static void __init omapfb_find_mode(char *name, struct video_mode *vmode)
++{
++      int i;
++
++      for (i = 0; i < sizeof(video_modes)/sizeof(video_modes[0]); i++)
++              if (!strcmp(name, video_modes[i].name)) {
++                      *vmode = video_modes[i];
++                      break;
++              }
++}
++
+ #ifndef MODULE
+ /* Process kernel command line parameters */
+@@ -1918,6 +2105,8 @@ static int __init omapfb_setup(char *options)
+                       def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
+               else if (!strncmp(this_opt, "manual_update", 13))
+                       manual_update = 1;
++              else if (!strncmp(this_opt, "mode:", 5))
++                      strncpy(def_mode, this_opt + 5, sizeof(def_mode));
+               else {
+                       pr_debug("omapfb: invalid option\n");
+                       r = -1;
+@@ -1939,6 +2128,9 @@ static int __init omapfb_init(void)
+               return -ENODEV;
+       omapfb_setup(option);
+ #endif
++
++      omapfb_find_mode(def_mode, &video_mode);
++
+       /* Register the driver with LDM */
+       if (platform_driver_register(&omapfb_driver)) {
+               pr_debug("failed to register omapfb driver\n");
+@@ -1960,6 +2152,7 @@ module_param_named(vyres, def_vyres, long, 0664);
+ module_param_named(rotate, def_rotate, uint, 0664);
+ module_param_named(mirror, def_mirror, uint, 0664);
+ module_param_named(manual_update, manual_update, bool, 0664);
++module_param_string(video_mode, def_mode, sizeof(def_mode), 0664);
+ module_init(omapfb_init);
+ module_exit(omapfb_cleanup);
diff --git a/packages/linux/omap3-pandora-kernel/musb-dma-iso-in.eml b/packages/linux/omap3-pandora-kernel/musb-dma-iso-in.eml
new file mode 100755 (executable)
index 0000000..56fc827
--- /dev/null
@@ -0,0 +1,138 @@
+Fixes blurred capture images in dma mode. Isochronous error field in
+urb and source data buffer pointer were not updated properly in dma
+mode.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 08e421f..a481d54 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -1505,10 +1505,29 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+               musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+ #ifdef CONFIG_USB_INVENTRA_DMA
++              if (usb_pipeisoc(pipe)) {
++                      struct usb_iso_packet_descriptor *d;
++
++                      d = urb->iso_frame_desc + qh->iso_idx;
++                      d->actual_length = xfer_len;
++
++                      /* even if there was an error, we did the dma
++                       * for iso_frame_desc->length
++                       */
++                      if (d->status != EILSEQ && d->status != -EOVERFLOW)
++                              d->status = 0;
++
++                      if (++qh->iso_idx >= urb->number_of_packets)
++                              done = true;
++                      else
++                              done = false;
++
++              } else  {
+               /* done if urb buffer is full or short packet is recd */
+               done = (urb->actual_length + xfer_len >=
+                               urb->transfer_buffer_length
+                       || dma->actual_len < qh->maxpacket);
++              }
+               /* send IN token for next packet, without AUTOREQ */
+               if (!done) {
+@@ -1545,7 +1564,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+               if (dma) {
+                       struct dma_controller   *c;
+                       u16                     rx_count;
+-                      int                     ret;
++                      int                     ret, length;
++                      dma_addr_t              buf;
+                       rx_count = musb_readw(epio, MUSB_RXCOUNT);
+@@ -1558,6 +1578,35 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                       c = musb->dma_controller;
++                      if (usb_pipeisoc(pipe)) {
++                              int status = 0;
++                              struct usb_iso_packet_descriptor *d;
++
++                              d = urb->iso_frame_desc + qh->iso_idx;
++
++                              if (iso_err) {
++                                      status = -EILSEQ;
++                                      urb->error_count++;
++                              }
++                              if (rx_count > d->length) {
++                                      if (status == 0) {
++                                              status = -EOVERFLOW;
++                                              urb->error_count++;
++                                      }
++                                      DBG(2, "** OVERFLOW %d into %d\n",\
++                                          rx_count, d->length);
++
++                                      length = d->length;
++                              } else
++                                      length = rx_count;
++                              d->status = status;
++                              buf = urb->transfer_dma + d->offset;
++                      } else {
++                              length = rx_count;
++                              buf = urb->transfer_dma +
++                                              urb->actual_length;
++                      }
++
+                       dma->desired_mode = 0;
+ #ifdef USE_MODE1
+                       /* because of the issue below, mode 1 will
+@@ -1569,6 +1618,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                                               urb->actual_length)
+                                       > qh->maxpacket)
+                               dma->desired_mode = 1;
++                      if (rx_count < hw_ep->max_packet_sz_rx) {
++                              length = rx_count;
++                              dma->bDesiredMode = 0;
++                      } else {
++                              length = urb->transfer_buffer_length;
++                      }
+ #endif
+ /* Disadvantage of using mode 1:
+@@ -1606,12 +1661,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                        */
+                       ret = c->channel_program(
+                               dma, qh->maxpacket,
+-                              dma->desired_mode,
+-                              urb->transfer_dma
+-                                      + urb->actual_length,
+-                              (dma->desired_mode == 0)
+-                                      ? rx_count
+-                                      : urb->transfer_buffer_length);
++                              dma->desired_mode, buf, length);
+                       if (!ret) {
+                               c->channel_release(dma);
+@@ -1628,19 +1678,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+               }
+       }
+-      if (dma && usb_pipeisoc(pipe)) {
+-              struct usb_iso_packet_descriptor        *d;
+-              int                                     iso_stat = status;
+-
+-              d = urb->iso_frame_desc + qh->iso_idx;
+-              d->actual_length += xfer_len;
+-              if (iso_err) {
+-                      iso_stat = -EILSEQ;
+-                      urb->error_count++;
+-              }
+-              d->status = iso_stat;
+-      }
+-
+ finish:
+       urb->actual_length += xfer_len;
+       qh->offset += xfer_len;
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel/musb-fix-ISO-in-unlink.diff b/packages/linux/omap3-pandora-kernel/musb-fix-ISO-in-unlink.diff
new file mode 100755 (executable)
index 0000000..c93a5b0
--- /dev/null
@@ -0,0 +1,69 @@
+From: Ajay Kumar Gupta <ajay.gupta@ti.com>
+To: linux-omap@vger.kernel.org
+Cc: linux-usb@vger.kernel.org, felipe.balbi@nokia.com,
+        stern@rowland.harvard.edu, Ajay Kumar Gupta <ajay.gupta@ti.com>
+Subject: [PATCH v3] OMAP:MUSB: Corrects urb unlink function path
+Date:  Mon, 25 Aug 2008 10:52:16 +0530
+
+Fixes kernel panic while ISO IN transfer is aborted.Replaced
+usb_hcd_unlink_urb_from_ep() from musb_giveback() to __musb_giveback()
+to make sure urb is unlinked before giveback when __musb_giveback() is
+called from musb_urb_dequeue().
+
+Acquired musb->lock() before usb_hcd_unlink_urb_from_ep() within in 
+enqueue path.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+ drivers/usb/musb/musb_host.c |    7 +++++--
+ 1 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 08e421f..4279311 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -291,6 +291,7 @@ __acquires(musb->lock)
+                       urb->actual_length, urb->transfer_buffer_length
+                       );
++      usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+       spin_unlock(&musb->lock);
+       usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+       spin_lock(&musb->lock);
+@@ -353,8 +354,6 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+               break;
+       }
+-      usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+-
+       qh->is_ready = 0;
+       __musb_giveback(musb, urb, status);
+       qh->is_ready = ready;
+@@ -1787,7 +1786,9 @@ static int musb_urb_enqueue(
+        */
+       qh = kzalloc(sizeof *qh, mem_flags);
+       if (!qh) {
++              spin_lock_irqsave(&musb->lock, flags);
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
++              spin_unlock_irqrestore(&musb->lock, flags);
+               return -ENOMEM;
+       }
+@@ -1899,7 +1900,9 @@ static int musb_urb_enqueue(
+ done:
+       if (ret != 0) {
++              spin_lock_irqsave(&musb->lock, flags);
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
++              spin_unlock_irqrestore(&musb->lock, flags);
+               kfree(qh);
+       }
+       return ret;
+-- 
+1.5.6
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel/musb-fix-dbrownell.patch b/packages/linux/omap3-pandora-kernel/musb-fix-dbrownell.patch
new file mode 100755 (executable)
index 0000000..3526cd3
--- /dev/null
@@ -0,0 +1,71 @@
+From: David Brownell <dbrownell@users.sourceforge.net>
+
+Minor cleanups to omap 2430/34xx/35x musb_hdrc init:
+
+ - num_eps is 16; here, each one is bidirectional
+ - use DMA_32BIT_MASK to prevent confusion/errors
+ - initialize root port power to reflect 100 mA limit
+
+This still hard-wires some board-specific data, since there
+are no hooks through which different boards can provide the
+right data to the init code.
+
+Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
+
+--- a/arch/arm/mach-omap2/usb-musb.c
++++ b/arch/arm/mach-omap2/usb-musb.c
+@@ -21,12 +21,15 @@
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
++#include <linux/dma-mapping.h>
++
+ #include <asm/io.h>
+-#include <mach/mux.h>
++
+ #include <linux/usb/musb.h>
+ #include <mach/hardware.h>
+ #include <mach/pm.h>
++#include <mach/mux.h>
+ #include <mach/usb.h>
+ #ifdef CONFIG_USB_MUSB_SOC
+@@ -109,7 +112,7 @@ static struct musb_hdrc_config musb_config = {
+       .dyn_fifo       = 1,
+       .soft_con       = 1,
+       .dma            = 1,
+-      .num_eps        = 32,
++      .num_eps        = 16,
+       .dma_channels   = 7,
+       .dma_req_chan   = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3),
+       .ram_bits       = 12,
+@@ -129,16 +132,22 @@ static struct musb_hdrc_platform_data musb_plat = {
+                       : "usbhs_ick",
+       .set_clock      = musb_set_clock,
+       .config         = &musb_config,
++
++      /* REVISIT charge pump on TWL4030 can supply up to
++       * 100 mA ... but this value is board-specific, like
++       * "mode", and should be passed to usb_musb_init().
++       */
++      .power          = 50,                   /* up to 100 mA */
+ };
+-static u64 musb_dmamask = ~(u32)0;
++static u64 musb_dmamask = DMA_32BIT_MASK;
+ static struct platform_device musb_device = {
+       .name           = "musb_hdrc",
+       .id             = -1,
+       .dev = {
+               .dma_mask               = &musb_dmamask,
+-              .coherent_dma_mask      = 0xffffffff,
++              .coherent_dma_mask      = DMA_32BIT_MASK,
+               .platform_data          = &musb_plat,
+       },
+       .num_resources  = ARRAY_SIZE(musb_resources),
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff --git a/packages/linux/omap3-pandora-kernel/musb-fix-endpoints.diff b/packages/linux/omap3-pandora-kernel/musb-fix-endpoints.diff
new file mode 100755 (executable)
index 0000000..5d1201f
--- /dev/null
@@ -0,0 +1,197 @@
+From: Ajay Kumar Gupta <ajay.gupta@ti.com>
+To: linux-usb@vger.kernel.org
+Cc: linux-omap@vger.kernel.org, david-b@pacbell.net, me@felipebalbi.com,
+        Ajay Kumar Gupta <ajay.gupta@ti.com>
+Subject: [PATCH] MUSB: BULK request on different available endpoints
+Date:  Tue,  7 Oct 2008 11:12:24 +0530
+
+Fixes co-working issue of usb serial device with usb/net devices while
+other endpoints are free and can be used. This patch implements the policy
+that if endpoint resources are available then different BULK request goes
+to different endpoint otherwise they are multiplexed to one reserved
+endpoint as currently done.
+
+NAK limit scheme has to be added for multiplexed BULK request scenario
+to avoid endpoint starvation due to usb/net devices.
+
+musb->periodic[] flag setting is also updated. It used to set this flag for
+an endpoint even when only rx or tx is used. Now flag setting is done on
+rx/tx basis of an endpoint.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+ drivers/usb/musb/musb_host.c |   94 ++++++++++++++++++++++++------------------
+ drivers/usb/musb/musb_host.h |    1 +
+ 2 files changed, 55 insertions(+), 40 deletions(-)
+--- /tmp/musb_host.c   2008-10-07 10:10:49.000000000 +0200
++++ git/drivers/usb/musb/musb_host.c   2008-10-07 10:13:59.000000000 +0200
+@@ -378,27 +378,32 @@
+               switch (qh->type) {
++              case USB_ENDPOINT_XFER_CONTROL:
++              case USB_ENDPOINT_XFER_BULK:
++                      /* fifo policy for these lists, except that NAKing
++                       * should rotate a qh to the end (for fairness).
++                       */
++                      if (qh->mux == 1) {
++                              head = qh->ring.prev;
++                              list_del(&qh->ring);
++                              kfree(qh);
++                              qh = first_qh(head);
++                              break;
++                      }
+               case USB_ENDPOINT_XFER_ISOC:
+               case USB_ENDPOINT_XFER_INT:
+                       /* this is where periodic bandwidth should be
+                        * de-allocated if it's tracked and allocated;
+                        * and where we'd update the schedule tree...
+                        */
+-                      musb->periodic[ep->epnum] = NULL;
++                      if (is_in)
++                              musb->periodic[2 * ep->epnum - 2] = NULL;
++                      else
++                              musb->periodic[2 * ep->epnum - 1] = NULL;
+                       kfree(qh);
+                       qh = NULL;
+                       break;
+-              case USB_ENDPOINT_XFER_CONTROL:
+-              case USB_ENDPOINT_XFER_BULK:
+-                      /* fifo policy for these lists, except that NAKing
+-                       * should rotate a qh to the end (for fairness).
+-                       */
+-                      head = qh->ring.prev;
+-                      list_del(&qh->ring);
+-                      kfree(qh);
+-                      qh = first_qh(head);
+-                      break;
+               }
+       }
+       return qh;
+@@ -1728,22 +1733,9 @@
+       u16                     maxpacket;
+       /* use fixed hardware for control and bulk */
+-      switch (qh->type) {
+-      case USB_ENDPOINT_XFER_CONTROL:
++      if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
+               head = &musb->control;
+               hw_ep = musb->control_ep;
+-              break;
+-      case USB_ENDPOINT_XFER_BULK:
+-              hw_ep = musb->bulk_ep;
+-              if (is_in)
+-                      head = &musb->in_bulk;
+-              else
+-                      head = &musb->out_bulk;
+-              break;
+-      }
+-      if (head) {
+-              idle = list_empty(head);
+-              list_add_tail(&qh->ring, head);
+               goto success;
+       }
+@@ -1778,7 +1770,8 @@
+       for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+               int     diff;
+-              if (musb->periodic[epnum])
++              if ((is_in && musb->periodic[2 * epnum - 2]) ||
++                      (!is_in && musb->periodic[2 * epnum - 1]))
+                       continue;
+               hw_ep = &musb->endpoints[epnum];
+               if (hw_ep == musb->bulk_ep)
+@@ -1789,19 +1782,36 @@
+               else
+                       diff = hw_ep->max_packet_sz_tx - maxpacket;
+-              if (diff > 0 && best_diff > diff) {
++              if (diff >= 0 && best_diff > diff) {
+                       best_diff = diff;
+                       best_end = epnum;
+               }
+       }
+-      if (best_end < 0)
++      /* use bulk reserved ep1 if no other ep is free*/
++      if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
++              hw_ep = musb->bulk_ep;
++              if (is_in)
++                      head = &musb->in_bulk;
++              else
++                      head = &musb->out_bulk;
++              goto success;
++      } else if (best_end < 0)
+               return -ENOSPC;
+       idle = 1;
++      qh->mux = 0;
+       hw_ep = musb->endpoints + best_end;
+-      musb->periodic[best_end] = qh;
+-      DBG(4, "qh %p periodic slot %d\n", qh, best_end);
++      if (is_in)
++              musb->periodic[2 * best_end - 2] = qh;
++      else
++              musb->periodic[2 * best_end - 1] = qh;
++      DBG(4, "qh %p periodic slot %d%s\n", qh, best_end, is_in ? "Rx" : "Tx");
+ success:
++      if (head) {
++              idle = list_empty(head);
++              list_add_tail(&qh->ring, head);
++              qh->mux = 1;
++      }
+       qh->hw_ep = hw_ep;
+       qh->hep->hcpriv = qh;
+       if (idle)
+@@ -2065,11 +2075,13 @@
+                       sched = &musb->control;
+                       break;
+               case USB_ENDPOINT_XFER_BULK:
+-                      if (usb_pipein(urb->pipe))
+-                              sched = &musb->in_bulk;
+-                      else
+-                              sched = &musb->out_bulk;
+-                      break;
++                      if (qh->mux == 1) {
++                              if (usb_pipein(urb->pipe))
++                                      sched = &musb->in_bulk;
++                              else
++                                      sched = &musb->out_bulk;
++                              break;
++                      }
+               default:
+                       /* REVISIT when we get a schedule tree, periodic
+                        * transfers won't always be at the head of a
+@@ -2131,11 +2143,13 @@
+               sched = &musb->control;
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+-              if (is_in)
+-                      sched = &musb->in_bulk;
+-              else
+-                      sched = &musb->out_bulk;
+-              break;
++              if (qh->mux == 1) {
++                      if (is_in)
++                              sched = &musb->in_bulk;
++                      else
++                              sched = &musb->out_bulk;
++                      break;
++              }
+       case USB_ENDPOINT_XFER_ISOC:
+       case USB_ENDPOINT_XFER_INT:
+               for (i = 0; i < musb->nr_endpoints; i++) {
+--- /tmp/musb_host.h   2008-10-07 08:59:38.000000000 +0200
++++ git/drivers/usb/musb/musb_host.h   2008-10-07 10:10:54.000000000 +0200
+@@ -53,7 +53,8 @@
+       struct list_head        ring;           /* of musb_qh */
+       /* struct musb_qh               *next; */       /* for periodic tree */
+-
++      u8          mux;        /* qh multiplexed to hw_ep */
++      
+       unsigned                offset;         /* in urb->transfer_buffer */
+       unsigned                segsize;        /* current xfer fragment */
diff --git a/packages/linux/omap3-pandora-kernel/musb-fix-multiple-bulk-transfers.diff b/packages/linux/omap3-pandora-kernel/musb-fix-multiple-bulk-transfers.diff
new file mode 100755 (executable)
index 0000000..7435a2e
--- /dev/null
@@ -0,0 +1,194 @@
+From: Ajay Kumar Gupta <ajay.gupta@ti.com>
+To: linux-usb@vger.kernel.org
+Cc: linux-omap@vger.kernel.org, felipe.balbi@nokia.com,
+Subject: [PATCH] MUSB: Fix for kernel panic with multiple bulk transfer
+Date:  Wed,  1 Oct 2008 13:08:56 +0530
+
+Fixes kernel panic when multiple copies are performed among more than two mass
+storage media and transfer is aborted. musb_advance_schedule(),
+musb_urb_dequeue(),musb_cleanup_urb() and musb_h_disable() functions have
+been modified to correct urb handling associated with bulk and control
+endpoints which are multiplexed on one hardware endpoint.
+
+musb_advance_schedule() has been removed from musb_cleanup_urb() and added
+to musb_urb_dequeue(). musb_h_disable() has been modified to take care of
+multiple qh on same hw_ep scenario.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+CC: Romit Dasgupta <romit@ti.com> 
+---
+Suggestions welcome to move while loop doing kfree(qh) from 
+musb_advance_schedule() and musb_h_disable() to musb_giveback().
+
+ drivers/usb/musb/musb_host.c |  105 ++++++++++++++++++++++++++++++-----------
+ 1 files changed, 77 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 8b4be01..c2474de 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -427,8 +427,17 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
+               qh = musb_giveback(qh, urb, 0);
+       else
+               qh = musb_giveback(qh, urb, urb->status);
++      while (qh && qh->is_ready && list_empty(&qh->hep->urb_list)) {
++              struct list_head *head;
++              head = qh->ring.prev;
++              list_del(&qh->ring);
++              qh->hep->hcpriv = NULL;
++              kfree(qh);
++              qh = first_qh(head);
++      }
+-      if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
++
++      if (qh && qh->is_ready) {
+               DBG(4, "... next ep%d %cX urb %p\n",
+                               hw_ep->epnum, is_in ? 'R' : 'T',
+                               next_urb(qh));
+@@ -1964,8 +1973,6 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+               /* flush cpu writebuffer */
+               csr = musb_readw(epio, MUSB_TXCSR);
+       }
+-      if (status == 0)
+-              musb_advance_schedule(ep->musb, urb, ep, is_in);
+       return status;
+ }
+@@ -2026,13 +2033,24 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+       /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+       if (ret < 0 || (sched && qh != first_qh(sched))) {
+               int     ready = qh->is_ready;
+-
++              int     type = urb->pipe;
+               ret = 0;
+               qh->is_ready = 0;
+               __musb_giveback(musb, urb, 0);
+-              qh->is_ready = ready;
+-      } else
++
++              if (list_empty(&qh->hep->urb_list) && list_empty(&qh->ring))
++                      list_del(&qh->ring);
++              else
++                      qh->is_ready = ready;
++              if (usb_pipeisoc(type))
++                      musb->periodic[qh->hw_ep->epnum] = NULL;
++      } else {
+               ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
++              if (!ret) {
++                      musb_advance_schedule(qh->hw_ep->musb, urb, qh->hw_ep,
++                                      urb->pipe & USB_DIR_IN);
++              }
++      }
+ done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return ret;
+@@ -2046,14 +2064,17 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+       unsigned long           flags;
+       struct musb             *musb = hcd_to_musb(hcd);
+       u8                      is_in = epnum & USB_DIR_IN;
+-      struct musb_qh          *qh = hep->hcpriv;
++      struct musb_qh          *qh, *qh_for_curr_urb;
+       struct urb              *urb, *tmp;
+       struct list_head        *sched;
+-
+-      if (!qh)
+-              return;
++      int                     i;
+       spin_lock_irqsave(&musb->lock, flags);
++      qh = hep->hcpriv;
++      if (!qh) {
++              spin_unlock_irqrestore(&musb->lock, flags);
++              return;
++      }
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+@@ -2065,6 +2086,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+               else
+                       sched = &musb->out_bulk;
+               break;
++      case USB_ENDPOINT_XFER_ISOC:
++      case USB_ENDPOINT_XFER_INT:
++              for (i = 0; i < musb->nr_endpoints; i++) {
++                      if (musb->periodic[i] == qh)
++                              sched = &qh->ring;
++                      break;
++              }
+       default:
+               /* REVISIT when we get a schedule tree, periodic transfers
+                * won't always be at the head of a singleton queue...
+@@ -2073,26 +2101,47 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+               break;
+       }
+-      /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+-
+       /* kick first urb off the hardware, if needed */
+-      qh->is_ready = 0;
+-      if (!sched || qh == first_qh(sched)) {
++      if (sched) {
++              qh_for_curr_urb = qh;
+               urb = next_urb(qh);
+-
+-              /* make software (then hardware) stop ASAP */
+-              if (!urb->unlinked)
+-                      urb->status = -ESHUTDOWN;
+-
+-              /* cleanup */
+-              musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+-      } else
+-              urb = NULL;
+-
+-      /* then just nuke all the others */
+-      list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+-              musb_giveback(qh, urb, -ESHUTDOWN);
+-
++              if (urb) {
++                      /* make software (then hardware) stop ASAP */
++                      if (!urb->unlinked)
++                              urb->status = -ESHUTDOWN;
++                      /* cleanup first urb of first qh; */
++                      if (qh == first_qh(sched)) {
++                              musb_cleanup_urb(urb, qh,
++                                      urb->pipe & USB_DIR_IN);
++                      }
++                      qh = musb_giveback(qh, urb, -ESHUTDOWN);
++                      if (qh == qh_for_curr_urb) {
++                              list_for_each_entry_safe_from(urb, tmp,
++                                      &hep->urb_list, urb_list) {
++                                      qh = musb_giveback(qh, tmp, -ESHUTDOWN);
++                                      if (qh != qh_for_curr_urb)
++                                              break;
++                              }
++                      }
++              }
++              /* pick the next candidate and go */
++              if (qh && qh->is_ready) {
++                      while (qh && qh->is_ready &&
++                              list_empty(&qh->hep->urb_list)) {
++                                      struct list_head *head;
++                                      head = qh->ring.prev;
++                                      list_del(&qh->ring);
++                                      qh->hep->hcpriv = NULL;
++                                      kfree(qh);
++                                      qh = first_qh(head);
++                      }
++                      if (qh && qh->is_ready) {
++                              epnum = qh->hep->desc.bEndpointAddress;
++                              is_in = epnum & USB_DIR_IN;
++                              musb_start_urb(musb, is_in, qh);
++                      }
++              }
++      }
+       spin_unlock_irqrestore(&musb->lock, flags);
+ }
+-- 
+1.5.6
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff --git a/packages/linux/omap3-pandora-kernel/musb-mru-otgfix.diff b/packages/linux/omap3-pandora-kernel/musb-mru-otgfix.diff
new file mode 100755 (executable)
index 0000000..767858b
--- /dev/null
@@ -0,0 +1,43 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Sat, 6 Sep 2008 15:11:00 +0000 (+0100)
+Subject: usb: musb: fix something
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=1e5bc41773bb981b3a89bd762becf98c72be5e4c
+
+usb: musb: fix something
+
+This makes USB work on the Beagleboard.  I don't know why.
+---
+
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index e07cad8..4d6ff26 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1819,6 +1819,9 @@ allocate_instance(struct device *dev, void __iomem *mbase)
+               ep->epnum = epnum;
+       }
++#ifdef CONFIG_USB_MUSB_OTG
++      otg_set_transceiver(&musb->xceiv);
++#endif
+       musb->controller = dev;
+       return musb;
+ }
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index 9d2dcb1..51af80b 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -215,12 +215,14 @@ void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+ int __init musb_platform_init(struct musb *musb)
+ {
++      struct otg_transceiver *xceiv = otg_get_transceiver();
+       u32 l;
+ #if defined(CONFIG_ARCH_OMAP2430)
+       omap_cfg_reg(AE5_2430_USB0HS_STP);
+ #endif
++      musb->xceiv = *xceiv;
+       musb_platform_resume(musb);
+       l = omap_readl(OTG_SYSCONFIG);
diff --git a/packages/linux/omap3-pandora-kernel/musb-support-high-bandwidth.patch.eml b/packages/linux/omap3-pandora-kernel/musb-support-high-bandwidth.patch.eml
new file mode 100755 (executable)
index 0000000..0264a97
--- /dev/null
@@ -0,0 +1,134 @@
+Enables support for camera (as creative) requiring high bandwidth
+isochronous transfer.
+
+Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
+---
+ drivers/usb/musb/musb_core.c |   18 +++++++++---------
+ drivers/usb/musb/musb_host.c |   32 +++++++++++++++++++++-----------
+ 2 files changed, 30 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index c939f81..9914f70 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1063,17 +1063,17 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
+ { .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
+ { .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
+ { .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
++{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket =  64, },
+ { .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
++{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket =  64, },
+ { .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 512, },
+-{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 512, },
+-{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 512, },
+-{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 512, },
+-{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 512, },
++{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket =  64, },
++{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
++{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 256, },
++{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
++{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 256, },
++{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 256, },
++{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 4096, },
+ { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+ { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+ };
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 08e421f..84173df 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -1443,6 +1443,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                       /* packet error reported later */
+                       iso_err = true;
+               }
++      } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
++              DBG(3, "end %d Highbandwidth  incomplete ISO packet received\n"
++                                      , epnum);
++              status = -EPROTO;
+       }
+       /* faults abort the transfer */
+@@ -1595,7 +1599,13 @@ void musb_host_rx(struct musb *musb, u8 epnum)
+                               val &= ~MUSB_RXCSR_H_AUTOREQ;
+                       else
+                               val |= MUSB_RXCSR_H_AUTOREQ;
+-                      val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
++
++                      if (qh->maxpacket & ~0x7ff)
++                              /* Autoclear doesn't work in high bandwidth iso */
++                              val |= MUSB_RXCSR_DMAENAB;
++                      else
++                              val |= MUSB_RXCSR_AUTOCLEAR
++                                      | MUSB_RXCSR_DMAENAB;
+                       musb_writew(epio, MUSB_RXCSR,
+                               MUSB_RXCSR_H_WZC_BITS | val);
+@@ -1666,6 +1676,7 @@ static int musb_schedule(
+       int                     best_end, epnum;
+       struct musb_hw_ep       *hw_ep = NULL;
+       struct list_head        *head = NULL;
++      u16                     maxpacket;
+       /* use fixed hardware for control and bulk */
+       switch (qh->type) {
+@@ -1708,6 +1719,13 @@ static int musb_schedule(
+       best_diff = 4096;
+       best_end = -1;
++      if (qh->maxpacket & (1<<11))
++              maxpacket = 2 * (qh->maxpacket & 0x7ff);
++      else if (qh->maxpacket & (1<<12))
++              maxpacket = 3 * (qh->maxpacket & 0x7ff);
++      else
++              maxpacket = (qh->maxpacket & 0x7ff);
++
+       for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+               int     diff;
+@@ -1718,9 +1736,9 @@ static int musb_schedule(
+                       continue;
+               if (is_in)
+-                      diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
++                      diff = hw_ep->max_packet_sz_rx - maxpacket;
+               else
+-                      diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
++                      diff = hw_ep->max_packet_sz_tx - maxpacket;
+               if (diff > 0 && best_diff > diff) {
+                       best_diff = diff;
+@@ -1797,13 +1815,6 @@ static int musb_urb_enqueue(
+       qh->is_ready = 1;
+       qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+-
+-      /* no high bandwidth support yet */
+-      if (qh->maxpacket & ~0x7ff) {
+-              ret = -EMSGSIZE;
+-              goto done;
+-      }
+-
+       qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+       qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+@@ -1897,7 +1908,6 @@ static int musb_urb_enqueue(
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+-done:
+       if (ret != 0) {
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
+               kfree(qh);
+-- 
+1.5.6
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-omap" in
+the body of a message to majordomo@vger.kernel.org
+More majordomo info at  http://vger.kernel.org/majordomo-info.html
+
diff --git a/packages/linux/omap3-pandora-kernel/nand.patch b/packages/linux/omap3-pandora-kernel/nand.patch
new file mode 100755 (executable)
index 0000000..4a6d8e6
--- /dev/null
@@ -0,0 +1,11 @@
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 2ede116..d18a8c9 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -196,7 +196,7 @@ static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)  {
+       struct nand_chip *nand = mtd->priv;
+
+-      __raw_readsl(nand->IO_ADDR_R, buf, len / 2);
++      readsw(nand->IO_ADDR_R, buf, len / 2);
+ }
+
+ /*
\ No newline at end of file
diff --git a/packages/linux/omap3-pandora-kernel/no-cortex-deadlock.patch b/packages/linux/omap3-pandora-kernel/no-cortex-deadlock.patch
new file mode 100755 (executable)
index 0000000..78547c8
--- /dev/null
@@ -0,0 +1,77 @@
+From: Mans Rullgard <mans@mansr.com>
+Date: Sat, 16 Aug 2008 23:03:06 +0000 (+0100)
+Subject: ARM: Workaround for erratum 451034
+X-Git-Url: http://git.mansr.com/?p=linux-omap;a=commitdiff_plain;h=b84fa87873ffb68ad23930cf6cddeea8bec43ede
+
+ARM: Workaround for erratum 451034
+
+On Cortex-A8 r1p0 and r1p1, executing a NEON store with an integer
+store in the store buffer, can cause a processor deadlock under
+certain conditions.
+
+Executing a DMB instruction before saving NEON/VFP registers and before
+return to userspace makes it safe to run code which includes similar
+counter-measures.  Userspace code can still trigger the deadlock, so
+a different workaround is required to safely run untrusted code.
+
+See ARM Cortex-A8 Errata Notice (PR120-PRDC-008070) for full details.
+
+Signed-off-by: Mans Rullgard <mans@mansr.com>
+---
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index aa475d9..41d536e 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1117,6 +1117,22 @@ config NEON
+         Say Y to include support code for NEON, the ARMv7 Advanced SIMD
+         Extension.
++config ARM_ERRATUM_451034
++      bool "Enable workaround for ARM erratum 451034"
++      depends on VFPv3
++      help
++        On Cortex-A8 r1p0 and r1p1, executing a NEON store with an integer
++        store in the store buffer, can cause a processor deadlock under
++        certain conditions.
++
++        See ARM Cortex-A8 Errata Notice (PR120-PRDC-008070) for full details.
++
++        Say Y to include a partial workaround.
++
++        WARNING: Even with this option enabled, userspace code can trigger
++        the deadlock.  To safely run untrusted code, a different fix is
++        required.
++
+ endmenu
+ menu "Userspace binary formats"
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 422f3cc..934798b 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -32,6 +32,9 @@
+       @ write all the working registers out of the VFP
+       .macro  VFPFSTMIA, base, tmp
++#ifdef CONFIG_ARM_ERRATUM_451034
++      dmb
++#endif
+ #if __LINUX_ARM_ARCH__ < 6
+       STC     p11, cr0, [\base],#33*4             @ FSTMIAX \base!, {d0-d15}
+ #else
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 060d7e2..9799a35 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -69,6 +69,10 @@ no_work_pending:
+       /* perform architecture specific actions before user return */
+       arch_ret_to_user r1, lr
++#ifdef CONFIG_ARM_ERRATUM_451034
++      dmb
++#endif
++
+       @ slow_restore_user_regs
+       ldr     r1, [sp, #S_PSR]                @ get calling cpsr
+       ldr     lr, [sp, #S_PC]!                @ get pc
diff --git a/packages/linux/omap3-pandora-kernel/no-empty-flash-warnings.patch b/packages/linux/omap3-pandora-kernel/no-empty-flash-warnings.patch
new file mode 100755 (executable)
index 0000000..ab344b0
--- /dev/null
@@ -0,0 +1,15 @@
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index 1d437de..33b3feb 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -647,8 +647,8 @@ scan_more:
+                       inbuf_ofs = ofs - buf_ofs;
+                       while (inbuf_ofs < scan_end) {
+                               if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
+-                                      printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
+-                                             empty_start, ofs);
++//                                    printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
++//                                           empty_start, ofs);
+                                       if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
+                                               return err;
+                                       goto scan_more;
diff --git a/packages/linux/omap3-pandora-kernel/no-harry-potter.diff b/packages/linux/omap3-pandora-kernel/no-harry-potter.diff
new file mode 100755 (executable)
index 0000000..2bb20ab
--- /dev/null
@@ -0,0 +1,11 @@
+--- /tmp/Makefile      2008-04-24 14:36:20.509598016 +0200
++++ git/arch/arm/Makefile      2008-04-24 14:36:31.949546584 +0200
+@@ -47,7 +47,7 @@
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+ # testing for a specific architecture or later rather impossible.
+-arch-$(CONFIG_CPU_32v7)               :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7a,-march=armv5t -Wa$(comma)-march=armv7a)
++arch-$(CONFIG_CPU_32v7)               :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
+ arch-$(CONFIG_CPU_32v6)               :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
+ # Only override the compiler option if ARMv6. The ARMv6K extensions are
+ # always available in ARMv7
diff --git a/packages/linux/omap3-pandora-kernel/omap-2430-lcd.patch b/packages/linux/omap3-pandora-kernel/omap-2430-lcd.patch
new file mode 100755 (executable)
index 0000000..8f8a687
--- /dev/null
@@ -0,0 +1,11 @@
+--- git/drivers/video/omap/lcd_2430sdp.c.orig  2007-08-13 14:35:17.000000000 -0700
++++ git/drivers/video/omap/lcd_2430sdp.c       2007-08-13 14:35:55.000000000 -0700
+@@ -32,7 +32,7 @@
+ #define LCD_PANEL_BACKLIGHT_GPIO      91
+ #define LCD_PANEL_ENABLE_GPIO         154
+ #define LCD_PIXCLOCK_MAX              5400 /* freq 5.4 MHz */
+-#define PM_RECEIVER             TWL4030_MODULE_PM_RECIEVER
++#define PM_RECEIVER             TWL4030_MODULE_PM_RECEIVER
+ #define ENABLE_VAUX2_DEDICATED  0x09
+ #define ENABLE_VAUX2_DEV_GRP    0x20
diff --git a/packages/linux/omap3-pandora-kernel/oprofile-0.9.3.armv7.diff b/packages/linux/omap3-pandora-kernel/oprofile-0.9.3.armv7.diff
new file mode 100755 (executable)
index 0000000..1eedbb5
--- /dev/null
@@ -0,0 +1,599 @@
+Hi,
+
+This patch adds Oprofile support on ARMv7, using the PMNC unit.
+Tested on OMAP3430 SDP.
+
+Feedback and comments are welcome.
+
+The patch to user space components is attached for reference. It applies 
+against version 0.9.3 of oprofile source 
+(http://prdownloads.sourceforge.net/oprofile/oprofile-0.9.3.tar.gz).
+
+Regards,
+Jean.
+
+---
+
+From: Jean Pihet <jpihet@mvista.com>
+Date: Tue, 6 May 2008 17:21:44 +0200
+Subject: [PATCH] ARM: Add ARMv7 oprofile support
+
+Add ARMv7 Oprofile support to kernel
+
+Signed-off-by: Jean Pihet <jpihet@mvista.com>
+---
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index c60a27d..60b50a0 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -161,6 +161,11 @@ config OPROFILE_MPCORE
+ config OPROFILE_ARM11_CORE
+       bool
++config OPROFILE_ARMV7
++      def_bool y
++      depends on CPU_V7 && !SMP
++      bool
++
+ endif
+ config VECTORS_BASE
+diff --git a/arch/arm/oprofile/Makefile b/arch/arm/oprofile/Makefile
+index e61d0cc..88e31f5 100644
+--- a/arch/arm/oprofile/Makefile
++++ b/arch/arm/oprofile/Makefile
+@@ -11,3 +11,4 @@ oprofile-$(CONFIG_CPU_XSCALE)                += op_model_xscale.o
+ oprofile-$(CONFIG_OPROFILE_ARM11_CORE)        += op_model_arm11_core.o
+ oprofile-$(CONFIG_OPROFILE_ARMV6)     += op_model_v6.o
+ oprofile-$(CONFIG_OPROFILE_MPCORE)    += op_model_mpcore.o
++oprofile-$(CONFIG_OPROFILE_ARMV7)     += op_model_v7.o
+diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
+index 0a5cf3a..3fcd752 100644
+--- a/arch/arm/oprofile/common.c
++++ b/arch/arm/oprofile/common.c
+@@ -145,6 +145,10 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) 
+       spec = &op_mpcore_spec;
+ #endif
++#ifdef CONFIG_OPROFILE_ARMV7
++      spec = &op_armv7_spec;
++#endif
++
+       if (spec) {
+               ret = spec->init();
+               if (ret < 0)
+diff --git a/arch/arm/oprofile/op_arm_model.h 
+b/arch/arm/oprofile/op_arm_model.h
+index 4899c62..8c4e4f6 100644
+--- a/arch/arm/oprofile/op_arm_model.h
++++ b/arch/arm/oprofile/op_arm_model.h
+@@ -26,6 +26,7 @@ extern struct op_arm_model_spec op_xscale_spec;
+ extern struct op_arm_model_spec op_armv6_spec;
+ extern struct op_arm_model_spec op_mpcore_spec;
++extern struct op_arm_model_spec op_armv7_spec;
+ extern void arm_backtrace(struct pt_regs * const regs, unsigned int depth);
+diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c
+new file mode 100644
+index 0000000..a159bc1
+--- /dev/null
++++ b/arch/arm/oprofile/op_model_v7.c
+@@ -0,0 +1,407 @@
++/**
++ * @file op_model_v7.c
++ * ARM V7 (Cortex A8) Event Monitor Driver
++ *
++ * @remark Copyright 2008 Jean Pihet <jpihet@mvista.com>
++ * @remark Copyright 2004 ARM SMP Development Team
++ */
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <linux/oprofile.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/smp.h>
++
++#include "op_counter.h"
++#include "op_arm_model.h"
++#include "op_model_v7.h"
++
++/* #define DEBUG */
++
++
++/*
++ * ARM V7 PMNC support
++ */
++
++static u32 cnt_en[CNTMAX];
++
++static inline void armv7_pmnc_write(u32 val)
++{
++      val &= PMNC_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
++}
++
++static inline u32 armv7_pmnc_read(void)
++{
++      u32 val;
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
++      return val;
++}
++
++static inline u32 armv7_pmnc_enable_counter(unsigned int cnt)
++{
++      u32 val;
++
++      if (cnt >= CNTMAX) {
++              printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
++                      " %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      if (cnt == CCNT)
++              val = CNTENS_C;
++      else
++              val = (1 << (cnt - CNT0));
++
++      val &= CNTENS_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
++
++      return cnt;
++}
++
++static inline u32 armv7_pmnc_disable_counter(unsigned int cnt)
++{
++      u32 val;
++
++      if (cnt >= CNTMAX) {
++              printk(KERN_ERR "oprofile: CPU%u disabling wrong PMNC counter"
++                      " %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      if (cnt == CCNT)
++              val = CNTENC_C;
++      else
++              val = (1 << (cnt - CNT0));
++
++      val &= CNTENC_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
++
++      return cnt;
++}
++
++static inline u32 armv7_pmnc_enable_intens(unsigned int cnt)
++{
++      u32 val;
++
++      if (cnt >= CNTMAX) {
++              printk(KERN_ERR "oprofile: CPU%u enabling wrong PMNC counter"
++                      " interrupt enable %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      if (cnt == CCNT)
++              val = INTENS_C;
++      else
++              val = (1 << (cnt - CNT0));
++
++      val &= INTENS_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
++
++      return cnt;
++}
++
++static inline u32 armv7_pmnc_getreset_flags(void)
++{
++      u32 val;
++
++      /* Read */
++      asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
++
++      /* Write to clear flags */
++      val &= FLAG_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
++
++      return val;
++}
++
++static inline int armv7_pmnc_select_counter(unsigned int cnt)
++{
++      u32 val;
++
++      if ((cnt == CCNT) || (cnt >= CNTMAX)) {
++              printk(KERN_ERR "oprofile: CPU%u selecting wrong PMNC counteri"
++                      " %d\n", smp_processor_id(), cnt);
++              return -1;
++      }
++
++      val = (cnt - CNT0) & SELECT_MASK;
++      asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
++
++      return cnt;
++}
++
++static inline void armv7_pmnc_write_evtsel(unsigned int cnt, u32 val)
++{
++      if (armv7_pmnc_select_counter(cnt) == cnt) {
++              val &= EVTSEL_MASK;
++              asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
++      }
++}
++
++static void armv7_pmnc_reset_counter(unsigned int cnt)
++{
++      u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
++      u32 val = -(u32)counter_config[cpu_cnt].count;
++
++      switch (cnt) {
++      case CCNT:
++              armv7_pmnc_disable_counter(cnt);
++
++              asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
++
++              if (cnt_en[cnt] != 0)
++                  armv7_pmnc_enable_counter(cnt);
++
++              break;
++
++      case CNT0:
++      case CNT1:
++      case CNT2:
++      case CNT3:
++              armv7_pmnc_disable_counter(cnt);
++
++              if (armv7_pmnc_select_counter(cnt) == cnt)
++                  asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
++
++              if (cnt_en[cnt] != 0)
++                  armv7_pmnc_enable_counter(cnt);
++
++              break;
++
++      default:
++              printk(KERN_ERR "oprofile: CPU%u resetting wrong PMNC counter"
++                      " %d\n", smp_processor_id(), cnt);
++              break;
++      }
++}
++
++int armv7_setup_pmnc(void)
++{
++      unsigned int cnt;
++
++      if (armv7_pmnc_read() & PMNC_E) {
++              printk(KERN_ERR "oprofile: CPU%u PMNC still enabled when setup"
++                      " new event counter.\n", smp_processor_id());
++              return -EBUSY;
++      }
++
++      /*
++       * Initialize & Reset PMNC: C bit, D bit and P bit.
++       *  Note: Using a slower count for CCNT (D bit: divide by 64) results
++       *   in a more stable system
++       */
++      armv7_pmnc_write(PMNC_P | PMNC_C | PMNC_D);
++
++
++      for (cnt = CCNT; cnt < CNTMAX; cnt++) {
++              unsigned long event;
++              u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
++
++              /*
++               * Disable counter
++               */
++              armv7_pmnc_disable_counter(cnt);
++              cnt_en[cnt] = 0;
++
++              if (!counter_config[cpu_cnt].enabled)
++                      continue;
++
++              event = counter_config[cpu_cnt].event & 255;
++
++              /*
++               * Set event (if destined for PMNx counters)
++               * We don't need to set the event if it's a cycle count
++               */
++              if (cnt != CCNT)
++                      armv7_pmnc_write_evtsel(cnt, event);
++
++              /*
++               * Enable interrupt for this counter
++               */
++              armv7_pmnc_enable_intens(cnt);
++
++              /*
++               * Reset counter
++               */
++              armv7_pmnc_reset_counter(cnt);
++
++              /*
++               * Enable counter
++               */
++              armv7_pmnc_enable_counter(cnt);
++              cnt_en[cnt] = 1;
++      }
++
++      return 0;
++}
++
++static inline void armv7_start_pmnc(void)
++{
++      armv7_pmnc_write(armv7_pmnc_read() | PMNC_E);
++}
++
++static inline void armv7_stop_pmnc(void)
++{
++      armv7_pmnc_write(armv7_pmnc_read() & ~PMNC_E);
++}
++
++/*
++ * CPU counters' IRQ handler (one IRQ per CPU)
++ */
++static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg)
++{
++      struct pt_regs *regs = get_irq_regs();
++      unsigned int cnt;
++      u32 flags;
++
++
++      /*
++       * Stop IRQ generation
++       */
++      armv7_stop_pmnc();
++
++      /*
++       * Get and reset overflow status flags
++       */
++      flags = armv7_pmnc_getreset_flags();
++
++      /*
++       * Cycle counter
++       */
++      if (flags & FLAG_C) {
++              u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), CCNT);
++              armv7_pmnc_reset_counter(CCNT);
++              oprofile_add_sample(regs, cpu_cnt);
++      }
++
++      /*
++       * PMNC counters 0:3
++       */
++      for (cnt = CNT0; cnt < CNTMAX; cnt++) {
++              if (flags & (1 << (cnt - CNT0))) {
++                      u32 cpu_cnt = CPU_COUNTER(smp_processor_id(), cnt);
++                      armv7_pmnc_reset_counter(cnt);
++                      oprofile_add_sample(regs, cpu_cnt);
++              }
++      }
++
++      /*
++       * Allow IRQ generation
++       */
++      armv7_start_pmnc();
++
++      return IRQ_HANDLED;
++}
++
++int armv7_request_interrupts(int *irqs, int nr)
++{
++      unsigned int i;
++      int ret = 0;
++
++      for (i = 0; i < nr; i++) {
++              ret = request_irq(irqs[i], armv7_pmnc_interrupt,
++                              IRQF_DISABLED, "CP15 PMNC", NULL);
++              if (ret != 0) {
++                      printk(KERN_ERR "oprofile: unable to request IRQ%u"
++                              " for ARMv7\n",
++                             irqs[i]);
++                      break;
++              }
++      }
++
++      if (i != nr)
++              while (i-- != 0)
++                      free_irq(irqs[i], NULL);
++
++      return ret;
++}
++
++void armv7_release_interrupts(int *irqs, int nr)
++{
++      unsigned int i;
++
++      for (i = 0; i < nr; i++)
++              free_irq(irqs[i], NULL);
++}
++
++#ifdef DEBUG
++static void armv7_pmnc_dump_regs(void)
++{
++      u32 val;
++      unsigned int cnt;
++
++      printk(KERN_INFO "PMNC registers dump:\n");
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
++      printk(KERN_INFO "PMNC  =0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
++      printk(KERN_INFO "CNTENS=0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
++      printk(KERN_INFO "INTENS=0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
++      printk(KERN_INFO "FLAGS =0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
++      printk(KERN_INFO "SELECT=0x%08x\n", val);
++
++      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
++      printk(KERN_INFO "CCNT  =0x%08x\n", val);
++
++      for (cnt = CNT0; cnt < CNTMAX; cnt++) {
++              armv7_pmnc_select_counter(cnt);
++              asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
++              printk(KERN_INFO "CNT[%d] count =0x%08x\n", cnt-CNT0, val);
++              asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
++              printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", cnt-CNT0, val);
++      }
++}
++#endif
++
++
++static int irqs[] = {
++#ifdef CONFIG_ARCH_OMAP3
++      INT_34XX_BENCH_MPU_EMUL,
++#endif
++};
++
++static void armv7_pmnc_stop(void)
++{
++#ifdef DEBUG
++      armv7_pmnc_dump_regs();
++#endif
++      armv7_stop_pmnc();
++      armv7_release_interrupts(irqs, ARRAY_SIZE(irqs));
++}
++
++static int armv7_pmnc_start(void)
++{
++      int ret;
++
++#ifdef DEBUG
++      armv7_pmnc_dump_regs();
++#endif
++      ret = armv7_request_interrupts(irqs, ARRAY_SIZE(irqs));
++      if (ret >= 0)
++              armv7_start_pmnc();
++
++      return ret;
++}
++
++static int armv7_detect_pmnc(void)
++{
++      return 0;
++}
++
++struct op_arm_model_spec op_armv7_spec = {
++      .init           = armv7_detect_pmnc,
++      .num_counters   = 5,
++      .setup_ctrs     = armv7_setup_pmnc,
++      .start          = armv7_pmnc_start,
++      .stop           = armv7_pmnc_stop,
++      .name           = "arm/armv7",
++};
+diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h
+new file mode 100644
+index 0000000..08f40ea
+--- /dev/null
++++ b/arch/arm/oprofile/op_model_v7.h
+@@ -0,0 +1,101 @@
++/**
++ * @file op_model_v7.h
++ * ARM v7 (Cortex A8) Event Monitor Driver
++ *
++ * @remark Copyright 2008 Jean Pihet <jpihet@mvista.com>
++ * @remark Copyright 2004 ARM SMP Development Team
++ * @remark Copyright 2000-2004 Deepak Saxena <dsaxena@mvista.com>
++ * @remark Copyright 2000-2004 MontaVista Software Inc
++ * @remark Copyright 2004 Dave Jiang <dave.jiang@intel.com>
++ * @remark Copyright 2004 Intel Corporation
++ * @remark Copyright 2004 Zwane Mwaikambo <zwane@arm.linux.org.uk>
++ * @remark Copyright 2004 Oprofile Authors
++ *
++ * @remark Read the file COPYING
++ *
++ * @author Zwane Mwaikambo
++ */
++#ifndef OP_MODEL_V7_H
++#define OP_MODEL_V7_H
++
++/*
++ * Per-CPU PMNC: config reg
++ */
++#define PMNC_E                (1 << 0)        /* Enable all counters */
++#define PMNC_P                (1 << 1)        /* Reset all counters */
++#define PMNC_C                (1 << 2)        /* Cycle counter reset */
++#define PMNC_D                (1 << 3)        /* CCNT counts every 64th cpu cycle */
++#define PMNC_X                (1 << 4)        /* Export to ETM */
++#define PMNC_DP               (1 << 5)        /* Disable CCNT if non-invasive debug*/
++#define       PMNC_MASK       0x3f            /* Mask for writable bits */
++
++/*
++ * Available counters
++ */
++#define CCNT          0
++#define CNT0          1
++#define CNT1          2
++#define CNT2          3
++#define CNT3          4
++#define CNTMAX                5
++
++#define CPU_COUNTER(cpu, counter)     ((cpu) * CNTMAX + (counter))
++
++/*
++ * CNTENS: counters enable reg
++ */
++#define CNTENS_P0     (1 << 0)
++#define CNTENS_P1     (1 << 1)
++#define CNTENS_P2     (1 << 2)
++#define CNTENS_P3     (1 << 3)
++#define CNTENS_C      (1 << 31)
++#define       CNTENS_MASK     0x8000000f      /* Mask for writable bits */
++
++/*
++ * CNTENC: counters disable reg
++ */
++#define CNTENC_P0     (1 << 0)
++#define CNTENC_P1     (1 << 1)
++#define CNTENC_P2     (1 << 2)
++#define CNTENC_P3     (1 << 3)
++#define CNTENC_C      (1 << 31)
++#define       CNTENC_MASK     0x8000000f      /* Mask for writable bits */
++
++/*
++ * INTENS: counters overflow interrupt enable reg
++ */
++#define INTENS_P0     (1 << 0)
++#define INTENS_P1     (1 << 1)
++#define INTENS_P2     (1 << 2)
++#define INTENS_P3     (1 << 3)
++#define INTENS_C      (1 << 31)
++#define       INTENS_MASK     0x8000000f      /* Mask for writable bits */
++
++/*
++ * EVTSEL: Event selection reg
++ */
++#define       EVTSEL_MASK     0x7f            /* Mask for writable bits */
++
++/*
++ * SELECT: Counter selection reg
++ */
++#define       SELECT_MASK     0x1f            /* Mask for writable bits */
++
++/*
++ * FLAG: counters overflow flag status reg
++ */
++#define FLAG_P0               (1 << 0)
++#define FLAG_P1               (1 << 1)
++#define FLAG_P2               (1 << 2)
++#define FLAG_P3               (1 << 3)
++#define FLAG_C                (1 << 31)
++#define       FLAG_MASK       0x8000000f      /* Mask for writable bits */
++
++
++int armv7_setup_pmu(void);
++int armv7_start_pmu(void);
++int armv7_stop_pmu(void);
++int armv7_request_interrupts(int *, int);
++void armv7_release_interrupts(int *, int);
++
++#endif
+
diff --git a/packages/linux/omap3-pandora-kernel/pvr/dispc.patch b/packages/linux/omap3-pandora-kernel/pvr/dispc.patch
new file mode 100755 (executable)
index 0000000..1697448
--- /dev/null
@@ -0,0 +1,46 @@
+--- kernel-2.6.27.orig/drivers/video/omap/dispc.c
++++ kernel-2.6.27/drivers/video/omap/dispc.c
+@@ -314,6 +319,32 @@
+ }
+ EXPORT_SYMBOL(omap_dispc_enable_digit_out);
++extern void omap_dispc_set_plane_base(int plane, u32 paddr)
++{
++      u32 reg;
++      u32 val;
++
++      switch (plane) {
++      case 0:
++              reg = DISPC_GFX_BA0;
++              break;
++      case 1:
++              reg = DISPC_VID1_BASE + DISPC_VID_BA0;
++              break;
++      case 2:
++              reg = DISPC_VID2_BASE + DISPC_VID_BA0;
++              break;
++      default:
++              BUG();
++              return;
++      }
++
++      dispc_write_reg(reg, paddr);
++      val = dispc_read_reg(DISPC_CONTROL) | (1 << 5); /* GOLCD */
++      dispc_write_reg(DISPC_CONTROL, val);
++}
++EXPORT_SYMBOL(omap_dispc_set_plane_base);
++
+ static inline int _setup_plane(int plane, int channel_out,
+                                 u32 paddr, int screen_width,
+                                 int pos_x, int pos_y, int width, int height,
+--- /tmp/dispc.h       2008-12-09 15:13:12.000000000 +0100
++++ git/drivers/video/omap/dispc.h     2008-12-09 15:13:36.000000000 +0100
+@@ -32,6 +32,8 @@
+ #define DISPC_TFT_DATA_LINES_18               2
+ #define DISPC_TFT_DATA_LINES_24               3
++extern void omap_dispc_set_plane_base(int plane, u32 paddr);
++
+ extern void omap_dispc_set_lcd_size(int width, int height);
+ extern void omap_dispc_enable_lcd_out(int enable);
diff --git a/packages/linux/omap3-pandora-kernel/pvr/nokia-TI.diff b/packages/linux/omap3-pandora-kernel/pvr/nokia-TI.diff
new file mode 100755 (executable)
index 0000000..a4aca1e
--- /dev/null
@@ -0,0 +1,8798 @@
+ include4/img_types.h                                                 |    5 
+ include4/pdumpdefs.h                                                 |    1 
+ include4/pvrmodule.h                                                 |   31 
+ include4/pvrversion.h                                                |    8 
+ include4/services.h                                                  |   46 
+ include4/servicesext.h                                               |    6 
+ include4/sgxapi_km.h                                                 |   65 
+ services4/3rdparty/bufferclass_example/bufferclass_example.c         |   32 
+ services4/3rdparty/bufferclass_example/bufferclass_example.h         |   25 
+ services4/3rdparty/bufferclass_example/bufferclass_example_linux.c   |   20 
+ services4/3rdparty/bufferclass_example/bufferclass_example_private.c |   76 -
+ services4/3rdparty/bufferclass_example/kbuild/Makefile               |   40 
+ services4/3rdparty/dc_omap3430_linux/kbuild/Makefile                 |   39 
+ services4/3rdparty/dc_omap3430_linux/omaplfb.h                       |    7 
+ services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c          |   60 
+ services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c                 |   52 
+ services4/include/pvr_bridge.h                                       |   26 
+ services4/include/servicesint.h                                      |   17 
+ services4/include/sgx_bridge.h                                       |   95 +
+ services4/include/sgx_bridge_km.h                                    |  139 -
+ services4/include/sgxinfo.h                                          |  347 ++--
+ services4/srvkm/Makefile                                             |   68 
+ services4/srvkm/bridged/bridged_pvr_bridge.c                         |  732 ++++++++-
+ services4/srvkm/common/deviceclass.c                                 |    6 
+ services4/srvkm/common/devicemem.c                                   |    3 
+ services4/srvkm/common/handle.c                                      |   58 
+ services4/srvkm/common/power.c                                       |   15 
+ services4/srvkm/common/pvrsrv.c                                      |  151 +-
+ services4/srvkm/common/queue.c                                       |    4 
+ services4/srvkm/common/resman.c                                      |   13 
+ services4/srvkm/devices/sgx/mmu.c                                    |    2 
+ services4/srvkm/devices/sgx/mmu.h                                    |    2 
+ services4/srvkm/devices/sgx/pb.c                                     |   37 
+ services4/srvkm/devices/sgx/sgx2dcore.c                              |   21 
+ services4/srvkm/devices/sgx/sgx_bridge_km.h                          |  158 ++
+ services4/srvkm/devices/sgx/sgxinfokm.h                              |  146 +
+ services4/srvkm/devices/sgx/sgxinit.c                                |  734 ++--------
+ services4/srvkm/devices/sgx/sgxkick.c                                |  327 +++-
+ services4/srvkm/devices/sgx/sgxreset.c                               |  330 ++++
+ services4/srvkm/devices/sgx/sgxtransfer.c                            |  312 ++++
+ services4/srvkm/devices/sgx/sgxutils.c                               |  459 +++---
+ services4/srvkm/devices/sgx/sgxutils.h                               |   28 
+ services4/srvkm/env/linux/env_data.h                                 |    8 
+ services4/srvkm/env/linux/event.c                                    |  221 +++
+ services4/srvkm/env/linux/event.h                                    |   32 
+ services4/srvkm/env/linux/kbuild/Makefile                            |   81 +
+ services4/srvkm/env/linux/mm.c                                       |    8 
+ services4/srvkm/env/linux/module.c                                   |  342 +++-
+ services4/srvkm/env/linux/osfunc.c                                   |  347 +++-
+ services4/srvkm/env/linux/pdump.c                                    |   13 
+ services4/srvkm/env/linux/proc.c                                     |   17 
+ services4/srvkm/env/linux/pvr_debug.c                                |    2 
+ services4/srvkm/hwdefs/sgxdefs.h                                     |    4 
+ services4/srvkm/hwdefs/sgxerrata.h                                   |    9 
+ services4/srvkm/hwdefs/sgxfeaturedefs.h                              |   11 
+ services4/srvkm/include/device.h                                     |   35 
+ services4/srvkm/include/handle.h                                     |   10 
+ services4/srvkm/include/osfunc.h                                     |   32 
+ services4/srvkm/include/pdump_km.h                                   |    2 
+ services4/srvkm/include/resman.h                                     |    5 
+ services4/srvkm/include/srvkm.h                                      |    4 
+ services4/system/include/syscommon.h                                 |    2 
+ services4/system/omap3430/sysconfig.c                                |   24 
+ services4/system/omap3430/sysconfig.h                                |    7 
+ services4/system/omap3430/sysutils.c                                 |    2 
+ 65 files changed, 4286 insertions(+), 1675 deletions(-)
+
+
+diff -Nurd git/drivers/gpu/pvr/include4/img_types.h git/drivers/gpu/pvr/include4/img_types.h
+--- git/drivers/gpu/pvr/include4/img_types.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/img_types.h   2008-12-18 15:47:29.000000000 +0100
+@@ -43,7 +43,10 @@
+ typedef signed long           IMG_INT32,      *IMG_PINT32;
+       #if defined(LINUX)
+-
++#if !defined(USE_CODE)
++              typedef unsigned long long              IMG_UINT64,     *IMG_PUINT64;
++              typedef long long                               IMG_INT64,      *IMG_PINT64;
++#endif
+       #else
+               #error("define an OS")
+diff -Nurd git/drivers/gpu/pvr/include4/pdumpdefs.h git/drivers/gpu/pvr/include4/pdumpdefs.h
+--- git/drivers/gpu/pvr/include4/pdumpdefs.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/pdumpdefs.h   2008-12-18 15:47:29.000000000 +0100
+@@ -73,6 +73,7 @@
+       PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
+       PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
+       PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++      PVRSRV_PDUMP_MEM_FORMAT_HYBRID = 10,
+       
+       PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
+ } PDUMP_MEM_FORMAT;
+diff -Nurd git/drivers/gpu/pvr/include4/pvrmodule.h git/drivers/gpu/pvr/include4/pvrmodule.h
+--- git/drivers/gpu/pvr/include4/pvrmodule.h   1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/include4/pvrmodule.h   2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,31 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef       _PVRMODULE_H_
++#define       _PVRMODULE_H_
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++#endif        
+diff -Nurd git/drivers/gpu/pvr/include4/pvrversion.h git/drivers/gpu/pvr/include4/pvrversion.h
+--- git/drivers/gpu/pvr/include4/pvrversion.h  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/pvrversion.h  2008-12-18 15:47:29.000000000 +0100
+@@ -28,10 +28,10 @@
+ #define _PVRVERSION_H_
+ #define PVRVERSION_MAJ 1
+-#define PVRVERSION_MIN 1
+-#define PVRVERSION_BRANCH 11
+-#define PVRVERSION_BUILD 970
+-#define PVRVERSION_STRING "1.1.11.970"
++#define PVRVERSION_MIN 2
++#define PVRVERSION_BRANCH 12
++#define PVRVERSION_BUILD 838
++#define PVRVERSION_STRING "1.2.12.838"
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/include4/servicesext.h git/drivers/gpu/pvr/include4/servicesext.h
+--- git/drivers/gpu/pvr/include4/servicesext.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/servicesext.h 2008-12-18 15:47:29.000000000 +0100
+@@ -150,6 +150,8 @@
+       PVRSRV_PIXEL_FORMAT_V8U8,
+       PVRSRV_PIXEL_FORMAT_V16U16,
+       PVRSRV_PIXEL_FORMAT_QWVU8888,
++      PVRSRV_PIXEL_FORMAT_XLVU8888,
++      PVRSRV_PIXEL_FORMAT_QWVU16,
+       PVRSRV_PIXEL_FORMAT_D16,
+       PVRSRV_PIXEL_FORMAT_D24S8,
+       PVRSRV_PIXEL_FORMAT_D24X8,
+@@ -159,7 +161,9 @@
+       PVRSRV_PIXEL_FORMAT_YUY2,
+       PVRSRV_PIXEL_FORMAT_DXT23,
+       PVRSRV_PIXEL_FORMAT_DXT45,      
+-      PVRSRV_PIXEL_FORMAT_G32R32F,    
++      PVRSRV_PIXEL_FORMAT_G32R32F,
++      PVRSRV_PIXEL_FORMAT_NV11,
++      PVRSRV_PIXEL_FORMAT_NV12,
+       PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
+ } PVRSRV_PIXEL_FORMAT;
+diff -Nurd git/drivers/gpu/pvr/include4/services.h git/drivers/gpu/pvr/include4/services.h
+--- git/drivers/gpu/pvr/include4/services.h    2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/services.h    2008-12-18 15:47:29.000000000 +0100
+@@ -36,16 +36,14 @@
+ #include "pdumpdefs.h"
+-#if defined(SERVICES4)
+ #define IMG_CONST const
+-#else
+-#define IMG_CONST
+-#endif
+ #define PVRSRV_MAX_CMD_SIZE           1024
+ #define PVRSRV_MAX_DEVICES            16      
++#define EVENTOBJNAME_MAXLENGTH (50)
++
+ #define PVRSRV_MEM_READ                                               (1<<0)
+ #define PVRSRV_MEM_WRITE                                      (1<<1)
+ #define PVRSRV_MEM_CACHE_CONSISTENT                   (1<<2)
+@@ -90,6 +88,7 @@
+ #define PVRSRV_MISC_INFO_TIMER_PRESENT                        (1<<0)
+ #define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT            (1<<1)
+ #define PVRSRV_MISC_INFO_MEMSTATS_PRESENT             (1<<2)
++#define PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT    (1<<3)
+ #define PVRSRV_PDUMP_MAX_FILENAME_SIZE                        20
+ #define PVRSRV_PDUMP_MAX_COMMENT_SIZE                 200
+@@ -133,7 +132,8 @@
+       IMG_OPENGLES2  = 0x00000003,
+       IMG_D3DM           = 0x00000004,
+       IMG_SRV_UM         = 0x00000005,
+-      IMG_OPENVG         = 0x00000006
++      IMG_OPENVG         = 0x00000006,
++      IMG_SRVCLIENT  = 0x00000007,
+ } IMG_MODULE_ID;
+@@ -202,10 +202,8 @@
+       
+       IMG_PVOID                               pvLinAddr;      
+-#if defined(SERVICES4)
+     
+       IMG_PVOID                               pvLinAddrKM;
+-#endif
+       
+       
+       IMG_DEV_VIRTADDR                sDevVAddr;
+@@ -294,6 +292,14 @@
+ } PVRSRV_DEVICE_IDENTIFIER;
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++      
++      IMG_CHAR        szName[EVENTOBJNAME_MAXLENGTH];
++      
++      IMG_HANDLE      hOSEventKM;
++
++} PVRSRV_EVENTOBJECT;
+ typedef struct _PVRSRV_MISC_INFO_
+ {
+@@ -313,9 +319,14 @@
+       IMG_UINT32      ui32MemoryStrLen;
+       
+       
++      PVRSRV_EVENTOBJECT      sGlobalEventObject;
++      IMG_HANDLE                      hOSGlobalEvent;
++      
++      
+       
+ } PVRSRV_MISC_INFO;
++
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
+@@ -335,7 +346,7 @@
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
+ IMG_IMPORT
+-PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (PVRSRV_MISC_INFO *psMiscInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
+ #if 1
+ IMG_IMPORT
+@@ -348,7 +359,9 @@
+ #endif
+ IMG_IMPORT
+-PVRSRV_ERROR PollForValue (volatile IMG_UINT32 *pui32LinMemAddr,
++PVRSRV_ERROR PollForValue ( PVRSRV_CONNECTION *psConnection,
++                                                      IMG_HANDLE hOSEvent,
++                                                      volatile IMG_UINT32 *pui32LinMemAddr,
+                                                                       IMG_UINT32 ui32Value,
+                                                                       IMG_UINT32 ui32Mask,
+                                                                       IMG_UINT32 ui32Waitus,
+@@ -631,21 +644,18 @@
+                                                                                       IMG_UINT32 ui32RegValue,
+                                                                                       IMG_UINT32 ui32Flags);
+-#ifdef SERVICES4
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                                        IMG_UINT32 ui32RegAddr,
+                                                                                                        IMG_UINT32 ui32RegValue,
+                                                                                                        IMG_UINT32 ui32Mask,
+                                                                                                        IMG_UINT32 ui32Flags);
+-#endif
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                       IMG_UINT32 ui32RegAddr,
+                                                                                       IMG_UINT32 ui32RegValue,
+                                                                                       IMG_UINT32 ui32Mask);
+-#ifdef SERVICES4
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                       IMG_UINT32 ui32RegAddr,
+@@ -655,7 +665,6 @@
+                                                                                               PVRSRV_CLIENT_MEM_INFO *psMemInfo,
+                                                                                               IMG_UINT32 ui32Offset,
+                                                                                               IMG_DEV_PHYADDR sPDDevPAddr);
+-#endif
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
+@@ -676,7 +685,6 @@
+                                                                                        IMG_CONST IMG_CHAR *pszComment,
+                                                                                        IMG_BOOL bContinuous);
+-#if defined(SERVICES4)
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                         IMG_BOOL bContinuous,
+@@ -686,7 +694,6 @@
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                                          IMG_UINT32 ui32Flags,
+                                                                                                          IMG_CONST IMG_CHAR *pszFormat, ...);
+-#endif
+ IMG_IMPORT
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
+@@ -718,7 +725,7 @@
+                                                                                       IMG_UINT32 ui32Size,
+                                                                                       IMG_UINT32 ui32PDumpFlags);
+-#ifdef SERVICES4
++
+ IMG_IMPORT
+ IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
+@@ -726,7 +733,6 @@
+ PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
+                                                                                               IMG_UINT32 ui32RegOffset,
+                                                                                               IMG_BOOL bLastFrame);
+-#endif
+ IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(IMG_CHAR *pszLibraryName);
+ IMG_IMPORT PVRSRV_ERROR       PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
+@@ -777,9 +783,9 @@
+ IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_UINT32 ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
+ #endif 
+-PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION *psConnection, 
+-                                                                      IMG_HANDLE hOSEvent, 
+-                                                                      IMG_UINT32 ui32MSTimeout);
++IMG_IMPORT 
++PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION * psConnection, 
++                                                                      IMG_HANDLE hOSEvent);
+ #define TIME_NOT_PASSED_UINT32(a,b,c)         ((a - b) < c)
+diff -Nurd git/drivers/gpu/pvr/include4/sgxapi_km.h git/drivers/gpu/pvr/include4/sgxapi_km.h
+--- git/drivers/gpu/pvr/include4/sgxapi_km.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/include4/sgxapi_km.h   2008-12-18 15:47:29.000000000 +0100
+@@ -32,6 +32,7 @@
+ #endif
+ #include "sgxdefs.h"
++
+ #if defined(__linux__) && !defined(USE_CODE)
+       #if defined(__KERNEL__)
+               #include <asm/unistd.h>
+@@ -64,6 +65,8 @@
+ #define SGX_MAX_TA_STATUS_VALS        32
+ #define SGX_MAX_3D_STATUS_VALS        2
++#define SGX_MAX_SRC_SYNCS                     4
++
+ #define PFLAGS_POWERDOWN                      0x00000001
+ #define PFLAGS_POWERUP                                0x00000002
+  
+@@ -75,11 +78,60 @@
+       IMG_SYS_PHYADDR                 sPhysBase;                              
+ }SGX_SLAVE_PORT;
++#ifdef SUPPORT_SGX_HWPERF
++
++#define PVRSRV_SGX_HWPERF_CBSIZE                                      0x100   
++
++#define PVRSRV_SGX_HWPERF_INVALID                                     1
++#define PVRSRV_SGX_HWPERF_TRANSFER                                    2
++#define PVRSRV_SGX_HWPERF_TA                                          3
++#define PVRSRV_SGX_HWPERF_3D                                          4
++
++#define PVRSRV_SGX_HWPERF_ON                                          0x40
++
++
++typedef struct _PVRSRV_SGX_HWPERF_CBDATA_
++{
++      IMG_UINT32      ui32FrameNo;
++      IMG_UINT32      ui32Type;
++      IMG_UINT32      ui32StartTimeWraps;
++      IMG_UINT32      ui32StartTime;
++      IMG_UINT32      ui32EndTimeWraps;
++      IMG_UINT32      ui32EndTime;
++      IMG_UINT32      ui32ClockSpeed;
++      IMG_UINT32      ui32TimeMax;
++} PVRSRV_SGX_HWPERF_CBDATA;
++
++typedef struct _PVRSRV_SGX_HWPERF_CB_
++{
++      IMG_UINT32      ui32Woff;
++      IMG_UINT32      ui32Roff;
++      PVRSRV_SGX_HWPERF_CBDATA psHWPerfCBData[PVRSRV_SGX_HWPERF_CBSIZE];
++} PVRSRV_SGX_HWPERF_CB;
++
++
++typedef struct _SGX_MISC_INFO_HWPERF_RETRIEVE_CB
++{
++      PVRSRV_SGX_HWPERF_CBDATA*       psHWPerfData;   
++      IMG_UINT32                                      ui32ArraySize;  
++      IMG_UINT32                                      ui32DataCount;  
++      IMG_UINT32                                      ui32Time;               
++} SGX_MISC_INFO_HWPERF_RETRIEVE_CB;
++#endif 
++
++
+ typedef enum _SGX_MISC_INFO_REQUEST_
+ {
++      SGX_MISC_INFO_REQUEST_CLOCKSPEED = 0,
++#ifdef SUPPORT_SGX_HWPERF
++      SGX_MISC_INFO_REQUEST_HWPERF_CB_ON,
++      SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF,
++      SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB,
++#endif 
+       SGX_MISC_INFO_REQUEST_FORCE_I16                                 =  0x7fff
+ } SGX_MISC_INFO_REQUEST;
++
+ typedef struct _SGX_MISC_INFO_
+ {
+       SGX_MISC_INFO_REQUEST   eRequest;       
+@@ -87,6 +139,10 @@
+       union
+       {
+               IMG_UINT32      reserved;       
++              IMG_UINT32                                                                                      ui32SGXClockSpeed;
++#ifdef SUPPORT_SGX_HWPERF
++              SGX_MISC_INFO_HWPERF_RETRIEVE_CB                                        sRetrieveCB;
++#endif 
+       } uData;
+ } SGX_MISC_INFO;
+@@ -162,6 +218,15 @@
+ } PVR3DIF4_KICKTA_PDUMP, *PPVR3DIF4_KICKTA_PDUMP;
+ #endif        
++#if defined(TRANSFER_QUEUE)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define SGX_MAX_2D_BLIT_CMD_SIZE              26
++#define SGX_MAX_2D_SRC_SYNC_OPS                       3
++#endif
++#define SGX_MAX_TRANSFER_STATUS_VALS  64
++#define SGX_MAX_TRANSFER_SYNC_OPS     5
++#endif
++
+ #if defined (__cplusplus)
+ }
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c   2008-12-18 15:47:29.000000000 +0100
+@@ -197,11 +197,27 @@
+                       return PVRSRV_ERROR_OUT_OF_MEMORY;
+               }
++              
++
++              psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
++              psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
++              psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
++              psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;              
++              psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
++              psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
++
+               for(i=0; i < BC_EXAMPLE_NUM_BUFFERS; i++)
+               {
++                      IMG_UINT32 ui32Size = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++
++                      if(psDevInfo->sBufferInfo.pixelformat == PVRSRV_PIXEL_FORMAT_YUV420)
++                      {
++                              
++                              ui32Size += ((BC_EXAMPLE_STRIDE >> 1) * (BC_EXAMPLE_HEIGHT >> 1) << 1);
++                      }
+                       
+-                      if (AllocContigMemory(BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE,
++                      if (AllocContigMemory(ui32Size,
+                                                                 &psDevInfo->psSystemBuffer[i].hMemHandle,
+                                                                 &psDevInfo->psSystemBuffer[i].sCPUVAddr,
+                                                                 &sSystemBufferCPUPAddr) != PVRSRV_OK)
+@@ -211,12 +227,14 @@
+                       psDevInfo->ui32NumBuffers++;
+-                      psDevInfo->psSystemBuffer[i].ui32Size = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++                      psDevInfo->psSystemBuffer[i].ui32Size = ui32Size;
+                       psDevInfo->psSystemBuffer[i].sSysAddr = CpuPAddrToSysPAddr(sSystemBufferCPUPAddr);
+                       psDevInfo->psSystemBuffer[i].sPageAlignSysAddr.uiAddr = (psDevInfo->psSystemBuffer[i].sSysAddr.uiAddr & 0xFFFFF000);
+                       psDevInfo->psSystemBuffer[i].psSyncData = IMG_NULL;
+               }
++              psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ui32NumBuffers;
++
+               
+               psDevInfo->sBCJTable.ui32TableSize = sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE);
+@@ -234,16 +252,6 @@
+               {
+                       return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+               }
+-
+-              
+-
+-              psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
+-              psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
+-              psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
+-              psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;              
+-              psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
+-              psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
+-              psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ui32NumBuffers;
+       }
+       
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h   2008-12-18 15:47:29.000000000 +0100
+@@ -39,11 +39,32 @@
+ #define BC_EXAMPLE_NUM_BUFFERS        3
+-#define BC_EXAMPLE_WIDTH              (160)
++#define YUV420 1
++#ifdef YUV420
++
++#define BC_EXAMPLE_WIDTH              (320)
+ #define BC_EXAMPLE_HEIGHT             (160)
+-#define BC_EXAMPLE_STRIDE             (160*2)
++#define BC_EXAMPLE_STRIDE             (320)
++#define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_YUV420)
++
++#else
++#ifdef YUV422
++
++#define BC_EXAMPLE_WIDTH              (320)
++#define BC_EXAMPLE_HEIGHT             (160)
++#define BC_EXAMPLE_STRIDE             (320*2)
+ #define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_YVYU)
++#else
++
++#define BC_EXAMPLE_WIDTH              (320)
++#define BC_EXAMPLE_HEIGHT             (160)
++#define BC_EXAMPLE_STRIDE             (320*2)
++#define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_RGB565)
++
++#endif
++#endif
++
+ #define BC_EXAMPLE_DEVICEID            0
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c     2008-12-18 15:47:29.000000000 +0100
+@@ -38,11 +38,10 @@
+ #include "bufferclass_example.h"
+ #include "bufferclass_example_linux.h"
++#include "pvrmodule.h"
+ #define DEVNAME       "bc_example"
+-MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+-MODULE_LICENSE("GPL");
+ MODULE_SUPPORTED_DEVICE(DEVNAME);
+ int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+@@ -259,22 +258,11 @@
+       {
+               return PVRSRV_ERROR_OUT_OF_MEMORY;
+       }
+-      else
+-      {
+-              IMG_VOID *pvPage;
+-              IMG_VOID *pvEnd = pvLinAddr + ui32Size;
+-
+-              for(pvPage = pvLinAddr; pvPage < pvEnd;  pvPage += PAGE_SIZE)
+-              {
+-                      SetPageReserved(virt_to_page(pvPage));
+-              }
+-              pPhysAddr->uiAddr = dma;
+-              *pLinAddr = pvLinAddr;
++      pPhysAddr->uiAddr = dma;
++      *pLinAddr = pvLinAddr;
+-              return PVRSRV_OK;
+-      }
+-      return PVRSRV_ERROR_OUT_OF_MEMORY;
++      return PVRSRV_OK;
+ #endif
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c   2008-12-18 15:47:29.000000000 +0100
+@@ -26,6 +26,43 @@
+ #include "bufferclass_example.h"
++void FillYUV420Image(void *pvDest, int width, int height, int bytestride)
++{
++      static int iPhase = 0;
++      int i, j;
++      unsigned char u,v,y;
++      unsigned char *pui8y = (unsigned char *)pvDest;
++      unsigned short *pui16uv;
++      unsigned int count = 0;
++
++      for(j=0;j<height;j++)
++      {
++              for(i=0;i<width;i++)
++              {
++                      y = (((i+iPhase)>>6)%(2)==0)? 0x7f:0x00;
++
++                      pui8y[count++] = y;
++              }
++      }
++
++      pui16uv = (unsigned short *)((unsigned char *)pvDest + (width * height));
++      count = 0;
++
++      for(j=0;j<height;j+=2)
++      {
++              for(i=0;i<width;i+=2)
++              {
++                      u = (j<(height/2))? ((i<(width/2))? 0xFF:0x33) : ((i<(width/2))? 0x33:0xAA);
++                      v = (j<(height/2))? ((i<(width/2))? 0xAC:0x0) : ((i<(width/2))? 0x03:0xEE);
++
++                      
++                      pui16uv[count++] = (v << 8) | u;
++
++              }
++      }
++
++      iPhase++;
++}
+ void FillYUV422Image(void *pvDest, int width, int height, int bytestride)
+ {
+@@ -37,12 +74,12 @@
+       for(y=0;y<height;y++)
+       {
+-              for(x=0;x<width >> 1;x++)
++              for(x=0;x<width;x+=2)
+               {
+-                      u = (y<(height/2))? ((x<(width/4))? 0xFF:0x33) : ((x<(width/4))? 0x33:0xAA);
+-                      v = (y<(height/2))? ((x<(width/4))? 0xAA:0x0) : ((x<(width/4))? 0x03:0xEE);
++                      u = (y<(height/2))? ((x<(width/2))? 0xFF:0x33) : ((x<(width/2))? 0x33:0xAA);
++                      v = (y<(height/2))? ((x<(width/2))? 0xAA:0x0) : ((x<(width/2))? 0x03:0xEE);
+-                      y0 = y1 = (((x+iPhase)>>4)%(2)==0)? 0x7f:0x00;
++                      y0 = y1 = (((x+iPhase)>>6)%(2)==0)? 0x7f:0x00;
+                       
+                       pui32yuv[count++] = (y1 << 24) | (v << 16) | (y0 << 8) | u;
+@@ -115,19 +152,36 @@
+       
+       psSyncData = psBuffer->psSyncData;
+-      
+       if(psSyncData)
+       {
++              
++              if(psSyncData->ui32ReadOpsPending != psSyncData->ui32ReadOpsComplete)
++              {
++                      return -1;
++              }
++
++              
+               psSyncData->ui32WriteOpsPending++;
+       }
+-      if(psBufferInfo->pixelformat == PVRSRV_PIXEL_FORMAT_RGB565)
+-      {
+-              FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
+-      }
+-      else
++      switch(psBufferInfo->pixelformat)
+       {
+-              FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++              case PVRSRV_PIXEL_FORMAT_RGB565:
++              default:
++              {
++                      FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++                      break;
++              }
++              case PVRSRV_PIXEL_FORMAT_YVYU:
++              {
++                      FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++                      break;
++              }
++              case PVRSRV_PIXEL_FORMAT_YUV420:
++              {
++                      FillYUV420Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++                      break;
++              }
+       }
+       
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/kbuild/Makefile 2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,40 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++# 
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++# 
++# This program is distributed in the hope it will be useful but, except 
++# as otherwise stated in writing, without any warranty; without even the 
++# implied warranty of merchantability or fitness for a particular purpose. 
++# See the GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++# 
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++# 
++#
++#
++
++MODULE                = bc_example
++
++INCLUDES =    -I$(EURASIAROOT)/include4 \
++              -I$(EURASIAROOT)/services4/include \
++              -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++              -I$(EURASIAROOT)/services4/system/include \
++
++SOURCES =     ../bufferclass_example.c \
++                      ../bufferclass_example_linux.c \
++                      ../bufferclass_example_private.c
++
++
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile   1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/kbuild/Makefile   2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,39 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++# 
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++# 
++# This program is distributed in the hope it will be useful but, except 
++# as otherwise stated in writing, without any warranty; without even the 
++# implied warranty of merchantability or fitness for a particular purpose. 
++# See the GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++# 
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++# 
++#
++#
++
++MODULE                = omaplfb
++
++INCLUDES =    -I$(EURASIAROOT)/include4 \
++              -I$(EURASIAROOT)/services4/include \
++              -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++              -I$(EURASIAROOT)/services4/system/include \
++
++SOURCES       =       ../omaplfb_displayclass.c \
++                      ../omaplfb_linux.c
++
++
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c    2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c    2008-12-18 15:47:29.000000000 +0100
+@@ -41,6 +41,7 @@
+ #define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
+ #define       DRIVER_PREFIX   "omaplfb"
++//extern int omap2_disp_get_output_dev(int);
+ static IMG_VOID *gpvAnchor;
+@@ -57,8 +58,6 @@
+                                                  PVR_POWER_STATE      eCurrentPowerState);
+ #endif
+-extern void omap_dispc_set_plane_base(int plane, IMG_UINT32 phys_addr);
+-
+ static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL;
+ static OMAPLFB_DEVINFO * GetAnchorPtr(IMG_VOID)
+@@ -124,28 +123,53 @@
+ static PVRSRV_ERROR Flip(OMAPLFB_SWAPCHAIN *psSwapChain,
+                                                 IMG_UINT32 aPhyAddr)
+ {
+-      if (1 /* omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD */)
++      IMG_UINT32 control;
++      OMAPLFB_DEVINFO *psDevInfo;
++
++      psDevInfo = GetAnchorPtr();     
++
++      if (1) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD)
+       {
+-                omap_dispc_set_plane_base(0, aPhyAddr);
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA0, aPhyAddr);
++
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA1, aPhyAddr);
++      
++              control = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_CONTROL);
++              control |= OMAP_CONTROL_GOLCD;
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_CONTROL, control);
++              
+               return PVRSRV_OK;
+       }
+       else
+-      if (0 /*omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV*/)
++      if (0) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV)
+       {
+-                omap_dispc_set_plane_base(0, aPhyAddr);
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA0, aPhyAddr);
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_GFX_BA1, aPhyAddr + psDevInfo->sFBInfo.ui32ByteStride);
++      
++              control = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_CONTROL);
++              control |= OMAP_CONTROL_GODIGITAL;
++              OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_CONTROL, control);
++              
+               return PVRSRV_OK;
+       }
+-
++      
+       return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ static IMG_VOID EnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+-
++      
++      IMG_UINT32 ui32InterruptEnable  = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
++      ui32InterruptEnable |= OMAPLCD_INTMASK_VSYNC;
++      OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ui32InterruptEnable );
+ }
+ static IMG_VOID DisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
++      
++      IMG_UINT32 ui32InterruptEnable = OMAPLFBVSyncReadReg(psSwapChain, OMAPLCD_IRQENABLE);
++      ui32InterruptEnable &= ~(OMAPLCD_INTMASK_VSYNC);
++      OMAPLFBVSyncWriteReg(psSwapChain, OMAPLCD_IRQENABLE, ui32InterruptEnable);
+ }
+ static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
+@@ -169,6 +193,7 @@
+ #endif
+       );
++      
+       memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
+       psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
+@@ -363,6 +388,7 @@
+       PVR_UNREFERENCED_PARAMETER(ui32OEMFlags);       
+       PVR_UNREFERENCED_PARAMETER(pui32SwapChainID);
+       
++      
+       if(!hDevice 
+       || !psDstSurfAttrib 
+       || !psSrcSurfAttrib 
+@@ -399,6 +425,7 @@
+       || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
+       || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
+       {
++              
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }               
+@@ -407,6 +434,7 @@
+       || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
+       || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
+       {
++              
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }               
+@@ -467,12 +495,21 @@
+       }
+       
++      psSwapChain->pvRegs = ioremap(psDevInfo->psLINFBInfo->fix.mmio_start, psDevInfo->psLINFBInfo->fix.mmio_len);
++
++      if (psSwapChain->pvRegs == IMG_NULL)
++      {
++              printk(KERN_WARNING DRIVER_PREFIX ": Couldn't map registers needed for flipping\n");
++              goto ErrorFreeVSyncItems;
++      }
++
++      
+       unblank_display(psDevInfo);
+       if (OMAPLFBInstallVSyncISR(psSwapChain) != PVRSRV_OK)
+       {
+               printk(KERN_WARNING DRIVER_PREFIX ": ISR handler failed to register\n");
+-              goto ErrorFreeVSyncItems;
++              goto ErrorUnmapRegisters;
+       }
+               
+       EnableVSyncInterrupt(psSwapChain);
+@@ -485,6 +522,8 @@
+       return PVRSRV_OK;
++ErrorUnmapRegisters:
++      iounmap(psSwapChain->pvRegs);
+ ErrorFreeVSyncItems:
+       OMAPLFBFreeKernelMem(psVSyncFlips);
+ ErrorFreeBuffers:
+@@ -590,6 +629,9 @@
+       }
+       
++      iounmap(psSwapChain->pvRegs);
++
++      
+       OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips);
+       OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
+       OMAPLFBFreeKernelMem(psSwapChain);
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h 2008-12-18 15:47:29.000000000 +0100
+@@ -121,6 +121,9 @@
+       IMG_UINT32 ui32RemoveIndex;
+       
++      IMG_VOID *pvRegs;
++
++      
+       PVRSRV_DC_DISP2SRV_KMJTABLE     *psPVRJTable;
+ } OMAPLFB_SWAPCHAIN;
+@@ -194,8 +197,8 @@
+ IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size);
+ IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID *pvMem);
+-IMG_VOID OMAPLFBWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+-IMG_UINT32 OMAPLFBReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset);
++IMG_VOID OMAPLFBVSyncWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++IMG_UINT32 OMAPLFBVSyncReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset);
+ PVRSRV_ERROR OMAPLFBGetLibFuncAddr(IMG_CHAR *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
+ PVRSRV_ERROR OMAPLFBInstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain);
+ PVRSRV_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain);
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   2008-12-18 15:47:29.000000000 +0100
+@@ -101,28 +100,57 @@
+ }
+ static void
+-OMAPLFBVSyncISR(void *arg)
++OMAPLFBVSyncISR(void *arg, struct pt_regs *regs)
+ {
+-      (void) OMAPLFBVSyncIHandler((OMAPLFB_SWAPCHAIN *)arg);
++      OMAPLFB_SWAPCHAIN *psSwapChain= (OMAPLFB_SWAPCHAIN *)arg;
++      
++      (void) OMAPLFBVSyncIHandler(psSwapChain);
+ }
+-#define DISPC_IRQ_VSYNC 0x0002
+-
+ PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+-        if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain) != 0)
+-            return PVRSRV_ERROR_OUT_OF_MEMORY; /* not worth a proper mapping */
+-
++      if (1) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD)
++       {
++              if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR,
++                                      psSwapChain) != 0)
++              {
++                      printk("request OMAPLCD IRQ failed");
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++      }
++      else
++      if (0) //omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV)
++      {
++              if (omap_dispc_request_irq(DISPC_IRQSTATUS_EVSYNC_EVEN|DISPC_IRQSTATUS_EVSYNC_ODD, OMAPLFBVSyncISR, psSwapChain) != 0)
++              {
++                      printk("request OMAPLCD IRQ failed");
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++      }
++              
+       return PVRSRV_OK;
+ }
+ PVRSRV_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+-        omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
++      omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
++              
++      return PVRSRV_OK;               
++}
+-      return PVRSRV_OK;
++IMG_VOID OMAPLFBVSyncWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++      IMG_VOID *pvRegAddr = (IMG_VOID *)((IMG_UINT8 *)psSwapChain->pvRegs + ui32Offset);
++
++      
++      writel(ui32Value, pvRegAddr);
++}
++
++IMG_UINT32 OMAPLFBVSyncReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset)
++{
++      return readl((IMG_UINT8 *)psSwapChain->pvRegs + ui32Offset);
+ }
+ module_init(OMAPLFB_Init);
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvr_bridge.h git/drivers/gpu/pvr/services4/include/pvr_bridge.h
+--- git/drivers/gpu/pvr/services4/include/pvr_bridge.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/pvr_bridge.h 2008-12-18 15:47:29.000000000 +0100
+@@ -202,14 +202,14 @@
+ #define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST  (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)      
+ #define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT                       PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
+-#define PVRSRV_BRIDGE_EVENT_OBJECT_CONNECT            PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
+-#define PVRSRV_BRIDGE_EVENT_OBJECT_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_OPEN                       PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE              PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+ #define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST           (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
+       
+ #define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD             (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
+-#define PVRSRV_KERNAL_MODE_CLIENT                             1
++#define PVRSRV_KERNEL_MODE_CLIENT                             1
+ typedef struct PVRSRV_BRIDGE_RETURN_TAG
+ {
+@@ -716,7 +716,7 @@
+ typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+-      IMG_HANDLE *hKernelMemInfo;
++      IMG_HANDLE hKernelMemInfo;
+       IMG_UINT32 ui32Offset;
+       IMG_DEV_PHYADDR sPDDevPAddr;
+ }PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
+@@ -1302,9 +1302,25 @@
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+       IMG_HANDLE      hOSEventKM;
+-      IMG_UINT32  ui32MSTimeout;
+ } PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN_TAG
++{
++      PVRSRV_EVENTOBJECT sEventObject;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN;
++
++typedef struct        PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN_TAG
++{
++      IMG_HANDLE hOSEvent;
++      PVRSRV_ERROR eError;
++} PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE_TAG
++{
++      PVRSRV_EVENTOBJECT sEventObject;
++      IMG_HANDLE hOSEventKM;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE;
++
+ #if defined (__cplusplus)
+ }
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/include/servicesint.h git/drivers/gpu/pvr/services4/include/servicesint.h
+--- git/drivers/gpu/pvr/services4/include/servicesint.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/servicesint.h        2008-12-18 15:47:29.000000000 +0100
+@@ -38,16 +38,6 @@
+ #define DRIVERNAME_MAXLENGTH  (100)
+-#define EVENTOBJNAME_MAXLENGTH (50)
+-
+-
+-typedef struct _PVRSRV_EVENTOBJECT_
+-{
+-      
+-      IMG_CHAR        szName[EVENTOBJNAME_MAXLENGTH];
+-      
+-      IMG_HANDLE      hOSEventKM;
+-} PVRSRV_EVENTOBJECT;
+ typedef struct _PVRSRV_KERNEL_MEM_INFO_
+@@ -93,6 +83,13 @@
+ } PVRSRV_KERNEL_SYNC_INFO;
++typedef struct _PVRSRV_DEVICE_SYNC_OBJECT_
++{
++      IMG_UINT32                      ui32ReadOpPendingVal;
++      IMG_DEV_VIRTADDR        sReadOpsCompleteDevVAddr;
++      IMG_UINT32                      ui32WriteOpPendingVal;
++      IMG_DEV_VIRTADDR        sWriteOpsCompleteDevVAddr;
++} PVRSRV_DEVICE_SYNC_OBJECT;
+ typedef struct _PVRSRV_SYNC_OBJECT
+ {
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge.h git/drivers/gpu/pvr/services4/include/sgx_bridge.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/sgx_bridge.h 2008-12-18 15:47:29.000000000 +0100
+@@ -70,8 +70,16 @@
+ #define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT  PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
+ #define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
+ #define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT        PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_SUBMIT2D                                    PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+23)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+24)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT    PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+25)
++#endif
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT        PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+26)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+27)
++#define PVRSRV_BRIDGE_SGX_READ_HWPERF_COUNTERS                PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+28)
+-#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+28)
+  
+ typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
+@@ -161,8 +169,18 @@
+ {
+       IMG_UINT32                              ui32BridgeFlags; 
+       IMG_HANDLE                              hDevCookie;
+-      IMG_DEV_VIRTADDR                sHWRenderContextDevVAddr;
++      PVRSRV_TRANSFER_SGX_KICK                        sKick;
+ }PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ 
++typedef struct PVRSRV_BRIDGE_IN_SUBMIT2D_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVRSRV_2D_SGX_KICK                              sKick;
++} PVRSRV_BRIDGE_IN_SUBMIT2D;
++#endif
+ #endif
+  
+@@ -330,6 +348,33 @@
+       IMG_HANDLE hHWRenderContext;
+ }PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHWTransferContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT;
++
+ typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+@@ -337,18 +382,54 @@
+       IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
+ }PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
+-typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT_TAG
+ {
+       IMG_UINT32 ui32BridgeFlags; 
+       IMG_HANDLE hDevCookie;
+-      IMG_HANDLE hHWRenderContext;
+-}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++      IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHW2DContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT;
+- 
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+ #define       SGX2D_MAX_BLT_CMD_SIZ           256     
+ #endif 
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_COUNTERS_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      IMG_UINT32              ui32PerfReg;
++      IMG_BOOL                bNewPerf;
++      IMG_UINT32              ui32NewPerf;
++      IMG_UINT32              ui32NewPerfReset;
++      IMG_UINT32              ui32PerfCountersReg;
++} PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_COUNTERS;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_COUNTERS_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32OldPerf;
++      IMG_UINT32              aui32Counters[PVRSRV_SGX_HWPERF_NUM_COUNTERS];
++      IMG_UINT32              ui32KickTACounter;
++      IMG_UINT32              ui32KickTARenderCounter;
++      IMG_UINT32              ui32CPUTime;
++      IMG_UINT32              ui32SGXTime;
++} PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_COUNTERS;
++
+ #if defined (__cplusplus)
+ }
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h      1970-01-01 01:00:00.000000000 +0100
+@@ -1,139 +0,0 @@
+-/**********************************************************************
+- *
+- * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+- * 
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- * 
+- * This program is distributed in the hope it will be useful but, except 
+- * as otherwise stated in writing, without any warranty; without even the 
+- * implied warranty of merchantability or fitness for a particular purpose. 
+- * See the GNU General Public License for more details.
+- * 
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- * 
+- * The full GNU General Public License is included in this distribution in
+- * the file called "COPYING".
+- *
+- * Contact Information:
+- * Imagination Technologies Ltd. <gpl-support@imgtec.com>
+- * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
+- *
+- ******************************************************************************/
+-
+-#if !defined(__SGX_BRIDGE_KM_H__)
+-#define __SGX_BRIDGE_KM_H__
+-
+-#include "sgxapi_km.h"
+-#include "sgxinfo.h"
+-#include "sgxinfokm.h"
+-#include "sgx_bridge.h"
+-#include "pvr_bridge.h"
+-#include "perproc.h"
+-
+-#if defined (__cplusplus)
+-extern "C" {
+-#endif
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
+-                                                               IMG_DEV_VIRTADDR sHWRenderContextDevVAddr);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
+-                                               PVR3DIF4_CCB_KICK *psCCBKick);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
+-                                                                IMG_DEV_VIRTADDR sDevVAddr,
+-                                                                IMG_DEV_PHYADDR *pDevPAddr,
+-                                                                IMG_CPU_PHYADDR *pCpuPAddr);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE                hDevCookie,
+-                                                                                      IMG_HANDLE              hDevMemContext,
+-                                                                                      IMG_DEV_PHYADDR *psPDDevPAddr);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                            hDevCookie,
+-                                                              PVR3DIF4_CLIENT_INFO*   psClientInfo);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
+-                                                        SGX_MISC_INFO                 *psMiscInfo);
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-IMG_IMPORT
+-PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
+-                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
+-                                                        IMG_UINT32            ui32NumSrcSyncs,
+-                                                        PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
+-                                                        IMG_UINT32            ui32DataByteSize,
+-                                                        IMG_UINT32            *pui32BltData);
+-
+-#if defined(SGX2D_DIRECT_BLITS)
+-IMG_IMPORT
+-PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
+-                                                         IMG_UINT32                   ui32DataByteSize,
+-                                                         IMG_UINT32                   *pui32BltData);
+-#endif 
+-#endif 
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
+-IMG_IMPORT
+-PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO             *psDevInfo,
+-                                                                         PVRSRV_KERNEL_SYNC_INFO      *psSyncInfo,
+-                                                                         IMG_BOOL bWaitForComplete);
+-#endif 
+-
+-IMG_IMPORT
+-PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
+-                                                                      SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
+-
+-IMG_IMPORT
+-PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
+-                                                         IMG_HANDLE hDevHandle,
+-                                                         SGX_BRIDGE_INIT_INFO *psInitInfo);
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
+-                                        IMG_UINT32 ui32TotalPBSize,
+-                                        IMG_HANDLE *phSharedPBDesc,
+-                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
+-                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
+-                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
+-                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
+-                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
+-                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
+-                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
+-                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
+-                                       IMG_UINT32 ui32TotalPBSize,
+-                                       IMG_HANDLE *phSharedPBDesc,
+-                                       PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
+-                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
+-
+-
+-IMG_IMPORT PVRSRV_ERROR
+-SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
+-                                              PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo);
+-
+- 
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-#define       SGX2D_MAX_BLT_CMD_SIZ           256     
+-#endif 
+-
+-#if defined (__cplusplus)
+-}
+-#endif
+-
+-#endif 
+-
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgxinfo.h git/drivers/gpu/pvr/services4/include/sgxinfo.h
+--- git/drivers/gpu/pvr/services4/include/sgxinfo.h    2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/include/sgxinfo.h    2008-12-18 15:47:29.000000000 +0100
+@@ -59,11 +59,16 @@
+ #if defined(SGX_SUPPORT_HWPROFILING)
+       IMG_HANDLE      hKernelHWProfilingMemInfo;
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      IMG_HANDLE      hKernelHWPerfCBMemInfo;
++#endif
+       IMG_UINT32 ui32EDMTaskReg0;
+       IMG_UINT32 ui32EDMTaskReg1;
+-      IMG_UINT32 ui32ClockGateMask;
++      IMG_UINT32 ui32ClkGateCtl;
++      IMG_UINT32 ui32ClkGateCtl2;
++      IMG_UINT32 ui32ClkGateStatusMask;
+       IMG_UINT32 ui32CacheControl;
+@@ -111,11 +116,13 @@
+ #define PVRSRV_CCBFLAGS_RASTERCMD                     0x1
+ #define PVRSRV_CCBFLAGS_TRANSFERCMD                   0x2
+ #define PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD     0x3
++#if defined(SGX_FEATURE_2D_HARDWARE) 
++#define PVRSRV_CCBFLAGS_2DCMD                         0x4 
++#endif
+ #define PVRSRV_KICKFLAG_RENDER                                0x1
+ #define PVRSRV_KICKFLAG_PIXEL                         0x2
+-
+ #define       SGX_BIF_INVALIDATE_PTCACHE      0x1
+ #define       SGX_BIF_INVALIDATE_PDCACHE      0x2
+@@ -125,25 +132,40 @@
+       PVRSRV_SGX_COMMAND_TYPE         eCommand;
+       PVRSRV_SGX_COMMAND              sCommand;
+       IMG_HANDLE                      hCCBKernelMemInfo;
+-      IMG_HANDLE                      hDstKernelSyncInfo;
+-      IMG_UINT32                      ui32DstReadOpsPendingOffset;
+-      IMG_UINT32                      ui32DstWriteOpsPendingOffset;
++      IMG_HANDLE      hRenderSurfSyncInfo;
++
+       IMG_UINT32      ui32NumTAStatusVals;
+-      IMG_UINT32      aui32TAStatusValueOffset[SGX_MAX_TA_STATUS_VALS];
+       IMG_HANDLE      ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
+       IMG_UINT32      ui32Num3DStatusVals;
+-      IMG_UINT32      aui323DStatusValueOffset[SGX_MAX_3D_STATUS_VALS];
+       IMG_HANDLE      ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
+-#ifdef        NO_HARDWARE
+-      IMG_BOOL        bTerminate;
+-      IMG_HANDLE      hUpdateDstKernelSyncInfo;
++
++      IMG_BOOL        bFirstKickOrResume;
++#if (defined(NO_HARDWARE) || defined(PDUMP))
++      IMG_BOOL        bTerminateOrAbort;
++#endif
++      IMG_UINT32      ui32KickFlags;
++
++      
++      IMG_UINT32      ui32CCBOffset;
++
++      
++      IMG_UINT32      ui32NumSrcSyncs;
++      IMG_HANDLE      ahSrcKernelSyncInfo[SGX_MAX_SRC_SYNCS];
++
++      
++      IMG_BOOL        bTADependency;
++      IMG_HANDLE      hTA3DSyncInfo;
++
++      IMG_HANDLE      hTASyncInfo;
++      IMG_HANDLE      h3DSyncInfo;
++#if defined(NO_HARDWARE)
+       IMG_UINT32      ui32WriteOpsPendingVal;
+ #endif
+-      IMG_UINT32                                      ui32KickFlags;
+ } PVR3DIF4_CCB_KICK;
++
+ typedef struct _PVRSRV_SGX_HOST_CTL_
+ {     
+@@ -158,163 +180,25 @@
+       IMG_UINT32                              ui32ResManFlags;                
+       IMG_DEV_VIRTADDR                sResManCleanupData;             
++      
+       IMG_DEV_VIRTADDR                sTAHWPBDesc;            
+       IMG_DEV_VIRTADDR                s3DHWPBDesc;
++      IMG_DEV_VIRTADDR                sHostHWPBDesc;          
+-} PVRSRV_SGX_HOST_CTL;
+-
+-
+-#if defined(SUPPORT_HW_RECOVERY)
+-typedef struct _SGX_INIT_SCRIPT_DATA
+-{
+-      IMG_UINT32 asHWRecoveryData[SGX_MAX_DEV_DATA];
+-} SGX_INIT_SCRIPT_DATA;
+-#endif
+-
+-typedef struct _PVRSRV_SGXDEV_INFO_
+-{
+-      PVRSRV_DEVICE_TYPE              eDeviceType;
+-      PVRSRV_DEVICE_CLASS             eDeviceClass;
+-
+-      IMG_UINT8                               ui8VersionMajor;
+-      IMG_UINT8                               ui8VersionMinor;
+-      IMG_UINT32                              ui32CoreConfig;
+-      IMG_UINT32                              ui32CoreFlags;
+-
+-      
+-      IMG_PVOID                               pvRegsBaseKM;
+-      
+-
+-      
+-      IMG_HANDLE                              hRegMapping;
+-
+-      
+-      IMG_SYS_PHYADDR                 sRegsPhysBase;
+-      
+-      IMG_UINT32                              ui32RegSize;
+-
+-      
+-      IMG_UINT32                              ui32CoreClockSpeed;
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      
+-      SGX_SLAVE_PORT                  s2DSlavePortKM;
+-
+-      
+-      PVRSRV_RESOURCE                 s2DSlaveportResource;
+-
+-      
+-      IMG_UINT32                      ui322DFifoSize;
+-      IMG_UINT32                      ui322DFifoOffset;
+-      
+-      IMG_HANDLE                      h2DCmdCookie;
+-      
+-      IMG_HANDLE                      h2DQueue;
+-      IMG_BOOL                        b2DHWRecoveryInProgress;
+-      IMG_BOOL                        b2DHWRecoveryEndPending;
+-      IMG_UINT32                      ui322DCompletedBlits;
+-      IMG_BOOL                        b2DLockupSuspected;
+-#endif
+-      
+-    
+-      IMG_VOID                        *psStubPBDescListKM;
+-
+-
+-      
+-      IMG_DEV_PHYADDR                 sKernelPDDevPAddr;
+-
+-      IMG_VOID                                *pvDeviceMemoryHeap;
+-      PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;                     
+-      PVRSRV_SGX_KERNEL_CCB   *psKernelCCB;                   
+-      PPVRSRV_SGX_CCB_INFO    psKernelCCBInfo;                
+-      PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;  
+-      PVRSRV_SGX_CCB_CTL              *psKernelCCBCtl;                
+-      PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; 
+-      IMG_UINT32                              *pui32KernelCCBEventKicker; 
+-      IMG_UINT32                              ui32TAKickAddress;              
+-      IMG_UINT32                              ui32TexLoadKickAddress; 
+-      IMG_UINT32                              ui32VideoHandlerAddress;
+-#if defined(SGX_SUPPORT_HWPROFILING)
+-      PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
+-#endif
+-
+-      
+-      IMG_UINT32                              ui32ClientRefCount;
+-
+-      
+-      IMG_UINT32                              ui32CacheControl;
+-
+-      
+-
+-
+-      IMG_VOID                                *pvMMUContextList;
+-
+-      
+-      IMG_BOOL                                bForcePTOff;
+-
+-      IMG_UINT32                              ui32EDMTaskReg0;
+-      IMG_UINT32                              ui32EDMTaskReg1;
+-
+-      IMG_UINT32                              ui32ClockGateMask;
+-      SGX_INIT_SCRIPTS                sScripts;
+-#if defined(SUPPORT_HW_RECOVERY)
+-      SGX_INIT_SCRIPT_DATA    sScriptData;
+-#endif
+-              
+-      IMG_HANDLE                              hBIFResetPDOSMemHandle;
+-      IMG_DEV_PHYADDR                 sBIFResetPDDevPAddr;
+-      IMG_DEV_PHYADDR                 sBIFResetPTDevPAddr;
+-      IMG_DEV_PHYADDR                 sBIFResetPageDevPAddr;
+-      IMG_UINT32                              *pui32BIFResetPD;
+-      IMG_UINT32                              *pui32BIFResetPT;
+-
+-
+-
+-#if defined(SUPPORT_HW_RECOVERY)
+-      
+-      IMG_HANDLE                              hTimer;
+-      
+-      IMG_UINT32                              ui32TimeStamp;
+-#endif
+-
+-      
+-      IMG_UINT32                              ui32NumResets;
+-
+-      PVRSRV_KERNEL_MEM_INFO                  *psKernelSGXHostCtlMemInfo;
+-      PVRSRV_SGX_HOST_CTL                             *psSGXHostCtl; 
+-
+-      IMG_UINT32                              ui32Flags;
+-
+-      
+-      IMG_UINT32                              ui32RegFlags;
+-
+-      #if defined(PDUMP)
+-      PVRSRV_SGX_PDUMP_CONTEXT        sPDContext;
+-      #endif
++      IMG_UINT32                              ui32NumActivePowerEvents;        
+-#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
+-      
+-      IMG_VOID                                *pvDummyPTPageCpuVAddr;
+-      IMG_DEV_PHYADDR                 sDummyPTDevPAddr;
+-      IMG_HANDLE                              hDummyPTPageOSMemHandle;
+-      IMG_VOID                                *pvDummyDataPageCpuVAddr;
+-      IMG_DEV_PHYADDR                 sDummyDataDevPAddr;
+-      IMG_HANDLE                              hDummyDataPageOSMemHandle;
++#if defined(SUPPORT_SGX_HWPERF)
++      IMG_UINT32                      ui32HWPerfFlags;                
+ #endif
+-      IMG_UINT32                              asSGXDevData[SGX_MAX_DEV_DATA]; 
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      PVRSRV_EVENTOBJECT      *psSGXEventObject;
+-#endif
++       
++      IMG_UINT32                      ui32TimeWraps;
++} PVRSRV_SGX_HOST_CTL;
+-} PVRSRV_SGXDEV_INFO;
+ typedef struct _PVR3DIF4_CLIENT_INFO_
+ {
+-      IMG_VOID                                        *pvRegsBase;                    
+-      IMG_HANDLE                                      hBlockMapping;                  
+-      SGX_SLAVE_PORT                          s2DSlavePort;                   
+       IMG_UINT32                                      ui32ProcessID;                  
+       IMG_VOID                                        *pvProcess;                             
+       PVRSRV_MISC_INFO                        sMiscInfo;                              
+@@ -330,13 +214,9 @@
+ typedef struct _PVR3DIF4_INTERNAL_DEVINFO_
+ {
+       IMG_UINT32                      ui32Flags;
+-      IMG_BOOL                        bTimerEnable;
+       IMG_HANDLE                      hCtlKernelMemInfoHandle;
+       IMG_BOOL                        bForcePTOff;
+       IMG_UINT32                      ui32RegFlags;
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      IMG_HANDLE                      hOSEvent;               
+-#endif
+ } PVR3DIF4_INTERNAL_DEVINFO;
+ typedef struct _PVRSRV_SGX_SHARED_CCB_
+@@ -371,5 +251,150 @@
+       #endif
+ }PVRSRV_SGX_CCB;
++typedef struct _CTL_STATUS_
++{
++      IMG_DEV_VIRTADDR        sStatusDevAddr;
++      IMG_UINT32              ui32StatusValue;
++} CTL_STATUS, *PCTL_STATUS;
++
++#if defined(TRANSFER_QUEUE)
++#define SGXTQ_MAX_STATUS 5
++typedef struct _PVR3DIF4_CMDTA_SHARED_
++{
++      IMG_UINT32                      ui32NumTAStatusVals;
++      IMG_UINT32                      ui32Num3DStatusVals;
++      
++      
++      IMG_UINT32                      ui32WriteOpsPendingVal;
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++      IMG_UINT32                      ui32ReadOpsPendingVal;
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      IMG_UINT32                      ui32TQSyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR                sTQSyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32                      ui32TQSyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR                sTQSyncReadOpsCompleteDevVAddr;
++
++      
++      IMG_UINT32                      ui323DTQSyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR                s3DTQSyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32                      ui323DTQSyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR                s3DTQSyncReadOpsCompleteDevVAddr;
++      
++      
++      IMG_UINT32                      ui32NumSrcSyncs;
++      PVRSRV_DEVICE_SYNC_OBJECT       asSrcSyncs[SGX_MAX_SRC_SYNCS];
++
++      CTL_STATUS                      sCtlTAStatusInfo[SGX_MAX_TA_STATUS_VALS];
++      CTL_STATUS                      sCtl3DStatusInfo[SGX_MAX_3D_STATUS_VALS];
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       sTA3DDependancy;        
++      
++} PVR3DIF4_CMDTA_SHARED;
++
++typedef struct _PVR3DIF4_TRANSFERCMD_SHARED_
++{
++      
++      
++      IMG_UINT32              ui32SrcReadOpPendingVal;
++      IMG_DEV_VIRTADDR        sSrcReadOpsCompleteDevAddr;
++      
++      IMG_UINT32              ui32SrcWriteOpPendingVal;
++      IMG_DEV_VIRTADDR        sSrcWriteOpsCompleteDevAddr;
++
++      
++      
++      IMG_UINT32              ui32DstReadOpPendingVal;
++      IMG_DEV_VIRTADDR        sDstReadOpsCompleteDevAddr;
++      
++      IMG_UINT32              ui32DstWriteOpPendingVal;
++      IMG_DEV_VIRTADDR        sDstWriteOpsCompleteDevAddr;
++
++      
++      IMG_UINT32              ui32TASyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR        sTASyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32              ui32TASyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR        sTASyncReadOpsCompleteDevVAddr;
++
++      
++      IMG_UINT32              ui323DSyncWriteOpsPendingVal;
++      IMG_DEV_VIRTADDR        s3DSyncWriteOpsCompleteDevVAddr;
++      IMG_UINT32              ui323DSyncReadOpsPendingVal;
++      IMG_DEV_VIRTADDR        s3DSyncReadOpsCompleteDevVAddr;
++
++      IMG_UINT32              ui32NumStatusVals;
++      CTL_STATUS              sCtlStatusInfo[SGXTQ_MAX_STATUS];
++
++      IMG_UINT32              ui32NumSrcSync;
++      IMG_UINT32              ui32NumDstSync;
++
++      IMG_DEV_VIRTADDR        sSrcWriteOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++      IMG_DEV_VIRTADDR        sSrcReadOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++
++      IMG_DEV_VIRTADDR        sDstWriteOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++      IMG_DEV_VIRTADDR        sDstReadOpsDevVAddr[SGX_MAX_TRANSFER_SYNC_OPS];
++} PVR3DIF4_TRANSFERCMD_SHARED, *PPVR3DIF4_TRANSFERCMD_SHARED;
++
++typedef struct _PVRSRV_TRANSFER_SGX_KICK_
++{
++      IMG_HANDLE              hCCBMemInfo;
++      IMG_UINT32              ui32SharedCmdCCBOffset;
++
++      IMG_DEV_VIRTADDR        sHWTransferContextDevVAddr;
++
++      IMG_HANDLE              hTASyncInfo;
++      IMG_HANDLE              h3DSyncInfo;
++
++      IMG_UINT32              ui32NumSrcSync;
++      IMG_HANDLE              ahSrcSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++      IMG_UINT32              ui32NumDstSync;
++      IMG_HANDLE              ahDstSyncInfo[SGX_MAX_TRANSFER_SYNC_OPS];
++
++      IMG_UINT32              ui32StatusFirstSync;
++} PVRSRV_TRANSFER_SGX_KICK, *PPVRSRV_TRANSFER_SGX_KICK;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _PVR3DIF4_2DCMD_SHARED_ {
++      
++      IMG_UINT32                      ui32NumSrcSync;
++      PVRSRV_DEVICE_SYNC_OBJECT       sSrcSyncData[SGX_MAX_2D_SRC_SYNC_OPS];
++      
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       sDstSyncData;
++      
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       sTASyncData;
++      
++      
++      PVRSRV_DEVICE_SYNC_OBJECT       s3DSyncData;
++} PVR3DIF4_2DCMD_SHARED, *PPVR3DIF4_2DCMD_SHARED;
++
++typedef struct _PVRSRV_2D_SGX_KICK_
++{
++      IMG_HANDLE              hCCBMemInfo;
++      IMG_UINT32              ui32SharedCmdCCBOffset;
++
++      IMG_DEV_VIRTADDR        sHW2DContextDevVAddr;
++
++      IMG_UINT32              ui32NumSrcSync;
++      IMG_HANDLE              ahSrcSyncInfo[SGX_MAX_2D_SRC_SYNC_OPS];
++      
++      
++      IMG_HANDLE              hDstSyncInfo;
++      
++      
++      IMG_HANDLE              hTASyncInfo;
++      
++      
++      IMG_HANDLE              h3DSyncInfo;
++      
++} PVRSRV_2D_SGX_KICK, *PPVRSRV_2D_SGX_KICK;
++#endif        
++#endif        
++
++#define PVRSRV_SGX_HWPERF_NUM_COUNTERS        9
++
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+--- git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c   2008-12-18 15:47:29.000000000 +0100
+@@ -44,7 +44,6 @@
+ #include "bridged_pvr_bridge.h"
+ #include "env_data.h"
+-
+ #if defined (__linux__)
+ #include "mmap.h"
+ #else
+@@ -66,7 +65,7 @@
+ static IMG_BOOL gbInitServerRunning = IMG_FALSE;
+ static IMG_BOOL gbInitServerRan = IMG_FALSE;
+-static IMG_BOOL gbInitServerSuccessful = IMG_FALSE;
++static IMG_BOOL gbInitSuccessful = IMG_FALSE;
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+@@ -446,7 +445,13 @@
+ }
+-
++#if defined(OS_PVRSRV_ALLOC_DEVICE_MEM_BW)
++int
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++                                         PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc);
++#else
+ static int
+ PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
+                                          PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
+@@ -512,7 +517,7 @@
+               psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
+               psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
+               psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
+-              psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++              psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = psMemInfo->sMemBlk.hOSMemHandle;
+               psAllocDeviceMemOUT->eError =
+                       PVRSRVAllocHandle(psPerProc->psHandleBase,
+@@ -568,6 +573,7 @@
+       return 0;
+ }
++#endif 
+ static int
+ PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
+@@ -1547,12 +1553,12 @@
+               return 0;
+       }
+-      if(psDoKickIN->sCCBKick.hDstKernelSyncInfo != IMG_NULL)
++      if(psDoKickIN->sCCBKick.hTA3DSyncInfo != IMG_NULL)
+       {
+               psRetOUT->eError =
+                       PVRSRVLookupHandle(psPerProc->psHandleBase,
+-                                                         &psDoKickIN->sCCBKick.hDstKernelSyncInfo,
+-                                                         psDoKickIN->sCCBKick.hDstKernelSyncInfo,
++                                                         &psDoKickIN->sCCBKick.hTA3DSyncInfo,
++                                                         psDoKickIN->sCCBKick.hTA3DSyncInfo,
+                                                          PVRSRV_HANDLE_TYPE_SYNC_INFO); 
+               if(psRetOUT->eError != PVRSRV_OK)
+@@ -1561,13 +1567,12 @@
+               }
+       }
+-#if defined (NO_HARDWARE)
+-      if(psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo != IMG_NULL)
++      if(psDoKickIN->sCCBKick.hTASyncInfo != IMG_NULL)
+       {
+               psRetOUT->eError =
+                       PVRSRVLookupHandle(psPerProc->psHandleBase,
+-                                                         &psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
+-                                                         psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
++                                                         &psDoKickIN->sCCBKick.hTASyncInfo,
++                                                         psDoKickIN->sCCBKick.hTASyncInfo,
+                                                          PVRSRV_HANDLE_TYPE_SYNC_INFO); 
+               if(psRetOUT->eError != PVRSRV_OK)
+@@ -1575,7 +1580,46 @@
+                       return 0;
+               }
+       }
+-#endif
++
++      if(psDoKickIN->sCCBKick.h3DSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.h3DSyncInfo,
++                                                         psDoKickIN->sCCBKick.h3DSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      
++      if (psDoKickIN->sCCBKick.ui32NumSrcSyncs > SGX_MAX_SRC_SYNCS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for(i=0; i<psDoKickIN->sCCBKick.ui32NumSrcSyncs; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++                                                         psDoKickIN->sCCBKick.ahSrcKernelSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psDoKickIN->sCCBKick.ui32NumTAStatusVals > SGX_MAX_TA_STATUS_VALS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
+       for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
+       {
+               psRetOUT->eError =
+@@ -1590,6 +1634,11 @@
+               }
+       }
++      if (psDoKickIN->sCCBKick.ui32Num3DStatusVals > SGX_MAX_3D_STATUS_VALS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
+       for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
+       {
+               psRetOUT->eError =
+@@ -1604,6 +1653,20 @@
+               }
+       }
++      if(psDoKickIN->sCCBKick.hRenderSurfSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.hRenderSurfSyncInfo,
++                                                         psDoKickIN->sCCBKick.hRenderSurfSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
+       psRetOUT->eError =
+               SGXDoKickKM(hDevCookieInt, 
+                                       &psDoKickIN->sCCBKick);
+@@ -1620,51 +1683,119 @@
+                       PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+       IMG_HANDLE hDevCookieInt;
++      PVRSRV_TRANSFER_SGX_KICK *psKick;
++      IMG_UINT32 i;
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
+       PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++      psKick = &psSubmitTransferIN->sKick;
++
+       psRetOUT->eError =
+               PVRSRVLookupHandle(psPerProc->psHandleBase,
+                                                  &hDevCookieInt,
+                                                  psSubmitTransferIN->hDevCookie,
+                                                  PVRSRV_HANDLE_TYPE_DEV_NODE);
+-
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+       }
+       psRetOUT->eError =
+-              SGXSubmitTransferKM(hDevCookieInt,
+-                                                      psSubmitTransferIN->sHWRenderContextDevVAddr);
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &psKick->hCCBMemInfo,
++                                                 psKick->hCCBMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->hTASyncInfo,
++                                                         psKick->hTASyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->h3DSyncInfo,
++                                                         psKick->h3DSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psKick->ui32NumSrcSync > SGX_MAX_TRANSFER_SYNC_OPS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for (i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->ahSrcSyncInfo[i],
++                                                         psKick->ahSrcSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      if (psKick->ui32NumDstSync > SGX_MAX_TRANSFER_SYNC_OPS)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for (i = 0; i < psKick->ui32NumDstSync; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->ahDstSyncInfo[i],
++                                                         psKick->ahDstSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      psRetOUT->eError = SGXSubmitTransferKM(hDevCookieInt, psKick);
+       return 0;
+ }
+-#endif
++#if defined(SGX_FEATURE_2D_HARDWARE)
+ static int
+-SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
+-                               PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
+-                               PVRSRV_BRIDGE_RETURN *psRetOUT,
+-                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++SGXSubmit2DBW(IMG_UINT32 ui32BridgeID,
++                      PVRSRV_BRIDGE_IN_SUBMIT2D *psSubmit2DIN,
++                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                      PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+       IMG_HANDLE hDevCookieInt;
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+-      SGX_MISC_INFO *psMiscInfo;
+-
+-
+-      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETMISCINFO);
++      PVRSRV_2D_SGX_KICK *psKick;
++      IMG_UINT32 i;
+-      
+-      psMiscInfo =
+-              (SGX_MISC_INFO *)((IMG_UINT8 *)psSGXGetMiscInfoIN
+-                                                + sizeof(PVRSRV_BRIDGE_IN_SGXGETMISCINFO));
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMIT2D);
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
+       psRetOUT->eError =
+-              PVRSRVLookupHandle(psPerProc->psHandleBase, 
+-                                                 &hDevCookieInt, 
+-                                                 psSGXGetMiscInfoIN->hDevCookie, 
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psSubmit2DIN->hDevCookie,
+                                                  PVRSRV_HANDLE_TYPE_DEV_NODE);
+       if(psRetOUT->eError != PVRSRV_OK)
+@@ -1672,45 +1803,156 @@
+               return 0;
+       }
+-      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++      psKick = &psSubmit2DIN->sKick;
+-      if(CopyFromUserWrapper(psPerProc, 
+-                                     ui32BridgeID,
+-                                                 psMiscInfo,
+-                                                 psSGXGetMiscInfoIN->psMiscInfo,
+-                                                 sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &psKick->hCCBMemInfo,
++                                                 psKick->hCCBMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
+       {
+-              return -EFAULT;
++              return 0;
+       }
+-      switch(psMiscInfo->eRequest)
++      if (psKick->hTASyncInfo != IMG_NULL)
+       {
+-              default:
+-                      break;
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->hTASyncInfo,
++                                                         psKick->hTASyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
+       }
+-      
+-      psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, psMiscInfo);
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->h3DSyncInfo,
++                                                         psKick->h3DSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
+-      
+-      switch(psMiscInfo->eRequest)
++      if (psKick->ui32NumSrcSync > SGX_MAX_2D_SRC_SYNC_OPS)
+       {
+-              default:
+-                      break;
++              psRetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
++              return 0;
++      }
++      for (i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->ahSrcSyncInfo[i],
++                                                         psKick->ahSrcSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
+       }
+-      if(CopyToUserWrapper(psPerProc,
+-                                   ui32BridgeID,
+-                                               psSGXGetMiscInfoIN->psMiscInfo,
+-                                               psMiscInfo,
+-                                               sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      if (psKick->hDstSyncInfo != IMG_NULL)
+       {
+-              return -EFAULT;
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psKick->hDstSyncInfo,
++                                                         psKick->hDstSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
+       }
++      psRetOUT->eError =
++              SGXSubmit2DKM(hDevCookieInt, psKick);
++
++      return 0;
++}
++#endif
++
++#endif
++
++static int
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++                               PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      SGX_MISC_INFO *psMiscInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
++                                                      PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++      psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                      &hDevCookieInt,
++                                                      psSGXGetMiscInfoIN->hDevCookie,
++                                                      PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE*)hDevCookieInt)->pvDevice;
++
++      psMiscInfo = psSGXGetMiscInfoIN->psMiscInfo;
++      psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, psMiscInfo);
++
+       return 0;
+ }
++#if defined(SUPPORT_SGX_HWPERF)
++static int
++SGXReadHWPerfCountersBW(IMG_UINT32                                                                    ui32BridgeID,
++                                              PVRSRV_BRIDGE_IN_SGX_READ_HWPERF_COUNTERS       *psSGXReadHWPerfCountersIN,
++                                              PVRSRV_BRIDGE_OUT_SGX_READ_HWPERF_COUNTERS      *psSGXReadHWPerfCountersOUT,
++                                              PVRSRV_PER_PROCESS_DATA                                         *psPerProc)
++{
++      IMG_HANDLE                      hDevCookieInt;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_READ_HWPERF_COUNTERS);
++
++      psSGXReadHWPerfCountersOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                      &hDevCookieInt,
++                                                      psSGXReadHWPerfCountersIN->hDevCookie,
++                                                      PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psSGXReadHWPerfCountersOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = ((PVRSRV_DEVICE_NODE*)hDevCookieInt)->pvDevice;
++
++      psSGXReadHWPerfCountersOUT->eError = SGXReadHWPerfCountersKM(psDevInfo,
++                                                      psSGXReadHWPerfCountersIN->ui32PerfReg,
++                                                      &psSGXReadHWPerfCountersOUT->ui32OldPerf,
++                                                      psSGXReadHWPerfCountersIN->bNewPerf,
++                                                      psSGXReadHWPerfCountersIN->ui32NewPerf,
++                                                      psSGXReadHWPerfCountersIN->ui32NewPerfReset,
++                                                      psSGXReadHWPerfCountersIN->ui32PerfCountersReg,
++                                                      &psSGXReadHWPerfCountersOUT->aui32Counters[0],
++                                                      &psSGXReadHWPerfCountersOUT->ui32KickTACounter,
++                                                      &psSGXReadHWPerfCountersOUT->ui32KickTARenderCounter,
++                                                      &psSGXReadHWPerfCountersOUT->ui32CPUTime,
++                                                      &psSGXReadHWPerfCountersOUT->ui32SGXTime);
++
++      return 0;
++}
++#endif 
++
+ static int
+ PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
+                                          IMG_VOID *psBridgeIn,
+@@ -1752,15 +1994,13 @@
+               return 0;
+       }
+-      PDUMPENDINITPHASE();
+-
+-      gbInitServerSuccessful = psInitSrvDisconnectIN->bInitSuccesful;
+-
+       psPerProc->bInitProcess = IMG_FALSE;
+       gbInitServerRunning = IMG_FALSE;
+       gbInitServerRan = IMG_TRUE;
+-      psRetOUT->eError = PVRSRV_OK;
++      psRetOUT->eError = PVRSRVFinaliseSystem(psInitSrvDisconnectIN->bInitSuccesful);
++
++      gbInitSuccessful = (IMG_BOOL)(((psRetOUT->eError == PVRSRV_OK) && (psInitSrvDisconnectIN->bInitSuccesful)));
+       return 0;
+ }
+@@ -1772,15 +2012,99 @@
+                                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
+                                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
++      IMG_HANDLE hOSEventKM;
++
+       PVR_UNREFERENCED_PARAMETER(psPerProc);
+       
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
+-      psRetOUT->eError = OSEventObjectWait(psEventObjectWaitIN->hOSEventKM, psEventObjectWaitIN->ui32MSTimeout);
++      psRetOUT->eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hOSEventKM, 
++                                                 psEventObjectWaitIN->hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++      
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      
++      psRetOUT->eError = OSEventObjectWait(hOSEventKM);
++
++      return 0;
++}
++
++static int
++PVRSRVEventObjectOpenBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_EVENT_OBJECT_OPEN *psEventObjectOpenIN,
++                                                PVRSRV_BRIDGE_OUT_EVENT_OBJECT_OPEN *psEventObjectOpenOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_OPEN);
++
++      psEventObjectOpenOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &psEventObjectOpenIN->sEventObject.hOSEventKM, 
++                                                 psEventObjectOpenIN->sEventObject.hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++
++      if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      psEventObjectOpenOUT->eError = OSEventObjectOpen(&psEventObjectOpenIN->sEventObject, &psEventObjectOpenOUT->hOSEvent);
++
++      if(psEventObjectOpenOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      psEventObjectOpenOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psEventObjectOpenOUT->hOSEvent,
++                                                psEventObjectOpenOUT->hOSEvent,
++                                                PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);               
+       return 0;
+ }
++static int
++PVRSRVEventObjectCloseBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_EVENT_OBJECT_CLOSE *psEventObjectCloseIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hOSEventKM;
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE);
++      
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &psEventObjectCloseIN->sEventObject.hOSEventKM, 
++                                                 psEventObjectCloseIN->sEventObject.hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &hOSEventKM, 
++                                                 psEventObjectCloseIN->hOSEventKM, 
++                                                 PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = OSEventObjectClose(&psEventObjectCloseIN->sEventObject, hOSEventKM);
++
++      return 0;
++}
+ static int
+ SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
+@@ -1847,6 +2171,13 @@
+       bLookupFailed |= (eError != PVRSRV_OK);
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++#endif
+       for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+@@ -1907,6 +2238,13 @@
+       bReleaseFailed |= (eError != PVRSRV_OK);
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++#endif
+       for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+@@ -1950,6 +2288,10 @@
+       bDissociateFailed |= (eError != PVRSRV_OK);
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWPerfCBMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++#endif
+       for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
+@@ -2005,7 +2347,6 @@
+                                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+       IMG_HANDLE hDevCookieInt;
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+       IMG_HANDLE hHWRenderContextInt;
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
+@@ -2020,10 +2361,8 @@
+               return 0;
+       }
+-      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
+-
+       hHWRenderContextInt =
+-              SGXRegisterHWRenderContextKM(psDevInfo,
++              SGXRegisterHWRenderContextKM(hDevCookieInt,
+                                                                        &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr);
+       if (hHWRenderContextInt == IMG_NULL)
+@@ -2043,54 +2382,180 @@
+ }
+ static int
+-SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
+-                                                PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
+-                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
+-                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+-      IMG_HANDLE hDevCookieInt;
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+-      
+-      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++      IMG_HANDLE hHWRenderContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
+       psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hHWRenderContextInt,
++                                                 psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      
++      return 0;
++}
++
++static int
++SGXRegisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextIN,
++                                                       PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_TRANSFER_CONTEXT *psSGXRegHWTransferContextOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hHWTransferContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT);
++
++      psSGXRegHWTransferContextOUT->eError =
+               PVRSRVLookupHandle(psPerProc->psHandleBase, 
+                                                  &hDevCookieInt,
+-                                                 psSGXFlushHWRenderTargetIN->hDevCookie,
++                                                 psSGXRegHWTransferContextIN->hDevCookie,
+                                                  PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXRegHWTransferContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      hHWTransferContextInt =
++              SGXRegisterHWTransferContextKM(hDevCookieInt,
++                                                                       &psSGXRegHWTransferContextIN->sHWTransferContextDevVAddr);
++
++      if (hHWTransferContextInt == IMG_NULL)
++      {
++              psSGXRegHWTransferContextOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXRegHWTransferContextOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXRegHWTransferContextOUT->hHWTransferContext,
++                                                hHWTransferContextInt,
++                                                PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++SGXUnregisterHWTransferContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_TRANSFER_CONTEXT *psSGXUnregHWTransferContextIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hHWTransferContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hHWTransferContextInt,
++                                                 psSGXUnregHWTransferContextIN->hHWTransferContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+       }
++      psRetOUT->eError = SGXUnregisterHWTransferContextKM(hHWTransferContextInt);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psSGXUnregHWTransferContextIN->hHWTransferContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT);
++      
++      return 0;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static int
++SGXRegisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextIN,
++                                                       PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_2D_CONTEXT *psSGXRegHW2DContextOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_HANDLE hHW2DContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT);
++
++      psSGXRegHW2DContextOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXRegHW2DContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXRegHW2DContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
+       psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
+-      SGXFlushHWRenderTargetKM(psDevInfo, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++      hHW2DContextInt =
++              SGXRegisterHW2DContextKM(hDevCookieInt,
++                                                                       &psSGXRegHW2DContextIN->sHW2DContextDevVAddr);
++
++      if (hHW2DContextInt == IMG_NULL)
++      {
++              psSGXRegHW2DContextOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXRegHW2DContextOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXRegHW2DContextOUT->hHW2DContext,
++                                                hHW2DContextInt,
++                                                PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
+       return 0;
+ }
+ static int
+-SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
+-                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++SGXUnregisterHW2DContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_2D_CONTEXT *psSGXUnregHW2DContextIN,
+                                                          PVRSRV_BRIDGE_RETURN *psRetOUT,
+                                                          PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
+-      IMG_HANDLE hHWRenderContextInt;
++      IMG_HANDLE hHW2DContextInt;
+-      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT);
+       psRetOUT->eError =
+               PVRSRVLookupHandle(psPerProc->psHandleBase,
+-                                                 &hHWRenderContextInt,
+-                                                 psSGXUnregHWRenderContextIN->hHWRenderContext,
+-                                                 PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++                                                 &hHW2DContextInt,
++                                                 psSGXUnregHW2DContextIN->hHW2DContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+       }
+-      psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++      psRetOUT->eError = SGXUnregisterHW2DContextKM(hHW2DContextInt);
+       if(psRetOUT->eError != PVRSRV_OK)
+       {
+               return 0;
+@@ -2098,11 +2563,37 @@
+       psRetOUT->eError =
+               PVRSRVReleaseHandle(psPerProc->psHandleBase,
+-                                                      psSGXUnregHWRenderContextIN->hHWRenderContext,
+-                                                      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++                                                      psSGXUnregHW2DContextIN->hHW2DContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT);
+       
+       return 0;
+ }
++#endif
++
++static int
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXFlushHWRenderTargetIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      SGXFlushHWRenderTargetKM(hDevCookieInt, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++      return 0;
++}
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+@@ -2679,16 +3170,63 @@
+                                       PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
+                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
+ {
++      PVRSRV_ERROR eError;
++      
+       PVR_UNREFERENCED_PARAMETER(psPerProc);
+-
+       PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
+       
+       OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
+                         &psGetMiscInfoIN->sMiscInfo,
+                         sizeof(PVRSRV_MISC_INFO));
+-      psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoIN->sMiscInfo);
+-      psGetMiscInfoOUT->sMiscInfo = psGetMiscInfoIN->sMiscInfo;
++      if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT)
++      {
++                      
++              eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                            psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++                                          (IMG_VOID **)&psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++          if(eError != PVRSRV_OK)
++          {
++                  PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Out of memory"));
++                  return -EFAULT;
++          }
++
++          psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++      
++              
++              eError = CopyToUserWrapper(psPerProc, ui32BridgeID,
++                                   psGetMiscInfoIN->sMiscInfo.pszMemoryStr,
++                                   psGetMiscInfoOUT->sMiscInfo.pszMemoryStr,
++                                   psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen);
++              
++          
++          OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                            psGetMiscInfoOUT->sMiscInfo.ui32MemoryStrLen,
++                            (IMG_VOID *)psGetMiscInfoOUT->sMiscInfo.pszMemoryStr, 0);
++      
++          
++          psGetMiscInfoOUT->sMiscInfo.pszMemoryStr = psGetMiscInfoIN->sMiscInfo.pszMemoryStr; 
++
++          if(eError != PVRSRV_OK)
++          {
++              
++                  PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoBW Error copy to user"));
++                  return -EFAULT;
++          }
++      }
++      else
++      {
++              psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoOUT->sMiscInfo);
++      }
++
++      if (psGetMiscInfoIN->sMiscInfo.ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++      {
++              psGetMiscInfoOUT->eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                                                                      &psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++                                                                                                      psGetMiscInfoOUT->sMiscInfo.sGlobalEventObject.hOSEventKM,
++                                                                                                      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++                                                                                                      PVRSRV_HANDLE_ALLOC_FLAG_SHARED);        
++      }
+       return 0;
+ }
+@@ -3526,6 +4064,7 @@
+               psKernelMemInfo->ui32Flags;
+       psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
+               psKernelMemInfo->ui32AllocSize; 
++      psAllocSharedSysMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+       psAllocSharedSysMemOUT->eError =
+               PVRSRVAllocHandle(psPerProc->psHandleBase,
+                                                 &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
+@@ -3641,7 +4180,7 @@
+               psKernelMemInfo->ui32Flags;
+       psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
+               psKernelMemInfo->ui32AllocSize; 
+-      psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++      psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = psKernelMemInfo->sMemBlk.hOSMemHandle;
+       psMapMemInfoMemOUT->eError =
+               PVRSRVAllocSubHandle(psPerProc->psHandleBase,
+                                                 &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
+@@ -3972,6 +4511,8 @@
+               
+       SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_OPEN, PVRSRVEventObjectOpenBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_CLOSE, PVRSRVEventObjectCloseBW);
+ #if defined(SUPPORT_SGX1)
+@@ -4009,7 +4550,18 @@
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
+       SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
+-
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#if defined(TRANSFER_QUEUE)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMIT2D, SGXSubmit2DBW);
++#endif
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_2D_CONTEXT, SGXRegisterHW2DContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_2D_CONTEXT, SGXUnregisterHW2DContextBW);
++#endif
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_TRANSFER_CONTEXT, SGXRegisterHWTransferContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_TRANSFER_CONTEXT, SGXUnregisterHWTransferContextBW);
++#endif 
++#if defined(SUPPORT_SGX_HWPERF)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READ_HWPERF_COUNTERS, SGXReadHWPerfCountersBW);
+ #endif 
+@@ -4059,7 +4611,7 @@
+       {
+               if(gbInitServerRan)
+               {
+-                      if(!gbInitServerSuccessful)
++                      if(!gbInitSuccessful)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed.  Driver unusable.",
+                                                __FUNCTION__));
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c   2008-12-18 15:47:29.000000000 +0100
+@@ -24,7 +24,6 @@
+  *
+  ******************************************************************************/
+-#include <linux/module.h>
+ #include "services_headers.h"
+ #include "buffer_manager.h"
+ #include "kernelbuffer.h"
+@@ -1128,7 +1127,8 @@
+       
+       apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+-      if(psBuffer->psSwapChain->psLastFlipBuffer)
++      if(psBuffer->psSwapChain->psLastFlipBuffer &&
++              psBuffer != psBuffer->psSwapChain->psLastFlipBuffer)
+       {
+               apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
+               ui32NumSrcSyncs++;
+@@ -1389,7 +1389,7 @@
+ }
+-IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State)
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State)
+ {
+       PVRSRV_DISPLAYCLASS_INFO        *psDCInfo;
+       PVRSRV_DEVICE_NODE                      *psDeviceNode;
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c     2008-12-18 15:47:29.000000000 +0100
+@@ -422,7 +422,8 @@
+       BM_HEAP                                 *psBMHeap;
+       IMG_HANDLE                              hDevMemContext;
+-      if (!hDevMemHeap)
++      if (!hDevMemHeap ||
++              (ui32Size == 0))
+       {
+               return PVRSRV_ERROR_INVALID_PARAMS;
+       }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/handle.c git/drivers/gpu/pvr/services4/srvkm/common/handle.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/handle.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/handle.c        2008-12-18 15:47:29.000000000 +0100
+@@ -25,6 +25,10 @@
+  ******************************************************************************/
+ #ifdef        PVR_SECURE_HANDLES
++#ifdef        __linux__
++#include <linux/vmalloc.h>
++#endif
++
+ #include <stddef.h>
+ #include "services_headers.h"
+@@ -36,6 +40,8 @@
+ #define       HANDLE_BLOCK_SIZE       256
+ #endif
++#define       HANDLE_LARGE_BLOCK_SIZE 1024
++
+ #define       HANDLE_HASH_TAB_INIT_SIZE       32
+ #define       INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
+@@ -100,13 +106,13 @@
+ {
+       IMG_BOOL bIsEmpty;
+-      bIsEmpty = (psList->ui32Next == ui32Index);
++      bIsEmpty = (IMG_BOOL)(psList->ui32Next == ui32Index);
+ #ifdef        DEBUG
+       {
+               IMG_BOOL bIsEmpty2;
+-              bIsEmpty2 = (psList->ui32Prev == ui32Index);
++              bIsEmpty2 = (IMG_BOOL)(psList->ui32Prev == ui32Index);
+               PVR_ASSERT(bIsEmpty == bIsEmpty2);
+       }
+ #endif
+@@ -114,6 +120,7 @@
+       return bIsEmpty;
+ }
++#ifdef DEBUG
+ #ifdef INLINE_IS_PRAGMA
+ #pragma inline(NoChildren)
+ #endif
+@@ -143,6 +150,7 @@
+       }
+       return IMG_FALSE;
+ }
++#endif 
+ #ifdef INLINE_IS_PRAGMA
+ #pragma inline(ParentHandle)
+@@ -328,6 +336,14 @@
+       if (psBase->psHandleArray != IMG_NULL)
+       {
++#ifdef        __linux__
++              if (psBase->bVmallocUsed)
++              {
++                      vfree(psBase->psHandleArray);
++                      psBase->psHandleArray = IMG_NULL;
++                      return PVRSRV_OK;
++              }
++#endif        
+               eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+                       psBase->ui32TotalHandCount * sizeof(struct sHandle),
+                       psBase->psHandleArray,
+@@ -363,6 +379,7 @@
+               PVR_ASSERT(hHandle != IMG_NULL);
+               PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++              PVR_UNREFERENCED_PARAMETER(hHandle);
+       }
+       
+@@ -495,22 +512,46 @@
+       return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+ }
++#define       NEW_HANDLE_ARRAY_SIZE(psBase, handleNumberIncrement)    \
++      (((psBase)->ui32TotalHandCount +  (handleNumberIncrement)) * \
++      sizeof(struct sHandle))
++
+ static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase)
+ {
+       struct sHandle *psNewHandleArray;
+       IMG_HANDLE hNewHandBlockAlloc;
+       PVRSRV_ERROR eError;
+       struct sHandle *psHandle;
++      IMG_UINT32 ui32HandleNumberIncrement =  HANDLE_BLOCK_SIZE;
++      IMG_UINT32 ui32NewHandleArraySize = NEW_HANDLE_ARRAY_SIZE(psBase, ui32HandleNumberIncrement);
++#ifdef        __linux__
++      IMG_BOOL bVmallocUsed = IMG_FALSE;
++#endif
+       
+       eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+-              (psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE) * sizeof(struct sHandle),
++              ui32NewHandleArraySize,
+               (IMG_PVOID *)&psNewHandleArray,
+               &hNewHandBlockAlloc);
+       if (eError != PVRSRV_OK)
+       {
++#ifdef        __linux__
++              PVR_TRACE(("IncreaseHandleArraySize:  OSAllocMem failed (%d), trying vmalloc", eError));
++              
++              ui32HandleNumberIncrement =  HANDLE_LARGE_BLOCK_SIZE;
++              ui32NewHandleArraySize = NEW_HANDLE_ARRAY_SIZE(psBase, ui32HandleNumberIncrement);
++
++              psNewHandleArray = vmalloc(ui32NewHandleArraySize);
++              if (psNewHandleArray == IMG_NULL)
++              {
++                      PVR_TRACE(("IncreaseHandleArraySize:  vmalloc failed"));
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++              bVmallocUsed = IMG_TRUE;
++#else 
+               PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Couldn't allocate new handle array (%d)", eError));
+               return eError;
++#endif        
+       }
+       
+@@ -521,7 +562,7 @@
+       
+       for(psHandle = psNewHandleArray + psBase->ui32TotalHandCount;
+-              psHandle < psNewHandleArray + psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE;
++              psHandle < psNewHandleArray + psBase->ui32TotalHandCount + ui32HandleNumberIncrement;
+               psHandle++)
+       {
+               psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
+@@ -538,15 +579,18 @@
+       
+       psBase->psHandleArray = psNewHandleArray;
+       psBase->hHandBlockAlloc = hNewHandBlockAlloc;
++#ifdef        __linux__
++      psBase->bVmallocUsed = bVmallocUsed;
++#endif
+       
+       PVR_ASSERT(psBase->ui32FreeHandCount == 0);
+-      psBase->ui32FreeHandCount = HANDLE_BLOCK_SIZE;
++      psBase->ui32FreeHandCount = ui32HandleNumberIncrement;
+       PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
+       psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
+-      psBase->ui32TotalHandCount += HANDLE_BLOCK_SIZE;
++      psBase->ui32TotalHandCount += ui32HandleNumberIncrement;
+       PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
+       psBase->ui32LastFreeIndexPlusOne = psBase->ui32TotalHandCount;
+@@ -564,7 +608,7 @@
+       
+       PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+-      PVR_ASSERT(psBase->psHashTab != NULL);
++      PVR_ASSERT(psBase->psHashTab != IMG_NULL);
+       if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+       {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/power.c git/drivers/gpu/pvr/services4/srvkm/common/power.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/power.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/power.c 2008-12-18 15:47:29.000000000 +0100
+@@ -207,6 +207,21 @@
+ }
++PVRSRV_ERROR PVRSRVSetDevicePowerStateCoreKM(IMG_UINT32                       ui32DeviceIndex,
++                                             PVR_POWER_STATE  eNewPowerState)
++{
++      PVRSRV_ERROR    eError;
++      eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++      return eError;
++}
++
++
+ IMG_EXPORT
+ PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32                   ui32DeviceIndex,
+                                                                                PVR_POWER_STATE        eNewPowerState,
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c        2008-12-18 15:47:29.000000000 +0100
+@@ -28,6 +28,7 @@
+ #include "buffer_manager.h"
+ #include "handle.h"
+ #include "perproc.h"
++#include "pdump_km.h"
+ #include "ra.h"
+@@ -180,7 +181,7 @@
+ }
+-PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData)
+ {
+       PVRSRV_ERROR    eError;
+@@ -215,6 +216,20 @@
+       gpsSysData->eCurrentPowerState = PVRSRV_POWER_STATE_D0;
+       gpsSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       sizeof(PVRSRV_EVENTOBJECT) , 
++                                       (IMG_VOID **)&psSysData->psGlobalEventObject, 0) != PVRSRV_OK) 
++      {
++              
++              goto Error;
++      }
++
++      if(OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", psSysData->psGlobalEventObject) != PVRSRV_OK)
++      {
++              goto Error;     
++      }       
++
+       return eError;
+       
+ Error:
+@@ -224,12 +239,21 @@
+-IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData)
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData)
+ {
+       PVRSRV_ERROR    eError;
+       
+       PVR_UNREFERENCED_PARAMETER(psSysData);
++      
++      if(psSysData->psGlobalEventObject)
++      {
++              OSEventObjectDestroy(psSysData->psGlobalEventObject);
++              OSFreeMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                               sizeof(PVRSRV_EVENTOBJECT) , 
++                                               psSysData->psGlobalEventObject, 0);
++      }
++
+       eError = PVRSRVHandleDeInit();
+       if (eError != PVRSRV_OK)
+       {
+@@ -246,10 +270,10 @@
+ }
+-PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,  
+-                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
+-                                                                IMG_UINT32 ui32SOCInterruptBit,
+-                                                                IMG_UINT32 *pui32DeviceIndex)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,  
++                                                                                        PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                                        IMG_UINT32 ui32SOCInterruptBit,
++                                                                                        IMG_UINT32 *pui32DeviceIndex)
+ {
+       PVRSRV_ERROR            eError;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+@@ -342,6 +366,61 @@
+               }
+       }
++      
++
++
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed PVRSRVResManConnect call"));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccessful)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                *psSysData;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVFinaliseSystem"));
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed to get SysData"));
++              return(eError);
++      }
++
++      if (bInitSuccessful)
++      {
++              eError = SysFinalise();
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: SysFinalise failed (%d)", eError));
++                      return eError;
++              }
++
++              
++              psDeviceNode = psSysData->psDeviceNodeList;
++              while (psDeviceNode)
++              {
++                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                                                                       PVRSRV_POWER_Unspecified,
++                                                                                                                       KERNEL_ID, IMG_FALSE);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVFinaliseSystem: Failed PVRSRVSetDevicePowerStateKM call (device index: %d)", psDeviceNode->sDevId.ui32DeviceIndex));
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++      }
++
++      PDUMPENDINITPHASE();
++
+       return PVRSRV_OK;
+ }
+@@ -408,7 +487,7 @@
+ }
+-PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
+ {
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+       PVRSRV_DEVICE_NODE      **ppsDevNode;
+@@ -441,10 +520,6 @@
+       
+-
+-#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+-      
+-
+       eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
+                                                                                PVRSRV_POWER_STATE_D3,
+                                                                                KERNEL_ID,
+@@ -454,7 +529,16 @@
+               PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
+               return eError;
+       }
+-#endif 
++
++      
++
++
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVResManConnect call"));
++              return eError;
++      }
+       
+@@ -481,11 +565,11 @@
+ IMG_EXPORT
+-PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
+-                                                                        IMG_UINT32 ui32Value,
+-                                                                        IMG_UINT32 ui32Mask,
+-                                                                        IMG_UINT32 ui32Waitus,
+-                                                                        IMG_UINT32 ui32Tries)
++PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++                                                                                IMG_UINT32 ui32Value,
++                                                                                IMG_UINT32 ui32Mask,
++                                                                                IMG_UINT32 ui32Waitus,
++                                                                                IMG_UINT32 ui32Tries)
+ {
+       IMG_BOOL        bStart = IMG_FALSE;
+       IMG_UINT32      uiStart = 0, uiCurrent=0, uiMaxTime;
+@@ -585,7 +669,8 @@
+       
+       if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
+                                                                               |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
+-                                                                              |PVRSRV_MISC_INFO_MEMSTATS_PRESENT))
++                                                                              |PVRSRV_MISC_INFO_MEMSTATS_PRESENT
++                                                                              |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT))
+       {
+               PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
+               return PVRSRV_ERROR_INVALID_PARAMS;                     
+@@ -719,13 +804,20 @@
+               i32Count = OSSNPrintf(pszStr, 100, "\n\0");
+               UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
+       }
++
++      if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT)
++      && psSysData->psGlobalEventObject)
++      {
++              psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
++              psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
++      }
+       
+       return PVRSRV_OK;
+ }
+-PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32            *pui32Total, 
+-                                                              IMG_UINT32              *pui32Available)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFBStatsKM(IMG_UINT32               *pui32Total, 
++                                                                                       IMG_UINT32             *pui32Available)
+ {
+       IMG_UINT32 ui32Total = 0, i = 0;
+       IMG_UINT32 ui32Available = 0;
+@@ -746,7 +838,7 @@
+ }
+-IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
+ {
+       SYS_DATA                        *psSysData;
+       IMG_BOOL                        bStatus = IMG_FALSE;
+@@ -776,7 +868,7 @@
+ }
+-IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData)
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData)
+ {
+       SYS_DATA                        *psSysData = pvSysData;
+       IMG_BOOL                        bStatus = IMG_FALSE;
+@@ -826,7 +918,7 @@
+ }
+-IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData)
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData)
+ {
+       SYS_DATA                        *psSysData = pvSysData;
+       PVRSRV_DEVICE_NODE      *psDeviceNode;
+@@ -853,10 +945,21 @@
+       {
+               PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
+       }
++      
++      
++      if (psSysData->psGlobalEventObject)
++      {
++              IMG_HANDLE hOSEventKM = psSysData->psGlobalEventObject->hOSEventKM;
++              if(hOSEventKM)
++              {
++                      OSEventObjectSignal(hOSEventKM);
++              }
++      }       
+ }
+-PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave)
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, 
++                                                                                                              IMG_UINT32 *puiBufSize, IMG_BOOL bSave)
+ {
+       IMG_UINT32         uiBytesSaved = 0;
+       IMG_PVOID          pvLocalMemCPUVAddr;
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/queue.c git/drivers/gpu/pvr/services4/srvkm/common/queue.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/queue.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/queue.c 2008-12-18 15:47:29.000000000 +0100
+@@ -760,14 +760,10 @@
+       
+       PVRSRVCommandCompleteCallbacks();
+       
+-#if defined(SYS_USING_INTERRUPTS)
+       if(bScheduleMISR)
+       {
+               OSScheduleMISR(psSysData);
+       }
+-#else
+-      PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
+-#endif 
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/resman.c git/drivers/gpu/pvr/services4/srvkm/common/resman.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/resman.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/common/resman.c        2008-12-18 15:47:29.000000000 +0100
+@@ -145,6 +141,10 @@
+               
+               case RESMAN_TYPE_HW_RENDER_CONTEXT:
+                       return "HW Render Context Resource";
++              case RESMAN_TYPE_HW_TRANSFER_CONTEXT:
++                      return "HW Transfer Context Resource";
++              case RESMAN_TYPE_HW_2D_CONTEXT:
++                      return "HW 2D Context Resource";
+               case RESMAN_TYPE_SHARED_PB_DESC:
+                       return "Shared Parameter Buffer Description Resource";
+               
+@@ -378,7 +378,12 @@
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
+               
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_EVENT_OBJECT, 0, 0, IMG_TRUE);
++              
++              
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_2D_CONTEXT, 0, 0, IMG_TRUE);
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);                       
+               FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c      2008-12-18 15:47:29.000000000 +0100
+@@ -1966,6 +1966,8 @@
+ }
++
++
+ #if PAGE_TEST
+ static void PageTest(void* pMem, IMG_DEV_PHYADDR sDevPAddr)
+ {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h      2008-12-18 15:47:29.000000000 +0100
+@@ -27,6 +27,8 @@
+ #ifndef _MMU_H_
+ #define _MMU_H_
++#include "sgxinfokm.h"
++
+ PVRSRV_ERROR
+ MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c       2008-12-18 15:47:29.000000000 +0100
+@@ -56,11 +56,26 @@
+       psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++      
++
++
++#if defined(FIXME)
+       for(psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
+               psStubPBDesc != IMG_NULL;
+               psStubPBDesc = psStubPBDesc->psNext)
+       {
+               if(psStubPBDesc->ui32TotalPBSize == ui32TotalPBSize)
++#else
++      psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++      if (psStubPBDesc != IMG_NULL)
++      {
++              if(psStubPBDesc->ui32TotalPBSize != ui32TotalPBSize)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,
++                                      "SGXFindSharedPBDescKM: Shared PB requested with different size (0x%x) from existing shared PB (0x%x) - requested size ignored",
++                                      ui32TotalPBSize, psStubPBDesc->ui32TotalPBSize));
++              }
++#endif
+               {
+                       IMG_UINT32 i;
+                       PRESMAN_ITEM psResItem;
+@@ -125,20 +140,6 @@
+       return eError;
+ }
+-IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO* psSGXDevInfo) 
+-{
+-      PVRSRV_STUB_PBDESC **ppsStubPBDesc;
+-      
+-      for(ppsStubPBDesc = (PVRSRV_STUB_PBDESC **)&psSGXDevInfo->psStubPBDescListKM;
+-              *ppsStubPBDesc != IMG_NULL;
+-              ppsStubPBDesc = &(*ppsStubPBDesc)->psNext)
+-      {
+-              PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
+-              IMG_UINT32* pui32Flags = (IMG_UINT32*)psStubPBDesc->psHWPBDescKernelMemInfo->pvLinAddrKM;
+-              *pui32Flags |= 1;
+-      }
+-}
+-
+ static PVRSRV_ERROR
+ SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
+@@ -266,7 +267,7 @@
+                       {
+                               PVR_DPF((PVR_DBG_ERROR,
+                                       "SGXAddSharedPBDescKM: "
+-                                      "Failed to register exisitng shared "
++                                      "Failed to register existing shared "
+                                       "PBDesc with the resource manager"));
+                               goto NoAddKeepPB;
+                       }
+@@ -301,7 +302,7 @@
+       }
+-      psStubPBDesc->ppsSubKernelMemInfos=IMG_NULL;
++      psStubPBDesc->ppsSubKernelMemInfos = IMG_NULL;
+       if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
+                                 sizeof(PVRSRV_KERNEL_MEM_INFO *)
+@@ -395,8 +396,10 @@
+       }
+ NoAddKeepPB:
+-      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++      for (i = 0; i < ui32SharedPBDescSubKernelMemInfosCount; i++)
++      {
+               PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i], IMG_FALSE);
++      }
+       PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
+       PVRSRVFreeDeviceMemKM(hDevCookie, psStubPBDesc->psHWPBDescKernelMemInfo, IMG_FALSE);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c        2008-12-18 15:47:29.000000000 +0100
+@@ -27,12 +27,15 @@
+ #include "sgxdefs.h"
+ #include "services_headers.h"
+ #include "sgxinfo.h"
++#include "sgxinfokm.h"
+ #if defined(SGX_FEATURE_2D_HARDWARE)
+ #include "sgx2dcore.h"
+-#define SGX2D_FLUSH_BH                                                        (0xF0000000) 
++#define SGX2D_FLUSH_BH        0xF0000000 
++#define       SGX2D_FENCE_BH  0x70000000 
++
+ #define SGX2D_QUEUED_BLIT_PAD 4
+ #define SGX2D_COMMAND_QUEUE_SIZE 1024
+@@ -521,7 +524,7 @@
+       
+       if (hCmdCookie != IMG_NULL)
+       {
+-              PVRSRVCommandCompleteKM(hCmdCookie, IMG_FALSE);
++              PVRSRVCommandCompleteKM(hCmdCookie, IMG_TRUE);
+       }
+       PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DHandle2DComplete: Exit"));
+@@ -723,7 +726,7 @@
+                       SGX2DWriteSlavePortBatch(psDevInfo, pui32BltData, ui32DataByteSize);
+-                      SGX2DWriteSlavePort(psDevInfo, EURASIA2D_FENCE_BH);
++                      SGX2DWriteSlavePort(psDevInfo, SGX2D_FENCE_BH);
+               }
+       }
+@@ -817,6 +820,18 @@
+       
+       PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++#if defined(DEBUG)
++      {
++              PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++              PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Syncinfo: %p, Syncdata: %p", psSyncInfo, psSyncData));
++
++              PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Read ops complete: %d, Read ops pending: %d", psSyncData->ui32ReadOpsComplete, psSyncData->ui32ReadOpsPending));
++              PVR_TRACE(("SGX2DQueryBlitsCompleteKM: Write ops complete: %d, Write ops pending: %d", psSyncData->ui32WriteOpsComplete, psSyncData->ui32WriteOpsPending));
++
++      }
++#endif
++
+       return PVRSRV_ERROR_TIMEOUT;
+ }
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h    1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx_bridge_km.h    2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,158 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++                                               PVR3DIF4_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++                                                                IMG_DEV_VIRTADDR sDevVAddr,
++                                                                IMG_DEV_PHYADDR *pDevPAddr,
++                                                                IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE                hDevCookie,
++                                                                                      IMG_HANDLE              hDevMemContext,
++                                                                                      IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                            hDevCookie,
++                                                              PVR3DIF4_CLIENT_INFO*   psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        SGX_MISC_INFO                 *psMiscInfo);
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_IMPORT
++PVRSRV_ERROR SGXReadHWPerfCountersKM(PVRSRV_SGXDEV_INFO       *psDevInfo,
++                                                                       IMG_UINT32                     ui32PerfReg,
++                                                                       IMG_UINT32                     *pui32OldPerf,
++                                                                       IMG_BOOL                       bNewPerf,
++                                                                       IMG_UINT32                     ui32NewPerf,
++                                                                       IMG_UINT32                     ui32NewPerfReset,
++                                                                       IMG_UINT32                     ui32PerfCountersReg,
++                                                                       IMG_UINT32                     *pui32Counters,
++                                                                       IMG_UINT32                     *pui32KickTACounter,
++                                                                       IMG_UINT32                     *pui32KickTARenderCounter,
++                                                                       IMG_UINT32                     *pui32CPUTime,
++                                                                       IMG_UINT32                     *pui32SGXTime);
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
++                                                        IMG_UINT32            ui32NumSrcSyncs,
++                                                        PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                        IMG_UINT32            ui32DataByteSize,
++                                                        IMG_UINT32            *pui32BltData);
++
++#if defined(SGX2D_DIRECT_BLITS)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                         IMG_UINT32                   ui32DataByteSize,
++                                                         IMG_UINT32                   *pui32BltData);
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO             *psDevInfo,
++                                                                         PVRSRV_KERNEL_SYNC_INFO      *psSyncInfo,
++                                                                         IMG_BOOL bWaitForComplete);
++#endif 
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++                                                                      SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                                         IMG_HANDLE hDevHandle,
++                                                         SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                        IMG_UINT32 ui32TotalPBSize,
++                                        IMG_HANDLE *phSharedPBDesc,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++                                       IMG_UINT32 ui32TotalPBSize,
++                                       IMG_HANDLE *phSharedPBDesc,
++                                       PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++                                              PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define       SGX2D_MAX_BLT_CMD_SIZ           256     
++#endif 
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h        2008-12-18 15:47:29.000000000 +0100
+@@ -45,14 +45,152 @@
+ #define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST     0x01    
+ #define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST     0x02    
+-#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE       0x04    
+-#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD                0x10    
+-#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT                0x20    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST     0x04    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_2DC_REQUEST    0x08    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE       0x10    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD                0x20    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT                0x40    
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++      PVRSRV_DEVICE_TYPE              eDeviceType;
++      PVRSRV_DEVICE_CLASS             eDeviceClass;
++
++      IMG_UINT8                               ui8VersionMajor;
++      IMG_UINT8                               ui8VersionMinor;
++      IMG_UINT32                              ui32CoreConfig;
++      IMG_UINT32                              ui32CoreFlags;
++
++      
++      IMG_PVOID                               pvRegsBaseKM;
++      
++
++      
++      IMG_HANDLE                              hRegMapping;
++
++      
++      IMG_SYS_PHYADDR                 sRegsPhysBase;
++      
++      IMG_UINT32                              ui32RegSize;
++
++      
++      IMG_UINT32                              ui32CoreClockSpeed;
++      IMG_UINT32                              ui32uKernelTimerClock;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      
++      SGX_SLAVE_PORT                  s2DSlavePortKM;
++
++      
++      PVRSRV_RESOURCE                 s2DSlaveportResource;
++
++      
++      IMG_UINT32                      ui322DFifoSize;
++      IMG_UINT32                      ui322DFifoOffset;
++      
++      IMG_HANDLE                      h2DCmdCookie;
++      
++      IMG_HANDLE                      h2DQueue;
++      IMG_BOOL                        b2DHWRecoveryInProgress;
++      IMG_BOOL                        b2DHWRecoveryEndPending;
++      IMG_UINT32                      ui322DCompletedBlits;
++      IMG_BOOL                        b2DLockupSuspected;
++#endif
++      
++    
++      IMG_VOID                        *psStubPBDescListKM;
++
++
++      
++      IMG_DEV_PHYADDR                 sKernelPDDevPAddr;
++
++      IMG_VOID                                *pvDeviceMemoryHeap;
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;                     
++      PVRSRV_SGX_KERNEL_CCB   *psKernelCCB;                   
++      PPVRSRV_SGX_CCB_INFO    psKernelCCBInfo;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;  
++      PVRSRV_SGX_CCB_CTL              *psKernelCCBCtl;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; 
++      IMG_UINT32                              *pui32KernelCCBEventKicker; 
++      IMG_UINT32                              ui32TAKickAddress;              
++      IMG_UINT32                              ui32TexLoadKickAddress; 
++      IMG_UINT32                              ui32VideoHandlerAddress;
++#if defined(SGX_SUPPORT_HWPROFILING)
++      PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++      IMG_UINT32                              ui32KickTACounter;
++      IMG_UINT32                              ui32KickTARenderCounter;
++#if defined(SUPPORT_SGX_HWPERF)
++      PPVRSRV_KERNEL_MEM_INFO psKernelHWPerfCBMemInfo;
++#endif
++      
++      IMG_UINT32                              ui32ClientRefCount;
++      
++      IMG_UINT32                              ui32CacheControl;
++      
++      IMG_VOID                                *pvMMUContextList;
++
++      
++      IMG_BOOL                                bForcePTOff;
++
++      IMG_UINT32                              ui32EDMTaskReg0;
++      IMG_UINT32                              ui32EDMTaskReg1;
++
++      IMG_UINT32                              ui32ClkGateCtl;
++      IMG_UINT32                              ui32ClkGateCtl2;
++      IMG_UINT32                              ui32ClkGateStatusMask;
++      SGX_INIT_SCRIPTS                sScripts;
++
++              
++      IMG_HANDLE                              hBIFResetPDOSMemHandle;
++      IMG_DEV_PHYADDR                 sBIFResetPDDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPTDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPageDevPAddr;
++      IMG_UINT32                              *pui32BIFResetPD;
++      IMG_UINT32                              *pui32BIFResetPT;
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++      
++      IMG_HANDLE                              hTimer;
++      
++      IMG_UINT32                              ui32TimeStamp;
++#endif
++
++      
++      IMG_UINT32                              ui32NumResets;
++
++      PVRSRV_KERNEL_MEM_INFO                  *psKernelSGXHostCtlMemInfo;
++      PVRSRV_SGX_HOST_CTL                             *psSGXHostCtl; 
++
++      IMG_UINT32                              ui32Flags;
++
++      
++      IMG_UINT32                              ui32RegFlags;
++
++      #if defined(PDUMP)
++      PVRSRV_SGX_PDUMP_CONTEXT        sPDContext;
++      #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      IMG_VOID                                *pvDummyPTPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyPTDevPAddr;
++      IMG_HANDLE                              hDummyPTPageOSMemHandle;
++      IMG_VOID                                *pvDummyDataPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyDataDevPAddr;
++      IMG_HANDLE                              hDummyDataPageOSMemHandle;
++#endif
++
++      IMG_UINT32                              asSGXDevData[SGX_MAX_DEV_DATA]; 
++
++} PVRSRV_SGXDEV_INFO;
++
+ typedef struct _SGX_TIMING_INFORMATION_
+ {
+@@ -122,10 +260,8 @@
+ PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
+-
+ IMG_VOID SGXOSTimer(IMG_VOID *pvData);
+-IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO  *psDevInfo);
+ #if defined(NO_HARDWARE)
+ static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO             *psDevInfo,
+                                                                                               IMG_UINT32 ui32StatusRegister,
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c  2008-12-18 15:47:29.000000000 +0100
+@@ -54,23 +54,16 @@
+ #endif
+ IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
+-IMG_VOID SGXScheduleProcessQueues(IMG_VOID *pvData);
+ IMG_UINT32 gui32EventStatusServicesByISR = 0;
+-static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
+-                                               IMG_UINT32                      ui32PDUMPFlags);
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                IMG_UINT32                     ui32PDUMPFlags);
+-PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+-                                                 IMG_BOOL                             bHardwareRecovery);
++static PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                                                IMG_BOOL                              bHardwareRecovery);
+ PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-#define SGX_BIF_DIR_LIST_INDEX_EDM    15
+-#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE15
+-#else
+-#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE0
+-#endif
+ static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
+ {
+@@ -116,6 +109,9 @@
+ #if defined(SGX_SUPPORT_HWPROFILING)
+       psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
+ #endif
++#if defined(SUPPORT_SGX_HWPERF)
++      psDevInfo->psKernelHWPerfCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWPerfCBMemInfo;
++#endif
+       
+@@ -124,7 +120,7 @@
+                                               (IMG_VOID **)&psKernelCCBInfo, 0);
+       if (eError != PVRSRV_OK)        
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to alloc memory"));
++              PVR_DPF((PVR_DBG_ERROR,"InitDevInfo: Failed to alloc memory"));
+               goto failed_allockernelccb;
+       }
+@@ -151,7 +147,9 @@
+       psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
+       psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
+-      psDevInfo->ui32ClockGateMask = psInitInfo->ui32ClockGateMask;   
++      psDevInfo->ui32ClkGateCtl = psInitInfo->ui32ClkGateCtl;
++      psDevInfo->ui32ClkGateCtl2 = psInitInfo->ui32ClkGateCtl2;
++      psDevInfo->ui32ClkGateStatusMask = psInitInfo->ui32ClkGateStatusMask;
+       
+@@ -183,10 +181,20 @@
+               if (eNewPowerState == PVRSRV_POWER_STATE_D3)
+               {
+                       PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
+-                      #if defined (SGX_FEATURE_AUTOCLOCKGATING) && (!defined(NO_HARDWARE) || defined(PDUMP))
+-                      IMG_UINT32 ui32ClockMask = psDevInfo->ui32ClockGateMask;
++
++            #if defined (SGX_FEATURE_AUTOCLOCKGATING) && (!defined(NO_HARDWARE) || defined(PDUMP))
++                      IMG_UINT32 ui32ClockMask = psDevInfo->ui32ClkGateStatusMask;
+                       #endif
++#if defined(SUPPORT_HW_RECOVERY)
++                      
++                      if (OSDisableTimer(psDevInfo->hTimer) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Failed to disable timer"));
++                              return  PVRSRV_ERROR_GENERIC;
++                      }
++#endif 
++
+                       
+                       psSGXHostCtl->ui32PowManFlags |= PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST;
+@@ -202,7 +210,7 @@
+                                                               MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                                               WAIT_TRY_COUNT) != PVRSRV_OK)
+                       {
+-                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip power off failed."));
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for chip power off failed."));
+                       }
+                       #endif
+@@ -229,7 +237,7 @@
+                                                               MAX_HW_TIME_US/WAIT_TRY_COUNT,
+                                                               WAIT_TRY_COUNT) != PVRSRV_OK)
+                       {
+-                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip idle failed."));
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPrePowerState: Wait for chip idle failed."));
+                       }
+                       #endif
+                       PDUMPREGPOL(EUR_CR_CLKGATESTATUS, 0, ui32ClockMask);
+@@ -278,6 +286,14 @@
+                               PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
+                               return eError;
+                       }
++#if defined(SUPPORT_HW_RECOVERY)
++                      eError = OSEnableTimer(psDevInfo->hTimer);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState : Failed to enable host timer"));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++#endif
+               }
+               PVR_DPF((PVR_DBG_WARNING,
+@@ -288,8 +304,6 @@
+       return PVRSRV_OK;
+ }
+-#define       SCRIPT_DATA(pData, offset, type) (*((type *)(((char *)pData) + offset)))
+-#define       SCRIPT_DATA_UI32(pData, offset) SCRIPT_DATA(pData, offset, IMG_UINT32)
+ static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
+ {
+@@ -333,14 +347,18 @@
+       return PVRSRV_ERROR_GENERIC;;
+ }
+-PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
+-                                                 IMG_BOOL                             bHardwareRecovery)
++static PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                                                IMG_BOOL                              bHardwareRecovery)
+ {
+       PVRSRV_ERROR            eError;
+       IMG_UINT32                      ui32ReadOffset, ui32WriteOffset;
+       
+-      ResetSGX(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_CLKGATECTL, psDevInfo->ui32ClkGateCtl);
++      PDUMPREGWITHFLAGS(EUR_CR_CLKGATECTL, psDevInfo->ui32ClkGateCtl, PDUMP_FLAGS_CONTINUOUS);
++
++      
++      SGXReset(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
+       
+       *psDevInfo->pui32KernelCCBEventKicker = 0;
+@@ -381,12 +399,14 @@
+                                                  0,
+                                                  PVRSRV_USSE_EDM_INTERRUPT_HWR,
+                                                  MAX_HW_TIME_US/WAIT_TRY_COUNT,
+-                                                 WAIT_TRY_COUNT) != PVRSRV_OK)
++                                                 1000) != PVRSRV_OK)
+               {
+-                      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGXEDM: Wait for uKernel HW Recovery failed"));
++                      PVR_DPF((PVR_DBG_ERROR, "SGXInitialise: Wait for uKernel HW Recovery failed"));
++                      return PVRSRV_ERROR_RETRY;
+               }
+       }
++
+       
+@@ -426,259 +446,6 @@
+ }
+-static IMG_VOID ResetSGXSleep(PVRSRV_SGXDEV_INFO      *psDevInfo,
+-                                                        IMG_UINT32                    ui32PDUMPFlags,
+-                                                        IMG_BOOL                              bPDump)
+-{
+-#if !defined(PDUMP)
+-      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+-#endif 
+-
+-      
+-      OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
+-      if (bPDump)
+-      {
+-              PDUMPIDLWITHFLAGS(1000, ui32PDUMPFlags);
+-      }
+-}
+-
+-
+-static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
+-                                               IMG_UINT32                      ui32PDUMPFlags)
+-{
+-      IMG_UINT32 ui32RegVal;
+-
+-      const IMG_UINT32 ui32SoftResetRegVal =
+-                                      #ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
+-                                      EUR_CR_SOFT_RESET_TWOD_RESET_MASK       |
+-                                      #endif
+-                                      EUR_CR_SOFT_RESET_DPM_RESET_MASK        |
+-                                      EUR_CR_SOFT_RESET_TA_RESET_MASK         |
+-                                      EUR_CR_SOFT_RESET_USE_RESET_MASK        |
+-                                      EUR_CR_SOFT_RESET_ISP_RESET_MASK        |
+-                                      EUR_CR_SOFT_RESET_TSP_RESET_MASK;
+-
+-      const IMG_UINT32 ui32BifInvalDCVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
+-
+-      const IMG_UINT32 ui32BifFaultMask =
+-                                              EUR_CR_BIF_INT_STAT_FAULT_MASK;
+-
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      IMG_UINT32                      ui32BIFCtrl;
+-#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
+-      IMG_UINT32                      ui32BIFMemArb;
+-#endif 
+-#endif 
+-
+-#ifndef PDUMP
+-      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
+-#endif 
+-
+-      psDevInfo->ui32NumResets++;
+-
+-      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
+-
+-#if defined(FIX_HW_BRN_23944)
+-      
+-      ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-      
+-      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+-      if (ui32RegVal & ui32BifFaultMask)
+-      {
+-              
+-              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-      }
+-#endif 
+-
+-      
+-      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-      
+-      
+-
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
+-
+-#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
+-      
+-
+-      ui32BIFMemArb   = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
+-                                        (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
+-                                        (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb, ui32PDUMPFlags);
+-#endif 
+-#endif 
+-
+-
+-      
+-
+-
+-
+-
+-      ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-      
+-      ui32RegVal = ui32SoftResetRegVal;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-
+-      
+-
+-      for (;;)
+-      {
+-              IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
+-              IMG_DEV_VIRTADDR sBifFault;
+-              IMG_UINT32 ui32PDIndex, ui32PTIndex;
+-
+-              if ((ui32BifIntStat & ui32BifFaultMask) == 0)
+-              {
+-                      break;
+-              }
+-              
+-              
+-
+-
+-              
+-
+-
+-              sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
+-              PVR_DPF((PVR_DBG_WARNING, "ResetSGX: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
+-              ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
+-              ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
+-
+-              
+-              ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-
+-              
+-              psDevInfo->pui32BIFResetPD[ui32PDIndex] = psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
+-              psDevInfo->pui32BIFResetPT[ui32PTIndex] = psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
+-
+-              
+-              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
+-              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
+-
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-              
+-              ui32RegVal = ui32SoftResetRegVal;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-              
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-              ui32RegVal = 0;
+-              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
+-
+-              
+-              psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
+-              psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
+-      }
+-
+-
+-      
+-
+-#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      
+-      ui32BIFCtrl = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
+-#ifdef SGX_FEATURE_2D_HARDWARE
+-      
+-      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
+-#endif
+-#if defined(FIX_HW_BRN_23410)
+-      
+-      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
+-#endif
+-
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32BIFCtrl);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32BIFCtrl, ui32PDUMPFlags);
+-#endif 
+-
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr);
+-      PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
+-
+-#ifdef SGX_FEATURE_2D_HARDWARE
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
+-#endif
+-      
+-#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
+-      
+-      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      ui32RegVal = ui32SoftResetRegVal;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-#endif 
+-
+-      
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32BifInvalDCVal, ui32PDUMPFlags);
+-
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
+-      
+-      PVR_DPF((PVR_DBG_WARNING,"Soft Reset of SGX"));
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      
+-      ui32RegVal = 0;
+-      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
+-      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
+-
+-      
+-      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
+-
+-      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
+-}
+-
+ static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
+ {
+       PVRSRV_SGXDEV_INFO      *psDevInfo;     
+@@ -730,6 +497,7 @@
+       psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
+       
+       for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
+       {
+@@ -759,25 +527,6 @@
+               return PVRSRV_ERROR_GENERIC;
+       }
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      
+-      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
+-                                       sizeof(PVRSRV_EVENTOBJECT) , 
+-                                       (IMG_VOID **)&psDevInfo->psSGXEventObject, 0) != PVRSRV_OK)    
+-      {
+-              
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for event object"));
+-              return (PVRSRV_ERROR_OUT_OF_MEMORY);
+-      }
+-
+-      if(OSEventObjectCreate("PVRSRV_EVENTOBJECT_SGX", psDevInfo->psSGXEventObject) != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to create event object"));
+-              return (PVRSRV_ERROR_OUT_OF_MEMORY);
+-      
+-      }
+-#endif 
+-
+       return PVRSRV_OK;
+ }
+@@ -816,9 +565,10 @@
+       
+       
+       psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++      psDevInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
+       
+       
+-      psInitInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++      psInitInfo->ui32uKernelTimerClock = psDevInfo->ui32uKernelTimerClock;
+ #if defined(SUPPORT_HW_RECOVERY)
+       psInitInfo->ui32HWRecoverySampleRate = psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
+ #endif 
+@@ -970,7 +720,6 @@
+ #endif
+-
+       
+       OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
+@@ -983,27 +732,16 @@
+       PDUMPCOMMENT("Kernel CCB Event Kicker");
+       PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
+-
++#if defined(SUPPORT_HW_RECOVERY)
+       
+-      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+-                                                                               PVRSRV_POWER_Unspecified,
+-                                                                               KERNEL_ID, IMG_FALSE);
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed PVRSRVSetDevicePowerStateKM call"));
+-              return eError;
+-      }
+-#if defined(SUPPORT_HW_RECOVERY)
++
++      psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode,
++                                                                 1000 * 50 / psSGXDeviceMap->sTimingInfo.ui32uKernelFreq);
++      if(psDevInfo->hTimer == IMG_NULL)
+       {
+-              SGX_TIMING_INFORMATION* psSGXTimingInfo = & psSGXDeviceMap->sTimingInfo;
+-              
+-              psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode, 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
+-              if(psDevInfo->hTimer == IMG_NULL)
+-              {
+-                      PVR_DPF((PVR_DBG_ERROR,"OSAddTimer : Failed to register timer callback function"));
+-                      return PVRSRV_ERROR_GENERIC;
+-              }
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM : Failed to register timer callback function"));
++              return PVRSRV_ERROR_GENERIC;
+       }
+ #endif
+@@ -1030,38 +768,17 @@
+       }
+ #if defined(SUPPORT_HW_RECOVERY)
+-      
+-      if(psDevInfo->hTimer)
+-      {
+-              eError = OSRemoveTimer (psDevInfo->hTimer);
+-              if (eError != PVRSRV_OK)
+-              {
+-                      PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
+-                      return  eError;
+-              }
+-      }
+-#endif
+-
+-      MMU_BIFResetPDFree(psDevInfo);
+-
+-      
+-
+-
+-
+-
+-
+-
+-#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+-      
+-      eError = SGXDeinitialise((IMG_HANDLE)psDevInfo);
++      eError = OSRemoveTimer(psDevInfo->hTimer);
+       if (eError != PVRSRV_OK)
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: SGXDeinitialise failed"));
+-              return eError;
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++              return  eError;
+       }
++      psDevInfo->hTimer = IMG_NULL;
+ #endif 
++      MMU_BIFResetPDFree(psDevInfo);
+       
+@@ -1146,23 +863,14 @@
+ #endif 
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      
+-      if(psDevInfo->psSGXEventObject)
+-      {
+-              OSEventObjectDestroy(psDevInfo->psSGXEventObject);
+-              OSFreeMem( PVRSRV_OS_PAGEABLE_HEAP, 
+-                                               sizeof(PVRSRV_EVENTOBJECT) , 
+-                                               psDevInfo->psSGXEventObject, 0);
+-      }
+-#endif 
+       
+       
+       OSFreePages(PVRSRV_OS_PAGEABLE_HEAP|PVRSRV_HAP_MULTI_PROCESS,
+                               sizeof(PVRSRV_SGXDEV_INFO),
+                               psDevInfo,
+                               hDevInfoOSMemHandle);
+-
++      psDeviceNode->pvDevice = IMG_NULL;
++      
+       if (psDeviceMemoryHeap != IMG_NULL)
+       {
+       
+@@ -1178,47 +886,17 @@
+-IMG_VOID HWRecoveryResetSGX (PVRSRV_SGXDEV_INFO *psDevInfo,
+-                                                       IMG_UINT32             ui32Component,
+-                                                       IMG_UINT32                     ui32CallerID)
+-{
+-      PVRSRV_ERROR eError;
+-
+-      PVR_UNREFERENCED_PARAMETER(ui32Component);
+-      PVR_UNREFERENCED_PARAMETER(ui32CallerID);
+-      
+-      
+-      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
+-      
+-      
+-      PDUMPSUSPEND();
+-
+-      
+-      ResetPBs(psDevInfo);
+-
+-      
+-      eError = SGXInitialise(psDevInfo, IMG_TRUE);
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
+-      }
+-
+-      
+-      PDUMPRESUME();
+-}
+-
+-
+-IMG_VOID HWRecoveryResetSGXEDM (PVRSRV_DEVICE_NODE *psDeviceNode,
+-                                                                      IMG_UINT32                      ui32Component,
++#if defined(SYS_USING_INTERRUPTS) || defined(SUPPORT_HW_RECOVERY)
++static IMG_VOID HWRecoveryResetSGX (PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                                      IMG_UINT32                      ui32Component,
+                                                                       IMG_UINT32                      ui32CallerID)
+ {
+       PVRSRV_ERROR            eError;
+       PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+       PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      SGX2DHWRecoveryStart(psDevInfo);
+-#endif
++      PVR_UNREFERENCED_PARAMETER(ui32Component);
++
+       
+       eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
+@@ -1227,15 +905,32 @@
+               
+-              PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGXEDM: Power transition in progress"));
++              PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGX: Power transition in progress"));
+               return;
+       }
+       psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
+       
+-      HWRecoveryResetSGX(psDevInfo, ui32Component, ui32CallerID);
++      
++      
++      PDUMPSUSPEND();
++      
++      do
++      {
++              eError = SGXInitialise(psDevInfo, IMG_TRUE);
++      }
++      while (eError == PVRSRV_ERROR_RETRY);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++      }
++
++      
++      PDUMPRESUME();
++      
+       PVRSRVPowerUnlock(ui32CallerID);
+       
+       
+@@ -1244,11 +939,9 @@
+       
+       
+       PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
+-
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      SGX2DHWRecoveryEnd(psDevInfo);
+-#endif
+ }
++#endif 
++
+ #if defined(SUPPORT_HW_RECOVERY)
+ IMG_VOID SGXOSTimer(IMG_VOID *pvData)
+@@ -1261,10 +954,6 @@
+       IMG_UINT32              ui32CurrentEDMTasks;
+       IMG_BOOL                bLockup = IMG_FALSE;
+       IMG_BOOL                bPoweredDown;
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      IMG_UINT32              ui322DCompletedBlits = 0;
+-      IMG_BOOL                b2DCoreIsBusy;
+-#endif
+       
+       psDevInfo->ui32TimeStamp++;
+@@ -1305,42 +994,6 @@
+               }
+       }
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      if (!bPoweredDown)
+-      {
+-              ui322DCompletedBlits = psDevInfo->ui322DCompletedBlits;
+-              psDevInfo->ui322DCompletedBlits = SGX2DCompletedBlits(psDevInfo);
+-      }
+-
+-      if (!bLockup && !bPoweredDown)
+-      {
+-              b2DCoreIsBusy = SGX2DIsBusy(psDevInfo);
+-
+-              if (b2DCoreIsBusy && ui322DCompletedBlits == psDevInfo->ui322DCompletedBlits)
+-              {
+-                      if (psDevInfo->b2DLockupSuspected)
+-                      {
+-                              PVR_DPF((PVR_DBG_ERROR, "SGXTimer() detects 2D lockup (%d blits completed)", psDevInfo->ui322DCompletedBlits));
+-                              bLockup = IMG_TRUE;
+-                              psDevInfo->b2DLockupSuspected = IMG_FALSE;
+-                      }
+-                      else
+-                      {
+-                              
+-                              psDevInfo->b2DLockupSuspected = IMG_TRUE;
+-                      }
+-              }
+-              else
+-              {
+-                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
+-              }
+-      }
+-      else
+-      {
+-                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
+-      }
+-#endif 
+-
+       if (bLockup)
+       {
+               PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
+@@ -1349,7 +1002,7 @@
+               psSGXHostCtl->ui32HostDetectedLockups ++;
+               
+-              HWRecoveryResetSGXEDM(psDeviceNode, 0, KERNEL_ID);
++              HWRecoveryResetSGX(psDeviceNode, 0, KERNEL_ID);
+       }
+ }
+ #endif 
+@@ -1394,14 +1047,6 @@
+                       ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
+               }
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-              if (ui32EventStatus & EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK)
+-              {
+-                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK;
+-                      SGX2DHandle2DComplete(psDevInfo);
+-              }
+-#endif
+-
+               if (ui32EventClear)
+               {
+                       bInterruptProcessed = IMG_TRUE;
+@@ -1420,7 +1065,6 @@
+ IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
+ {
+-      PVRSRV_ERROR            eError = PVRSRV_OK;
+       PVRSRV_DEVICE_NODE      *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+       PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
+       PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
+@@ -1428,64 +1072,12 @@
+       if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) &&
+               !(psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR))
+       {
+-              HWRecoveryResetSGXEDM(psDeviceNode, 0, ISR_ID);
++              HWRecoveryResetSGX(psDeviceNode, 0, ISR_ID);
+       }
+-      if ((eError == PVRSRV_OK) &&
+-              (psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) &&
+-              !(psSGXHostCtl->ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST))
+-      {
+-              
+-
+ #if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+-              {
+-
+-                      
+-                      PDUMPSUSPEND();
+-              
+-                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
+-                                                                                               PVRSRV_POWER_STATE_D3,
+-                                                                                               ISR_ID, IMG_FALSE);
+-                      if (eError == PVRSRV_OK)
+-                      {
+-                              if ((*(volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags)
+-                                      & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
+-                              {
+-                                      
+-
+-
+-                                      psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
+-                              }
+-                      }
+-                      else if (eError == PVRSRV_ERROR_RETRY)
+-                      {
+-                              
+-
+-                              eError = PVRSRV_OK;
+-                      }
+-                      
+-                      
+-                      PDUMPRESUME();
+-              }
++      SGXTestActivePowerEvent(psDeviceNode, ISR_ID);
+ #endif 
+-      }
+-
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      if (psDevInfo->psSGXEventObject)
+-      {
+-              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
+-              if(psEventObject->hOSEventKM)
+-              {
+-                      OSEventObjectSignal(psEventObject->hOSEventKM);
+-              }
+-      }
+-
+-#endif
+-
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR, "SGX_MISRHandler error:%lu", eError));
+-      }
+ }
+ #endif 
+@@ -1494,7 +1086,6 @@
+ {
+       DEVICE_MEMORY_INFO *psDevMemoryInfo;
+       DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
+-      IMG_BOOL bSharedPB = IMG_TRUE;
+       
+       psDeviceNode->sDevId.eDeviceType        = DEV_DEVICE_TYPE;
+@@ -1684,13 +1275,8 @@
+                                                                                                               | PVRSRV_HAP_MULTI_PROCESS;
+       psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszName = "CacheCoherent";
+       psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszBSName = "CacheCoherent BS";
+-#if defined(SGX535)
+       
+       psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+-#else
+-      
+-      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+-#endif
+       
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapID = HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
+@@ -1698,32 +1284,23 @@
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszName = "3DParameters";
+       psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszBSName = "3DParameters BS";
+-
+-
+-      if(bSharedPB)
+-      {
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+-                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+-#if 0
+-                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++#if defined(SUPPORT_PERCONTEXT_PB)
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                      | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                      | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+ #else
+-                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
+-#endif
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
+-      }
+-      else
+-      {
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
+-                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
+-                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
+-              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
+-      }
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                      | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                      | PVRSRV_HAP_MULTI_PROCESS;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif                
+       
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_GENERAL_MAPPING_HEAP_ID);
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
+-      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_MULTI_PROCESS;
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszName = "GeneralMapping";
+       psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszBSName = "GeneralMapping BS";
+@@ -1767,23 +1344,7 @@
+       
+       psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
+-#if defined(SGX_FEATURE_2D_HARDWARE)
+-      psClientInfo->s2DSlavePort = psDevInfo->s2DSlavePortKM;
+-#endif
+-      psClientInfo->pvRegsBase = psDevInfo->pvRegsBaseKM;
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      if (psDevInfo->psSGXEventObject)
+-      {
+-              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
+-              psClientInfo->hOSEventKM = psEventObject->hOSEventKM;
+-      }
+-      else
+-      {
+-              psClientInfo->hOSEventKM = IMG_NULL;
+-      }
+-#endif
+-      
+       
+       OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
+@@ -1792,13 +1353,48 @@
+       return PVRSRV_OK;
+ }
++
+ IMG_EXPORT
+-PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_MISC_INFO *psMiscInfo)
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        SGX_MISC_INFO                 *psMiscInfo)
+ {
+-      PVR_UNREFERENCED_PARAMETER(psDevInfo);
+-
+       switch(psMiscInfo->eRequest)
+       {
++              case SGX_MISC_INFO_REQUEST_CLOCKSPEED:
++              {
++                      psMiscInfo->uData.ui32SGXClockSpeed = psDevInfo->ui32CoreClockSpeed;
++                      return PVRSRV_OK;
++              }
++#ifdef SUPPORT_SGX_HWPERF
++              case SGX_MISC_INFO_REQUEST_HWPERF_CB_ON:
++              {
++                      psDevInfo->psSGXHostCtl->ui32HWPerfFlags |= PVRSRV_SGX_HWPERF_ON;
++                      return PVRSRV_OK;
++              }
++              case SGX_MISC_INFO_REQUEST_HWPERF_CB_OFF:
++              {
++                      psDevInfo->psSGXHostCtl->ui32HWPerfFlags &= ~PVRSRV_SGX_HWPERF_ON;
++                      return PVRSRV_OK;
++              }
++              case SGX_MISC_INFO_REQUEST_HWPERF_RETRIEVE_CB:
++              {
++                      SGX_MISC_INFO_HWPERF_RETRIEVE_CB* psRetrieve = &psMiscInfo->uData.sRetrieveCB;
++                      PVRSRV_SGX_HWPERF_CB* psHWPerfCB = (PVRSRV_SGX_HWPERF_CB*)psDevInfo->psKernelHWPerfCBMemInfo->pvLinAddrKM;
++                      IMG_UINT i = 0;
++
++                      for (; psHWPerfCB->ui32Woff != psHWPerfCB->ui32Roff && i < psRetrieve->ui32ArraySize; i++)
++                      {
++                              PVRSRV_SGX_HWPERF_CBDATA* psData = &psHWPerfCB->psHWPerfCBData[psHWPerfCB->ui32Roff];
++                              OSMemCopy(&psRetrieve->psHWPerfData[i], psData, sizeof(PVRSRV_SGX_HWPERF_CBDATA));
++                              psRetrieve->psHWPerfData[i].ui32ClockSpeed = psDevInfo->ui32CoreClockSpeed;
++                              psRetrieve->psHWPerfData[i].ui32TimeMax = psDevInfo->ui32uKernelTimerClock;
++                              psHWPerfCB->ui32Roff = (psHWPerfCB->ui32Roff + 1) & (PVRSRV_SGX_HWPERF_CBSIZE - 1);
++                      }
++                      psRetrieve->ui32DataCount = i;
++                      psRetrieve->ui32Time = OSClockus();
++                      return PVRSRV_OK;
++              }
++#endif 
+               default:
+               {
+                       
+@@ -1807,3 +1403,55 @@
+       }
+ }
++
++#if defined(SUPPORT_SGX_HWPERF)
++IMG_EXPORT
++PVRSRV_ERROR SGXReadHWPerfCountersKM(PVRSRV_SGXDEV_INFO       *psDevInfo,
++                                                                       IMG_UINT32                     ui32PerfReg,
++                                                                       IMG_UINT32                     *pui32OldPerf,
++                                                                       IMG_BOOL                       bNewPerf,
++                                                                       IMG_UINT32                     ui32NewPerf,
++                                                                       IMG_UINT32                     ui32NewPerfReset,
++                                                                       IMG_UINT32                     ui32PerfCountersReg,
++                                                                       IMG_UINT32                     *pui32Counters,
++                                                                       IMG_UINT32                     *pui32KickTACounter,
++                                                                       IMG_UINT32                     *pui32KickTARenderCounter,
++                                                                       IMG_UINT32                     *pui32CPUTime,
++                                                                       IMG_UINT32                     *pui32SGXTime)
++{
++      IMG_UINT32      i;
++
++      
++
++      {
++              *pui32OldPerf = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32PerfReg);
++
++              for (i = 0; i < 9; ++i)
++              {
++                      pui32Counters[i] = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32PerfCountersReg + (i * 4));
++              }
++
++              *pui32KickTACounter = psDevInfo->ui32KickTACounter;
++              *pui32KickTARenderCounter = psDevInfo->ui32KickTARenderCounter;
++
++              *pui32CPUTime = OSClockus();
++              *pui32SGXTime = psDevInfo->psSGXHostCtl->ui32TimeWraps;
++      }
++
++      
++
++      if (bNewPerf)
++      {
++              if(ui32NewPerfReset != 0)
++              {
++                      OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32PerfReg, ui32NewPerf | ui32NewPerfReset);
++              }
++
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32PerfReg, ui32NewPerf);
++      }
++
++      return PVRSRV_OK;
++}
++#endif 
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c  2008-12-18 15:47:29.000000000 +0100
+@@ -24,11 +24,13 @@
+  *
+  ******************************************************************************/
++#include <stddef.h> 
+ #include "services_headers.h"
+ #include "sgxinfo.h"
+ #include "sgxinfokm.h"
+ #if defined (PDUMP)
+ #include "sgxapi_km.h"
++#include "pdump_km.h"
+ #endif
+ #include "sgx_bridge_km.h"
+ #include "osfunc.h"
+@@ -36,92 +38,241 @@
+ #include "sgxutils.h"
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psCCBKick, offset) \
++      ((psCCBKick)->offset + sizeof(type) < (psCCBMemInfo)->ui32AllocSize)
++
+ #define       CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
+       ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
+               (psCCBKick)->offset))
+-#define       CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, offset) \
+-              ((psCCBKick)->offset < (psCCBMemInfo)->ui32AllocSize)
+-
+ IMG_EXPORT
+ PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, PVR3DIF4_CCB_KICK *psCCBKick)
+ {
+       PVRSRV_ERROR eError;
+       PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
+       PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
+-      IMG_UINT32 *pui32DstReadOpsPendingVal;
+-      IMG_UINT32 *pui32DstWriteOpsPendingVal;
++      PVR3DIF4_CMDTA_SHARED *psTACmd;
+       IMG_UINT32 i;
++#if defined(SUPPORT_SGX_HWPERF)
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++#endif
+-#if defined(NO_HARDWARE)
+-      pui32DstReadOpsPendingVal = IMG_NULL;
+-      pui32DstWriteOpsPendingVal = IMG_NULL;
++#if defined(SUPPORT_SGX_HWPERF)
++      if (psCCBKick->bKickRender)
++      {
++              ++psDevInfo->ui32KickTARenderCounter;
++      }
++      ++psDevInfo->ui32KickTACounter;
+ #endif
+-      if (psCCBKick->hDstKernelSyncInfo != IMG_NULL)
++      if (!CCB_OFFSET_IS_VALID(PVR3DIF4_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset))
+       {
+-              
+-              if (!CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset) || !CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset))
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      psTACmd = CCB_DATA_FROM_OFFSET(PVR3DIF4_CMDTA_SHARED, psCCBMemInfo, psCCBKick, ui32CCBOffset);
++
++      
++      if (psCCBKick->hTA3DSyncInfo)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++              psTACmd->sTA3DDependancy.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++              psTACmd->sTA3DDependancy.ui32WriteOpPendingVal   = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++              if (psCCBKick->bTADependency)
+               {
+-                      PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: ui32DstReadOpsPendingOffset or ui32DstWriteOpsPendingOffset out of range"));
++                      psSyncInfo->psSyncData->ui32WriteOpsPending++;
+               }
+-              else
+-              {
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
+-                              pui32DstReadOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset);
+-                              pui32DstWriteOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset);
++      }
+-                              *pui32DstReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
+-                              *pui32DstWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
+-              }
++      if (psCCBKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
++              psTACmd->sTQSyncReadOpsCompleteDevVAddr  = psSyncInfo->sReadOpsCompleteDevVAddr;
++              psTACmd->sTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++              psTACmd->ui32TQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              psTACmd->ui32TQSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
+       }
++      if (psCCBKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++
++              psTACmd->s3DTQSyncReadOpsCompleteDevVAddr  = psSyncInfo->sReadOpsCompleteDevVAddr;
++              psTACmd->s3DTQSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++              psTACmd->ui323DTQSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              psTACmd->ui323DTQSyncWriteOpsPendingVal  = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++
++      psTACmd->ui32NumTAStatusVals = psCCBKick->ui32NumTAStatusVals;
+       if (psCCBKick->ui32NumTAStatusVals != 0)
+       {
+               
+               for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+               {
+-                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
+-                      {
+-                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+-                              *pui32TAStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+-                      }
+-                      else
+-                      {
+-                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui32TAStatusValueOffset[%d] out of range", i));
+-                      }
++                      psTACmd->sCtlTAStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++                      psTACmd->sCtlTAStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+               }
+       }
++      psTACmd->ui32Num3DStatusVals = psCCBKick->ui32Num3DStatusVals;
+       if (psCCBKick->ui32Num3DStatusVals != 0)
+       {
+               
+               for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+               {
+-                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
+-                      {
+-                              IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
+-                              *pui323DStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
+-                      }
+-                      else
++                      psTACmd->sCtl3DStatusInfo[i].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++                      psTACmd->sCtl3DStatusInfo[i].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++              }
++      }
++
++      
++      psTACmd->ui32NumSrcSyncs = psCCBKick->ui32NumSrcSyncs;
++      for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++              psTACmd->asSrcSyncs[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTACmd->asSrcSyncs[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++              
++              psTACmd->asSrcSyncs[i].ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              
++              psTACmd->asSrcSyncs[i].ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;  
++
++      }
++
++      if (psCCBKick->bFirstKickOrResume && psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hRenderSurfSyncInfo;
++              psTACmd->sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTACmd->sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++              psTACmd->ui32ReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++              psTACmd->ui32WriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++
++#if defined(PDUMP)
++              if (PDumpIsCaptureFrameKM())
++              {
++                      if (psSyncInfo->psSyncData->ui32LastOpDumpVal == 0)
+                       {
+-                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui323DStatusValueOffset[%d] out of range", i));
++                      
++                      PDUMPCOMMENT("Init render surface last op\r\n");
++
++                      PDUMPMEM(IMG_NULL,
++                              psSyncInfo->psSyncDataMemInfoKM,
++                              0,
++                              sizeof(PVRSRV_SYNC_DATA),
++                              0,
++                              MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++                      PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++                              psSyncInfo->psSyncDataMemInfoKM,
++                              offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete),
++                              sizeof(psSyncInfo->psSyncData->ui32WriteOpsComplete),
++                              0,
++                              MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
+                       }
++
++                      psSyncInfo->psSyncData->ui32LastOpDumpVal++;
+               }
++#endif        
+       }
++#if defined(PDUMP)
++      if (PDumpIsCaptureFrameKM())
++      {
++              PDUMPCOMMENT("Shared part of TA command\r\n");
++
++              PDUMPMEM(IMG_NULL, psCCBMemInfo, psCCBKick->ui32CCBOffset, sizeof(PVR3DIF4_CMDTA_SHARED), 0, MAKEUNIQUETAG(psCCBMemInfo));
++
++              if (psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++              {
++                      IMG_UINT32 ui32HackValue;
++
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hRenderSurfSyncInfo;
++                      ui32HackValue = psSyncInfo->psSyncData->ui32LastOpDumpVal - 1;
++
++                      PDUMPCOMMENT("Hack render surface last op in TA cmd\r\n");
++
++                      PDUMPMEM(&ui32HackValue,
++                              psCCBMemInfo,
++                              psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, ui32WriteOpsPendingVal),
++                              sizeof(IMG_UINT32),
++                              0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++
++                              ui32HackValue = 0;
++                              PDUMPCOMMENT("Hack render surface read op in TA cmd\r\n");
++
++                      PDUMPMEM(&ui32HackValue,
++                               psCCBMemInfo,
++                               psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, sReadOpsCompleteDevVAddr),
++                               sizeof(IMG_UINT32),
++                               0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++              }
++
++              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++
++                      PDUMPCOMMENT("Hack TA status value in TA cmd\r\n");
++
++                      PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++                               psCCBMemInfo,
++                               psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, sCtlTAStatusInfo[i].ui32StatusValue),
++                               sizeof(IMG_UINT32),
++                               0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++              }
++
++              for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                      PDUMPCOMMENT("Hack 3D status value in TA cmd\r\n");
++
++                      PDUMPMEM(&psSyncInfo->psSyncData->ui32LastOpDumpVal,
++                               psCCBMemInfo,
++                               psCCBKick->ui32CCBOffset + offsetof(PVR3DIF4_CMDTA_SHARED, sCtl3DStatusInfo[i].ui32StatusValue),
++                               sizeof(IMG_UINT32),
++                               0,
++                              MAKEUNIQUETAG(psCCBMemInfo));
++              }
++      }
++#endif        
++
+       eError = SGXScheduleCCBCommandKM(hDevHandle, psCCBKick->eCommand, &psCCBKick->sCommand, KERNEL_ID);
+       if (eError == PVRSRV_ERROR_RETRY)
+       {
+-              
+-              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
+-              psSyncInfo->psSyncData->ui32WriteOpsPending--;
++              if (psCCBKick->bFirstKickOrResume && psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++              {
++                      
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hRenderSurfSyncInfo;
++                      psSyncInfo->psSyncData->ui32WriteOpsPending--;
++              }
++
++              for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++                      psSyncInfo->psSyncData->ui32ReadOpsPending--;
++              }
++
+               return eError;
+       }
+       else if (PVRSRV_OK != eError)
+@@ -132,70 +283,66 @@
+ #if defined(NO_HARDWARE)
+-      if (psCCBKick->ui32NumTAStatusVals != 0)
+-      {
+-              
+-              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
+-              {
+-                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
+-                      {
+-                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
+-                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+-
+-                              psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui32TAStatusValue;
+-                      }
+-              }
+-      }
+       
+-      if (psCCBKick->bTerminate)
++      if (psCCBKick->hTA3DSyncInfo)
+       {
+-              if (psCCBKick->hUpdateDstKernelSyncInfo != IMG_NULL)
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTA3DSyncInfo;
++
++              if (psCCBKick->bTADependency)
+               {
+-                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hUpdateDstKernelSyncInfo;
+-                      psSyncInfo->psSyncData->ui32WriteOpsComplete = ((pui32DstWriteOpsPendingVal != IMG_NULL) ? *pui32DstWriteOpsPendingVal : psCCBKick->ui32WriteOpsPendingVal) + 1;
++                      psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
+               }
++      }
+-              if (psCCBKick->ui32Num3DStatusVals != 0)
+-              {
+-                      
+-                      for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
+-                      {
+-                              if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
+-                              {
+-                                      IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
+-                                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++      if (psCCBKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hTASyncInfo;
+-                                      psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui323DStatusValue;
+-                              }
+-                      }
+-              }
++              psSyncInfo->psSyncData->ui32ReadOpsComplete =  psSyncInfo->psSyncData->ui32ReadOpsPending;
+       }
+-#endif
+-      return eError;
+-}
++      if (psCCBKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->h3DSyncInfo;
++              psSyncInfo->psSyncData->ui32ReadOpsComplete =  psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
+-IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode)
+-{
+-      PVRSRV_ERROR                    eError;
+-      PVRSRV_SGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
+-      PVRSRV_SGX_HOST_CTL             *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
+-      IMG_UINT32                              ui32PowManFlags;
+-      PVRSRV_SGX_COMMAND              sCommand = {0};
++      
++      for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
+-      ui32PowManFlags = psHostCtl->ui32PowManFlags;
+-      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++              psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtlTAStatusInfo[i].ui32StatusValue;
++      }
++      
++      
++      for (i=0; i<psCCBKick->ui32NumSrcSyncs; i++)
+       {
+-              
+-              return;
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->ahSrcKernelSyncInfo[i];
++
++              psSyncInfo->psSyncData->ui32ReadOpsComplete =  psSyncInfo->psSyncData->ui32ReadOpsPending;
++
+       }
+-      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
+-      eError = SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, ISR_ID);
+-      if (eError != PVRSRV_OK)
++      if (psCCBKick->bTerminateOrAbort)
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueues failed to schedule CCB command: %lu", eError));
++              if (psCCBKick->hRenderSurfSyncInfo != IMG_NULL)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hRenderSurfSyncInfo;
++                      psSyncInfo->psSyncData->ui32WriteOpsComplete = psCCBKick->bFirstKickOrResume ? psSyncInfo->psSyncData->ui32WriteOpsPending : (psCCBKick->ui32WriteOpsPendingVal + 1);
++              }
++
++              
++              for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                      psSyncInfo->psSyncData->ui32ReadOpsComplete = psTACmd->sCtl3DStatusInfo[i].ui32StatusValue;
++              }
+       }
++#endif
++
++      return eError;
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c 1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxreset.c 2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,330 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++
++#include "pdump_km.h"
++
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM    15
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE15
++#else
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE0
++#endif
++
++
++static IMG_VOID SGXResetSoftReset(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                                                IMG_BOOL                              bResetBIF,
++                                                                IMG_UINT32                    ui32PDUMPFlags,
++                                                                IMG_BOOL                              bPDump)
++{
++      IMG_UINT32 ui32SoftResetRegVal =
++                                      #ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++                                      EUR_CR_SOFT_RESET_TWOD_RESET_MASK       |
++                                      #endif
++                                      EUR_CR_SOFT_RESET_DPM_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TA_RESET_MASK         |
++                                      EUR_CR_SOFT_RESET_USE_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_ISP_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++#if !defined(PDUMP)
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      if (bResetBIF)
++      {
++              ui32SoftResetRegVal |= EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++      }
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32SoftResetRegVal);
++      if (bPDump)
++      {
++              PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32SoftResetRegVal, ui32PDUMPFlags);
++      }
++}
++
++
++static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        IMG_UINT32                    ui32PDUMPFlags,
++                                                        IMG_BOOL                              bPDump)
++{
++#if !defined(PDUMP)
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      
++      OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++      if (bPDump)
++      {
++              PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
++#if defined(PDUMP)
++              PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
++#endif
++      }
++      
++      
++
++}
++
++
++static IMG_VOID SGXResetInvalDC(PVRSRV_SGXDEV_INFO    *psDevInfo,
++                                                          IMG_UINT32                  ui32PDUMPFlags,
++                                                              IMG_BOOL                        bPDump)
++{
++      IMG_UINT32 ui32RegVal;
++
++      
++      ui32RegVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      if (bPDump)
++      {
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++      }
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      if (bPDump)
++      {
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++      }
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, bPDump);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      {
++              
++
++
++              if (PollForValueKM((IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_BIF_MEM_REQ_STAT),
++                                                      0,
++                                                      EUR_CR_BIF_MEM_REQ_STAT_READS_MASK,
++                                                      MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                      WAIT_TRY_COUNT) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"Wait for DC invalidate failed."));
++              }
++              
++              if (bPDump)
++              {
++                      PDUMPREGPOLWITHFLAGS(EUR_CR_BIF_MEM_REQ_STAT, 0, EUR_CR_BIF_MEM_REQ_STAT_READS_MASK, ui32PDUMPFlags);
++              }
++      }
++#endif        
++}
++
++
++IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO  *psDevInfo,
++                                IMG_UINT32                     ui32PDUMPFlags)
++{
++      IMG_UINT32 ui32RegVal;
++
++      const IMG_UINT32 ui32BifFaultMask =
++                                              EUR_CR_BIF_INT_STAT_FAULT_MASK;
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      IMG_UINT32                      ui32BIFCtrl;
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      IMG_UINT32                      ui32BIFMemArb;
++#endif 
++#endif 
++
++#ifndef PDUMP
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      psDevInfo->ui32NumResets++;
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++      
++      ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++      if (ui32RegVal & ui32BifFaultMask)
++      {
++              
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      }
++#endif 
++
++      
++      SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
++
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      
++
++      ui32BIFMemArb   = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++                                        (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++                                        (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb, ui32PDUMPFlags);
++#endif 
++#endif 
++
++
++      
++
++
++
++
++      ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      
++      SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      
++
++      for (;;)
++      {
++              IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++              IMG_DEV_VIRTADDR sBifFault;
++              IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++              if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++              {
++                      break;
++              }
++              
++              
++
++
++              sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++              PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++              ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++              ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++              
++              SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++
++              
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
++              SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++      }
++
++
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      ui32BIFCtrl = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++#endif
++#if defined(FIX_HW_BRN_23410)
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++#endif
++
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32BIFCtrl);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32BIFCtrl, ui32PDUMPFlags);
++#endif 
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr);
++      PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++      
++      
++      SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      PVR_DPF((PVR_DBG_WARNING,"Soft Reset of SGX"));
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      
++      SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c      2008-12-18 15:47:29.000000000 +0100
+@@ -43,16 +43,314 @@
+ #include "pvr_debug.h"
+ #include "sgxutils.h"
+-IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
+-                                                                                      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr)
+-                                          
++#define CCB_OFFSET_IS_VALID(type, psCCBMemInfo, psKick, offset) \
++      ((psKick)->offset + sizeof(type) < (psCCBMemInfo)->ui32AllocSize)
++
++#define CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psKick, offset) \
++      ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
++      (psKick)->offset))
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle, PVRSRV_TRANSFER_SGX_KICK *psKick)
+ {
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
+       PVRSRV_SGX_COMMAND sCommand = {0};
++      PVR3DIF4_TRANSFERCMD_SHARED *psTransferCmd;
++      PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError;
++
++      if (!CCB_OFFSET_IS_VALID(PVR3DIF4_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      psTransferCmd =  CCB_DATA_FROM_OFFSET(PVR3DIF4_TRANSFERCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++      if (psTransferCmd->ui32NumStatusVals > SGXTQ_MAX_STATUS)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      if (psKick->ui32StatusFirstSync +
++              (psKick->ui32NumSrcSync ? (psKick->ui32NumSrcSync - 1) : 0) +
++              (psKick->ui32NumDstSync ? (psKick->ui32NumDstSync - 1) : 0) >
++                      psTransferCmd->ui32NumStatusVals)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              psTransferCmd->ui32TASyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              psTransferCmd->ui32TASyncReadOpsPendingVal  = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->sTASyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTransferCmd->sTASyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++      else
++      {
++              psTransferCmd->sTASyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++              psTransferCmd->sTASyncReadOpsCompleteDevVAddr.uiAddr = 0;
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              psTransferCmd->ui323DSyncWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              psTransferCmd->ui323DSyncReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->s3DSyncWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTransferCmd->s3DSyncReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++      else
++      {
++              psTransferCmd->s3DSyncWriteOpsCompleteDevVAddr.uiAddr = 0;
++              psTransferCmd->s3DSyncReadOpsCompleteDevVAddr.uiAddr = 0;
++      }
+-    sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
+-    sCommand.ui32Data[1] = sHWRenderContextDevVAddr.uiAddr;
+       
+-      return SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);  
++      psTransferCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++      psTransferCmd->ui32NumDstSync = psKick->ui32NumDstSync;
++
++      
++      if(psKick->ui32NumSrcSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++
++              psTransferCmd->ui32SrcWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++              psTransferCmd->ui32SrcReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->sSrcWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr; 
++              psTransferCmd->sSrcReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++      if(psKick->ui32NumDstSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++
++              psTransferCmd->ui32DstWriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++              psTransferCmd->ui32DstReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              psTransferCmd->sDstWriteOpsCompleteDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              psTransferCmd->sDstReadOpsCompleteDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      
++      if (psKick->ui32NumSrcSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[0];
++              psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++      }
++      if (psKick->ui32NumDstSync > 0)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[0];
++              psSyncInfo->psSyncData->ui32WriteOpsPending++;
++      }
++
++      
++      if (psKick->ui32NumSrcSync > 1)
++      {
++              for(i = 1; i < psKick->ui32NumSrcSync; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].ui32StatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].sStatusDevAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++
++                      psKick->ui32StatusFirstSync++;
++              }
++      }
++
++      if (psKick->ui32NumDstSync > 1)
++      {
++              for(i = 1; i < psKick->ui32NumDstSync; i++)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].ui32StatusValue = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++
++                      psTransferCmd->sCtlStatusInfo[psKick->ui32StatusFirstSync].sStatusDevAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++
++                      psKick->ui32StatusFirstSync++;
++              }
++      }
++
++#if defined(PDUMP)
++      PDUMPCOMMENT("Shared part of transfer command\r\n");
++      PDUMPMEM(IMG_NULL,
++                      psCCBMemInfo,
++                      psKick->ui32SharedCmdCCBOffset,
++                      sizeof(PVR3DIF4_TRANSFERCMD_SHARED),
++                      0,
++                      MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
++      sCommand.ui32Data[1] = psKick->sHWTransferContextDevVAddr.uiAddr;
++      
++      eError = SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);        
++
++#if defined(NO_HARDWARE)
++      
++      for(i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++              psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
++
++      for(i = 0; i < psKick->ui32NumDstSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahDstSyncInfo[i];
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++      }
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++#endif
++
++      return eError;
+ }
+-#endif 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_EXPORT PVRSRV_ERROR SGXSubmit2DKM(IMG_HANDLE hDevHandle, PVRSRV_2D_SGX_KICK *psKick)
++                                          
++{
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psKick->hCCBMemInfo;
++      PVRSRV_SGX_COMMAND sCommand = {0};
++      PVR3DIF4_2DCMD_SHARED *ps2DCmd;
++      PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++      IMG_BOOL bDstSyncDone = IMG_FALSE;
++      PVRSRV_ERROR eError;
++      IMG_UINT32 i;
++
++      if (!CCB_OFFSET_IS_VALID(PVR3DIF4_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset))
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      ps2DCmd =  CCB_DATA_FROM_OFFSET(PVR3DIF4_2DCMD_SHARED, psCCBMemInfo, psKick, ui32SharedCmdCCBOffset);
++
++      OSMemSet(ps2DCmd, 0, sizeof(*ps2DCmd));
++
++      
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              ps2DCmd->sTASyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              ps2DCmd->sTASyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              ps2DCmd->sTASyncData.sWriteOpsCompleteDevVAddr  = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->sTASyncData.sReadOpsCompleteDevVAddr   = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              ps2DCmd->s3DSyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              ps2DCmd->s3DSyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++              ps2DCmd->s3DSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->s3DSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      ps2DCmd->ui32NumSrcSync = psKick->ui32NumSrcSync;
++      for (i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++              if (psSyncInfo == (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo)
++              {
++                      ps2DCmd->sSrcSyncData[i].ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++                      ps2DCmd->sSrcSyncData[i].ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++
++                      ps2DCmd->sDstSyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++                      ps2DCmd->sDstSyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++
++                      bDstSyncDone = IMG_TRUE;
++              }
++              else
++              {
++                      ps2DCmd->sSrcSyncData[i].ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending;
++                      ps2DCmd->sSrcSyncData[i].ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++              }
++
++              ps2DCmd->sSrcSyncData[i].sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->sSrcSyncData[i].sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++      if (psKick->hDstSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++
++              if (!bDstSyncDone)
++              {
++                      ps2DCmd->sDstSyncData.ui32WriteOpPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++                      ps2DCmd->sDstSyncData.ui32ReadOpPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++              }
++
++              ps2DCmd->sDstSyncData.sWriteOpsCompleteDevVAddr = psSyncInfo->sWriteOpsCompleteDevVAddr;
++              ps2DCmd->sDstSyncData.sReadOpsCompleteDevVAddr = psSyncInfo->sReadOpsCompleteDevVAddr;
++      }
++
++#if defined(PDUMP)
++      
++      PDUMPCOMMENT("Shared part of 2D command\r\n");
++      PDUMPMEM(IMG_NULL,
++                      psCCBMemInfo,
++                      psKick->ui32SharedCmdCCBOffset,
++                      sizeof(PVR3DIF4_2DCMD_SHARED),
++                      0,
++                      MAKEUNIQUETAG(psCCBMemInfo));
++#endif
++
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_2DCMD;
++      sCommand.ui32Data[1] = psKick->sHW2DContextDevVAddr.uiAddr;
++      
++      eError = SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);        
++
++#if defined(NO_HARDWARE)
++      
++      for(i = 0; i < psKick->ui32NumSrcSync; i++)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->ahSrcSyncInfo[i];
++              psSyncInfo->psSyncData->ui32ReadOpsComplete = psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
++
++      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hDstSyncInfo;
++      psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++
++      if (psKick->hTASyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->hTASyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++
++      if (psKick->h3DSyncInfo != IMG_NULL)
++      {
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psKick->h3DSyncInfo;
++
++              psSyncInfo->psSyncData->ui32WriteOpsComplete = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++#endif
++
++      return eError;
++}
++#endif        
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c 2008-12-18 15:47:29.000000000 +0100
+@@ -46,6 +46,79 @@
+ #include <stdio.h>
+ #endif
++#if defined(SYS_CUSTOM_POWERDOWN)
++PVRSRV_ERROR SysPowerDownMISR(IMG_UINT32 ui32DeviceIndex, IMG_UINT32 ui32CallerID);
++#endif
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++IMG_VOID SGXTestActivePowerEvent (PVRSRV_DEVICE_NODE  *psDeviceNode,
++                                                                IMG_UINT32                    ui32CallerID)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      PVRSRV_SGXDEV_INFO      *psDevInfo = psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++
++      if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) &&
++              !(psSGXHostCtl->ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST))
++      {
++              
++
++              {
++                      
++                      PDUMPSUSPEND();
++              
++#if defined(SYS_CUSTOM_POWERDOWN)
++                      
++
++
++                      eError = SysPowerDownMISR(psDeviceNode->sDevId.ui32DeviceIndex, ui32CallerID);
++#else                 
++                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                                               PVRSRV_POWER_STATE_D3,
++                                                                                               ui32CallerID, IMG_FALSE);
++                      if (eError == PVRSRV_OK)
++                      {
++                              
++                              psSGXHostCtl->ui32NumActivePowerEvents++;
++                              
++                              if ((*(volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags)
++                                      & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++                              {
++                                      
++
++
++                                      if (ui32CallerID == ISR_ID)
++                                      {
++                                              psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++                                      }
++                                      else
++                                      {
++                                              SGXScheduleProcessQueues(psDeviceNode);
++                                      }
++                              }
++                      }
++#endif
++                      if (eError == PVRSRV_ERROR_RETRY)
++                      {
++                              
++
++                              eError = PVRSRV_OK;
++                      }
++                      
++                      
++                      PDUMPRESUME();
++              }
++      }
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXTestActivePowerEvent error:%lu", eError));
++      }
++}
++#endif 
++
++
+ #ifdef INLINE_IS_PRAGMA
+ #pragma inline(SGXAcquireKernelCCBSlot)
+ #endif
+@@ -255,147 +328,43 @@
+ Exit:
+       PVRSRVPowerUnlock(ui32CallerID);
+-      return eError;
+-}
+-
+-
+-#if 0 
+-PVRSRV_ERROR CreateCCB(PVRSRV_SGXDEV_INFO     *psSGXDevInfo,
+-                                         IMG_UINT32                   ui32CCBSize,
+-                                         IMG_UINT32                   ui32AllocGran,
+-                                         IMG_UINT32                   ui32OverrunSize,
+-                                         IMG_HANDLE                   hDevMemHeap,
+-                                         PVRSRV_SGX_CCB               **ppsCCB)
+-{
+-      PVRSRV_SGX_CCB  *psCCB;
+-
+-      PVR_UNREFERENCED_PARAMETER(psSGXDevInfo);
+-
+-      psCCB = IMG_NULL;
+-
+-      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
+-                                 sizeof(PVRSRV_SGX_CCB),
+-                                 (IMG_VOID **)&psCCB,
+-                                 IMG_NULL) != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: psCCB alloc failed"));
+-
+-              return PVRSRV_ERROR_OUT_OF_MEMORY;
+-      }
+-
+-      
+-      psCCB->psCCBMemInfo = IMG_NULL;
+-      psCCB->psCCBCtlMemInfo = IMG_NULL;
+-      psCCB->pui32CCBLinAddr = IMG_NULL;
+-      psCCB->pui32WriteOffset = IMG_NULL;
+-      psCCB->pui32ReadOffset = IMG_NULL;
+-
+-      #ifdef PDUMP
+-      psCCB->ui32CCBDumpWOff = 0;
+-      #endif
+-
+-      
+-      if ( ui32CCBSize < 0x1000 )
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (ui32CallerID != ISR_ID)
+       {
+-              IMG_UINT32      i, ui32PowOfTwo;
++              
+-              ui32PowOfTwo = 0x1000;
+-              for (i = 12; i > 0; i--)
+-              {
+-                      if (ui32CCBSize & ui32PowOfTwo)
+-                      {
+-                              break;
+-                      }
+-      
+-                      ui32PowOfTwo >>= 1;
+-              }
+-      
+-              if (ui32CCBSize & (ui32PowOfTwo - 1))
+-              {
+-                      ui32PowOfTwo <<= 1;
+-              }
+-
+-              ui32AllocGran = ui32PowOfTwo;
+-      }
+-      else
+-      {
+-              ui32AllocGran = 0x1000;
++              SGXTestActivePowerEvent(psDeviceNode, ui32CallerID);
+       }
++#endif 
+-      
+-      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
+-                                                         hDevMemHeap,
+-                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
+-                                                         ui32CCBSize + ui32OverrunSize,
+-                                                         ui32AllocGran,
+-                                                         &psCCB->psCCBMemInfo) != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBMemInfo alloc failed"));
+-
+-              goto ErrorExit;
+-      }
++      return eError;
++}
+-      psCCB->pui32CCBLinAddr = psCCB->psCCBMemInfo->pvLinAddrKM;
+-      psCCB->sCCBDevAddr = psCCB->psCCBMemInfo->sDevVAddr;
+-      psCCB->ui32Size = ui32CCBSize;
+-      psCCB->ui32AllocGran = ui32AllocGran;
++IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_SGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL             *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++      IMG_UINT32                              ui32PowManFlags;
++      PVRSRV_SGX_COMMAND              sCommand = {0};
+-      
+-      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
+-                                                         hDevMemHeap,
+-                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
+-                                                         sizeof(PVRSRV_SGX_CCB_CTL),
+-                                                         32,
+-                                                         &psCCB->psCCBCtlMemInfo) != PVRSRV_OK)
++      ui32PowManFlags = psHostCtl->ui32PowManFlags;
++      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
+       {
+-              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBCtlMemInfo alloc failed"));
+-
+-              goto ErrorExit;
++              
++              return;
+       }
+-      
+-      psCCB->pui32WriteOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32WriteOffset;
+-      psCCB->pui32ReadOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32ReadOffset;
+-
+-      
+-      *psCCB->pui32WriteOffset = 0;
+-      *psCCB->pui32ReadOffset = 0;
+-
+-      
+-      *ppsCCB = psCCB;
+-
+-      return PVRSRV_OK;
+-
+-ErrorExit:
+-
+-      
+-      if (psCCB->psCCBMemInfo)
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
++      eError = SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, ISR_ID);
++      if (eError != PVRSRV_OK)
+       {
+-              PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
++              PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueues failed to schedule CCB command: %lu", eError));
+       }
+-
+-      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
+-
+-      return PVRSRV_ERROR_OUT_OF_MEMORY;
+-;
+ }
+-IMG_VOID DestroyCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32PFlags)
+-{
+-      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
+-
+-      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBCtlMemInfo, IMG_FALSE);
+-      if (!(ui32PFlags & PFLAGS_POWERDOWN))
+-      {
+-              if (psCCB)
+-              {
+-                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
+-              }
+-      }
+-}
+-#endif 
+ #if defined (PDUMP)
+ IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray,
+                                                IMG_UINT32                                             ui32BufferArrayLength,
+@@ -513,18 +482,6 @@
+       psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
+       psSGXInternalDevInfo->ui32RegFlags = (IMG_BOOL)psDevInfo->ui32RegFlags;
+-#if defined(SUPPORT_SGX_EVENT_OBJECT)
+-      if (psDevInfo->psSGXEventObject)
+-      {
+-              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
+-              psSGXInternalDevInfo->hOSEvent = psEventObject->hOSEventKM;
+-      }
+-      else
+-      {
+-              psSGXInternalDevInfo->hOSEvent = IMG_NULL;
+-      }
+-#endif
+-
+       
+       psSGXInternalDevInfo->hCtlKernelMemInfoHandle =
+               (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
+@@ -532,11 +489,11 @@
+       return PVRSRV_OK;
+ }
+-static IMG_VOID SGXCleanupRequest(PVRSRV_SGXDEV_INFO  *psSGXDevInfo,
++static IMG_VOID SGXCleanupRequest(PVRSRV_DEVICE_NODE  *psDeviceNode,
+                                                                 IMG_DEV_VIRTADDR              *psHWDataDevVAddr,
+-                                                                IMG_BOOL                              bContextCleanup)
++                                                                IMG_UINT32                    ui32ResManRequestFlag)
+ {
+-      IMG_UINT32                              ui32ResManRequestFlag = 0;
++      PVRSRV_SGXDEV_INFO              *psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
+       PVRSRV_KERNEL_MEM_INFO  *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
+       PVRSRV_SGX_HOST_CTL             *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psSGXHostCtlMemInfo->pvLinAddrKM;
+       IMG_UINT32                              ui32PowManFlags;
+@@ -554,25 +511,18 @@
+               
+               if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PDCACHE)
+               {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
++                      psSGXHostCtl->ui32ResManFlags |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
+                       psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PDCACHE;
+               }
+               if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PTCACHE)
+               {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
++                      psSGXHostCtl->ui32ResManFlags |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
+                       psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PTCACHE;
+               }
+-              if (bContextCleanup)
+-              {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST;
+-              }
+-              else
+-              {
+-                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST;
+-              }
+-              
++
+               
+               psSGXHostCtl->sResManCleanupData.uiAddr = psHWDataDevVAddr->uiAddr;
++              
+               psSGXHostCtl->ui32ResManFlags |= ui32ResManRequestFlag;
+               
+@@ -581,6 +531,9 @@
+               PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
+               
++              SGXScheduleProcessQueues(psDeviceNode);
++
++              
+               #if !defined(NO_HARDWARE)
+               if(PollForValueKM ((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32ResManFlags),
+                                       PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
+@@ -612,8 +565,8 @@
+ typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
+ {
+-      PVRSRV_SGXDEV_INFO *psDevInfo;
+-      IMG_DEV_VIRTADDR sHWDataDevVAddr;
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
+       IMG_HANDLE hBlockAlloc;
+       PRESMAN_ITEM psResItem;
+ } SGX_HW_RENDER_CONTEXT_CLEANUP;
+@@ -625,8 +578,8 @@
+       PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
+       PVR_UNREFERENCED_PARAMETER(ui32Param);
+-      SGXCleanupRequest(psCleanup->psDevInfo,
+-                                                      &psCleanup->sHWDataDevVAddr, IMG_TRUE);
++      SGXCleanupRequest(psCleanup->psDeviceNode,
++                                                      &psCleanup->sHWRenderContextDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST);
+       OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
+                         sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
+@@ -636,8 +589,34 @@
+       return PVRSRV_OK;
+ }
++typedef struct _SGX_HW_TRANSFER_CONTEXT_CLEANUP_
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_DEV_VIRTADDR sHWTransferContextDevVAddr;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResItem;
++} SGX_HW_TRANSFER_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHWTransferContextCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      SGXCleanupRequest(psCleanup->psDeviceNode,
++                                                      &psCleanup->sHWTransferContextDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_TC_REQUEST);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++                        psCleanup,
++                        psCleanup->hBlockAlloc);
++
++      return PVRSRV_OK;
++}
++
+ IMG_EXPORT
+-IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr)
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr)
+ {
+       PVRSRV_ERROR eError;
+       IMG_HANDLE hBlockAlloc;
+@@ -656,8 +635,8 @@
+       }
+       psCleanup->hBlockAlloc = hBlockAlloc;
+-      psCleanup->psDevInfo = psSGXDevInfo;
+-      psCleanup->sHWDataDevVAddr = *psHWRenderContextDevVAddr;
++      psCleanup->psDeviceNode = (PVRSRV_DEVICE_NODE *)psDeviceNode;
++      psCleanup->sHWRenderContextDevVAddr = *psHWRenderContextDevVAddr;
+       psResItem = ResManRegisterRes(RESMAN_TYPE_HW_RENDER_CONTEXT,
+                                                                 (IMG_VOID *)psCleanup,
+@@ -682,25 +661,173 @@
+ }
+ IMG_EXPORT
+-IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
+ {
+-      PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++      PVRSRV_ERROR eError;
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
+-      SGXCleanupRequest(psDevInfo, &sHWRTDataSetDevVAddr, IMG_FALSE);
++      PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++      eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
++
++      return eError;
+ }
+ IMG_EXPORT
+-PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr)
+ {
+       PVRSRV_ERROR eError;
+-      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++      IMG_HANDLE hBlockAlloc;
++      SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++      PRESMAN_ITEM psResItem;
+-      PVR_ASSERT(hHWRenderContext != IMG_NULL);
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++                                              (IMG_VOID **)&psCleanup,
++                                              &hBlockAlloc);
+-      psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: Couldn't allocate memory for SGX_HW_TRANSFER_CONTEXT_CLEANUP structure"));
++              return IMG_NULL;
++      }
++
++      psCleanup->hBlockAlloc = hBlockAlloc;
++      psCleanup->psDeviceNode = (PVRSRV_DEVICE_NODE *)psDeviceNode;
++      psCleanup->sHWTransferContextDevVAddr = *psHWTransferContextDevVAddr;
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_HW_TRANSFER_CONTEXT,
++                                                                (IMG_VOID *)psCleanup,
++                                                                0,
++                                                                &SGXCleanupHWTransferContextCallback,
++                                                                0);
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWTransferContextKM: ResManRegisterRes failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(SGX_HW_TRANSFER_CONTEXT_CLEANUP),
++                                psCleanup,
++                                psCleanup->hBlockAlloc);
++
++              return IMG_NULL;
++      }
++
++      psCleanup->psResItem = psResItem;
++
++      return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext)
++{
++      PVRSRV_ERROR eError;
++      SGX_HW_TRANSFER_CONTEXT_CLEANUP *psCleanup;
++
++      PVR_ASSERT(hHWTransferContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_TRANSFER_CONTEXT_CLEANUP *)hHWTransferContext;
++
++      eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
++
++      return eError;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++typedef struct _SGX_HW_2D_CONTEXT_CLEANUP_
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_DEV_VIRTADDR sHW2DContextDevVAddr;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResItem;
++} SGX_HW_2D_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHW2DContextCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      SGX_HW_2D_CONTEXT_CLEANUP *psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      SGXCleanupRequest(psCleanup->psDeviceNode,
++                                                      &psCleanup->sHW2DContextDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_2DC_REQUEST);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++                        psCleanup,
++                        psCleanup->hBlockAlloc);
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR *psHW2DContextDevVAddr)
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hBlockAlloc;
++      SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++      PRESMAN_ITEM psResItem;
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++                                              (IMG_VOID **)&psCleanup,
++                                              &hBlockAlloc);
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: Couldn't allocate memory for SGX_HW_2D_CONTEXT_CLEANUP structure"));
++              return IMG_NULL;
++      }
++
++      psCleanup->hBlockAlloc = hBlockAlloc;
++      psCleanup->psDeviceNode = (PVRSRV_DEVICE_NODE *)psDeviceNode;
++      psCleanup->sHW2DContextDevVAddr = *psHW2DContextDevVAddr;
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_HW_2D_CONTEXT,
++                                                                (IMG_VOID *)psCleanup,
++                                                                0,
++                                                                &SGXCleanupHW2DContextCallback,
++                                                                0);
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHW2DContextKM: ResManRegisterRes failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(SGX_HW_2D_CONTEXT_CLEANUP),
++                                psCleanup,
++                                psCleanup->hBlockAlloc);
++
++              return IMG_NULL;
++      }
++
++      psCleanup->psResItem = psResItem;
++
++      return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext)
++{
++      PVRSRV_ERROR eError;
++      SGX_HW_2D_CONTEXT_CLEANUP *psCleanup;
++
++      PVR_ASSERT(hHW2DContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_2D_CONTEXT_CLEANUP *)hHW2DContext;
+       eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
+       return eError;
+ }
++#endif
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psDeviceNode, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++      PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != IMG_NULL);
++
++      SGXCleanupRequest((PVRSRV_DEVICE_NODE *)psDeviceNode, &sHWRTDataSetDevVAddr, PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST);
++}
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h 2008-12-18 15:47:29.000000000 +0100
+@@ -73,6 +73,13 @@
+                                                IMG_BOOL                                               bDumpPolls);
+ #endif
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++IMG_IMPORT
++IMG_VOID SGXTestActivePowerEvent(PVRSRV_DEVICE_NODE   *psDeviceNode,
++                                                               IMG_UINT32                     ui32CallerID);
++#endif 
++
+ IMG_IMPORT
+ PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                       *psDeviceNode,
+                                                                        PVRSRV_SGX_COMMAND_TYPE        eCommandType,
+@@ -80,14 +87,31 @@
+                                                                        IMG_UINT32                                     ui32CallerID);
+ IMG_IMPORT
++IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
+ IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
+ IMG_IMPORT
+-IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr);
++IMG_HANDLE SGXRegisterHWRenderContextKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr);
+ IMG_IMPORT
+-IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++IMG_HANDLE SGXRegisterHWTransferContextKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR *psHWTransferContextDevVAddr);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
+ IMG_IMPORT
+ PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWTransferContextKM(IMG_HANDLE hHWTransferContext);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHW2DContextKM(IMG_HANDLE psSGXDevInfo, IMG_DEV_VIRTADDR *psHW2DContextDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHW2DContextKM(IMG_HANDLE hHW2DContext);
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h   2008-12-18 15:47:29.000000000 +0100
+@@ -33,6 +33,12 @@
+ #define PVRSRV_MAX_BRIDGE_IN_SIZE     0x1000
+ #define PVRSRV_MAX_BRIDGE_OUT_SIZE    0x1000
++typedef       struct _PVR_PCI_DEV_TAG
++{
++      struct pci_dev          *psPCIDev;
++      HOST_PCI_INIT_FLAGS     ePCIFlags;
++      IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} PVR_PCI_DEV;
+ typedef struct _ENV_DATA_TAG
+ {
+@@ -43,8 +49,6 @@
+       IMG_UINT32              ui32IRQ;
+       IMG_VOID                *pvISRCookie;
+       struct tasklet_struct   sMISRTasklet;
+-      struct pci_dev          *psPCIDev;
+-      IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
+ } ENV_DATA;
+ #endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c      1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/event.c      2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,221 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++   rwlock_t                      sLock;
++   struct list_head        sList;
++   
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++      struct completion sCompletion;
++      struct list_head        sList;
++      IMG_HANDLE                      hResItem;                               
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
++{
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
++
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), 
++              (IMG_VOID **)&psEvenObjectList, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));           
++              return PVRSRV_ERROR_OUT_OF_MEMORY;      
++      }
++
++    INIT_LIST_HEAD(&psEvenObjectList->sList);
++
++      rwlock_init(&psEvenObjectList->sLock);
++      
++      *phEventObjectList = (IMG_HANDLE *) psEvenObjectList;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
++{
++
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList ;
++
++      if(psEvenObjectList)    
++      {
++              if (!list_empty(&psEvenObjectList->sList)) 
++              {
++                       PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
++                       return PVRSRV_ERROR_GENERIC;
++              }
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT_LIST), psEvenObjectList, IMG_NULL);
++      }
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject, IMG_BOOL bResManCallback)
++{
++      if(hOSEventObjectList)
++      {
++              PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
++              if(hOSEventObject)
++              {
++                      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; 
++                      write_lock_bh(&psLinuxEventObjectList->sLock);
++                      list_del(&psLinuxEventObject->sList);
++                      write_unlock_bh(&psLinuxEventObjectList->sLock);
++              
++                      
++                      if(!bResManCallback && psLinuxEventObject->hResItem)
++                      {
++                              if(ResManFreeResByPtr(psLinuxEventObject->hResItem, IMG_FALSE) != PVRSRV_OK)
++                              {
++                                      return PVRSRV_ERROR_GENERIC;
++                              }
++                      }
++                      
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), psLinuxEventObject, IMG_NULL);
++                      
++                      return PVRSRV_OK;
++              }
++      }
++      return PVRSRV_ERROR_GENERIC;
++
++}
++
++static PVRSRV_ERROR LinuxEventObjectDeleteCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      if(pvParam)             
++      {       
++              PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)pvParam; 
++              if(psLinuxEventObject->psLinuxEventObjectList)
++              {
++                      IMG_HANDLE hOSEventObjectList = (IMG_HANDLE)psLinuxEventObject->psLinuxEventObjectList; 
++                      return LinuxEventObjectDelete(hOSEventObjectList,(IMG_HANDLE) psLinuxEventObject, IMG_TRUE);
++              }
++      }       
++      return PVRSRV_ERROR_GENERIC;
++}
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
++ {
++      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; 
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
++
++      
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(PVRSRV_LINUX_EVENT_OBJECT), 
++              (IMG_VOID **)&psLinuxEventObject, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));            
++              return PVRSRV_ERROR_OUT_OF_MEMORY;      
++      }
++      
++      INIT_LIST_HEAD(&psLinuxEventObject->sList);
++
++      init_completion(&psLinuxEventObject->sCompletion);      
++    
++
++      psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
++
++      psLinuxEventObject->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_EVENT_OBJECT,
++                                                                                                                              psLinuxEventObject,
++                                                                                                                              0,
++                                                                                                                              &LinuxEventObjectDeleteCallback,
++                                                                                                                              0);     
++
++      write_lock_bh(&psLinuxEventObjectList->sLock);
++      list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
++    write_unlock_bh(&psLinuxEventObjectList->sLock);
++      
++      *phOSEventObject = psLinuxEventObject;
++
++      return PVRSRV_OK;        
++}
++
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
++{
++      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
++      PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; 
++      struct list_head *psListEntry, *psListEntryTemp, *psList;
++      psList = &psLinuxEventObjectList->sList;
++
++      list_for_each_safe(psListEntry, psListEntryTemp, psList) 
++      {
++                              psLinuxEventObject = list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList); 
++                              complete(&psLinuxEventObject->sCompletion);                             
++      }
++      return  PVRSRV_OK;
++      
++}
++
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout)
++{
++      PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))             
++      if(wait_for_completion_timeout(&psLinuxEventObject->sCompletion, msecs_to_jiffies(ui32MSTimeout)) == 0)
++      {
++              return PVRSRV_ERROR_TIMEOUT;
++      }
++#else
++      wait_for_completion(&psLinuxEventObject->sCompletion);
++#endif        
++      return  PVRSRV_OK;
++}
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h      1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/event.h      2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,32 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObjectList, IMG_HANDLE hOSEventObject, IMG_BOOL bResManCallback);
++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, IMG_UINT32 ui32MSTimeout);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile      1970-01-01 01:00:00.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/kbuild/Makefile      2008-12-18 15:47:29.000000000 +0100
+@@ -0,0 +1,81 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++# 
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++# 
++# This program is distributed in the hope it will be useful but, except 
++# as otherwise stated in writing, without any warranty; without even the 
++# implied warranty of merchantability or fitness for a particular purpose. 
++# See the GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++# 
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++# 
++#
++
++#
++MODULE                = pvrsrvkm
++
++KBUILDROOT    = ../../../..
++
++INCLUDES =    -I$(EURASIAROOT)/include4 \
++                      -I$(EURASIAROOT)/services4/include \
++                      -I$(EURASIAROOT)/services4/srvkm/env/linux \
++                      -I$(EURASIAROOT)/services4/srvkm/include \
++                      -I$(EURASIAROOT)/services4/srvkm/bridged \
++                      -I$(EURASIAROOT)/services4/srvkm/devices/sgx \
++                      -I$(EURASIAROOT)/services4/system/$(PVR_SYSTEM) \
++                      -I$(EURASIAROOT)/services4/system/include 
++
++
++SOURCES             = $(KBUILDROOT)/srvkm/env/linux/osfunc.c \
++                              $(KBUILDROOT)/srvkm/env/linux/mmap.c \
++                              $(KBUILDROOT)/srvkm/env/linux/module.c \
++                              $(KBUILDROOT)/srvkm/env/linux/pdump.c \
++                              $(KBUILDROOT)/srvkm/env/linux/proc.c \
++                              $(KBUILDROOT)/srvkm/env/linux/pvr_bridge_k.c \
++                              $(KBUILDROOT)/srvkm/env/linux/pvr_debug.c \
++                              $(KBUILDROOT)/srvkm/env/linux/mm.c \
++                              $(KBUILDROOT)/srvkm/env/linux/mutex.c \
++                              $(KBUILDROOT)/srvkm/env/linux/event.c
++
++SOURCES            += $(KBUILDROOT)/srvkm/common/buffer_manager.c \
++                              $(KBUILDROOT)/srvkm/common/devicemem.c \
++                              $(KBUILDROOT)/srvkm/common/deviceclass.c \
++                              $(KBUILDROOT)/srvkm/common/handle.c \
++                              $(KBUILDROOT)/srvkm/common/hash.c \
++                              $(KBUILDROOT)/srvkm/common/metrics.c \
++                              $(KBUILDROOT)/srvkm/common/pvrsrv.c \
++                              $(KBUILDROOT)/srvkm/common/queue.c \
++                              $(KBUILDROOT)/srvkm/common/ra.c \
++                              $(KBUILDROOT)/srvkm/common/resman.c \
++                              $(KBUILDROOT)/srvkm/common/power.c \
++                              $(KBUILDROOT)/srvkm/common/mem.c \
++                              $(KBUILDROOT)/srvkm/bridged/bridged_pvr_bridge.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxinit.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxreset.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxutils.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxkick.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgxtransfer.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/mmu.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/pb.c \
++                              $(KBUILDROOT)/srvkm/common/perproc.c \
++                              $(KBUILDROOT)/../services4/system/$(PVR_SYSTEM)/sysconfig.c \
++                              $(KBUILDROOT)/../services4/system/$(PVR_SYSTEM)/sysutils.c \
++                              $(KBUILDROOT)/srvkm/devices/sgx/sgx2dcore.c
++
++
++INCLUDES += -I$(EURASIAROOT)/services4/srvkm/hwdefs 
++
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c 2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c 2008-12-18 15:47:29.000000000 +0100
+@@ -37,6 +37,7 @@
+ #endif
+ #include <linux/slab.h>
+ #include <linux/highmem.h>
++#include <linux/sched.h>
+ #include "img_defs.h"
+ #include "services.h"
+@@ -1078,7 +1079,11 @@
+ #if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
+     ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
+ #endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+     return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL);
++#else
++    return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL, NULL);
++#endif
+ }
+@@ -1445,9 +1450,6 @@
+ const IMG_CHAR *
+ LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
+ {
+-    PVR_ASSERT(LINUX_MEM_AREA_TYPE_COUNT == 5);
+-    PVR_ASSERT(eMemAreaType < LINUX_MEM_AREA_TYPE_COUNT);
+-    
+     
+     switch(eMemAreaType)
+     {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c     2008-12-18 15:47:29.000000000 +0100
+@@ -25,7 +25,7 @@
+  ******************************************************************************/
+ #ifndef AUTOCONF_INCLUDED
+-// #include <linux/config.h>
++ #include <linux/config.h>
+ #endif
+ #include <linux/init.h>
+@@ -34,9 +34,19 @@
+ #include <linux/version.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
++
+ #if defined(LDM_PLATFORM)
+ #include <linux/platform_device.h>
+ #endif 
++
++#if defined(LDM_PCI)
++#include <linux/pci.h>
++#endif 
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++#include <asm/uaccess.h>
++#endif
++
+ #include "img_defs.h"
+ #include "services.h"
+ #include "kerneldisplay.h"
+@@ -51,15 +61,13 @@
+ #include "handle.h"
+ #include "pvr_bridge_km.h"
+ #include "proc.h"
+-
++#include "pvrmodule.h"
+ #define CLASSNAME     "powervr"
+ #define DRVNAME               "pvrsrvkm"
+ #define DEVNAME               "pvrsrvkm"
+-MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+-MODULE_LICENSE("GPL");
+ MODULE_SUPPORTED_DEVICE(DEVNAME);
+ #ifdef DEBUG
+ static int debug = DBGPRIV_WARNING;
+@@ -99,24 +107,75 @@
+ };
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
+ #if defined(LDM_PLATFORM)
+-static int PVRSRVDriverRemove(struct platform_device *device);
+-static int PVRSRVDriverProbe(struct platform_device *device);
+-static int PVRSRVDriverSuspend(struct platform_device *device, pm_message_t state);
+-static void PVRSRVDriverShutdown(struct platform_device *device);
+-static int PVRSRVDriverResume(struct platform_device *device);
++#define       LDM_DEV struct platform_device
++#define       LDM_DRV struct platform_driver
++#if defined(LDM_PCI)
++#undef        LDM_PCI
++#endif 
++#endif 
+-static struct platform_driver powervr_driver = {
++#if defined(LDM_PCI)
++#define       LDM_DEV struct pci_dev
++#define       LDM_DRV struct pci_driver
++#endif 
++
++//static void PVRSRVClassDeviceRelease(struct class_device *class_device);
++
++/*static struct class powervr_class = {
++      .name                   = CLASSNAME,
++      .release                = PVRSRVClassDeviceRelease
++};*/
++
++#if defined(LDM_PLATFORM)
++static int PVRSRVDriverRemove(LDM_DEV *device);
++static int PVRSRVDriverProbe(LDM_DEV *device);
++#endif
++#if defined(LDM_PCI)
++static void PVRSRVDriverRemove(LDM_DEV *device);
++static int PVRSRVDriverProbe(LDM_DEV *device, const struct pci_device_id *id);
++#endif
++static int PVRSRVDriverSuspend(LDM_DEV *device, pm_message_t state);
++static void PVRSRVDriverShutdown(LDM_DEV *device);
++static int PVRSRVDriverResume(LDM_DEV *device);
++
++#if defined(LDM_PCI)
++struct pci_device_id powervr_id_table[] __devinitdata = {
++      { PCI_DEVICE(SYS_SGX_DEV_VENDOR_ID, SYS_SGX_DEV_DEVICE_ID2) },
++      { 0 }
++};
++
++MODULE_DEVICE_TABLE(pci, powervr_id_table);
++#endif
++
++static LDM_DRV powervr_driver = {
++#if defined(LDM_PLATFORM)
+       .driver = {
+-              .name           = DEVNAME,
++              .name           = DRVNAME,
+       },
++#endif
++#if defined(LDM_PCI)
++      .name           = DRVNAME,
++      .id_table = powervr_id_table,
++#endif
+       .probe          = PVRSRVDriverProbe,
++#if defined(LDM_PLATFORM)
+       .remove         = PVRSRVDriverRemove,
++#endif
++#if defined(LDM_PCI)
++      .remove         = __devexit_p(PVRSRVDriverRemove),
++#endif
+       .suspend        = PVRSRVDriverSuspend,
+       .resume         = PVRSRVDriverResume,
+       .shutdown       = PVRSRVDriverShutdown,
+ };
++LDM_DEV *gpsPVRLDMDev;
++
++ 
++#if defined(LDM_PLATFORM)
+ static void PVRSRVDeviceRelease(struct device *device);
+ static struct platform_device powervr_device = {
+@@ -126,18 +185,79 @@
+               .release                = PVRSRVDeviceRelease
+       }
+ };
++#endif 
++static ssize_t PVRSRVShowDev(struct class_device *pClassDevice, char *buf)
++{
++      PVR_TRACE(("PVRSRVShowDev(pClassDevice=%p)", pClassDevice));
+-static int PVRSRVDriverProbe(struct platform_device *pDevice)
++      return snprintf(buf, PAGE_SIZE, "%d:0\n", AssignedMajorNumber);
++}
++
++//static CLASS_DEVICE_ATTR(dev,  S_IRUGO, PVRSRVShowDev, NULL);
++
++/*static void PVRSRVClassDeviceRelease(struct class_device *pClassDevice)
++{
++      PVR_TRACE(("PVRSRVClassDeviceRelease(pClassDevice=%p)", pClassDevice));
++
++      kfree(pClassDevice);
++}*/
++
++#if defined(LDM_PLATFORM)
++static int PVRSRVDriverProbe(LDM_DEV *pDevice)
++#endif
++#if defined(LDM_PCI)
++static int __devinit PVRSRVDriverProbe(LDM_DEV *pDevice, const struct pci_device_id *id)
++#endif
+ {
+       SYS_DATA *psSysData;
+       PVRSRV_ERROR eError;
++      //struct class_device *pClassDevice;
+       int error;
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverProbe(pDevice=%p)", pDevice));
++      PVR_TRACE(("PVRSRVDriverProbe(pDevice=%p)", pDevice));
+-      pDevice->dev.driver_data = NULL;
++      pDevice->dev.driver_data = NULL;        
++      /*pClassDevice = kmalloc(sizeof(*pClassDevice), GFP_KERNEL);
++
++      if (pClassDevice == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVDriverProbe(pDevice=%p): no memory for class device instance.",
++                              pDevice));
++
++              return -ENOMEM;
++      }
++
++      memset(pClassDevice, 0, sizeof(*pClassDevice));
++
++      pDevice->dev.driver_data = (void *)pClassDevice;
++
++      
++      strncpy(pClassDevice->class_id, DEVNAME, BUS_ID_SIZE);
++
++      pClassDevice->class = &powervr_class;
++      pClassDevice->dev = &pDevice->dev;
++
++      
++      if ((error = class_device_register(pClassDevice)) != 0)
++      {
++              kfree(pClassDevice);
++
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVDriverProbe(pDevice=%p): class_device_register failed (%d)",
++                              pDevice, error));
++              return error;
++      }
++
++      if ((error = class_device_create_file(pClassDevice, &class_device_attr_dev)) != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVDriverProbe(pDevice=%p): class_device_create_file failed (%d)",
++                              pDevice, error));
++              return error;
++      }*/
+ #if 0
+       
+@@ -149,37 +269,34 @@
+       
+       if (SysAcquireData(&psSysData) != PVRSRV_OK)
+       {
++              gpsPVRLDMDev = pDevice;
++
+               if (SysInitialise() != PVRSRV_OK)
+               {
+                       return -ENODEV;
+               }
+-
+-              eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
+-              if(eError != PVRSRV_OK)
+-              {
+-                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVDriverProbe: Failed to connect to resource manager"));
+-                      error = -ENODEV;
+-              }
+       }
+       return 0;
+ }
+-static int PVRSRVDriverRemove(struct platform_device *pDevice)
++#if defined (LDM_PLATFORM)
++static int PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
++#if defined(LDM_PCI)
++static void __devexit PVRSRVDriverRemove(LDM_DEV *pDevice)
++#endif
+ {
+       SYS_DATA *psSysData;
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverRemove(pDevice=%p)", pDevice));
++      PVR_TRACE(("PVRSRVDriverRemove(pDevice=%p)", pDevice));
+-      if(PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE) != PVRSRV_OK)
+-      {
+-              return -EINVAL;
+-      }
+-      
+       if (SysAcquireData(&psSysData) == PVRSRV_OK)
+       {
+               SysDeinitialise(psSysData);
++
++              gpsPVRLDMDev = IMG_NULL;
+       }
+ #if 0
+@@ -189,68 +306,131 @@
+       }
+ #endif
++      //class_device_unregister((struct class_device *)pDevice->dev.driver_data);
++
++
++      pDevice->dev.driver_data = 0;
++
++#if defined (LDM_PLATFORM)
+       return 0;
++#endif
++#if defined (LDM_PCI)
++      return;
++#endif
+ }
+-static void PVRSRVDriverShutdown(struct platform_device *pDevice)
++static void PVRSRVDriverShutdown(LDM_DEV *pDevice)
+ {
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++      PVR_TRACE(("PVRSRVDriverShutdown(pDevice=%p)", pDevice));
+       (void) PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3);
+ }
+-static int PVRSRVDriverSuspend(struct platform_device *pDevice, pm_message_t state)
++static int PVRSRVDriverSuspend(LDM_DEV *pDevice, pm_message_t state)
+ {
+-
+-      PVR_DPF((PVR_DBG_WARNING,
+-                      "PVRSRVDriverSuspend(pDevice=%p)",
+-                      pDevice));
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL))
++      PVR_TRACE(( "PVRSRVDriverSuspend(pDevice=%p)", pDevice));
+       if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
+       {
+               return -EINVAL;
+       }
+-
++#endif
+       return 0;
+ }
+-static int PVRSRVDriverResume(struct platform_device *pDevice)
++static int PVRSRVDriverResume(LDM_DEV *pDevice)
+ {
+-      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverResume(pDevice=%p)", pDevice));
++#if !(defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL))
++      PVR_TRACE(("PVRSRVDriverResume(pDevice=%p)", pDevice));
+       if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
+       {
+               return -EINVAL;
+       }
+-
++#endif
+       return 0;
+ }
++#if defined(LDM_PLATFORM)
+ static void PVRSRVDeviceRelease(struct device *pDevice)
+ {
+       PVR_DPF((PVR_DBG_WARNING, "PVRSRVDeviceRelease(pDevice=%p)", pDevice));
+ }
+ #endif 
++#endif 
++
++
++#if defined(DEBUG) && defined(PVR_MANUAL_POWER_CONTROL)
++static IMG_UINT32 gPVRPowerLevel;
++
++int PVRProcSetPowerLevel(struct file *file, const char *buffer, unsigned long count, void *data)
++{
++      char data_buffer[2];
++      IMG_UINT32 PVRPowerLevel;
++
++      if (count != sizeof(data_buffer))
++      {
++              return -EINVAL;
++      }
++      else
++      {
++              if (copy_from_user(data_buffer, buffer, count))
++                      return -EINVAL;
++              if (data_buffer[count - 1] != '\n')
++                      return -EINVAL;
++              PVRPowerLevel = data_buffer[0] - '0';
++              if (PVRPowerLevel != gPVRPowerLevel)
++              {
++                      if (PVRPowerLevel != 0)
++                      {
++                              if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
++                              {
++                                      return -EINVAL;
++                              }
++                      }
++                      else
++                      {
++                              if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++                              {
++                                      return -EINVAL;
++                              }
++                      }
++
++                      gPVRPowerLevel = PVRPowerLevel;
++              }
++      }
++      return (count);
++}
++
++int PVRProcGetPowerLevel(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      if (off == 0) {
++              *start = (char *)1;
++              return printAppend(page, count, 0, "%lu\n", gPVRPowerLevel);
++      }
++      *eof = 1;
++      return 0;
++}
++#endif
+ static int PVRSRVOpen(struct inode unref__ * pInode, struct file unref__ * pFile)
+ {
+       int Ret = 0;
+-      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVOpen"));
+-
+-    LinuxLockMutex(&gPVRSRVLock);
++      LinuxLockMutex(&gPVRSRVLock);
+       if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_TRUE) != PVRSRV_OK)
+       {
+               Ret = -ENOMEM;
+       }
+       
+-    LinuxUnLockMutex(&gPVRSRVLock);
++      LinuxUnLockMutex(&gPVRSRVLock);
+       return Ret;
+ }
+@@ -260,8 +440,6 @@
+ {
+       int Ret = 0;
+-      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRelease"));
+-
+       if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_FALSE) != PVRSRV_OK)
+       {
+               Ret = -ENOMEM;
+@@ -274,9 +452,12 @@
+ static int __init PVRCore_Init(void)
+ {
+       int error;
+-#if !defined(LDM_PLATFORM)
++#if !(defined(LDM_PLATFORM) || defined(LDM_PCI))
+       PVRSRV_ERROR eError;
+-#endif 
++#endif
++
++      PVR_TRACE(("PVRCore_Init"));
++
+       
+       AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
+@@ -287,7 +468,7 @@
+               return -EBUSY;
+       }
+-      PVR_DPF((PVR_DBG_WARNING, "PVRCore_Init: major device %d", AssignedMajorNumber));
++      PVR_TRACE(("PVRCore_Init: major device %d", AssignedMajorNumber));
+       
+       if (CreateProcEntries ())
+@@ -313,9 +494,19 @@
+       PVRMMapInit();
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++      /*if ((error = class_register(&powervr_class)) != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register class (%d)", error));
++
++              goto init_failed;
++      }*/
++
+ #if defined(LDM_PLATFORM)
+       if ((error = platform_driver_register(&powervr_driver)) != 0)
+       {
++              //class_unregister(&powervr_class);
++
+               PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
+               goto init_failed;
+@@ -324,11 +515,25 @@
+       if ((error = platform_device_register(&powervr_device)) != 0)
+       {
+               platform_driver_unregister(&powervr_driver);
++              //class_unregister(&powervr_class);
+               PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
+               goto init_failed;
+       }
++#endif 
++
++#if defined(LDM_PCI)
++      if ((error = pci_register_driver(&powervr_driver)) != 0)
++      {
++              //class_unregister(&powervr_class);
++
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register PCI driver (%d)", error));
++
++              goto init_failed;
++      }
++#endif 
++
+ #else 
+       
+       if ((eError = SysInitialise()) != PVRSRV_OK)
+@@ -343,20 +548,12 @@
+ #endif
+               goto init_failed;
+       }
+-
+-      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
+-      if(eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"PVRCore_Init: Failed to connect to resource manager"));
+-              error = -ENODEV;
+-              goto init_failed;
+-      }
+ #endif 
++
+       return 0;
+ init_failed:
+-      (void) PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
+       PVRMMapCleanup();
+       LinuxMMCleanup();
+       RemoveProcEntries();
+@@ -370,23 +567,34 @@
+ static void __exit PVRCore_Cleanup(void)
+ {
+       SYS_DATA *psSysData;
+-#if !defined(LDM_PLATFORM)
++#if !(defined(LDM_PLATFORM) || defined (LDM_PCI))
+       PVRSRV_ERROR eError;
+-#endif 
++#endif
++
++      PVR_TRACE(("PVRCore_Cleanup"));
+       SysAcquireData(&psSysData);
+-      unregister_chrdev(AssignedMajorNumber, DRVNAME);
+       
++      /*if (unregister_chrdev(AssignedMajorNumber, DRVNAME))
++      {
++              PVR_DPF((PVR_DBG_ERROR," can't unregister device major %d", AssignedMajorNumber));
++      }*/
++      unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++#if defined(LDM_PLATFORM) || defined(LDM_PCI)
++
++#if defined(LDM_PCI)
++      pci_unregister_driver(&powervr_driver);
++#endif
++
+ #if defined (LDM_PLATFORM)
+       platform_device_unregister(&powervr_device);
+       platform_driver_unregister(&powervr_driver);
+-#else 
+-      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
+-      if (eError != PVRSRV_OK)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR,"KernelResManDisconnect: Failed to disconnect"));
+-      }
++#endif
++      //class_unregister(&powervr_class);
++
++#else 
+       
+       SysDeinitialise(psSysData);
+ #endif 
+@@ -399,7 +607,7 @@
+       RemoveProcEntries();
+-      PVR_DPF((PVR_DBG_WARNING,"unloading"));
++      PVR_TRACE(("PVRCore_Cleanup: unloading"));
+ }
+ module_init(PVRCore_Init);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c     2008-12-18 15:47:29.000000000 +0100
+@@ -56,6 +56,9 @@
+ #include "env_data.h"
+ #include "proc.h"
+ #include "mutex.h"
++#include "event.h"
++
++#define EVENT_OBJECT_TIMEOUT_MS               (100)
+ extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
+@@ -411,9 +414,6 @@
+       psEnvData->bLISRInstalled = IMG_FALSE;
+       
+-      psEnvData->psPCIDev = NULL;
+-
+-      
+       *ppvEnvSpecificData = psEnvData;
+       return PVRSRV_OK;
+@@ -426,7 +426,6 @@
+       PVR_ASSERT(!psEnvData->bMISRInstalled);
+       PVR_ASSERT(!psEnvData->bLISRInstalled);
+-      PVR_ASSERT(psEnvData->psPCIDev == NULL);
+       OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0x1000, psEnvData->pvBridgeData, IMG_NULL);
+@@ -1189,57 +1188,62 @@
+ }
+ #if defined(CONFIG_PCI) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
+-PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++
++IMG_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+       int err;
+       IMG_UINT32 i;
++      PVR_PCI_DEV *psPVRPCI;
+-      if (psEnvData->psPCIDev != NULL)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: A device has already been acquired"));
+-              return PVRSRV_ERROR_GENERIC;
+-      }
++      PVR_TRACE(("OSPCISetDev"));
+-      psEnvData->psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, psEnvData->psPCIDev);
+-      if (psEnvData->psPCIDev == NULL)
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)&psPVRPCI, IMG_NULL) != PVRSRV_OK)
+       {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
+-              return PVRSRV_ERROR_GENERIC;
++              PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't allocate PVR PCI structure"));
++              return IMG_NULL;
+       }
+-      err = pci_enable_device(psEnvData->psPCIDev);
++      psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++      psPVRPCI->ePCIFlags = eFlags;
++
++      err = pci_enable_device(psPVRPCI->psPCIDev);
+       if (err != 0)
+       {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't enable device (%d)", err));
+-              return PVRSRV_ERROR_GENERIC;
++              PVR_DPF((PVR_DBG_ERROR, "OSPCISetDev: Couldn't enable device (%d)", err));
++              return IMG_NULL;
+       }
+-      if (eFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
+-              pci_set_master(psEnvData->psPCIDev);
++      if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++              pci_set_master(psPVRPCI->psPCIDev);
+       
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+-              psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++              psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
+       }
+-      return PVRSRV_OK;
++      return (IMG_HANDLE)psPVRPCI;
+ }
+-PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ)
++IMG_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      struct pci_dev *psPCIDev;
+-      if (psEnvData->psPCIDev == NULL)
++      psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++      if (psPCIDev == NULL)
+       {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIIRQ: Device hasn't been acquired"));
+-              return PVRSRV_ERROR_GENERIC;
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++              return IMG_NULL;
+       }
+-      *pui32IRQ = psEnvData->psPCIDev->irq;
++      return OSPCISetDev((IMG_VOID *)psPCIDev, eFlags);
++}
++
++PVRSRV_ERROR OSPCIIRQ(IMG_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++      *pui32IRQ = psPVRPCI->psPCIDev->irq;
+       return PVRSRV_OK;
+ }
+@@ -1254,19 +1258,12 @@
+ };
+ static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
+-                                                                       IMG_VOID *pvSysData,
++                                                                       IMG_HANDLE hPVRPCI,
+                                                                        IMG_UINT32 ui32Index
+                                                                        
+ )
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
+-
+-      if (psEnvData->psPCIDev == NULL)
+-      {
+-              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Device hasn't been acquired"));
+-              return 0;
+-      }
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       if (ui32Index >= DEVICE_COUNT_RESOURCE)
+       {
+@@ -1278,32 +1275,32 @@
+       switch (eFunc)
+       {
+               case HOST_PCI_ADDR_RANGE_FUNC_LEN:
+-                      return pci_resource_len(psEnvData->psPCIDev, ui32Index);
++                      return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
+               case HOST_PCI_ADDR_RANGE_FUNC_START:
+-                      return pci_resource_start(psEnvData->psPCIDev, ui32Index);
++                      return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
+               case HOST_PCI_ADDR_RANGE_FUNC_END:
+-                      return pci_resource_end(psEnvData->psPCIDev, ui32Index);
++                      return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
+               case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
+               {
+                       
+ #ifdef FIXME
+                       int err;
+-                      err = pci_request_region(psEnvData->psPCIDev, ui32Index, "PowerVR");
++                      err = pci_request_region(psPVRPCI->psPCIDev, ui32Index, "PowerVR");
+                       if (err != 0)
+                       {
+                               PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
+                               return 0;
+                       }
+ #endif
+-                      psEnvData->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++                      psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
+                       return 1;
+               }
+               case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
+-                      if (psEnvData->abPCIResourceInUse[ui32Index])
++                      if (psPVRPCI->abPCIResourceInUse[ui32Index])
+                       {
+-                              pci_release_region(psEnvData->psPCIDev, ui32Index);
+-                              psEnvData->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++                              pci_release_region(psPVRPCI->psPCIDev, ui32Index);
++                              psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
+                       }
+                       return 1;
+               default:
+@@ -1314,62 +1311,160 @@
+       return 0;
+ }
+-IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++IMG_UINT32 OSPCIAddrRangeLen(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, pvSysData, ui32Index); 
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index); 
+ }
+-IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++IMG_UINT32 OSPCIAddrRangeStart(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, pvSysData, ui32Index); 
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index); 
+ }
+-IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, pvSysData, ui32Index); 
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index); 
+ }
+-PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData,
+-                                                                 IMG_UINT32 ui32Index
+-                                                                 
+-)
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_HANDLE hPVRPCI,
++                                                                 IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
+ }
+-PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
+ {
+-      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
+ }
+-PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData)
++PVRSRV_ERROR OSPCIReleaseDev(IMG_HANDLE hPVRPCI)
+ {
+-      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
+-      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
+       int i;
+-      if (psEnvData->psPCIDev == NULL)
++      PVR_TRACE(("OSPCIReleaseDev"));
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+-              return PVRSRV_OK;
++              if (psPVRPCI->abPCIResourceInUse[i])
++              {
++                      PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++                      pci_release_region(psPVRPCI->psPCIDev, i);
++                      psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++              }
+       }
++      pci_disable_device(psPVRPCI->psPCIDev);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*psPVRPCI), (IMG_VOID *)psPVRPCI, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCISuspendDev(IMG_HANDLE hPVRPCI)
++{
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++      int i;
++      int err;
++
++      PVR_TRACE(("OSPCISuspendDev"));
++
+       
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       {
+-              if (psEnvData->abPCIResourceInUse[i])
++              if (psPVRPCI->abPCIResourceInUse[i])
+               {
+-                      PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
+-                      pci_release_region(psEnvData->psPCIDev, i);
+-                      psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++                      pci_release_region(psPVRPCI->psPCIDev, i);
+               }
+       }
+-      pci_disable_device(psEnvData->psPCIDev);
++      err = pci_save_state(psPVRPCI->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_save_state_failed (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
+-      psEnvData->psPCIDev = NULL;
++      pci_disable_device(psPVRPCI->psPCIDev);
++
++      err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D3cold);
++      switch(err)
++      {
++              case 0:
++                      break;
++              case -EIO:
++                      PVR_DPF((PVR_DBG_WARNING, "OSPCISuspendDev: device doesn't support PCI PM"));
++                      break;
++              case -EINVAL:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: can't enter requested power state"));
++                      break;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCISuspendDev: pci_set_power_state failed (%d)", err));
++                      break;
++      }
+       return PVRSRV_OK;
+ }
++
++PVRSRV_ERROR OSPCIResumeDev(IMG_HANDLE hPVRPCI)
++{
++      PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++      int err;
++      int i;
++
++      PVR_TRACE(("OSPCIResumeDev"));
++
++      err = pci_set_power_state(psPVRPCI->psPCIDev, PCI_D0);
++      switch(err)
++      {
++              case 0:
++                      break;
++              case -EIO:
++                      PVR_DPF((PVR_DBG_WARNING, "OSPCIResumeDev: device doesn't support PCI PM"));
++                      break;
++              case -EINVAL:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: can't enter requested power state"));
++                      return PVRSRV_ERROR_GENERIC;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_set_power_state failed (%d)", err));
++                      return PVRSRV_ERROR_GENERIC;
++      }
++
++      err = pci_restore_state(psPVRPCI->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_restore_state failed (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      err = pci_enable_device(psPVRPCI->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: Couldn't enable device (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++              pci_set_master(psPVRPCI->psPCIDev);
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++      {
++              if (psPVRPCI->abPCIResourceInUse[i])
++              {
++                      err = pci_request_region(psPVRPCI->psPCIDev, i, "PowerVR");
++                      if (err != 0)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "OSPCIResumeDev: pci_request_region_failed (region %d, error %d)", i, err));
++                      }
++              }
++
++      }
++
++      return PVRSRV_OK;
++}
++
+ #endif 
+ typedef struct TIMER_CALLBACK_DATA_TAG
+@@ -1418,7 +1513,7 @@
+       psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+       psTimerCBData->pvData = pvData;
+-      psTimerCBData->bActive = IMG_TRUE;
++      psTimerCBData->bActive = IMG_FALSE;
+       
+       
+@@ -1434,14 +1529,36 @@
+       psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
+       psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+       
++      return (IMG_HANDLE)psTimerCBData;
++}
++
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
++      
++      
++      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), psTimerCBData, IMG_NULL);
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
++      
++      
++      psTimerCBData->bActive = IMG_TRUE;
++
+       
+       add_timer(&psTimerCBData->sTimer);
+       
+-      return (IMG_HANDLE)psTimerCBData;
++      return PVRSRV_OK;
+ }
+-PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+ {
+       TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
+       
+@@ -1451,21 +1568,17 @@
+       
+       del_timer_sync(&psTimerCBData->sTimer); 
+       
+-      
+-      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), psTimerCBData, IMG_NULL);
+-      
+       return PVRSRV_OK;
+ }
+ PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
+ {
++
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       
+       if(psEventObject)
+       {
+-              struct completion *psCompletion;
+-
+               if(pszName)
+               {
+                       
+@@ -1478,26 +1591,20 @@
+                       snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
+               }
+               
+-              
+-              if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
+-                                      sizeof(struct completion), 
+-                                      (IMG_VOID **)&psCompletion, IMG_NULL) != PVRSRV_OK)
++              if(LinuxEventObjectListCreate(&psEventObject->hOSEventKM) != PVRSRV_OK)
+               {
+-                      PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: failed to allocate memory for completion variable"));             
+-                      return PVRSRV_ERROR_OUT_OF_MEMORY;      
++                       eError = PVRSRV_ERROR_OUT_OF_MEMORY;   
+               }
+-              init_completion(psCompletion);
+-      
+-              psEventObject->hOSEventKM = (IMG_HANDLE) psCompletion;
+       }
+       else
+       {
+         PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
+-        eError = PVRSRV_ERROR_INVALID_PARAMS;
++              eError = PVRSRV_ERROR_GENERIC;  
+       }
+       
+       return eError;
++
+ }
+@@ -1509,8 +1616,7 @@
+       {
+               if(psEventObject->hOSEventKM)
+               {
+-                      struct completion *psCompletion = (struct completion *) psEventObject->hOSEventKM;
+-                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(struct completion), psCompletion, IMG_NULL);
++                      LinuxEventObjectListDestroy(psEventObject->hOSEventKM);
+               }
+               else
+               {
+@@ -1527,19 +1633,13 @@
+       return eError;
+ }
+-PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout)
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+ {
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       
+       if(hOSEventKM)
+       {
+-              LinuxUnLockMutex(&gPVRSRVLock);         
+-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))             
+-              wait_for_completion_timeout((struct completion *)hOSEventKM, msecs_to_jiffies(ui32MSTimeout));
+-#else
+-              wait_for_completion((struct completion *)hOSEventKM);
+-#endif        
+-              LinuxLockMutex(&gPVRSRVLock);
++              eError = LinuxEventObjectWait(hOSEventKM, EVENT_OBJECT_TIMEOUT_MS);
+       }
+       else
+       {
+@@ -1550,13 +1650,60 @@
+       return eError;
+ }
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE *phOSEvent)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(psEventObject)
++      {
++              if(LinuxEventObjectAdd(psEventObject->hOSEventKM, phOSEvent) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
++              eError = PVRSRV_ERROR_INVALID_PARAMS;
++              }
++
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE hOSEventKM)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(psEventObject)
++      {
++              if(LinuxEventObjectDelete(psEventObject->hOSEventKM, hOSEventKM, IMG_FALSE) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
++              eError = PVRSRV_ERROR_INVALID_PARAMS;
++              }
++
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++      
++}
++
+ PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
+ {
+       PVRSRV_ERROR eError = PVRSRV_OK;
+       
+       if(hOSEventKM)
+       {
+-              complete_all((struct completion *) hOSEventKM);         
++              eError = LinuxEventObjectSignal(hOSEventKM);
+       }
+       else
+       {
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c      2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c      2008-12-18 15:47:29.000000000 +0100
+@@ -1205,15 +1205,14 @@
+       {
+               ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
+-#if 0
+               
+               if (ui32Written == 0)
+               {
+-                      ZwYieldExecution();
++                      OSReleaseThreadQuanta();
+               }
+-#endif
++
+               if (ui32Written != 0xFFFFFFFF)
+               {
+                       ui32Off += ui32Written;
+@@ -1302,6 +1301,14 @@
+       return bFrameDumped;
+ }
++IMG_VOID PDumpRegRead(const IMG_UINT32 ui32RegOffset, IMG_UINT32 ui32Flags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
+ IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
+ {
+       __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c       2008-12-18 15:47:29.000000000 +0100
+@@ -46,6 +46,11 @@
+ #ifdef DEBUG
+ int PVRDebugProcSetLevel(struct file *file, const char *buffer, unsigned long count, void *data);
+ int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++int PVRProcSetPowerLevel(struct file *file, const char *buffer, unsigned long count, void *data);
++int PVRProcGetPowerLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
+ #endif
+ static struct proc_dir_entry * dir;
+@@ -198,6 +203,15 @@
+         return -ENOMEM;
+     }
++
++#ifdef PVR_MANUAL_POWER_CONTROL
++      if (CreateProcEntry("power_control", PVRProcGetPowerLevel, PVRProcSetPowerLevel, 0))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/pvr/power_control"));
++
++        return -ENOMEM;
++    }
++#endif
+ #endif
+     return 0;
+@@ -219,6 +233,9 @@
+ {
+ #ifdef DEBUG
+     RemoveProcEntry("debug_level");
++#ifdef PVR_MANUAL_POWER_CONTROL
++    RemoveProcEntry("power_control");
++#endif
+ #endif
+     RemoveProcEntry("queue");
+     RemoveProcEntry("nodes");
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c  2008-12-18 15:47:29.000000000 +0100
+@@ -161,7 +161,7 @@
+ void PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
+ {
+-      printk(KERN_INFO "PVR: Setting Debug Level = 0x%x",(unsigned int)uDebugLevel);
++      printk(KERN_INFO "PVR: Setting Debug Level = 0x%x\n",(unsigned int)uDebugLevel);
+       gPVRDebugLevel = uDebugLevel;
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h       2008-12-18 15:47:29.000000000 +0100
+@@ -33,12 +33,16 @@
+ #if defined(SGX535)
+ #include "sgx535defs.h"
+ #else
++#if defined(SGX520)
++#include "sgx520defs.h"
++#else
+ #if defined(SGX535_V1_1)
+ #include "sgx535defs.h"
+ #else
+ #endif
+ #endif
+ #endif
++#endif
+ #include "sgxerrata.h"
+ #include "sgxfeaturedefs.h"
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h     2008-12-18 15:47:29.000000000 +0100
+@@ -43,6 +43,8 @@
+       #else
+       #if SGX_CORE_REV == 120
+       #else
++      #if SGX_CORE_REV == 121
++      #else
+       #if SGX_CORE_REV == SGX_CORE_REV_HEAD
+               
+       #else
+@@ -51,6 +53,7 @@
+       #endif
+       #endif
+       #endif
++      #endif
+         #endif
+       
+       #define SGX_CORE_DEFINED
+@@ -69,16 +72,22 @@
+               #define FIX_HW_BRN_23281
+               #define FIX_HW_BRN_23410
+               #define FIX_HW_BRN_22693
++              #define FIX_HW_BRN_22997
++              #define FIX_HW_BRN_23030
+       #else
+       #if SGX_CORE_REV == 1111
+               #define FIX_HW_BRN_23281
+               #define FIX_HW_BRN_23410
+               #define FIX_HW_BRN_22693
++              #define FIX_HW_BRN_22997
++              #define FIX_HW_BRN_23030
+       #else
+       #if SGX_CORE_REV == 112
+               #define FIX_HW_BRN_23281
+               #define FIX_HW_BRN_23410
+               #define FIX_HW_BRN_22693
++              #define FIX_HW_BRN_22997
++              #define FIX_HW_BRN_23030
+       #else
+       #if SGX_CORE_REV == 113
+               #define FIX_HW_BRN_23281
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h        2008-12-18 15:47:29.000000000 +0100
+@@ -24,6 +24,12 @@
+  *
+  ******************************************************************************/
++#if defined(SGX520)
++      #define SGX_CORE_FRIENDLY_NAME                                                  "SGX520"
++      #define SGX_CORE_ID                                                                             SGX_CORE_ID_520
++      #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (28)
++      #define SGX_FEATURE_AUTOCLOCKGATING
++#else
+ #if defined(SGX530)
+       #define SGX_CORE_FRIENDLY_NAME                                                  "SGX530"
+       #define SGX_CORE_ID                                                                             SGX_CORE_ID_530
+@@ -36,8 +42,9 @@
+       #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (32)
+       #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
+       #define SGX_FEATURE_2D_HARDWARE
+-              #define SGX_FEATURE_AUTOCLOCKGATING
+-
++      #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#endif
+ #endif
+ #endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/device.h git/drivers/gpu/pvr/services4/srvkm/include/device.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/device.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/device.h       2008-12-18 15:47:29.000000000 +0100
+@@ -225,39 +225,40 @@
+       struct _PVRSRV_DEVICE_NODE_     *psNext;
+ } PVRSRV_DEVICE_NODE;
+-PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,
+-                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
+-                                                                IMG_UINT32 ui32SOCInterruptBit,
+-                                                                IMG_UINT32 *pui32DeviceIndex );
++PVRSRV_ERROR IMG_CALLCONV PVRSRVRegisterDevice(PSYS_DATA psSysData,
++                                                                                        PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                                        IMG_UINT32 ui32SOCInterruptBit,
++                                                                                        IMG_UINT32 *pui32DeviceIndex );
+-PVRSRV_ERROR PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFinaliseSystem(IMG_BOOL bInitSuccesful);
+-PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
+ #if !defined(USE_CODE)
+-IMG_IMPORT PVRSRV_ERROR PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
+-                                                                         IMG_UINT32 ui32Value,
+-                                                                         IMG_UINT32 ui32Mask,
+-                                                                         IMG_UINT32 ui32Waitus,
+-                                                                         IMG_UINT32 ui32Tries);
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++                                                                                                 IMG_UINT32 ui32Value,
++                                                                                                 IMG_UINT32 ui32Mask,
++                                                                                                 IMG_UINT32 ui32Waitus,
++                                                                                                 IMG_UINT32 ui32Tries);
+ #endif 
+ #if defined (USING_ISR_INTERRUPTS)
+-PVRSRV_ERROR PollForInterruptKM(IMG_UINT32 ui32Value,
++PVRSRV_ERROR IMG_CALLCONV PollForInterruptKM(IMG_UINT32 ui32Value,
+                                                               IMG_UINT32 ui32Mask,
+                                                               IMG_UINT32 ui32Waitus,
+                                                               IMG_UINT32 ui32Tries);
+ #endif 
+-PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData);
+-IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData);
+-IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
+-IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData);
+-IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID IMG_CALLCONV PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL IMG_CALLCONV PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL IMG_CALLCONV PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID IMG_CALLCONV PVRSRVMISR(IMG_VOID *pvSysData);
+ #if defined(__cplusplus)
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/handle.h git/drivers/gpu/pvr/services4/srvkm/include/handle.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/handle.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/handle.h       2008-12-18 15:47:29.000000000 +0100
+@@ -50,10 +50,13 @@
+       PVRSRV_HANDLE_TYPE_DISP_BUFFER,
+       PVRSRV_HANDLE_TYPE_BUF_BUFFER,
+       PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++      PVRSRV_HANDLE_TYPE_SGX_HW_TRANSFER_CONTEXT,
++      PVRSRV_HANDLE_TYPE_SGX_HW_2D_CONTEXT,
+       PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
+       PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
+       PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
+-      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT
++      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++      PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT
+ } PVRSRV_HANDLE_TYPE;
+ typedef enum
+@@ -126,6 +129,11 @@
+       
+       IMG_UINT32 ui32LastFreeIndexPlusOne;
++
++#ifdef        __linux__
++      
++      IMG_BOOL bVmallocUsed;
++#endif
+ } PVRSRV_HANDLE_BASE;
+ #ifdef        PVR_SECURE_HANDLES
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h       2008-12-18 15:47:29.000000000 +0100
+@@ -148,14 +148,16 @@
+ IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
+ IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...);
+ #define OSStringLength(pszString) strlen(pszString)
+-PVRSRV_ERROR OSPowerManagerConnect(IMG_VOID);
+-PVRSRV_ERROR OSPowerManagerDisconnect(IMG_VOID);
+ PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+                                                                PVRSRV_EVENTOBJECT *psEventObject);
+ PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
+ PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
+-PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectOpen(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE *phOSEvent);
++PVRSRV_ERROR OSEventObjectClose(PVRSRV_EVENTOBJECT *psEventObject,
++                                                                                      IMG_HANDLE hOSEventKM);
+ PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
+@@ -203,6 +205,8 @@
+ typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
+ IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
+ PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer);
++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer);
+ PVRSRV_ERROR OSGetSysMemSize(IMG_UINT32 *pui32Bytes);
+@@ -211,17 +215,17 @@
+       HOST_PCI_INIT_FLAG_BUS_MASTER = 0x1,
+       HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
+ } HOST_PCI_INIT_FLAGS;
+-PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
+-PVRSRV_ERROR OSPCISetDev(IMG_VOID *pvSysData, IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
+-PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData);
+-PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ);
+-IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
+-PVRSRV_ERROR OSPCISuspendDev(IMG_VOID *pvSysData);
+-PVRSRV_ERROR OSPCIResumeDev(IMG_VOID *pvSysData);
++IMG_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++IMG_HANDLE OSPCISetDev(IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(IMG_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(IMG_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(IMG_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(IMG_HANDLE hPVRPCI);
+ PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h     2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h     2008-12-18 15:47:29.000000000 +0100
+@@ -180,6 +180,8 @@
+       void PDump3DSignatureRegisters(IMG_UINT32       ui32DumpFrameNum,
+                                                                  IMG_BOOL             bLastFrame);
++      IMG_VOID PDumpRegRead(const IMG_UINT32 dwRegOffset, IMG_UINT32  ui32Flags);
++      
+       IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
+       void PDumpPerformanceCounterRegisters(IMG_UINT32        ui32DumpFrameNum,
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/resman.h git/drivers/gpu/pvr/services4/srvkm/include/resman.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/resman.h       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/resman.h       2008-12-18 15:47:29.000000000 +0100
+@@ -34,7 +34,9 @@
+ enum {
+       
+       RESMAN_TYPE_SHARED_PB_DESC = 1,                                 
+-      RESMAN_TYPE_HW_RENDER_CONTEXT,                                          
++      RESMAN_TYPE_HW_RENDER_CONTEXT,                                  
++      RESMAN_TYPE_HW_TRANSFER_CONTEXT,                                
++      RESMAN_TYPE_HW_2D_CONTEXT,                                              
+       RESMAN_TYPE_TRANSFER_CONTEXT,                                   
+       
+@@ -57,6 +59,7 @@
+       RESMAN_TYPE_DEVICEMEM_WRAP,                                             
+       RESMAN_TYPE_DEVICEMEM_ALLOCATION,                               
+       RESMAN_TYPE_RESOURCE_PERPROC_DATA,                              
++      RESMAN_TYPE_EVENT_OBJECT,                                               
+     RESMAN_TYPE_SHARED_MEM_INFO,                    
+       
+       
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h        2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h        2008-12-18 15:47:29.000000000 +0100
+@@ -33,9 +33,9 @@
+ #endif
+-IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State);
++IMG_VOID IMG_CALLCONV PVRSRVSetDCState(IMG_UINT32 ui32State);
+-PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave);
+ #if defined (__cplusplus)
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/Makefile git/drivers/gpu/pvr/services4/srvkm/Makefile
+--- git/drivers/gpu/pvr/services4/srvkm/Makefile       2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/srvkm/Makefile       1970-01-01 01:00:00.000000000 +0100
+@@ -1,68 +0,0 @@
+-#
+-# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
+-#
+-# This program is free software; you can redistribute it and/or modify it
+-# under the terms and conditions of the GNU General Public License,
+-# version 2, as published by the Free Software Foundation.
+-#
+-# This program is distributed in the hope it will be useful but, except
+-# as otherwise stated in writing, without any warranty; without even the
+-# implied warranty of merchantability or fitness for a particular purpose.
+-# See the GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License along with
+-# this program; if not, write to the Free Software Foundation, Inc.,
+-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+-#
+-# The full GNU General Public License is included in this distribution in
+-# the file called "COPYING".
+-#
+-# Contact Information:
+-# Imagination Technologies Ltd. <gpl-support@imgtec.com>
+-# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
+-#
+-#
+-
+-obj-y +=      env/linux/osfunc.o              \
+-              env/linux/mmap.o                \
+-              env/linux/mod.o                 \
+-              env/linux/pdump.o               \
+-              env/linux/proc.o                \
+-              env/linux/pvr_bridge_k.o        \
+-              env/linux/pvr_debug.o           \
+-              env/linux/mm.o                  \
+-              env/linux/mutex.o
+-
+-obj-y +=      common/buffer_manager.o         \
+-              common/devicemem.o              \
+-              common/deviceclass.o            \
+-              common/handle.o                 \
+-              common/hash.o                   \
+-              common/metrics.o                \
+-              common/pvrsrv.o                 \
+-              common/queue.o                  \
+-              common/ra.o                     \
+-              common/resman.o                 \
+-              common/power.o                  \
+-              common/mem.o                    \
+-              bridged/bridged_pvr_bridge.o    \
+-              devices/sgx/sgxinit.o           \
+-              devices/sgx/sgxutils.o          \
+-              devices/sgx/sgxkick.o           \
+-              devices/sgx/sgxtransfer.o       \
+-              devices/sgx/mmu.o               \
+-              devices/sgx/pb.o                \
+-              common/perproc.o                \
+-              ../system/$(CONFIG_PVR_SYSTEM)/sysconfig.o      \
+-              ../system/$(CONFIG_PVR_SYSTEM)/sysutils.o       \
+-              devices/sgx/sgx2dcore.o
+-
+-INCLUDES =    -I$(src)/env/linux      \
+-              -I$(src)/include        \
+-              -I$(src)/bridged        \
+-              -I$(src)/devices/sgx    \
+-              -I$(src)/include        \
+-              -I$(src)/hwdefs
+-
+-ccflags-y += $(CONFIG_PVR_OPTS) $(INCLUDES)
+-
+diff -Nurd git/drivers/gpu/pvr/services4/system/include/syscommon.h git/drivers/gpu/pvr/services4/system/include/syscommon.h
+--- git/drivers/gpu/pvr/services4/system/include/syscommon.h   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/include/syscommon.h   2008-12-18 15:47:29.000000000 +0100
+@@ -83,11 +83,13 @@
+       RA_ARENA                                        *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS]; 
+     IMG_CHAR                    *pszVersionString;          
++      PVRSRV_EVENTOBJECT                      *psGlobalEventObject;                   
+ } SYS_DATA;
+ PVRSRV_ERROR SysInitialise(IMG_VOID);
++PVRSRV_ERROR SysFinalise(IMG_VOID);
+ IMG_UINT32 GetCPUTranslatedAddress(IMG_VOID);
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c  2008-12-18 15:47:29.000000000 +0100
+@@ -360,8 +360,15 @@
+       }
+       gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_INITDEV;
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SysFinalise(IMG_VOID)
++{
+ #if defined(SYS_USING_INTERRUPTS)
++      PVRSRV_ERROR eError;
++
+       eError = OSInstallMISR(gpsSysData);
+       if (eError != PVRSRV_OK)
+       {
+@@ -388,12 +395,12 @@
+       
+       gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
+       if (!gpsSysData->pszVersionString)
+-      { 
+-              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysFinalise: Failed to create a system version string"));
+       }
+       else
+       {
+-              PVR_DPF((PVR_DBG_WARNING, "SysInitialise: Version string: %s", gpsSysData->pszVersionString));
++              PVR_DPF((PVR_DBG_WARNING, "SysFinalise: Version string: %s", gpsSysData->pszVersionString));
+       }
+ #if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+@@ -641,7 +648,7 @@
+                       }
+                       gsSysSpecificData.ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_LISR;
+               }
+-#endif        
++#endif
+               if (gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)
+               {
+                       DisableSystemClocks(gpsSysData);
+@@ -682,7 +689,7 @@
+                       }
+                       gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LISR;
+               }
+-#endif        
++#endif
+       }
+       return eError;
+ }
+@@ -706,7 +713,7 @@
+               DisableSGXClocks(gpsSysData);
+       }
+ #else 
+-      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState );
+ #endif 
+       return PVRSRV_OK;
+ }
+@@ -718,12 +725,13 @@
+ {
+       PVRSRV_ERROR eError = PVRSRV_OK;
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++
+       if (ui32DeviceIndex != gui32SGXDeviceID)
+       {
+               return eError;
+       }
+-      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
+ #if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
+       if (eCurrentPowerState == PVRSRV_POWER_STATE_D3)
+@@ -734,7 +742,7 @@
+ #else 
+       PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
+ #endif        
+-      
++
+       return eError;
+ }
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h  2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h  2008-12-18 15:47:29.000000000 +0100
+@@ -38,13 +38,6 @@
+ #define SYS_OMAP3430_SGX_IRQ                           21
+-#define SYS_OMAP3430_PM_REGS_SYS_PHYS_BASE     0x48306000
+-#define SYS_OMAP3430_PM_REGS_SIZE                      0x1000
+-
+-#define SYS_OMAP3430_CM_REGS_SYS_PHYS_BASE     0x48004000
+-#define SYS_OMAP3430_CM_REGS_SIZE                      0x1000
+-
+-
+ #define SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE  0x48088024
+ #define SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE      0x48088028
+ #define SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE     0x48088040
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c   2009-01-05 20:00:44.000000000 +0100
++++ git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c   2008-12-18 15:47:29.000000000 +0100
+@@ -52,7 +52,7 @@
+               return PVRSRV_OK;
+       }
+-      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Enabling SGX Clocks"));
++      PVR_TRACE(("EnableSGXClocks: Enabling SGX Clocks"));
+ #if defined(__linux__)
+       if (psSysSpecData->psSGX_FCK == IMG_NULL)
+--- /tmp/omaplfb_linux.c       2009-01-06 10:41:49.000000000 +0100
++++ git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   2009-01-06 10:42:41.000000000 +0100
+@@ -108,6 +108,8 @@
+       (void) OMAPLFBVSyncIHandler(psSwapChain);
+ }
++#define DISPC_IRQ_VSYNC 0x0002
++
+ PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
+ {
+--- /tmp/Makefile      2009-01-06 11:32:47.000000000 +0100
++++ git/drivers/gpu/pvr/Makefile       2009-01-06 11:39:06.000000000 +0100
+@@ -16,6 +16,7 @@
+               services4/srvkm/env/linux/pvr_debug.o           \
+               services4/srvkm/env/linux/mm.o                  \
+               services4/srvkm/env/linux/mutex.o               \
++              services4/srvkm/env/linux/event.o \
+               services4/srvkm/common/buffer_manager.o         \
+               services4/srvkm/common/devicemem.o              \
+               services4/srvkm/common/deviceclass.o            \
+@@ -30,6 +31,7 @@
+               services4/srvkm/common/mem.o                    \
+               services4/srvkm/bridged/bridged_pvr_bridge.o    \
+               services4/srvkm/devices/sgx/sgxinit.o           \
++              services4/srvkm/devices/sgx/sgxreset.o \
+               services4/srvkm/devices/sgx/sgxutils.o          \
+               services4/srvkm/devices/sgx/sgxkick.o           \
+               services4/srvkm/devices/sgx/sgxtransfer.o       \
diff --git a/packages/linux/omap3-pandora-kernel/pvr/pvr-add.patch b/packages/linux/omap3-pandora-kernel/pvr/pvr-add.patch
new file mode 100755 (executable)
index 0000000..541e869
--- /dev/null
@@ -0,0 +1,155099 @@
+diff -Nurd git/drivers/gpu/drm-tungsten/ati_pcigart.c git-nokia/drivers/gpu/drm-tungsten/ati_pcigart.c
+--- kernel-2.6.27.orig/drivers/video/Kconfig
++++ kernel-2.6.27/drivers/video/Kconfig
+@@ -7,7 +7,7 @@
+
+ source "drivers/char/agp/Kconfig"
+
+-source "drivers/gpu/drm/Kconfig"
++source "drivers/gpu/Kconfig"
+
+ config VGASTATE
+        tristate
+--- git/drivers/gpu/drm-tungsten/ati_pcigart.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ati_pcigart.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,199 @@
++/**
++ * \file ati_pcigart.c
++ * ATI PCI GART support
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++# define ATI_PCIGART_PAGE_SIZE                4096    /**< PCI GART page size */
++# define ATI_PCIGART_PAGE_MASK                (~(ATI_PCIGART_PAGE_SIZE-1))
++
++#define ATI_PCIE_WRITE 0x4
++#define ATI_PCIE_READ 0x8
++
++static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info, dma_addr_t addr, u32 *pci_gart)
++{
++      u32 page_base;
++
++      page_base = (u32)addr & ATI_PCIGART_PAGE_MASK;
++      switch(gart_info->gart_reg_if) {
++      case DRM_ATI_GART_IGP:
++              page_base |= (upper_32_bits(addr) & 0xff) << 4;
++              page_base |= 0xc;
++              break;
++      case DRM_ATI_GART_PCIE:
++              page_base >>= 8;
++              page_base |= (upper_32_bits(addr) & 0xff) << 24;
++              page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
++              break;
++      default:
++      case DRM_ATI_GART_PCI:
++              break;
++      }
++      *pci_gart = cpu_to_le32(page_base);
++}
++
++static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
++                                     struct drm_ati_pcigart_info *gart_info)
++{
++      gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
++                                              PAGE_SIZE,
++                                              gart_info->table_mask);
++      if (gart_info->table_handle == NULL)
++              return -ENOMEM;
++
++      return 0;
++}
++
++static void drm_ati_free_pcigart_table(struct drm_device *dev,
++                                     struct drm_ati_pcigart_info *gart_info)
++{
++      drm_pci_free(dev, gart_info->table_handle);
++      gart_info->table_handle = NULL;
++}
++
++int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
++{
++      struct drm_sg_mem *entry = dev->sg;
++      unsigned long pages;
++      int i;
++      int max_pages;
++
++      /* we need to support large memory configurations */
++      if (!entry) {
++              DRM_ERROR("no scatter/gather memory!\n");
++              return 0;
++      }
++
++      if (gart_info->bus_addr) {
++
++              max_pages = (gart_info->table_size / sizeof(u32));
++              pages = (entry->pages <= max_pages)
++                ? entry->pages : max_pages;
++
++              for (i = 0; i < pages; i++) {
++                      if (!entry->busaddr[i])
++                              break;
++                      pci_unmap_page(dev->pdev, entry->busaddr[i],
++                                       PAGE_SIZE, PCI_DMA_TODEVICE);
++              }
++
++              if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
++                      gart_info->bus_addr = 0;
++      }
++
++
++      if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
++          && gart_info->table_handle) {
++
++              drm_ati_free_pcigart_table(dev, gart_info);
++      }
++
++      return 1;
++}
++EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
++
++int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
++{
++      struct drm_sg_mem *entry = dev->sg;
++      void *address = NULL;
++      unsigned long pages;
++      u32 *pci_gart;
++      dma_addr_t bus_address = 0;
++      int i, j, ret = 0;
++      int max_pages;
++      dma_addr_t entry_addr;
++
++      if (!entry) {
++              DRM_ERROR("no scatter/gather memory!\n");
++              goto done;
++      }
++
++      if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
++              DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
++
++              ret = drm_ati_alloc_pcigart_table(dev, gart_info);
++              if (ret) {
++                      DRM_ERROR("cannot allocate PCI GART page!\n");
++                      goto done;
++              }
++
++              address = gart_info->table_handle->vaddr;
++              bus_address = gart_info->table_handle->busaddr;
++      } else {
++              address = gart_info->addr;
++              bus_address = gart_info->bus_addr;
++              DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
++                        bus_address, (unsigned long)address);
++      }
++
++      pci_gart = (u32 *) address;
++
++      max_pages = (gart_info->table_size / sizeof(u32));
++      pages = (entry->pages <= max_pages)
++          ? entry->pages : max_pages;
++
++      memset(pci_gart, 0, max_pages * sizeof(u32));
++
++      for (i = 0; i < pages; i++) {
++              /* we need to support large memory configurations */
++              entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
++                                               0, PAGE_SIZE, PCI_DMA_TODEVICE);
++              if (entry->busaddr[i] == 0) {
++                      DRM_ERROR("unable to map PCIGART pages!\n");
++                      drm_ati_pcigart_cleanup(dev, gart_info);
++                      address = NULL;
++                      bus_address = 0;
++                      goto done;
++              }
++
++              entry_addr = entry->busaddr[i];
++              for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
++                      gart_insert_page_into_table(gart_info, entry_addr, pci_gart);
++                      pci_gart++;
++                      entry_addr += ATI_PCIGART_PAGE_SIZE;
++              }
++      }
++
++      ret = 1;
++
++#if defined(__i386__) || defined(__x86_64__)
++      wbinvd();
++#else
++      mb();
++#endif
++
++      done:
++      gart_info->addr = address;
++      gart_info->bus_addr = bus_address;
++      return ret;
++}
++EXPORT_SYMBOL(drm_ati_pcigart_init);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_agpsupport.c git-nokia/drivers/gpu/drm-tungsten/drm_agpsupport.c
+--- git/drivers/gpu/drm-tungsten/drm_agpsupport.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_agpsupport.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,715 @@
++/**
++ * \file drm_agpsupport.c
++ * DRM support for AGP/GART backend
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include <linux/module.h>
++
++#if __OS_HAS_AGP
++
++/**
++ * Get AGP information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a (output) drm_agp_info structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device has been initialized and acquired and fills in the
++ * drm_agp_info structure with the information in drm_agp_head::agp_info.
++ */
++int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
++{
++      DRM_AGP_KERN *kern;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++
++      kern = &dev->agp->agp_info;
++      info->agp_version_major = kern->version.major;
++      info->agp_version_minor = kern->version.minor;
++      info->mode = kern->mode;
++      info->aperture_base = kern->aper_base;
++      info->aperture_size = kern->aper_size * 1024 * 1024;
++      info->memory_allowed = kern->max_memory << PAGE_SHIFT;
++      info->memory_used = kern->current_memory << PAGE_SHIFT;
++      info->id_vendor = kern->device->vendor;
++      info->id_device = kern->device->device;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_info);
++
++int drm_agp_info_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_agp_info *info = data;
++      int err;
++
++      err = drm_agp_info(dev, info);
++      if (err)
++              return err;
++
++      return 0;
++}
++
++/**
++ * Acquire the AGP device.
++ *
++ * \param dev DRM device that is to acquire AGP.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device hasn't been acquired before and calls
++ * \c agp_backend_acquire.
++ */
++int drm_agp_acquire(struct drm_device * dev)
++{
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      int retcode;
++#endif
++
++      if (!dev->agp)
++              return -ENODEV;
++      if (dev->agp->acquired)
++              return -EBUSY;
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      if ((retcode = agp_backend_acquire()))
++              return retcode;
++#else
++      if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
++              return -ENODEV;
++#endif
++
++      dev->agp->acquired = 1;
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_acquire);
++
++/**
++ * Acquire the AGP device (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device hasn't been acquired before and calls
++ * \c agp_backend_acquire.
++ */
++int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
++}
++
++/**
++ * Release the AGP device.
++ *
++ * \param dev DRM device that is to release AGP.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
++ */
++int drm_agp_release(struct drm_device *dev)
++{
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      agp_backend_release();
++#else
++      agp_backend_release(dev->agp->bridge);
++#endif
++      dev->agp->acquired = 0;
++      return 0;
++
++}
++EXPORT_SYMBOL(drm_agp_release);
++
++int drm_agp_release_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      return drm_agp_release(dev);
++}
++
++/**
++ * Enable the AGP bus.
++ *
++ * \param dev DRM device that has previously acquired AGP.
++ * \param mode Requested AGP mode.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device has been acquired but not enabled, and calls
++ * \c agp_enable.
++ */
++int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
++{
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++
++      dev->agp->mode = mode.mode;
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      agp_enable(mode.mode);
++#else
++      agp_enable(dev->agp->bridge, mode.mode);
++#endif
++      dev->agp->enabled = 1;
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_enable);
++
++int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      struct drm_agp_mode *mode = data;
++
++      return drm_agp_enable(dev, *mode);
++}
++
++/**
++ * Allocate AGP memory.
++ *
++ * \param inode device inode.
++ * \param file_priv file private pointer.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_buffer structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and has been acquired, allocates the
++ * memory via alloc_agp() and creates a drm_agp_mem entry for it.
++ */
++int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
++{
++      struct drm_agp_mem *entry;
++      DRM_AGP_MEM *memory;
++      unsigned long pages;
++      u32 type;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
++              return -ENOMEM;
++
++      memset(entry, 0, sizeof(*entry));
++
++      pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
++      type = (u32) request->type;
++      if (!(memory = drm_alloc_agp(dev, pages, type))) {
++              drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
++              return -ENOMEM;
++      }
++
++      entry->handle = (unsigned long)memory->key + 1;
++      entry->memory = memory;
++      entry->bound = 0;
++      entry->pages = pages;
++      list_add(&entry->head, &dev->agp->memory);
++
++      request->handle = entry->handle;
++      request->physical = memory->physical;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_alloc);
++
++
++int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      struct drm_agp_buffer *request = data;
++
++      return drm_agp_alloc(dev, request);
++}
++
++/**
++ * Search for the AGP memory entry associated with a handle.
++ *
++ * \param dev DRM device structure.
++ * \param handle AGP memory handle.
++ * \return pointer to the drm_agp_mem structure associated with \p handle.
++ *
++ * Walks through drm_agp_head::memory until finding a matching handle.
++ */
++static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
++                                         unsigned long handle)
++{
++      struct drm_agp_mem *entry;
++
++      list_for_each_entry(entry, &dev->agp->memory, head) {
++              if (entry->handle == handle)
++                      return entry;
++      }
++      return NULL;
++}
++
++/**
++ * Unbind AGP memory from the GATT (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_binding structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and acquired, looks-up the AGP memory
++ * entry and passes it to the unbind_agp() function.
++ */
++int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
++{
++      struct drm_agp_mem *entry;
++      int ret;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
++              return -EINVAL;
++      if (!entry->bound)
++              return -EINVAL;
++      ret = drm_unbind_agp(entry->memory);
++      if (ret == 0)
++              entry->bound = 0;
++      return ret;
++}
++EXPORT_SYMBOL(drm_agp_unbind);
++
++
++int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      struct drm_agp_binding *request = data;
++
++      return drm_agp_unbind(dev, request);
++}
++
++
++/**
++ * Bind AGP memory into the GATT (ioctl)
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_binding structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and has been acquired and that no memory
++ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
++ * it to bind_agp() function.
++ */
++int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
++{
++      struct drm_agp_mem *entry;
++      int retcode;
++      int page;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
++              return -EINVAL;
++      if (entry->bound)
++              return -EINVAL;
++      page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
++      if ((retcode = drm_bind_agp(entry->memory, page)))
++              return retcode;
++      entry->bound = dev->agp->base + (page << PAGE_SHIFT);
++      DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
++                dev->agp->base, entry->bound);
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_bind);
++
++
++int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_agp_binding *request = data;
++
++      return drm_agp_bind(dev, request);
++}
++
++
++/**
++ * Free AGP memory (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_agp_buffer structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the AGP device is present and has been acquired and looks up the
++ * AGP memory entry. If the memory is currently bound, unbind it via
++ * unbind_agp(). Frees it via free_agp() as well as the entry itself
++ * and unlinks from the doubly linked list it's inserted in.
++ */
++int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
++{
++      struct drm_agp_mem *entry;
++
++      if (!dev->agp || !dev->agp->acquired)
++              return -EINVAL;
++      if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
++              return -EINVAL;
++      if (entry->bound)
++              drm_unbind_agp(entry->memory);
++
++      list_del(&entry->head);
++
++      drm_free_agp(entry->memory, entry->pages);
++      drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
++      return 0;
++}
++EXPORT_SYMBOL(drm_agp_free);
++
++
++
++int drm_agp_free_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_agp_buffer *request = data;
++
++      return drm_agp_free(dev, request);
++}
++
++
++/**
++ * Initialize the AGP resources.
++ *
++ * \return pointer to a drm_agp_head structure.
++ *
++ * Gets the drm_agp_t structure which is made available by the agpgart module
++ * via the inter_module_* functions. Creates and initializes a drm_agp_head
++ * structure.
++ */
++struct drm_agp_head *drm_agp_init(struct drm_device *dev)
++{
++      struct drm_agp_head *head = NULL;
++
++      if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
++              return NULL;
++      memset((void *)head, 0, sizeof(*head));
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      agp_copy_info(&head->agp_info);
++#else
++      head->bridge = agp_find_bridge(dev->pdev);
++      if (!head->bridge) {
++              if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
++                      drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
++                      return NULL;
++              }
++              agp_copy_info(head->bridge, &head->agp_info);
++              agp_backend_release(head->bridge);
++      } else {
++              agp_copy_info(head->bridge, &head->agp_info);
++      }
++#endif
++      if (head->agp_info.chipset == NOT_SUPPORTED) {
++              drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
++              return NULL;
++      }
++      INIT_LIST_HEAD(&head->memory);
++      head->cant_use_aperture = head->agp_info.cant_use_aperture;
++      head->page_mask = head->agp_info.page_mask;
++      head->base = head->agp_info.aper_base;
++      return head;
++}
++
++/** Calls agp_allocate_memory() */
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type)
++{
++      return agp_allocate_memory(pages, type);
++}
++#else
++DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
++                                   size_t pages, u32 type)
++{
++      return agp_allocate_memory(bridge, pages, type);
++}
++#endif
++
++/** Calls agp_free_memory() */
++int drm_agp_free_memory(DRM_AGP_MEM * handle)
++{
++      if (!handle)
++              return 0;
++      agp_free_memory(handle);
++      return 1;
++}
++
++/** Calls agp_bind_memory() */
++int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
++{
++      if (!handle)
++              return -EINVAL;
++      return agp_bind_memory(handle, start);
++}
++EXPORT_SYMBOL(drm_agp_bind_memory);
++
++/** Calls agp_unbind_memory() */
++int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
++{
++      if (!handle)
++              return -EINVAL;
++      return agp_unbind_memory(handle);
++}
++
++/**
++ * Binds a collection of pages into AGP memory at the given offset, returning
++ * the AGP memory structure containing them.
++ *
++ * No reference is held on the pages during this time -- it is up to the
++ * caller to handle that.
++ */
++DRM_AGP_MEM *
++drm_agp_bind_pages(struct drm_device *dev,
++                 struct page **pages,
++                 unsigned long num_pages,
++                 uint32_t gtt_offset)
++{
++      DRM_AGP_MEM *mem;
++      int ret, i;
++
++      DRM_DEBUG("drm_agp_populate_ttm\n");
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
++#else
++      mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
++                                    AGP_USER_MEMORY);
++#endif
++      if (mem == NULL) {
++              DRM_ERROR("Failed to allocate memory for %ld pages\n",
++                        num_pages);
++              return NULL;
++      }
++
++      for (i = 0; i < num_pages; i++)
++              mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
++      mem->page_count = num_pages;
++
++      mem->is_flushed = true;
++      ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
++      if (ret != 0) {
++              DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
++              agp_free_memory(mem);
++              return NULL;
++      }
++
++      return mem;
++}
++EXPORT_SYMBOL(drm_agp_bind_pages);
++
++/*
++ * AGP ttm backend interface.
++ */
++
++#ifndef AGP_USER_TYPES
++#define AGP_USER_TYPES (1 << 16)
++#define AGP_USER_MEMORY (AGP_USER_TYPES)
++#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
++#endif
++#define AGP_REQUIRED_MAJOR 0
++#define AGP_REQUIRED_MINOR 102
++
++static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
++{
++      return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
++}
++
++
++static int drm_agp_populate(struct drm_ttm_backend *backend,
++                          unsigned long num_pages, struct page **pages,
++                          struct page *dummy_read_page)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++      struct page **cur_page, **last_page = pages + num_pages;
++      DRM_AGP_MEM *mem;
++      int dummy_page_count = 0;
++
++      if (drm_alloc_memctl(num_pages * sizeof(void *)))
++              return -1;
++
++      DRM_DEBUG("drm_agp_populate_ttm\n");
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
++#else
++      mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++#endif
++      if (!mem) {
++              drm_free_memctl(num_pages * sizeof(void *));
++              return -1;
++      }
++
++      DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
++      mem->page_count = 0;
++      for (cur_page = pages; cur_page < last_page; ++cur_page) {
++              struct page *page = *cur_page;
++              if (!page) {
++                      page = dummy_read_page;
++                      ++dummy_page_count;
++              }
++              mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
++      }
++      if (dummy_page_count)
++              DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
++      agp_be->mem = mem;
++      return 0;
++}
++
++static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
++                          struct drm_bo_mem_reg *bo_mem)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++      DRM_AGP_MEM *mem = agp_be->mem;
++      int ret;
++      int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
++
++      DRM_DEBUG("drm_agp_bind_ttm\n");
++      mem->is_flushed = true;
++      mem->type = AGP_USER_MEMORY;
++      /* CACHED MAPPED implies not snooped memory */
++      if (snooped)
++              mem->type = AGP_USER_CACHED_MEMORY;
++
++      ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
++      if (ret)
++              DRM_ERROR("AGP Bind memory failed\n");
++
++      DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
++                      DRM_BE_FLAG_BOUND_CACHED : 0,
++                      DRM_BE_FLAG_BOUND_CACHED);
++      return ret;
++}
++
++static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++
++      DRM_DEBUG("drm_agp_unbind_ttm\n");
++      if (agp_be->mem->is_bound)
++              return drm_agp_unbind_memory(agp_be->mem);
++      else
++              return 0;
++}
++
++static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
++{
++      struct drm_agp_ttm_backend *agp_be =
++              container_of(backend, struct drm_agp_ttm_backend, backend);
++      DRM_AGP_MEM *mem = agp_be->mem;
++
++      DRM_DEBUG("drm_agp_clear_ttm\n");
++      if (mem) {
++              unsigned long num_pages = mem->page_count;
++              backend->func->unbind(backend);
++              agp_free_memory(mem);
++              drm_free_memctl(num_pages * sizeof(void *));
++      }
++      agp_be->mem = NULL;
++}
++
++static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
++{
++      struct drm_agp_ttm_backend *agp_be;
++
++      if (backend) {
++              DRM_DEBUG("drm_agp_destroy_ttm\n");
++              agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
++              if (agp_be) {
++                      if (agp_be->mem)
++                              backend->func->clear(backend);
++                      drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
++              }
++      }
++}
++
++static struct drm_ttm_backend_func agp_ttm_backend = {
++      .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
++      .populate = drm_agp_populate,
++      .clear = drm_agp_clear_ttm,
++      .bind = drm_agp_bind_ttm,
++      .unbind = drm_agp_unbind_ttm,
++      .destroy =  drm_agp_destroy_ttm,
++};
++
++struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
++{
++
++      struct drm_agp_ttm_backend *agp_be;
++      struct agp_kern_info *info;
++
++      if (!dev->agp) {
++              DRM_ERROR("AGP is not initialized.\n");
++              return NULL;
++      }
++      info = &dev->agp->agp_info;
++
++      if (info->version.major != AGP_REQUIRED_MAJOR ||
++          info->version.minor < AGP_REQUIRED_MINOR) {
++              DRM_ERROR("Wrong agpgart version %d.%d\n"
++                        "\tYou need at least version %d.%d.\n",
++                        info->version.major,
++                        info->version.minor,
++                        AGP_REQUIRED_MAJOR,
++                        AGP_REQUIRED_MINOR);
++              return NULL;
++      }
++
++
++      agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
++      if (!agp_be)
++              return NULL;
++
++      agp_be->mem = NULL;
++
++      agp_be->bridge = dev->agp->bridge;
++      agp_be->populated = false;
++      agp_be->backend.func = &agp_ttm_backend;
++      agp_be->backend.dev = dev;
++
++      return &agp_be->backend;
++}
++EXPORT_SYMBOL(drm_agp_init_ttm);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
++void drm_agp_chipset_flush(struct drm_device *dev)
++{
++      agp_flush_chipset(dev->agp->bridge);
++}
++EXPORT_SYMBOL(drm_agp_chipset_flush);
++#endif
++
++#endif                                /* __OS_HAS_AGP */
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_auth.c git-nokia/drivers/gpu/drm-tungsten/drm_auth.c
+--- git/drivers/gpu/drm-tungsten/drm_auth.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_auth.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,189 @@
++/**
++ * \file drm_auth.c
++ * IOCTLs for authentication
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++/**
++ * Find the file with the given magic number.
++ *
++ * \param dev DRM device.
++ * \param magic magic number.
++ *
++ * Searches in drm_device::magiclist within all files with the same hash key
++ * the one with matching magic number, while holding the drm_device::struct_mutex
++ * lock.
++ */
++static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
++{
++      struct drm_file *retval = NULL;
++      struct drm_magic_entry *pt;
++      struct drm_hash_item *hash;
++
++      mutex_lock(&dev->struct_mutex);
++      if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
++              pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
++              retval = pt->priv;
++      }
++      mutex_unlock(&dev->struct_mutex);
++      return retval;
++}
++
++/**
++ * Adds a magic number.
++ *
++ * \param dev DRM device.
++ * \param priv file private data.
++ * \param magic magic number.
++ *
++ * Creates a drm_magic_entry structure and appends to the linked list
++ * associated the magic number hash key in drm_device::magiclist, while holding
++ * the drm_device::struct_mutex lock.
++ */
++static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
++                       drm_magic_t magic)
++{
++      struct drm_magic_entry *entry;
++
++      DRM_DEBUG("%d\n", magic);
++
++      entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
++      if (!entry)
++              return -ENOMEM;
++      memset(entry, 0, sizeof(*entry));
++      entry->priv = priv;
++      entry->hash_item.key = (unsigned long)magic;
++      mutex_lock(&dev->struct_mutex);
++      drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
++      list_add_tail(&entry->head, &dev->magicfree);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Remove a magic number.
++ *
++ * \param dev DRM device.
++ * \param magic magic number.
++ *
++ * Searches and unlinks the entry in drm_device::magiclist with the magic
++ * number hash key, while holding the drm_device::struct_mutex lock.
++ */
++static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
++{
++      struct drm_magic_entry *pt;
++      struct drm_hash_item *hash;
++
++      DRM_DEBUG("%d\n", magic);
++
++      mutex_lock(&dev->struct_mutex);
++      if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++      pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
++      drm_ht_remove_item(&dev->magiclist, hash);
++      list_del(&pt->head);
++      mutex_unlock(&dev->struct_mutex);
++
++      drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
++
++      return 0;
++}
++
++/**
++ * Get a unique magic number (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a resulting drm_auth structure.
++ * \return zero on success, or a negative number on failure.
++ *
++ * If there is a magic number in drm_file::magic then use it, otherwise
++ * searches an unique non-zero magic number and add it associating it with \p
++ * file_priv.
++ */
++int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      static drm_magic_t sequence = 0;
++      static DEFINE_SPINLOCK(lock);
++      struct drm_auth *auth = data;
++
++      /* Find unique magic */
++      if (file_priv->magic) {
++              auth->magic = file_priv->magic;
++      } else {
++              do {
++                      spin_lock(&lock);
++                      if (!sequence)
++                              ++sequence;     /* reserve 0 */
++                      auth->magic = sequence++;
++                      spin_unlock(&lock);
++              } while (drm_find_file(dev, auth->magic));
++              file_priv->magic = auth->magic;
++              drm_add_magic(dev, file_priv, auth->magic);
++      }
++
++      DRM_DEBUG("%u\n", auth->magic);
++
++      return 0;
++}
++
++/**
++ * Authenticate with a magic.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_auth structure.
++ * \return zero if authentication succeeded, or a negative number otherwise.
++ *
++ * Checks if \p file_priv is associated with the magic number passed in \arg.
++ */
++int drm_authmagic(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_auth *auth = data;
++      struct drm_file *file;
++
++      DRM_DEBUG("%u\n", auth->magic);
++      if ((file = drm_find_file(dev, auth->magic))) {
++              file->authenticated = 1;
++              drm_remove_magic(dev, auth->magic);
++              return 0;
++      }
++      return -EINVAL;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo.c git-nokia/drivers/gpu/drm-tungsten/drm_bo.c
+--- git/drivers/gpu/drm-tungsten/drm_bo.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2796 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++/*
++ * Locking may look a bit complicated but isn't really:
++ *
++ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
++ * when there is a chance that it can be zero before or after the operation.
++ *
++ * dev->struct_mutex also protects all lists and list heads,
++ * Hash tables and hash heads.
++ *
++ * bo->mutex protects the buffer object itself excluding the usage field.
++ * bo->mutex does also protect the buffer list heads, so to manipulate those,
++ * we need both the bo->mutex and the dev->struct_mutex.
++ *
++ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
++ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
++ * the list traversal will, in general, need to be restarted.
++ *
++ */
++
++static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
++static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
++
++static inline uint64_t drm_bo_type_flags(unsigned type)
++{
++      return (1ULL << (24 + type));
++}
++
++/*
++ * bo locked. dev->struct_mutex locked.
++ */
++
++void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
++{
++      struct drm_mem_type_manager *man;
++
++      DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
++      DRM_ASSERT_LOCKED(&bo->mutex);
++
++      man = &bo->dev->bm.man[bo->pinned_mem_type];
++      list_add_tail(&bo->pinned_lru, &man->pinned);
++}
++
++void drm_bo_add_to_lru(struct drm_buffer_object *bo)
++{
++      struct drm_mem_type_manager *man;
++
++      DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
++
++      if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
++          || bo->mem.mem_type != bo->pinned_mem_type) {
++              man = &bo->dev->bm.man[bo->mem.mem_type];
++              list_add_tail(&bo->lru, &man->lru);
++      } else {
++              INIT_LIST_HEAD(&bo->lru);
++      }
++}
++
++static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
++{
++#ifdef DRM_ODD_MM_COMPAT
++      int ret;
++
++      if (!bo->map_list.map)
++              return 0;
++
++      ret = drm_bo_lock_kmm(bo);
++      if (ret)
++              return ret;
++      drm_bo_unmap_virtual(bo);
++      if (old_is_pci)
++              drm_bo_finish_unmap(bo);
++#else
++      if (!bo->map_list.map)
++              return 0;
++
++      drm_bo_unmap_virtual(bo);
++#endif
++      return 0;
++}
++
++static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
++{
++#ifdef DRM_ODD_MM_COMPAT
++      int ret;
++
++      if (!bo->map_list.map)
++              return;
++
++      ret = drm_bo_remap_bound(bo);
++      if (ret) {
++              DRM_ERROR("Failed to remap a bound buffer object.\n"
++                        "\tThis might cause a sigbus later.\n");
++      }
++      drm_bo_unlock_kmm(bo);
++#endif
++}
++
++/*
++ * Call bo->mutex locked.
++ */
++
++static int drm_bo_add_ttm(struct drm_buffer_object *bo)
++{
++      struct drm_device *dev = bo->dev;
++      int ret = 0;
++      uint32_t page_flags = 0;
++
++      DRM_ASSERT_LOCKED(&bo->mutex);
++      bo->ttm = NULL;
++
++      if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
++              page_flags |= DRM_TTM_PAGE_WRITE;
++
++      switch (bo->type) {
++      case drm_bo_type_device:
++      case drm_bo_type_kernel:
++              bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
++                                       page_flags, dev->bm.dummy_read_page);
++              if (!bo->ttm)
++                      ret = -ENOMEM;
++              break;
++      case drm_bo_type_user:
++              bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
++                                       page_flags | DRM_TTM_PAGE_USER,
++                                       dev->bm.dummy_read_page);
++              if (!bo->ttm)
++                      ret = -ENOMEM;
++
++              ret = drm_ttm_set_user(bo->ttm, current,
++                                     bo->buffer_start,
++                                     bo->num_pages);
++              if (ret)
++                      return ret;
++
++              break;
++      default:
++              DRM_ERROR("Illegal buffer object type\n");
++              ret = -EINVAL;
++              break;
++      }
++
++      return ret;
++}
++
++static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
++                                struct drm_bo_mem_reg *mem,
++                                int evict, int no_wait)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
++      int new_is_pci = drm_mem_reg_is_pci(dev, mem);
++      struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
++      struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
++      int ret = 0;
++
++      if (old_is_pci || new_is_pci ||
++          ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
++              ret = drm_bo_vm_pre_move(bo, old_is_pci);
++      if (ret)
++              return ret;
++
++      /*
++       * Create and bind a ttm if required.
++       */
++
++      if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
++              ret = drm_bo_add_ttm(bo);
++              if (ret)
++                      goto out_err;
++
++              if (mem->mem_type != DRM_BO_MEM_LOCAL) {
++                      ret = drm_ttm_bind(bo->ttm, mem);
++                      if (ret)
++                              goto out_err;
++              }
++
++              if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
++                      
++                      struct drm_bo_mem_reg *old_mem = &bo->mem;
++                      uint64_t save_flags = old_mem->flags;
++                      uint64_t save_proposed_flags = old_mem->proposed_flags;
++                      
++                      *old_mem = *mem;
++                      mem->mm_node = NULL;
++                      old_mem->proposed_flags = save_proposed_flags;
++                      DRM_FLAG_MASKED(save_flags, mem->flags,
++                                      DRM_BO_MASK_MEMTYPE);
++                      goto moved;
++              }
++              
++      }
++
++      if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
++          !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
++              ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
++      else if (dev->driver->bo_driver->move) 
++              ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
++      else
++              ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
++
++      if (ret)
++              goto out_err;
++
++moved:
++      if (old_is_pci || new_is_pci)
++              drm_bo_vm_post_move(bo);
++
++      if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
++              ret =
++                  dev->driver->bo_driver->invalidate_caches(dev,
++                                                            bo->mem.flags);
++              if (ret)
++                      DRM_ERROR("Can not flush read caches\n");
++      }
++
++      DRM_FLAG_MASKED(bo->priv_flags,
++                      (evict) ? _DRM_BO_FLAG_EVICTED : 0,
++                      _DRM_BO_FLAG_EVICTED);
++
++      if (bo->mem.mm_node)
++              bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
++                      bm->man[bo->mem.mem_type].gpu_offset;
++
++
++      return 0;
++
++out_err:
++      if (old_is_pci || new_is_pci)
++              drm_bo_vm_post_move(bo);
++
++      new_man = &bm->man[bo->mem.mem_type];
++      if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
++              drm_ttm_unbind(bo->ttm);
++              drm_ttm_destroy(bo->ttm);
++              bo->ttm = NULL;
++      }
++
++      return ret;
++}
++
++/*
++ * Call bo->mutex locked.
++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
++ */
++
++static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
++{
++      struct drm_fence_object *fence = bo->fence;
++
++      if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++              return -EBUSY;
++
++      if (fence) {
++              if (drm_fence_object_signaled(fence, bo->fence_type)) {
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++                      return 0;
++              }
++              drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
++              if (drm_fence_object_signaled(fence, bo->fence_type)) {
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++                      return 0;
++              }
++              return -EBUSY;
++      }
++      return 0;
++}
++
++static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
++{
++      int ret;
++
++      mutex_lock(&bo->mutex);
++      ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
++      mutex_unlock(&bo->mutex);
++      return ret;
++}
++
++
++/*
++ * Call bo->mutex locked.
++ * Wait until the buffer is idle.
++ */
++
++int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
++              int no_wait, int check_unfenced)
++{
++      int ret;
++
++      DRM_ASSERT_LOCKED(&bo->mutex);
++      while(unlikely(drm_bo_busy(bo, check_unfenced))) {
++              if (no_wait)
++                      return -EBUSY;
++
++              if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
++                      mutex_unlock(&bo->mutex);
++                      wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
++                      mutex_lock(&bo->mutex);
++                      bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++              }
++
++              if (bo->fence) {
++                      struct drm_fence_object *fence;
++                      uint32_t fence_type = bo->fence_type;
++
++                      drm_fence_reference_unlocked(&fence, bo->fence);
++                      mutex_unlock(&bo->mutex);
++
++                      ret = drm_fence_object_wait(fence, lazy, !interruptible,
++                                                  fence_type);
++
++                      drm_fence_usage_deref_unlocked(&fence);
++                      mutex_lock(&bo->mutex);
++                      bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++                      if (ret)
++                              return ret;
++              }
++
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_wait);
++
++static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      if (bo->fence) {
++              if (bm->nice_mode) {
++                      unsigned long _end = jiffies + 3 * DRM_HZ;
++                      int ret;
++                      do {
++                              ret = drm_bo_wait(bo, 0, 0, 0, 0);
++                              if (ret && allow_errors)
++                                      return ret;
++
++                      } while (ret && !time_after_eq(jiffies, _end));
++
++                      if (bo->fence) {
++                              bm->nice_mode = 0;
++                              DRM_ERROR("Detected GPU lockup or "
++                                        "fence driver was taken down. "
++                                        "Evicting buffer.\n");
++                      }
++              }
++              if (bo->fence)
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++      }
++      return 0;
++}
++
++/*
++ * Call dev->struct_mutex locked.
++ * Attempts to remove all private references to a buffer by expiring its
++ * fence object and removing from lru lists and memory managers.
++ */
++
++static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      atomic_inc(&bo->usage);
++      mutex_unlock(&dev->struct_mutex);
++      mutex_lock(&bo->mutex);
++
++      DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
++
++      if (bo->fence && drm_fence_object_signaled(bo->fence,
++                                                 bo->fence_type))
++              drm_fence_usage_deref_unlocked(&bo->fence);
++
++      if (bo->fence && remove_all)
++              (void)drm_bo_expire_fence(bo, 0);
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (!atomic_dec_and_test(&bo->usage))
++              goto out;
++
++      if (!bo->fence) {
++              list_del_init(&bo->lru);
++              if (bo->mem.mm_node) {
++                      drm_mm_put_block(bo->mem.mm_node);
++                      if (bo->pinned_node == bo->mem.mm_node)
++                              bo->pinned_node = NULL;
++                      bo->mem.mm_node = NULL;
++              }
++              list_del_init(&bo->pinned_lru);
++              if (bo->pinned_node) {
++                      drm_mm_put_block(bo->pinned_node);
++                      bo->pinned_node = NULL;
++              }
++              list_del_init(&bo->ddestroy);
++              mutex_unlock(&bo->mutex);
++              drm_bo_destroy_locked(bo);
++              return;
++      }
++
++      if (list_empty(&bo->ddestroy)) {
++              drm_fence_object_flush(bo->fence, bo->fence_type);
++              list_add_tail(&bo->ddestroy, &bm->ddestroy);
++              schedule_delayed_work(&bm->wq,
++                                    ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
++      }
++
++out:
++      mutex_unlock(&bo->mutex);
++      return;
++}
++
++/*
++ * Verify that refcount is 0 and that there are no internal references
++ * to the buffer object. Then destroy it.
++ */
++
++static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
++          list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
++          list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
++              if (bo->fence != NULL) {
++                      DRM_ERROR("Fence was non-zero.\n");
++                      drm_bo_cleanup_refs(bo, 0);
++                      return;
++              }
++
++#ifdef DRM_ODD_MM_COMPAT
++              BUG_ON(!list_empty(&bo->vma_list));
++              BUG_ON(!list_empty(&bo->p_mm_list));
++#endif
++
++              if (bo->ttm) {
++                      drm_ttm_unbind(bo->ttm);
++                      drm_ttm_destroy(bo->ttm);
++                      bo->ttm = NULL;
++              }
++
++              atomic_dec(&bm->count);
++
++              drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
++
++              return;
++      }
++
++      /*
++       * Some stuff is still trying to reference the buffer object.
++       * Get rid of those references.
++       */
++
++      drm_bo_cleanup_refs(bo, 0);
++
++      return;
++}
++
++/*
++ * Call dev->struct_mutex locked.
++ */
++
++static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      struct drm_buffer_object *entry, *nentry;
++      struct list_head *list, *next;
++
++      list_for_each_safe(list, next, &bm->ddestroy) {
++              entry = list_entry(list, struct drm_buffer_object, ddestroy);
++
++              nentry = NULL;
++              if (next != &bm->ddestroy) {
++                      nentry = list_entry(next, struct drm_buffer_object,
++                                          ddestroy);
++                      atomic_inc(&nentry->usage);
++              }
++
++              drm_bo_cleanup_refs(entry, remove_all);
++
++              if (nentry)
++                      atomic_dec(&nentry->usage);
++      }
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++static void drm_bo_delayed_workqueue(void *data)
++#else
++static void drm_bo_delayed_workqueue(struct work_struct *work)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      struct drm_device *dev = (struct drm_device *) data;
++      struct drm_buffer_manager *bm = &dev->bm;
++#else
++      struct drm_buffer_manager *bm =
++          container_of(work, struct drm_buffer_manager, wq.work);
++      struct drm_device *dev = container_of(bm, struct drm_device, bm);
++#endif
++
++      DRM_DEBUG("Delayed delete Worker\n");
++
++      mutex_lock(&dev->struct_mutex);
++      if (!bm->initialized) {
++              mutex_unlock(&dev->struct_mutex);
++              return;
++      }
++      drm_bo_delayed_delete(dev, 0);
++      if (bm->initialized && !list_empty(&bm->ddestroy)) {
++              schedule_delayed_work(&bm->wq,
++                                    ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++
++void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
++{
++      struct drm_buffer_object *tmp_bo = *bo;
++      bo = NULL;
++
++      DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
++
++      if (atomic_dec_and_test(&tmp_bo->usage))
++              drm_bo_destroy_locked(tmp_bo);
++}
++EXPORT_SYMBOL(drm_bo_usage_deref_locked);
++
++static void drm_bo_base_deref_locked(struct drm_file *file_priv,
++                                   struct drm_user_object *uo)
++{
++      struct drm_buffer_object *bo =
++          drm_user_object_entry(uo, struct drm_buffer_object, base);
++
++      DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
++
++      drm_bo_takedown_vm_locked(bo);
++      drm_bo_usage_deref_locked(&bo);
++}
++
++void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
++{
++      struct drm_buffer_object *tmp_bo = *bo;
++      struct drm_device *dev = tmp_bo->dev;
++
++      *bo = NULL;
++      if (atomic_dec_and_test(&tmp_bo->usage)) {
++              mutex_lock(&dev->struct_mutex);
++              if (atomic_read(&tmp_bo->usage) == 0)
++                      drm_bo_destroy_locked(tmp_bo);
++              mutex_unlock(&dev->struct_mutex);
++      }
++}
++EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
++
++void drm_putback_buffer_objects(struct drm_device *dev)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct list_head *list = &bm->unfenced;
++      struct drm_buffer_object *entry, *next;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry_safe(entry, next, list, lru) {
++              atomic_inc(&entry->usage);
++              mutex_unlock(&dev->struct_mutex);
++
++              mutex_lock(&entry->mutex);
++              BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
++              mutex_lock(&dev->struct_mutex);
++
++              list_del_init(&entry->lru);
++              DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
++              wake_up_all(&entry->event_queue);
++
++              /*
++               * FIXME: Might want to put back on head of list
++               * instead of tail here.
++               */
++
++              drm_bo_add_to_lru(entry);
++              mutex_unlock(&entry->mutex);
++              drm_bo_usage_deref_locked(&entry);
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++EXPORT_SYMBOL(drm_putback_buffer_objects);
++
++
++/*
++ * Note. The caller has to register (if applicable)
++ * and deregister fence object usage.
++ */
++
++int drm_fence_buffer_objects(struct drm_device *dev,
++                           struct list_head *list,
++                           uint32_t fence_flags,
++                           struct drm_fence_object *fence,
++                           struct drm_fence_object **used_fence)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_buffer_object *entry;
++      uint32_t fence_type = 0;
++      uint32_t fence_class = ~0;
++      int count = 0;
++      int ret = 0;
++      struct list_head *l;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (!list)
++              list = &bm->unfenced;
++
++      if (fence)
++              fence_class = fence->fence_class;
++
++      list_for_each_entry(entry, list, lru) {
++              BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
++              fence_type |= entry->new_fence_type;
++              if (fence_class == ~0)
++                      fence_class = entry->new_fence_class;
++              else if (entry->new_fence_class != fence_class) {
++                      DRM_ERROR("Unmatching fence classes on unfenced list: "
++                                "%d and %d.\n",
++                                fence_class,
++                                entry->new_fence_class);
++                      ret = -EINVAL;
++                      goto out;
++              }
++              count++;
++      }
++
++      if (!count) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      if (fence) {
++              if ((fence_type & fence->type) != fence_type ||
++                  (fence->fence_class != fence_class)) {
++                      DRM_ERROR("Given fence doesn't match buffers "
++                                "on unfenced list.\n");
++                      ret = -EINVAL;
++                      goto out;
++              }
++      } else {
++              mutex_unlock(&dev->struct_mutex);
++              ret = drm_fence_object_create(dev, fence_class, fence_type,
++                                            fence_flags | DRM_FENCE_FLAG_EMIT,
++                                            &fence);
++              mutex_lock(&dev->struct_mutex);
++              if (ret)
++                      goto out;
++      }
++
++      count = 0;
++      l = list->next;
++      while (l != list) {
++              prefetch(l->next);
++              entry = list_entry(l, struct drm_buffer_object, lru);
++              atomic_inc(&entry->usage);
++              mutex_unlock(&dev->struct_mutex);
++              mutex_lock(&entry->mutex);
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(l);
++              if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
++                      count++;
++                      if (entry->fence)
++                              drm_fence_usage_deref_locked(&entry->fence);
++                      entry->fence = drm_fence_reference_locked(fence);
++                      entry->fence_class = entry->new_fence_class;
++                      entry->fence_type = entry->new_fence_type;
++                      DRM_FLAG_MASKED(entry->priv_flags, 0,
++                                      _DRM_BO_FLAG_UNFENCED);
++                      wake_up_all(&entry->event_queue);
++                      drm_bo_add_to_lru(entry);
++              }
++              mutex_unlock(&entry->mutex);
++              drm_bo_usage_deref_locked(&entry);
++              l = list->next;
++      }
++      DRM_DEBUG("Fenced %d buffers\n", count);
++out:
++      mutex_unlock(&dev->struct_mutex);
++      *used_fence = fence;
++      return ret;
++}
++EXPORT_SYMBOL(drm_fence_buffer_objects);
++
++/*
++ * bo->mutex locked
++ */
++
++static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
++                      int no_wait)
++{
++      int ret = 0;
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg evict_mem;
++
++      /*
++       * Someone might have modified the buffer before we took the
++       * buffer mutex.
++       */
++
++      do {
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++              if (unlikely(bo->mem.flags &
++                           (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
++                      goto out_unlock;
++              if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++                      goto out_unlock;
++              if (unlikely(bo->mem.mem_type != mem_type))
++                      goto out_unlock;
++              ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
++              if (ret)
++                      goto out_unlock;
++
++      } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++
++      evict_mem = bo->mem;
++      evict_mem.mm_node = NULL;
++
++      evict_mem = bo->mem;
++      evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
++
++      mutex_lock(&dev->struct_mutex);
++      list_del_init(&bo->lru);
++      mutex_unlock(&dev->struct_mutex);
++
++      ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
++
++      if (ret) {
++              if (ret != -EAGAIN)
++                      DRM_ERROR("Failed to find memory space for "
++                                "buffer 0x%p eviction.\n", bo);
++              goto out;
++      }
++
++      ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
++
++      if (ret) {
++              if (ret != -EAGAIN)
++                      DRM_ERROR("Buffer eviction failed\n");
++              goto out;
++      }
++
++      DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
++                      _DRM_BO_FLAG_EVICTED);
++
++out:
++      mutex_lock(&dev->struct_mutex);
++      if (evict_mem.mm_node) {
++              if (evict_mem.mm_node != bo->pinned_node)
++                      drm_mm_put_block(evict_mem.mm_node);
++              evict_mem.mm_node = NULL;
++      }
++      drm_bo_add_to_lru(bo);
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++out_unlock:
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ */
++static int drm_bo_mem_force_space(struct drm_device *dev,
++                                struct drm_bo_mem_reg *mem,
++                                uint32_t mem_type, int no_wait)
++{
++      struct drm_mm_node *node;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_buffer_object *entry;
++      struct drm_mem_type_manager *man = &bm->man[mem_type];
++      struct list_head *lru;
++      unsigned long num_pages = mem->num_pages;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      do {
++              node = drm_mm_search_free(&man->manager, num_pages,
++                                        mem->page_alignment, 1);
++              if (node)
++                      break;
++
++              lru = &man->lru;
++              if (lru->next == lru)
++                      break;
++
++              entry = list_entry(lru->next, struct drm_buffer_object, lru);
++              atomic_inc(&entry->usage);
++              mutex_unlock(&dev->struct_mutex);
++              mutex_lock(&entry->mutex);
++              ret = drm_bo_evict(entry, mem_type, no_wait);
++              mutex_unlock(&entry->mutex);
++              drm_bo_usage_deref_unlocked(&entry);
++              if (ret)
++                      return ret;
++              mutex_lock(&dev->struct_mutex);
++      } while (1);
++
++      if (!node) {
++              mutex_unlock(&dev->struct_mutex);
++              return -ENOMEM;
++      }
++
++      node = drm_mm_get_block(node, num_pages, mem->page_alignment);
++      if (unlikely(!node)) {
++              mutex_unlock(&dev->struct_mutex);
++              return -ENOMEM;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++      mem->mm_node = node;
++      mem->mem_type = mem_type;
++      return 0;
++}
++
++static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
++                              int disallow_fixed,
++                              uint32_t mem_type,
++                              uint64_t mask, uint32_t *res_mask)
++{
++      uint64_t cur_flags = drm_bo_type_flags(mem_type);
++      uint64_t flag_diff;
++
++      if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
++              return 0;
++      if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
++              cur_flags |= DRM_BO_FLAG_CACHED;
++      if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
++              cur_flags |= DRM_BO_FLAG_MAPPABLE;
++      if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
++              DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
++
++      if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
++              return 0;
++
++      if (mem_type == DRM_BO_MEM_LOCAL) {
++              *res_mask = cur_flags;
++              return 1;
++      }
++
++      flag_diff = (mask ^ cur_flags);
++      if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
++              cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
++
++      if ((flag_diff & DRM_BO_FLAG_CACHED) &&
++          (!(mask & DRM_BO_FLAG_CACHED) ||
++           (mask & DRM_BO_FLAG_FORCE_CACHING)))
++              return 0;
++
++      if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
++          ((mask & DRM_BO_FLAG_MAPPABLE) ||
++           (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
++              return 0;
++
++      *res_mask = cur_flags;
++      return 1;
++}
++
++/**
++ * Creates space for memory region @mem according to its type.
++ *
++ * This function first searches for free space in compatible memory types in
++ * the priority order defined by the driver.  If free space isn't found, then
++ * drm_bo_mem_force_space is attempted in priority order to evict and find
++ * space.
++ */
++int drm_bo_mem_space(struct drm_buffer_object *bo,
++                   struct drm_bo_mem_reg *mem, int no_wait)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man;
++
++      uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
++      const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
++      uint32_t i;
++      uint32_t mem_type = DRM_BO_MEM_LOCAL;
++      uint32_t cur_flags;
++      int type_found = 0;
++      int type_ok = 0;
++      int has_eagain = 0;
++      struct drm_mm_node *node = NULL;
++      int ret;
++
++      mem->mm_node = NULL;
++      for (i = 0; i < num_prios; ++i) {
++              mem_type = prios[i];
++              man = &bm->man[mem_type];
++
++              type_ok = drm_bo_mt_compatible(man,
++                                             bo->type == drm_bo_type_user,
++                                             mem_type, mem->proposed_flags,
++                                             &cur_flags);
++
++              if (!type_ok)
++                      continue;
++
++              if (mem_type == DRM_BO_MEM_LOCAL)
++                      break;
++
++              if ((mem_type == bo->pinned_mem_type) &&
++                  (bo->pinned_node != NULL)) {
++                      node = bo->pinned_node;
++                      break;
++              }
++
++              mutex_lock(&dev->struct_mutex);
++              if (man->has_type && man->use_type) {
++                      type_found = 1;
++                      node = drm_mm_search_free(&man->manager, mem->num_pages,
++                                                mem->page_alignment, 1);
++                      if (node)
++                              node = drm_mm_get_block(node, mem->num_pages,
++                                                      mem->page_alignment);
++              }
++              mutex_unlock(&dev->struct_mutex);
++              if (node)
++                      break;
++      }
++
++      if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
++              mem->mm_node = node;
++              mem->mem_type = mem_type;
++              mem->flags = cur_flags;
++              return 0;
++      }
++
++      if (!type_found)
++              return -EINVAL;
++
++      num_prios = dev->driver->bo_driver->num_mem_busy_prio;
++      prios = dev->driver->bo_driver->mem_busy_prio;
++
++      for (i = 0; i < num_prios; ++i) {
++              mem_type = prios[i];
++              man = &bm->man[mem_type];
++
++              if (!man->has_type)
++                      continue;
++
++              if (!drm_bo_mt_compatible(man,
++                                        bo->type == drm_bo_type_user,
++                                        mem_type,
++                                        mem->proposed_flags,
++                                        &cur_flags))
++                      continue;
++
++              ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
++
++              if (ret == 0 && mem->mm_node) {
++                      mem->flags = cur_flags;
++                      return 0;
++              }
++
++              if (ret == -EAGAIN)
++                      has_eagain = 1;
++      }
++
++      ret = (has_eagain) ? -EAGAIN : -ENOMEM;
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_mem_space);
++
++/*
++ * drm_bo_propose_flags:
++ *
++ * @bo: the buffer object getting new flags
++ *
++ * @new_flags: the new set of proposed flag bits
++ *
++ * @new_mask: the mask of bits changed in new_flags
++ *
++ * Modify the proposed_flag bits in @bo
++ */
++static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
++                                       uint64_t new_flags, uint64_t new_mask)
++{
++      uint32_t new_access;
++
++      /* Copy unchanging bits from existing proposed_flags */
++      DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
++       
++      if (bo->type == drm_bo_type_user &&
++          ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
++           (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
++              DRM_ERROR("User buffers require cache-coherent memory.\n");
++              return -EINVAL;
++      }
++
++      if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
++              DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
++              return -EPERM;
++      }
++
++      if (likely(new_mask & DRM_BO_MASK_MEM) &&
++          (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
++          !DRM_SUSER(DRM_CURPROC)) {
++              if (likely(bo->mem.flags & new_flags & new_mask &
++                         DRM_BO_MASK_MEM))
++                      new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
++                              (bo->mem.flags & DRM_BO_MASK_MEM);
++              else {
++                      DRM_ERROR("Incompatible memory type specification "
++                                "for NO_EVICT buffer.\n");
++                      return -EPERM;
++              }
++      }
++
++      if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
++              DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
++              return -EPERM;
++      }
++
++      new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
++                                DRM_BO_FLAG_READ);
++
++      if (new_access == 0) {
++              DRM_ERROR("Invalid buffer object rwx properties\n");
++              return -EINVAL;
++      }
++
++      bo->mem.proposed_flags = new_flags;
++      return 0;
++}
++
++/*
++ * Call dev->struct_mutex locked.
++ */
++
++struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
++                                            uint32_t handle, int check_owner)
++{
++      struct drm_user_object *uo;
++      struct drm_buffer_object *bo;
++
++      uo = drm_lookup_user_object(file_priv, handle);
++
++      if (!uo || (uo->type != drm_buffer_type)) {
++              DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
++              return NULL;
++      }
++
++      if (check_owner && file_priv != uo->owner) {
++              if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
++                      return NULL;
++      }
++
++      bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
++      atomic_inc(&bo->usage);
++      return bo;
++}
++EXPORT_SYMBOL(drm_lookup_buffer_object);
++
++/*
++ * Call bo->mutex locked.
++ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
++ * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
++ */
++
++static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
++{
++      struct drm_fence_object *fence = bo->fence;
++
++      if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
++              return -EBUSY;
++
++      if (fence) {
++              if (drm_fence_object_signaled(fence, bo->fence_type)) {
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++                      return 0;
++              }
++              return -EBUSY;
++      }
++      return 0;
++}
++
++int drm_bo_evict_cached(struct drm_buffer_object *bo)
++{
++      int ret = 0;
++
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
++      if (bo->mem.mm_node)
++              ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
++      return ret;
++}
++
++EXPORT_SYMBOL(drm_bo_evict_cached);
++/*
++ * Wait until a buffer is unmapped.
++ */
++
++static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
++{
++      int ret = 0;
++
++      if (likely(atomic_read(&bo->mapped)) == 0)
++              return 0;
++
++      if (unlikely(no_wait))
++              return -EBUSY;
++
++      do {
++              mutex_unlock(&bo->mutex);
++              ret = wait_event_interruptible(bo->event_queue,
++                                             atomic_read(&bo->mapped) == 0);
++              mutex_lock(&bo->mutex);
++              bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
++
++              if (ret == -ERESTARTSYS)
++                      ret = -EAGAIN;
++      } while((ret == 0) && atomic_read(&bo->mapped) > 0);
++
++      return ret;
++}
++
++/*
++ * Fill in the ioctl reply argument with buffer info.
++ * Bo locked.
++ */
++
++void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
++                       struct drm_bo_info_rep *rep)
++{
++      if (!rep)
++              return;
++
++      rep->handle = bo->base.hash.key;
++      rep->flags = bo->mem.flags;
++      rep->size = bo->num_pages * PAGE_SIZE;
++      rep->offset = bo->offset;
++
++      /*
++       * drm_bo_type_device buffers have user-visible
++       * handles which can be used to share across
++       * processes. Hand that back to the application
++       */
++      if (bo->type == drm_bo_type_device)
++              rep->arg_handle = bo->map_list.user_token;
++      else
++              rep->arg_handle = 0;
++
++      rep->proposed_flags = bo->mem.proposed_flags;
++      rep->buffer_start = bo->buffer_start;
++      rep->fence_flags = bo->fence_type;
++      rep->rep_flags = 0;
++      rep->page_alignment = bo->mem.page_alignment;
++
++      if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
++              DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
++                              DRM_BO_REP_BUSY);
++      }
++}
++EXPORT_SYMBOL(drm_bo_fill_rep_arg);
++
++/*
++ * Wait for buffer idle and register that we've mapped the buffer.
++ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
++ * so that if the client dies, the mapping is automatically
++ * unregistered.
++ */
++
++static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
++                               uint32_t map_flags, unsigned hint,
++                               struct drm_bo_info_rep *rep)
++{
++      struct drm_buffer_object *bo;
++      struct drm_device *dev = file_priv->minor->dev;
++      int ret = 0;
++      int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      mutex_lock(&bo->mutex);
++      do {
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++              ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
++              if (unlikely(ret))
++                      goto out;
++
++              if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
++                      drm_bo_evict_cached(bo);
++
++      } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
++
++      atomic_inc(&bo->mapped);
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret) {
++              if (atomic_dec_and_test(&bo->mapped))
++                      wake_up_all(&bo->event_queue);
++
++      } else
++              drm_bo_fill_rep_arg(bo, rep);
++
++ out:
++      mutex_unlock(&bo->mutex);
++      drm_bo_usage_deref_unlocked(&bo);
++
++      return ret;
++}
++
++static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++      struct drm_ref_object *ro;
++      int ret = 0;
++
++      mutex_lock(&dev->struct_mutex);
++
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      if (!bo) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
++      if (!ro) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      drm_remove_ref_object(file_priv, ro);
++      drm_bo_usage_deref_locked(&bo);
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/*
++ * Call struct-sem locked.
++ */
++
++static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
++                                       struct drm_user_object *uo,
++                                       enum drm_ref_type action)
++{
++      struct drm_buffer_object *bo =
++          drm_user_object_entry(uo, struct drm_buffer_object, base);
++
++      /*
++       * We DON'T want to take the bo->lock here, because we want to
++       * hold it when we wait for unmapped buffer.
++       */
++
++      BUG_ON(action != _DRM_REF_TYPE1);
++
++      if (atomic_dec_and_test(&bo->mapped))
++              wake_up_all(&bo->event_queue);
++}
++
++/*
++ * bo->mutex locked.
++ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
++ */
++
++int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
++                     int no_wait, int move_unfenced)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = 0;
++      struct drm_bo_mem_reg mem;
++
++      BUG_ON(bo->fence != NULL);
++
++      mem.num_pages = bo->num_pages;
++      mem.size = mem.num_pages << PAGE_SHIFT;
++      mem.proposed_flags = new_mem_flags;
++      mem.page_alignment = bo->mem.page_alignment;
++
++      mutex_lock(&bm->evict_mutex);
++      mutex_lock(&dev->struct_mutex);
++      list_del_init(&bo->lru);
++      mutex_unlock(&dev->struct_mutex);
++
++      /*
++       * Determine where to move the buffer.
++       */
++      ret = drm_bo_mem_space(bo, &mem, no_wait);
++      if (ret)
++              goto out_unlock;
++
++      ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
++
++out_unlock:
++      mutex_lock(&dev->struct_mutex);
++      if (ret || !move_unfenced) {
++              if (mem.mm_node) {
++                      if (mem.mm_node != bo->pinned_node)
++                              drm_mm_put_block(mem.mm_node);
++                      mem.mm_node = NULL;
++              }
++              drm_bo_add_to_lru(bo);
++              if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
++                      wake_up_all(&bo->event_queue);
++                      DRM_FLAG_MASKED(bo->priv_flags, 0,
++                                      _DRM_BO_FLAG_UNFENCED);
++              }
++      } else {
++              list_add_tail(&bo->lru, &bm->unfenced);
++              DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
++                              _DRM_BO_FLAG_UNFENCED);
++      }
++      mutex_unlock(&dev->struct_mutex);
++      mutex_unlock(&bm->evict_mutex);
++      return ret;
++}
++
++static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
++{
++      uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
++
++      if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
++              return 0;
++      if ((flag_diff & DRM_BO_FLAG_CACHED) &&
++          (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
++           (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
++              return 0;
++
++      if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
++          ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
++           (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
++              return 0;
++      return 1;
++}
++
++/**
++ * drm_buffer_object_validate:
++ *
++ * @bo: the buffer object to modify
++ *
++ * @fence_class: the new fence class covering this buffer
++ *
++ * @move_unfenced: a boolean indicating whether switching the
++ * memory space of this buffer should cause the buffer to
++ * be placed on the unfenced list.
++ *
++ * @no_wait: whether this function should return -EBUSY instead
++ * of waiting.
++ *
++ * Change buffer access parameters. This can involve moving
++ * the buffer to the correct memory type, pinning the buffer
++ * or changing the class/type of fence covering this buffer
++ *
++ * Must be called with bo locked.
++ */
++
++static int drm_buffer_object_validate(struct drm_buffer_object *bo,
++                                    uint32_t fence_class,
++                                    int move_unfenced, int no_wait,
++                                    int move_buffer)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret;
++
++      if (move_buffer) {
++              ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
++                                       move_unfenced);
++              if (ret) {
++                      if (ret != -EAGAIN)
++                              DRM_ERROR("Failed moving buffer.\n");
++                      if (ret == -ENOMEM)
++                              DRM_ERROR("Out of aperture space or "
++                                        "DRM memory quota.\n");
++                      return ret;
++              }
++      }
++
++      /*
++       * Pinned buffers.
++       */
++
++      if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
++              bo->pinned_mem_type = bo->mem.mem_type;
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(&bo->pinned_lru);
++              drm_bo_add_to_pinned_lru(bo);
++
++              if (bo->pinned_node != bo->mem.mm_node) {
++                      if (bo->pinned_node != NULL)
++                              drm_mm_put_block(bo->pinned_node);
++                      bo->pinned_node = bo->mem.mm_node;
++              }
++
++              mutex_unlock(&dev->struct_mutex);
++
++      } else if (bo->pinned_node != NULL) {
++
++              mutex_lock(&dev->struct_mutex);
++
++              if (bo->pinned_node != bo->mem.mm_node)
++                      drm_mm_put_block(bo->pinned_node);
++
++              list_del_init(&bo->pinned_lru);
++              bo->pinned_node = NULL;
++              mutex_unlock(&dev->struct_mutex);
++
++      }
++
++      /*
++       * We might need to add a TTM.
++       */
++
++      if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
++              ret = drm_bo_add_ttm(bo);
++              if (ret)
++                      return ret;
++      }
++      /*
++       * Validation has succeeded, move the access and other
++       * non-mapping-related flag bits from the proposed flags to
++       * the active flags
++       */
++
++      DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
++
++      /*
++       * Finally, adjust lru to be sure.
++       */
++
++      mutex_lock(&dev->struct_mutex);
++      list_del(&bo->lru);
++      if (move_unfenced) {
++              list_add_tail(&bo->lru, &bm->unfenced);
++              DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
++                              _DRM_BO_FLAG_UNFENCED);
++      } else {
++              drm_bo_add_to_lru(bo);
++              if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
++                      wake_up_all(&bo->event_queue);
++                      DRM_FLAG_MASKED(bo->priv_flags, 0,
++                                      _DRM_BO_FLAG_UNFENCED);
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/*
++ * This function is called with bo->mutex locked, but may release it
++ * temporarily to wait for events.
++ */
++
++static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
++                                     uint64_t flags,
++                                     uint64_t mask,
++                                     uint32_t hint,
++                                     uint32_t fence_class,
++                                     int no_wait,
++                                     int *move_buffer)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      uint32_t ftype;
++
++      int ret;
++
++      DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
++                (unsigned long long) bo->mem.proposed_flags,
++                (unsigned long long) bo->mem.flags);
++
++      ret = drm_bo_modify_proposed_flags (bo, flags, mask);
++      if (ret)
++              return ret;
++
++      ret = drm_bo_wait_unmapped(bo, no_wait);
++      if (ret)
++              return ret;
++
++      ret = driver->fence_type(bo, &fence_class, &ftype);
++
++      if (ret) {
++              DRM_ERROR("Driver did not support given buffer permissions.\n");
++              return ret;
++      }
++
++      /*
++       * We're switching command submission mechanism,
++       * or cannot simply rely on the hardware serializing for us.
++       * Insert a driver-dependant barrier or wait for buffer idle.
++       */
++
++      if ((fence_class != bo->fence_class) ||
++          ((ftype ^ bo->fence_type) & bo->fence_type)) {
++
++              ret = -EINVAL;
++              if (driver->command_stream_barrier) {
++                      ret = driver->command_stream_barrier(bo,
++                                                           fence_class,
++                                                           ftype,
++                                                           no_wait);
++              }
++              if (ret && ret != -EAGAIN) 
++                      ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
++              
++              if (ret)
++                      return ret;
++      }
++
++      bo->new_fence_class = fence_class;
++      bo->new_fence_type = ftype;
++
++      /*
++       * Check whether we need to move buffer.
++       */
++
++      *move_buffer = 0;
++      if (!drm_bo_mem_compat(&bo->mem)) {
++              *move_buffer = 1;
++              ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
++      }
++
++      return ret;
++}
++
++/**
++ * drm_bo_do_validate:
++ *
++ * @bo:       the buffer object
++ *
++ * @flags: access rights, mapping parameters and cacheability. See
++ * the DRM_BO_FLAG_* values in drm.h
++ *
++ * @mask: Which flag values to change; this allows callers to modify
++ * things without knowing the current state of other flags.
++ *
++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
++ * values in drm.h.
++ *
++ * @fence_class: a driver-specific way of doing fences. Presumably,
++ * this would be used if the driver had more than one submission and
++ * fencing mechanism. At this point, there isn't any use of this
++ * from the user mode code.
++ *
++ * @rep: To be stuffed with the reply from validation
++ * 
++ * 'validate' a buffer object. This changes where the buffer is
++ * located, along with changing access modes.
++ */
++
++int drm_bo_do_validate(struct drm_buffer_object *bo,
++                     uint64_t flags, uint64_t mask, uint32_t hint,
++                     uint32_t fence_class,
++                     struct drm_bo_info_rep *rep)
++{
++      int ret;
++      int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
++      int move_buffer;
++
++      mutex_lock(&bo->mutex);
++
++      do {
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++              ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
++                                                fence_class, no_wait,
++                                                &move_buffer);
++              if (ret)
++                      goto out;
++
++      } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
++
++      ret = drm_buffer_object_validate(bo,
++                                       fence_class,
++                                       !(hint & DRM_BO_HINT_DONT_FENCE),
++                                       no_wait,
++                                       move_buffer);
++
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++out:
++      if (rep)
++              drm_bo_fill_rep_arg(bo, rep);
++
++      mutex_unlock(&bo->mutex);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_do_validate);
++
++/**
++ * drm_bo_handle_validate
++ *
++ * @file_priv: the drm file private, used to get a handle to the user context
++ *
++ * @handle: the buffer object handle
++ *
++ * @flags: access rights, mapping parameters and cacheability. See
++ * the DRM_BO_FLAG_* values in drm.h
++ *
++ * @mask: Which flag values to change; this allows callers to modify
++ * things without knowing the current state of other flags.
++ *
++ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
++ * values in drm.h.
++ *
++ * @fence_class: a driver-specific way of doing fences. Presumably,
++ * this would be used if the driver had more than one submission and
++ * fencing mechanism. At this point, there isn't any use of this
++ * from the user mode code.
++ *
++ * @rep: To be stuffed with the reply from validation
++ *
++ * @bp_rep: To be stuffed with the buffer object pointer
++ *
++ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
++ * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
++ * This is a convenience wrapper only.
++ */
++
++int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
++                         uint64_t flags, uint64_t mask,
++                         uint32_t hint,
++                         uint32_t fence_class,
++                         struct drm_bo_info_rep *rep,
++                         struct drm_buffer_object **bo_rep)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      if (bo->base.owner != file_priv)
++              mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
++
++      ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
++
++      if (!ret && bo_rep)
++              *bo_rep = bo;
++      else
++              drm_bo_usage_deref_unlocked(&bo);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_handle_validate);
++
++
++static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
++                            struct drm_bo_info_rep *rep)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      mutex_lock(&bo->mutex);
++
++      /*
++       * FIXME: Quick busy here?
++       */
++
++      drm_bo_busy(bo, 1);
++      drm_bo_fill_rep_arg(bo, rep);
++      mutex_unlock(&bo->mutex);
++      drm_bo_usage_deref_unlocked(&bo);
++      return 0;
++}
++
++static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
++                            uint32_t hint,
++                            struct drm_bo_info_rep *rep)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_buffer_object *bo;
++      int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      mutex_lock(&bo->mutex);
++      ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
++      if (ret)
++              goto out;
++
++      drm_bo_fill_rep_arg(bo, rep);
++out:
++      mutex_unlock(&bo->mutex);
++      drm_bo_usage_deref_unlocked(&bo);
++      return ret;
++}
++
++int drm_buffer_object_create(struct drm_device *dev,
++                           unsigned long size,
++                           enum drm_bo_type type,
++                           uint64_t flags,
++                           uint32_t hint,
++                           uint32_t page_alignment,
++                           unsigned long buffer_start,
++                           struct drm_buffer_object **buf_obj)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_buffer_object *bo;
++      int ret = 0;
++      unsigned long num_pages;
++
++      size += buffer_start & ~PAGE_MASK;
++      num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      if (num_pages == 0) {
++              DRM_ERROR("Illegal buffer object size.\n");
++              return -EINVAL;
++      }
++
++      bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
++
++      if (!bo)
++              return -ENOMEM;
++
++      mutex_init(&bo->mutex);
++      mutex_lock(&bo->mutex);
++
++      atomic_set(&bo->usage, 1);
++      atomic_set(&bo->mapped, 0);
++      DRM_INIT_WAITQUEUE(&bo->event_queue);
++      INIT_LIST_HEAD(&bo->lru);
++      INIT_LIST_HEAD(&bo->pinned_lru);
++      INIT_LIST_HEAD(&bo->ddestroy);
++#ifdef DRM_ODD_MM_COMPAT
++      INIT_LIST_HEAD(&bo->p_mm_list);
++      INIT_LIST_HEAD(&bo->vma_list);
++#endif
++      bo->dev = dev;
++      bo->type = type;
++      bo->num_pages = num_pages;
++      bo->mem.mem_type = DRM_BO_MEM_LOCAL;
++      bo->mem.num_pages = bo->num_pages;
++      bo->mem.mm_node = NULL;
++      bo->mem.page_alignment = page_alignment;
++      bo->buffer_start = buffer_start & PAGE_MASK;
++      bo->priv_flags = 0;
++      bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
++                       DRM_BO_FLAG_MAPPABLE);
++      bo->mem.proposed_flags = 0;
++      atomic_inc(&bm->count);
++      /*
++       * Use drm_bo_modify_proposed_flags to error-check the proposed flags
++       */
++      ret = drm_bo_modify_proposed_flags (bo, flags, flags);
++      if (ret)
++              goto out_err;
++
++      /*
++       * For drm_bo_type_device buffers, allocate
++       * address space from the device so that applications
++       * can mmap the buffer from there
++       */
++      if (bo->type == drm_bo_type_device) {
++              mutex_lock(&dev->struct_mutex);
++              ret = drm_bo_setup_vm_locked(bo);
++              mutex_unlock(&dev->struct_mutex);
++              if (ret)
++                      goto out_err;
++      }
++
++      mutex_unlock(&bo->mutex);
++      ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
++                               0, NULL);
++      if (ret)
++              goto out_err_unlocked;
++
++      *buf_obj = bo;
++      return 0;
++
++out_err:
++      mutex_unlock(&bo->mutex);
++out_err_unlocked:
++      drm_bo_usage_deref_unlocked(&bo);
++      return ret;
++}
++EXPORT_SYMBOL(drm_buffer_object_create);
++
++
++static int drm_bo_add_user_object(struct drm_file *file_priv,
++                                struct drm_buffer_object *bo, int shareable)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_user_object(file_priv, &bo->base, shareable);
++      if (ret)
++              goto out;
++
++      bo->base.remove = drm_bo_base_deref_locked;
++      bo->base.type = drm_buffer_type;
++      bo->base.ref_struct_locked = NULL;
++      bo->base.unref = drm_buffer_user_object_unmap;
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_create_arg *arg = data;
++      struct drm_bo_create_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      struct drm_buffer_object *entry;
++      enum drm_bo_type bo_type;
++      int ret = 0;
++
++      DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
++          (int)(req->size / 1024), req->page_alignment * 4);
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * If the buffer creation request comes in with a starting address,
++       * that points at the desired user pages to map. Otherwise, create
++       * a drm_bo_type_device buffer, which uses pages allocated from the kernel
++       */
++      bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
++
++      /*
++       * User buffers cannot be shared
++       */
++      if (bo_type == drm_bo_type_user)
++              req->flags &= ~DRM_BO_FLAG_SHAREABLE;
++
++      ret = drm_buffer_object_create(file_priv->minor->dev,
++                                     req->size, bo_type, req->flags,
++                                     req->hint, req->page_alignment,
++                                     req->buffer_start, &entry);
++      if (ret)
++              goto out;
++
++      ret = drm_bo_add_user_object(file_priv, entry,
++                                   req->flags & DRM_BO_FLAG_SHAREABLE);
++      if (ret) {
++              drm_bo_usage_deref_unlocked(&entry);
++              goto out;
++      }
++
++      mutex_lock(&entry->mutex);
++      drm_bo_fill_rep_arg(entry, rep);
++      mutex_unlock(&entry->mutex);
++
++out:
++      return ret;
++}
++
++int drm_bo_setstatus_ioctl(struct drm_device *dev,
++                         void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_map_wait_idle_arg *arg = data;
++      struct drm_bo_info_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      struct drm_buffer_object *bo;
++      int ret;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (ret)
++              return ret;
++
++      mutex_lock(&dev->struct_mutex);
++      bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!bo)
++              return -EINVAL;
++
++      if (bo->base.owner != file_priv)
++              req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
++
++      ret = drm_bo_do_validate(bo, req->flags, req->mask,
++                               req->hint | DRM_BO_HINT_DONT_FENCE,
++                               bo->fence_class, rep);
++
++      drm_bo_usage_deref_unlocked(&bo);
++
++      (void) drm_bo_read_unlock(&dev->bm.bm_lock);
++
++      return ret;
++}
++
++int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_map_wait_idle_arg *arg = data;
++      struct drm_bo_info_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      int ret;
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
++                                  req->hint, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_handle_arg *arg = data;
++      int ret;
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_buffer_object_unmap(file_priv, arg->handle);
++      return ret;
++}
++
++
++int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_reference_info_arg *arg = data;
++      struct drm_bo_handle_arg *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      struct drm_user_object *uo;
++      int ret;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_user_object_ref(file_priv, req->handle,
++                                drm_buffer_type, &uo);
++      if (ret)
++              return ret;
++
++      ret = drm_bo_handle_info(file_priv, req->handle, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_handle_arg *arg = data;
++      int ret = 0;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
++      return ret;
++}
++
++int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_reference_info_arg *arg = data;
++      struct drm_bo_handle_arg *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      int ret;
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_handle_info(file_priv, req->handle, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_bo_map_wait_idle_arg *arg = data;
++      struct drm_bo_info_req *req = &arg->d.req;
++      struct drm_bo_info_rep *rep = &arg->d.rep;
++      int ret;
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_handle_wait(file_priv, req->handle,
++                               req->hint, rep);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++static int drm_bo_leave_list(struct drm_buffer_object *bo,
++                           uint32_t mem_type,
++                           int free_pinned,
++                           int allow_errors)
++{
++      struct drm_device *dev = bo->dev;
++      int ret = 0;
++
++      mutex_lock(&bo->mutex);
++
++      ret = drm_bo_expire_fence(bo, allow_errors);
++      if (ret)
++              goto out;
++
++      if (free_pinned) {
++              DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(&bo->pinned_lru);
++              if (bo->pinned_node == bo->mem.mm_node)
++                      bo->pinned_node = NULL;
++              if (bo->pinned_node != NULL) {
++                      drm_mm_put_block(bo->pinned_node);
++                      bo->pinned_node = NULL;
++              }
++              mutex_unlock(&dev->struct_mutex);
++      }
++
++      if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
++              DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
++                        "cleanup. Removing flag and evicting.\n");
++              bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
++              bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
++      }
++
++      if (bo->mem.mem_type == mem_type)
++              ret = drm_bo_evict(bo, mem_type, 0);
++
++      if (ret) {
++              if (allow_errors) {
++                      goto out;
++              } else {
++                      ret = 0;
++                      DRM_ERROR("Cleanup eviction failed\n");
++              }
++      }
++
++out:
++      mutex_unlock(&bo->mutex);
++      return ret;
++}
++
++
++static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
++                                       int pinned_list)
++{
++      if (pinned_list)
++              return list_entry(list, struct drm_buffer_object, pinned_lru);
++      else
++              return list_entry(list, struct drm_buffer_object, lru);
++}
++
++/*
++ * dev->struct_mutex locked.
++ */
++
++static int drm_bo_force_list_clean(struct drm_device *dev,
++                                 struct list_head *head,
++                                 unsigned mem_type,
++                                 int free_pinned,
++                                 int allow_errors,
++                                 int pinned_list)
++{
++      struct list_head *list, *next, *prev;
++      struct drm_buffer_object *entry, *nentry;
++      int ret;
++      int do_restart;
++
++      /*
++       * The list traversal is a bit odd here, because an item may
++       * disappear from the list when we release the struct_mutex or
++       * when we decrease the usage count. Also we're not guaranteed
++       * to drain pinned lists, so we can't always restart.
++       */
++
++restart:
++      nentry = NULL;
++      list_for_each_safe(list, next, head) {
++              prev = list->prev;
++
++              entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
++              atomic_inc(&entry->usage);
++              if (nentry) {
++                      atomic_dec(&nentry->usage);
++                      nentry = NULL;
++              }
++
++              /*
++               * Protect the next item from destruction, so we can check
++               * its list pointers later on.
++               */
++
++              if (next != head) {
++                      nentry = drm_bo_entry(next, pinned_list);
++                      atomic_inc(&nentry->usage);
++              }
++              mutex_unlock(&dev->struct_mutex);
++
++              ret = drm_bo_leave_list(entry, mem_type, free_pinned,
++                                      allow_errors);
++              mutex_lock(&dev->struct_mutex);
++
++              drm_bo_usage_deref_locked(&entry);
++              if (ret)
++                      return ret;
++
++              /*
++               * Has the next item disappeared from the list?
++               */
++
++              do_restart = ((next->prev != list) && (next->prev != prev));
++
++              if (nentry != NULL && do_restart)
++                      drm_bo_usage_deref_locked(&nentry);
++
++              if (do_restart)
++                      goto restart;
++      }
++      return 0;
++}
++
++int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem_type];
++      int ret = -EINVAL;
++
++      if (mem_type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory type %d\n", mem_type);
++              return ret;
++      }
++
++      if (!man->has_type) {
++              DRM_ERROR("Trying to take down uninitialized "
++                        "memory manager type %u\n", mem_type);
++              return ret;
++      }
++
++      if ((man->kern_init_type) && (kern_clean == 0)) {
++              DRM_ERROR("Trying to take down kernel initialized "
++                        "memory manager type %u\n", mem_type);
++              return -EPERM;
++      }
++
++      man->use_type = 0;
++      man->has_type = 0;
++
++      ret = 0;
++      if (mem_type > 0) {
++              BUG_ON(!list_empty(&bm->unfenced));
++              drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
++              drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
++
++              if (drm_mm_clean(&man->manager)) {
++                      drm_mm_takedown(&man->manager);
++              } else {
++                      ret = -EBUSY;
++              }
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_clean_mm);
++
++/**
++ *Evict all buffers of a particular mem_type, but leave memory manager
++ *regions for NO_MOVE buffers intact. New buffers cannot be added at this
++ *point since we have the hardware lock.
++ */
++
++static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
++{
++      int ret;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem_type];
++
++      if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
++              return -EINVAL;
++      }
++
++      if (!man->has_type) {
++              DRM_ERROR("Memory type %u has not been initialized.\n",
++                        mem_type);
++              return 0;
++      }
++
++      ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
++      if (ret)
++              return ret;
++      ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
++
++      return ret;
++}
++
++int drm_bo_init_mm(struct drm_device *dev, unsigned type,
++                 unsigned long p_offset, unsigned long p_size,
++                 int kern_init)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = -EINVAL;
++      struct drm_mem_type_manager *man;
++
++      if (type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory type %d\n", type);
++              return ret;
++      }
++
++      man = &bm->man[type];
++      if (man->has_type) {
++              DRM_ERROR("Memory manager already initialized for type %d\n",
++                        type);
++              return ret;
++      }
++
++      ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
++      if (ret)
++              return ret;
++
++      ret = 0;
++      if (type != DRM_BO_MEM_LOCAL) {
++              if (!p_size) {
++                      DRM_ERROR("Zero size memory manager type %d\n", type);
++                      return ret;
++              }
++              ret = drm_mm_init(&man->manager, p_offset, p_size);
++              if (ret)
++                      return ret;
++      }
++      man->has_type = 1;
++      man->use_type = 1;
++      man->kern_init_type = kern_init;
++      man->size = p_size;
++
++      INIT_LIST_HEAD(&man->lru);
++      INIT_LIST_HEAD(&man->pinned);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_init_mm);
++
++/*
++ * This function is intended to be called on drm driver unload.
++ * If you decide to call it from lastclose, you must protect the call
++ * from a potentially racing drm_bo_driver_init in firstopen.
++ * (This may happen on X server restart).
++ */
++
++int drm_bo_driver_finish(struct drm_device *dev)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = 0;
++      unsigned i = DRM_BO_MEM_TYPES;
++      struct drm_mem_type_manager *man;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (!bm->initialized)
++              goto out;
++      bm->initialized = 0;
++
++      while (i--) {
++              man = &bm->man[i];
++              if (man->has_type) {
++                      man->use_type = 0;
++                      if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
++                              ret = -EBUSY;
++                              DRM_ERROR("DRM memory manager type %d "
++                                        "is not clean.\n", i);
++                      }
++                      man->has_type = 0;
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!cancel_delayed_work(&bm->wq))
++              flush_scheduled_work();
++
++      mutex_lock(&dev->struct_mutex);
++      drm_bo_delayed_delete(dev, 1);
++      if (list_empty(&bm->ddestroy))
++              DRM_DEBUG("Delayed destroy list was clean\n");
++
++      if (list_empty(&bm->man[0].lru))
++              DRM_DEBUG("Swap list was clean\n");
++
++      if (list_empty(&bm->man[0].pinned))
++              DRM_DEBUG("NO_MOVE list was clean\n");
++
++      if (list_empty(&bm->unfenced))
++              DRM_DEBUG("Unfenced list was clean\n");
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      ClearPageReserved(bm->dummy_read_page);
++#endif
++      __free_page(bm->dummy_read_page);
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/*
++ * This function is intended to be called on drm driver load.
++ * If you decide to call it from firstopen, you must protect the call
++ * from a potentially racing drm_bo_driver_finish in lastclose.
++ * (This may happen on X server restart).
++ */
++
++int drm_bo_driver_init(struct drm_device *dev)
++{
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      struct drm_buffer_manager *bm = &dev->bm;
++      int ret = -EINVAL;
++
++      bm->dummy_read_page = NULL;
++      drm_bo_init_lock(&bm->bm_lock);
++      mutex_lock(&dev->struct_mutex);
++      if (!driver)
++              goto out_unlock;
++
++      bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
++      if (!bm->dummy_read_page) {
++              ret = -ENOMEM;
++              goto out_unlock;
++      }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      SetPageReserved(bm->dummy_read_page);
++#endif
++
++      /*
++       * Initialize the system memory buffer type.
++       * Other types need to be driver / IOCTL initialized.
++       */
++      ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
++      if (ret)
++              goto out_unlock;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
++#else
++      INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
++#endif
++      bm->initialized = 1;
++      bm->nice_mode = 1;
++      atomic_set(&bm->count, 0);
++      bm->cur_pages = 0;
++      INIT_LIST_HEAD(&bm->unfenced);
++      INIT_LIST_HEAD(&bm->ddestroy);
++out_unlock:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_driver_init);
++
++int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_init_arg *arg = data;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
++      if (ret)
++              return ret;
++
++      ret = -EINVAL;
++      if (arg->magic != DRM_BO_INIT_MAGIC) {
++              DRM_ERROR("You are using an old libdrm that is not compatible with\n"
++                        "\tthe kernel DRM module. Please upgrade your libdrm.\n");
++              return -EINVAL;
++      }
++      if (arg->major != DRM_BO_INIT_MAJOR) {
++              DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
++                        "\tversion don't match. Got %d, expected %d.\n",
++                        arg->major, DRM_BO_INIT_MAJOR);
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      if (!bm->initialized) {
++              DRM_ERROR("DRM memory manager was not initialized.\n");
++              goto out;
++      }
++      if (arg->mem_type == 0) {
++              DRM_ERROR("System memory buffers already initialized.\n");
++              goto out;
++      }
++      ret = drm_bo_init_mm(dev, arg->mem_type,
++                           arg->p_offset, arg->p_size, 0);
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++      (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
++
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_type_arg *arg = data;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
++      if (ret)
++              return ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = -EINVAL;
++      if (!bm->initialized) {
++              DRM_ERROR("DRM memory manager was not initialized\n");
++              goto out;
++      }
++      if (arg->mem_type == 0) {
++              DRM_ERROR("No takedown for System memory buffers.\n");
++              goto out;
++      }
++      ret = 0;
++      if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
++              if (ret == -EINVAL)
++                      DRM_ERROR("Memory manager type %d not clean. "
++                                "Delaying takedown\n", arg->mem_type);
++              ret = 0;
++      }
++out:
++      mutex_unlock(&dev->struct_mutex);
++      (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
++
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_type_arg *arg = data;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
++              DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
++              return -EINVAL;
++      }
++
++      if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
++              ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
++              if (ret)
++                      return ret;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_bo_lock_mm(dev, arg->mem_type);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret) {
++              (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
++              return ret;
++      }
++
++      return 0;
++}
++
++int drm_mm_unlock_ioctl(struct drm_device *dev,
++                      void *data,
++                      struct drm_file *file_priv)
++{
++      struct drm_mm_type_arg *arg = data;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      int ret;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
++              ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
++              if (ret)
++                      return ret;
++      }
++
++      return 0;
++}
++
++int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_mm_info_arg *arg = data;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_bo_driver *driver = dev->driver->bo_driver;
++      struct drm_mem_type_manager *man;
++      int ret = 0;
++      int mem_type = arg->mem_type;
++
++      if (!driver) {
++              DRM_ERROR("Buffer objects are not supported by this driver\n");
++              return -EINVAL;
++      }
++
++      if (mem_type >= DRM_BO_MEM_TYPES) {
++              DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      if (!bm->initialized) {
++              DRM_ERROR("DRM memory manager was not initialized\n");
++              ret = -EINVAL;
++              goto out;
++      }
++
++
++      man = &bm->man[arg->mem_type];
++
++      arg->p_size = man->size;
++
++out:
++      mutex_unlock(&dev->struct_mutex);
++     
++      return ret;
++}
++/*
++ * buffer object vm functions.
++ */
++
++int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++
++      if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
++              if (mem->mem_type == DRM_BO_MEM_LOCAL)
++                      return 0;
++
++              if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
++                      return 0;
++
++              if (mem->flags & DRM_BO_FLAG_CACHED)
++                      return 0;
++      }
++      return 1;
++}
++EXPORT_SYMBOL(drm_mem_reg_is_pci);
++
++/**
++ * \c Get the PCI offset for the buffer object memory.
++ *
++ * \param bo The buffer object.
++ * \param bus_base On return the base of the PCI region
++ * \param bus_offset On return the byte offset into the PCI region
++ * \param bus_size On return the byte size of the buffer object or zero if
++ *     the buffer object memory is not accessible through a PCI region.
++ * \return Failure indication.
++ *
++ * Returns -EINVAL if the buffer object is currently not mappable.
++ * Otherwise returns zero.
++ */
++
++int drm_bo_pci_offset(struct drm_device *dev,
++                    struct drm_bo_mem_reg *mem,
++                    unsigned long *bus_base,
++                    unsigned long *bus_offset, unsigned long *bus_size)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++
++      *bus_size = 0;
++      if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
++              return -EINVAL;
++
++      if (drm_mem_reg_is_pci(dev, mem)) {
++              *bus_offset = mem->mm_node->start << PAGE_SHIFT;
++              *bus_size = mem->num_pages << PAGE_SHIFT;
++              *bus_base = man->io_offset;
++      }
++
++      return 0;
++}
++
++/**
++ * \c Kill all user-space virtual mappings of this buffer object.
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
++{
++      struct drm_device *dev = bo->dev;
++      loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
++      loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
++
++      if (!dev->dev_mapping)
++              return;
++
++      unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
++}
++
++/**
++ * drm_bo_takedown_vm_locked:
++ *
++ * @bo: the buffer object to remove any drm device mapping
++ *
++ * Remove any associated vm mapping on the drm device node that
++ * would have been created for a drm_bo_type_device buffer
++ */
++static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
++{
++      struct drm_map_list *list;
++      drm_local_map_t *map;
++      struct drm_device *dev = bo->dev;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++      if (bo->type != drm_bo_type_device)
++              return;
++
++      list = &bo->map_list;
++      if (list->user_token) {
++              drm_ht_remove_item(&dev->map_hash, &list->hash);
++              list->user_token = 0;
++      }
++      if (list->file_offset_node) {
++              drm_mm_put_block(list->file_offset_node);
++              list->file_offset_node = NULL;
++      }
++
++      map = list->map;
++      if (!map)
++              return;
++
++      drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
++      list->map = NULL;
++      list->user_token = 0ULL;
++      drm_bo_usage_deref_locked(&bo);
++}
++
++/**
++ * drm_bo_setup_vm_locked:
++ *
++ * @bo: the buffer to allocate address space for
++ *
++ * Allocate address space in the drm device so that applications
++ * can mmap the buffer and access the contents. This only
++ * applies to drm_bo_type_device objects as others are not
++ * placed in the drm device address space.
++ */
++static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
++{
++      struct drm_map_list *list = &bo->map_list;
++      drm_local_map_t *map;
++      struct drm_device *dev = bo->dev;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++      list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
++      if (!list->map)
++              return -ENOMEM;
++
++      map = list->map;
++      map->offset = 0;
++      map->type = _DRM_TTM;
++      map->flags = _DRM_REMOVABLE;
++      map->size = bo->mem.num_pages * PAGE_SIZE;
++      atomic_inc(&bo->usage);
++      map->handle = (void *)bo;
++
++      list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
++                                                  bo->mem.num_pages, 0, 0);
++
++      if (unlikely(!list->file_offset_node)) {
++              drm_bo_takedown_vm_locked(bo);
++              return -ENOMEM;
++      }
++
++      list->file_offset_node = drm_mm_get_block(list->file_offset_node,
++                                                bo->mem.num_pages, 0);
++
++      if (unlikely(!list->file_offset_node)) {
++              drm_bo_takedown_vm_locked(bo);
++              return -ENOMEM;
++      }
++              
++      list->hash.key = list->file_offset_node->start;
++      if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
++              drm_bo_takedown_vm_locked(bo);
++              return -ENOMEM;
++      }
++
++      list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
++
++      return 0;
++}
++
++int drm_bo_version_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
++
++      arg->major = DRM_BO_INIT_MAJOR;
++      arg->minor = DRM_BO_INIT_MINOR;
++      arg->patchlevel = DRM_BO_INIT_PATCH;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo_lock.c git-nokia/drivers/gpu/drm-tungsten/drm_bo_lock.c
+--- git/drivers/gpu/drm-tungsten/drm_bo_lock.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo_lock.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,189 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++/*
++ * This file implements a simple replacement for the buffer manager use
++ * of the heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode is fast, and
++ * intended for in-kernel use only.
++ * Taking it in write mode is slow.
++ *
++ * The write mode is used only when there is a need to block all
++ * user-space processes from allocating a
++ * new memory area.
++ * Typical use in write mode is X server VT switching, and it's allowed
++ * to leave kernel space with the write lock held. If a user-space process
++ * dies while having the write-lock, it will be released during the file
++ * descriptor release.
++ *
++ * The read lock is typically placed at the start of an IOCTL- or
++ * user-space callable function that may end up allocating a memory area.
++ * This includes setstatus, super-ioctls and no_pfn; the latter may move
++ * unmappable regions to mappable. It's a bug to leave kernel space with the
++ * read lock held.
++ *
++ * Both read- and write lock taking may be interruptible for low signal-delivery
++ * latency. The locking functions will return -EAGAIN if interrupted by a
++ * signal.
++ *
++ * Locking order: The lock should be taken BEFORE any kernel mutexes
++ * or spinlocks.
++ */
++
++#include "drmP.h"
++
++void drm_bo_init_lock(struct drm_bo_lock *lock)
++{
++      DRM_INIT_WAITQUEUE(&lock->queue);
++      atomic_set(&lock->write_lock_pending, 0);
++      atomic_set(&lock->readers, 0);
++}
++
++void drm_bo_read_unlock(struct drm_bo_lock *lock)
++{
++      if (atomic_dec_and_test(&lock->readers))
++              wake_up_all(&lock->queue);
++}
++EXPORT_SYMBOL(drm_bo_read_unlock);
++
++int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
++{
++      while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
++              int ret;
++              
++              if (!interruptible) {
++                      wait_event(lock->queue,
++                                 atomic_read(&lock->write_lock_pending) == 0);
++                      continue;
++              }
++              ret = wait_event_interruptible
++                  (lock->queue, atomic_read(&lock->write_lock_pending) == 0);
++              if (ret)
++                      return -EAGAIN;
++      }
++
++      while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
++              int ret;
++              if (!interruptible) {
++                      wait_event(lock->queue,
++                                 atomic_read(&lock->readers) != -1);
++                      continue;
++              }
++              ret = wait_event_interruptible
++                      (lock->queue, atomic_read(&lock->readers) != -1);
++              if (ret)
++                      return -EAGAIN;
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_read_lock);
++
++static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
++{
++      if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
++              return -EINVAL;
++      wake_up_all(&lock->queue);
++      return 0;
++}
++
++static void drm_bo_write_lock_remove(struct drm_file *file_priv,
++                                   struct drm_user_object *item)
++{
++      struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
++      int ret;
++
++      ret = __drm_bo_write_unlock(lock);
++      BUG_ON(ret);
++}
++
++int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
++                    struct drm_file *file_priv)
++{
++      int ret = 0;
++      struct drm_device *dev;
++
++      atomic_inc(&lock->write_lock_pending);
++
++      while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
++              if (!interruptible) {
++                      wait_event(lock->queue,
++                                 atomic_read(&lock->readers) == 0);
++                      continue;
++              }
++              ret = wait_event_interruptible
++                  (lock->queue, atomic_read(&lock->readers) == 0);
++
++              if (ret) {
++                      atomic_dec(&lock->write_lock_pending);
++                      wake_up_all(&lock->queue);
++                      return -EAGAIN;
++              }
++      }
++
++      /*
++       * Add a dummy user-object, the destructor of which will
++       * make sure the lock is released if the client dies
++       * while holding it.
++       */
++
++      if (atomic_dec_and_test(&lock->write_lock_pending))
++              wake_up_all(&lock->queue);
++      dev = file_priv->minor->dev;
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_user_object(file_priv, &lock->base, 0);
++      lock->base.remove = &drm_bo_write_lock_remove;
++      lock->base.type = drm_lock_type;
++      if (ret)
++              (void)__drm_bo_write_unlock(lock);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_ref_object *ro;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (lock->base.owner != file_priv) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++      ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
++      BUG_ON(!ro);
++      drm_remove_ref_object(file_priv, ro);
++      lock->base.owner = NULL;
++
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bo_move.c git-nokia/drivers/gpu/drm-tungsten/drm_bo_move.c
+--- git/drivers/gpu/drm-tungsten/drm_bo_move.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bo_move.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,630 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++/**
++ * Free the old memory node unless it's a pinned region and we
++ * have not been requested to free also pinned regions.
++ */
++
++static void drm_bo_free_old_node(struct drm_buffer_object *bo)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++      if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
++              mutex_lock(&bo->dev->struct_mutex);
++              drm_mm_put_block(old_mem->mm_node);
++              mutex_unlock(&bo->dev->struct_mutex);
++      }
++      old_mem->mm_node = NULL;
++}
++
++int drm_bo_move_ttm(struct drm_buffer_object *bo,
++                  int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_ttm *ttm = bo->ttm;
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      uint64_t save_flags = old_mem->flags;
++      uint64_t save_proposed_flags = old_mem->proposed_flags;
++      int ret;
++
++      if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
++              if (evict)
++                      drm_ttm_evict(ttm);
++              else
++                      drm_ttm_unbind(ttm);
++
++              drm_bo_free_old_node(bo);
++              DRM_FLAG_MASKED(old_mem->flags,
++                              DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
++                              DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
++              old_mem->mem_type = DRM_BO_MEM_LOCAL;
++              save_flags = old_mem->flags;
++      }
++      if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
++              ret = drm_ttm_bind(ttm, new_mem);
++              if (ret)
++                      return ret;
++      }
++
++      *old_mem = *new_mem;
++      new_mem->mm_node = NULL;
++      old_mem->proposed_flags = save_proposed_flags;
++      DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_move_ttm);
++
++/**
++ * \c Return a kernel virtual address to the buffer object PCI memory.
++ *
++ * \param bo The buffer object.
++ * \return Failure indication.
++ *
++ * Returns -EINVAL if the buffer object is currently not mappable.
++ * Returns -ENOMEM if the ioremap operation failed.
++ * Otherwise returns zero.
++ *
++ * After a successfull call, bo->iomap contains the virtual address, or NULL
++ * if the buffer object content is not accessible through PCI space.
++ * Call bo->mutex locked.
++ */
++
++int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
++                      void **virtual)
++{
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long bus_base;
++      int ret;
++      void *addr;
++
++      *virtual = NULL;
++      ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
++      if (ret || bus_size == 0)
++              return ret;
++
++      if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
++              addr = (void *)(((u8 *) man->io_addr) + bus_offset);
++      else {
++              addr = ioremap_nocache(bus_base + bus_offset, bus_size);
++              if (!addr)
++                      return -ENOMEM;
++      }
++      *virtual = addr;
++      return 0;
++}
++EXPORT_SYMBOL(drm_mem_reg_ioremap);
++
++/**
++ * \c Unmap mapping obtained using drm_bo_ioremap
++ *
++ * \param bo The buffer object.
++ *
++ * Call bo->mutex locked.
++ */
++
++void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
++                       void *virtual)
++{
++      struct drm_buffer_manager *bm;
++      struct drm_mem_type_manager *man;
++
++      bm = &dev->bm;
++      man = &bm->man[mem->mem_type];
++
++      if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
++              iounmap(virtual);
++}
++
++static int drm_copy_io_page(void *dst, void *src, unsigned long page)
++{
++      uint32_t *dstP =
++          (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
++      uint32_t *srcP =
++          (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
++
++      int i;
++      for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
++              iowrite32(ioread32(srcP++), dstP++);
++      return 0;
++}
++
++static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
++                              unsigned long page)
++{
++      struct page *d = drm_ttm_get_page(ttm, page);
++      void *dst;
++
++      if (!d)
++              return -ENOMEM;
++
++      src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
++      dst = kmap(d);
++      if (!dst)
++              return -ENOMEM;
++
++      memcpy_fromio(dst, src, PAGE_SIZE);
++      kunmap(d);
++      return 0;
++}
++
++static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
++{
++      struct page *s = drm_ttm_get_page(ttm, page);
++      void *src;
++
++      if (!s)
++              return -ENOMEM;
++
++      dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
++      src = kmap(s);
++      if (!src)
++              return -ENOMEM;
++
++      memcpy_toio(dst, src, PAGE_SIZE);
++      kunmap(s);
++      return 0;
++}
++
++int drm_bo_move_memcpy(struct drm_buffer_object *bo,
++                     int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
++      struct drm_ttm *ttm = bo->ttm;
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      struct drm_bo_mem_reg old_copy = *old_mem;
++      void *old_iomap;
++      void *new_iomap;
++      int ret;
++      uint64_t save_flags = old_mem->flags;
++      uint64_t save_proposed_flags = old_mem->proposed_flags;
++      unsigned long i;
++      unsigned long page;
++      unsigned long add = 0;
++      int dir;
++
++      ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
++      if (ret)
++              return ret;
++      ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
++      if (ret)
++              goto out;
++
++      if (old_iomap == NULL && new_iomap == NULL)
++              goto out2;
++      if (old_iomap == NULL && ttm == NULL)
++              goto out2;
++
++      add = 0;
++      dir = 1;
++
++      if ((old_mem->mem_type == new_mem->mem_type) &&
++          (new_mem->mm_node->start <
++           old_mem->mm_node->start + old_mem->mm_node->size)) {
++              dir = -1;
++              add = new_mem->num_pages - 1;
++      }
++
++      for (i = 0; i < new_mem->num_pages; ++i) {
++              page = i * dir + add;
++              if (old_iomap == NULL)
++                      ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
++              else if (new_iomap == NULL)
++                      ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
++              else
++                      ret = drm_copy_io_page(new_iomap, old_iomap, page);
++              if (ret)
++                      goto out1;
++      }
++      mb();
++out2:
++      drm_bo_free_old_node(bo);
++
++      *old_mem = *new_mem;
++      new_mem->mm_node = NULL;
++      old_mem->proposed_flags = save_proposed_flags;
++      DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++
++      if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
++              drm_ttm_unbind(ttm);
++              drm_ttm_destroy(ttm);
++              bo->ttm = NULL;
++      }
++
++out1:
++      drm_mem_reg_iounmap(dev, new_mem, new_iomap);
++out:
++      drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
++      return ret;
++}
++EXPORT_SYMBOL(drm_bo_move_memcpy);
++
++/*
++ * Transfer a buffer object's memory and LRU status to a newly
++ * created object. User-space references remains with the old
++ * object. Call bo->mutex locked.
++ */
++
++int drm_buffer_object_transfer(struct drm_buffer_object *bo,
++                             struct drm_buffer_object **new_obj)
++{
++      struct drm_buffer_object *fbo;
++      struct drm_device *dev = bo->dev;
++      struct drm_buffer_manager *bm = &dev->bm;
++
++      fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
++      if (!fbo)
++              return -ENOMEM;
++
++      *fbo = *bo;
++      mutex_init(&fbo->mutex);
++      mutex_lock(&fbo->mutex);
++      mutex_lock(&dev->struct_mutex);
++
++      DRM_INIT_WAITQUEUE(&bo->event_queue);
++      INIT_LIST_HEAD(&fbo->ddestroy);
++      INIT_LIST_HEAD(&fbo->lru);
++      INIT_LIST_HEAD(&fbo->pinned_lru);
++#ifdef DRM_ODD_MM_COMPAT
++      INIT_LIST_HEAD(&fbo->vma_list);
++      INIT_LIST_HEAD(&fbo->p_mm_list);
++#endif
++
++      fbo->fence = drm_fence_reference_locked(bo->fence);
++      fbo->pinned_node = NULL;
++      fbo->mem.mm_node->private = (void *)fbo;
++      atomic_set(&fbo->usage, 1);
++      atomic_inc(&bm->count);
++      mutex_unlock(&dev->struct_mutex);
++      mutex_unlock(&fbo->mutex);
++
++      *new_obj = fbo;
++      return 0;
++}
++
++/*
++ * Since move is underway, we need to block signals in this function.
++ * We cannot restart until it has finished.
++ */
++
++int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
++                            int evict, int no_wait, uint32_t fence_class,
++                            uint32_t fence_type, uint32_t fence_flags,
++                            struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      int ret;
++      uint64_t save_flags = old_mem->flags;
++      uint64_t save_proposed_flags = old_mem->proposed_flags;
++      struct drm_buffer_object *old_obj;
++
++      if (bo->fence)
++              drm_fence_usage_deref_unlocked(&bo->fence);
++      ret = drm_fence_object_create(dev, fence_class, fence_type,
++                                    fence_flags | DRM_FENCE_FLAG_EMIT,
++                                    &bo->fence);
++      bo->fence_type = fence_type;
++      if (ret)
++              return ret;
++
++#ifdef DRM_ODD_MM_COMPAT
++      /*
++       * In this mode, we don't allow pipelining a copy blit,
++       * since the buffer will be accessible from user space
++       * the moment we return and rebuild the page tables.
++       *
++       * With normal vm operation, page tables are rebuilt
++       * on demand using fault(), which waits for buffer idle.
++       */
++      if (1)
++#else
++      if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
++                    bo->mem.mm_node != NULL))
++#endif
++      {
++              if (bo->fence) {
++                      (void) drm_fence_object_wait(bo->fence, 0, 1,
++                                                  bo->fence_type);
++                      drm_fence_usage_deref_unlocked(&bo->fence);
++              }
++              drm_bo_free_old_node(bo);
++
++              if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
++                      drm_ttm_unbind(bo->ttm);
++                      drm_ttm_destroy(bo->ttm);
++                      bo->ttm = NULL;
++              }
++      } else {
++
++              /* This should help pipeline ordinary buffer moves.
++               *
++               * Hang old buffer memory on a new buffer object,
++               * and leave it to be released when the GPU
++               * operation has completed.
++               */
++
++              ret = drm_buffer_object_transfer(bo, &old_obj);
++
++              if (ret)
++                      return ret;
++
++              if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
++                      old_obj->ttm = NULL;
++              else
++                      bo->ttm = NULL;
++
++              mutex_lock(&dev->struct_mutex);
++              list_del_init(&old_obj->lru);
++              DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
++              drm_bo_add_to_lru(old_obj);
++
++              drm_bo_usage_deref_locked(&old_obj);
++              mutex_unlock(&dev->struct_mutex);
++
++      }
++
++      *old_mem = *new_mem;
++      new_mem->mm_node = NULL;
++      old_mem->proposed_flags = save_proposed_flags;
++      DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
++
++int drm_bo_same_page(unsigned long offset,
++                   unsigned long offset2)
++{
++      return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
++}
++EXPORT_SYMBOL(drm_bo_same_page);
++
++unsigned long drm_bo_offset_end(unsigned long offset,
++                              unsigned long end)
++{
++      offset = (offset + PAGE_SIZE) & PAGE_MASK;
++      return (end < offset) ? end : offset;
++}
++EXPORT_SYMBOL(drm_bo_offset_end);
++
++static pgprot_t drm_kernel_io_prot(uint32_t map_type)
++{
++      pgprot_t tmp = PAGE_KERNEL;
++
++#if defined(__i386__) || defined(__x86_64__)
++#ifdef USE_PAT_WC
++#warning using pat
++      if (drm_use_pat() && map_type == _DRM_TTM) {
++              pgprot_val(tmp) |= _PAGE_PAT;
++              return tmp;
++      }
++#endif
++      if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
++              pgprot_val(tmp) |= _PAGE_PCD;
++              pgprot_val(tmp) &= ~_PAGE_PWT;
++      }
++#elif defined(__powerpc__)
++      pgprot_val(tmp) |= _PAGE_NO_CACHE;
++      if (map_type == _DRM_REGISTERS)
++              pgprot_val(tmp) |= _PAGE_GUARDED;
++#endif
++#if defined(__ia64__)
++      if (map_type == _DRM_TTM)
++              tmp = pgprot_writecombine(tmp);
++      else
++              tmp = pgprot_noncached(tmp);
++#endif
++      return tmp;
++}
++
++static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
++                        unsigned long bus_offset, unsigned long bus_size,
++                        struct drm_bo_kmap_obj *map)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg *mem = &bo->mem;
++      struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++
++      if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
++              map->bo_kmap_type = bo_map_premapped;
++              map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
++      } else {
++              map->bo_kmap_type = bo_map_iomap;
++              map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
++      }
++      return (!map->virtual) ? -ENOMEM : 0;
++}
++
++static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
++                         unsigned long start_page, unsigned long num_pages,
++                         struct drm_bo_kmap_obj *map)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg *mem = &bo->mem;
++      struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++      pgprot_t prot;
++      struct drm_ttm *ttm = bo->ttm;
++      struct page *d;
++      int i;
++
++      BUG_ON(!ttm);
++
++      if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
++
++              /*
++               * We're mapping a single page, and the desired
++               * page protection is consistent with the bo.
++               */
++
++              map->bo_kmap_type = bo_map_kmap;
++              map->page = drm_ttm_get_page(ttm, start_page);
++              map->virtual = kmap(map->page);
++      } else {
++              /*
++               * Populate the part we're mapping;
++               */
++
++              for (i = start_page; i < start_page + num_pages; ++i) {
++                      d = drm_ttm_get_page(ttm, i);
++                      if (!d)
++                              return -ENOMEM;
++              }
++
++              /*
++               * We need to use vmap to get the desired page protection
++               * or to make the buffer object look contigous.
++               */
++
++              prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
++                      PAGE_KERNEL :
++                      drm_kernel_io_prot(man->drm_bus_maptype);
++              map->bo_kmap_type = bo_map_vmap;
++              map->virtual = vmap(ttm->pages + start_page,
++                                  num_pages, 0, prot);
++      }
++      return (!map->virtual) ? -ENOMEM : 0;
++}
++
++/*
++ * This function is to be used for kernel mapping of buffer objects.
++ * It chooses the appropriate mapping method depending on the memory type
++ * and caching policy the buffer currently has.
++ * Mapping multiple pages or buffers that live in io memory is a bit slow and
++ * consumes vmalloc space. Be restrictive with such mappings.
++ * Mapping single pages usually returns the logical kernel address,
++ * (which is fast)
++ * BUG may use slower temporary mappings for high memory pages or
++ * uncached / write-combined pages.
++ *
++ * The function fills in a drm_bo_kmap_obj which can be used to return the
++ * kernel virtual address of the buffer.
++ *
++ * Code servicing a non-priviliged user request is only allowed to map one
++ * page at a time. We might need to implement a better scheme to stop such
++ * processes from consuming all vmalloc space.
++ */
++
++int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
++              unsigned long num_pages, struct drm_bo_kmap_obj *map)
++{
++      int ret;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++
++      map->virtual = NULL;
++
++      if (num_pages > bo->num_pages)
++              return -EINVAL;
++      if (start_page > bo->num_pages)
++              return -EINVAL;
++#if 0
++      if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
++              return -EPERM;
++#endif
++      ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
++                              &bus_offset, &bus_size);
++
++      if (ret)
++              return ret;
++
++      if (bus_size == 0) {
++              return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
++      } else {
++              bus_offset += start_page << PAGE_SHIFT;
++              bus_size = num_pages << PAGE_SHIFT;
++              return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
++      }
++}
++EXPORT_SYMBOL(drm_bo_kmap);
++
++void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
++{
++      if (!map->virtual)
++              return;
++
++      switch (map->bo_kmap_type) {
++      case bo_map_iomap:
++              iounmap(map->virtual);
++              break;
++      case bo_map_vmap:
++              vunmap(map->virtual);
++              break;
++      case bo_map_kmap:
++              kunmap(map->page);
++              break;
++      case bo_map_premapped:
++              break;
++      default:
++              BUG();
++      }
++      map->virtual = NULL;
++      map->page = NULL;
++}
++EXPORT_SYMBOL(drm_bo_kunmap);
++
++int drm_bo_pfn_prot(struct drm_buffer_object *bo,
++                  unsigned long dst_offset,
++                  unsigned long *pfn,
++                  pgprot_t *prot)
++{
++      struct drm_bo_mem_reg *mem = &bo->mem;
++      struct drm_device *dev = bo->dev;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long bus_base;
++      struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
++      int ret;
++
++      ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
++                              &bus_size);
++      if (ret)
++              return -EINVAL;
++
++      if (bus_size != 0)
++              *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
++      else if (!bo->ttm)
++              return -EINVAL;
++      else
++              *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
++
++      *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
++              PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_bo_pfn_prot);
++
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_bufs.c git-nokia/drivers/gpu/drm-tungsten/drm_bufs.c
+--- git/drivers/gpu/drm-tungsten/drm_bufs.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_bufs.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1608 @@
++/**
++ * \file drm_bufs.c
++ * Generic buffer template
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
++{
++      return pci_resource_start(dev->pdev, resource);
++}
++EXPORT_SYMBOL(drm_get_resource_start);
++
++unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
++{
++      return pci_resource_len(dev->pdev, resource);
++}
++EXPORT_SYMBOL(drm_get_resource_len);
++
++struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map)
++{
++      struct drm_map_list *entry;
++      list_for_each_entry(entry, &dev->maplist, head) {
++              if (entry->map && map->type == entry->map->type &&
++                  ((entry->map->offset == map->offset) ||
++                   (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
++                      return entry;
++              }
++      }
++
++      return NULL;
++}
++EXPORT_SYMBOL(drm_find_matching_map);
++
++static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
++                        unsigned long user_token, int hashed_handle)
++{
++      int use_hashed_handle;
++
++#if (BITS_PER_LONG == 64)
++      use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
++#elif (BITS_PER_LONG == 32)
++      use_hashed_handle = hashed_handle;
++#else
++#error Unsupported long size. Neither 64 nor 32 bits.
++#endif
++
++      if (!use_hashed_handle) {
++              int ret;
++              hash->key = user_token >> PAGE_SHIFT;
++              ret = drm_ht_insert_item(&dev->map_hash, hash);
++              if (ret != -EINVAL)
++                      return ret;
++      }
++      return drm_ht_just_insert_please(&dev->map_hash, hash,
++                                       user_token, 32 - PAGE_SHIFT - 3,
++                                       0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
++}
++
++/**
++ * Ioctl to specify a range of memory that is available for mapping by a non-root process.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_map structure.
++ * \return zero on success or a negative value on error.
++ *
++ * Adjusts the memory offset to its absolute value according to the mapping
++ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
++ * applicable and if supported by the kernel.
++ */
++static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
++                         unsigned int size, enum drm_map_type type,
++                         enum drm_map_flags flags,
++                         struct drm_map_list **maplist)
++{
++      struct drm_map *map;
++      struct drm_map_list *list;
++      drm_dma_handle_t *dmah;
++      unsigned long user_token;
++      int ret;
++
++      map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
++      if (!map)
++              return -ENOMEM;
++
++      map->offset = offset;
++      map->size = size;
++      map->flags = flags;
++      map->type = type;
++
++      /* Only allow shared memory to be removable since we only keep enough
++       * book keeping information about shared memory to allow for removal
++       * when processes fork.
++       */
++      if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++      DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
++                map->offset, map->size, map->type);
++      if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++      map->mtrr = -1;
++      map->handle = NULL;
++
++      switch (map->type) {
++      case _DRM_REGISTERS:
++      case _DRM_FRAME_BUFFER:
++#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
++              if (map->offset + (map->size - 1) < map->offset ||
++                  map->offset < virt_to_phys(high_memory)) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EINVAL;
++              }
++#endif
++#ifdef __alpha__
++              map->offset += dev->hose->mem_space->start;
++#endif
++              /* Some drivers preinitialize some maps, without the X Server
++               * needing to be aware of it.  Therefore, we just return success
++               * when the server tries to create a duplicate map.
++               */
++              list = drm_find_matching_map(dev, map);
++              if (list != NULL) {
++                      if (list->map->size != map->size) {
++                              DRM_DEBUG("Matching maps of type %d with "
++                                        "mismatched sizes, (%ld vs %ld)\n",
++                                        map->type, map->size,
++                                        list->map->size);
++                              list->map->size = map->size;
++                      }
++
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      *maplist = list;
++                      return 0;
++              }
++
++              if (drm_core_has_MTRR(dev)) {
++                      if (map->type == _DRM_FRAME_BUFFER ||
++                          (map->flags & _DRM_WRITE_COMBINING)) {
++                              map->mtrr = mtrr_add(map->offset, map->size,
++                                                   MTRR_TYPE_WRCOMB, 1);
++                      }
++              }
++              if (map->type == _DRM_REGISTERS) {
++                      map->handle = ioremap(map->offset, map->size);
++                      if (!map->handle) {
++                              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                              return -ENOMEM;
++                      }
++              }
++              break;
++      case _DRM_SHM:
++              list = drm_find_matching_map(dev, map);
++              if (list != NULL) {
++                      if(list->map->size != map->size) {
++                              DRM_DEBUG("Matching maps of type %d with "
++                                 "mismatched sizes, (%ld vs %ld)\n",
++                                  map->type, map->size, list->map->size);
++                              list->map->size = map->size;
++                      }
++
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      *maplist = list;
++                      return 0;
++              }
++              map->handle = vmalloc_user(map->size);
++              DRM_DEBUG("%lu %d %p\n",
++                        map->size, drm_order(map->size), map->handle);
++              if (!map->handle) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -ENOMEM;
++              }
++              map->offset = (unsigned long)map->handle;
++              if (map->flags & _DRM_CONTAINS_LOCK) {
++                      /* Prevent a 2nd X Server from creating a 2nd lock */
++                      if (dev->lock.hw_lock != NULL) {
++                              vfree(map->handle);
++                              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                              return -EBUSY;
++                      }
++                      dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
++              }
++              break;
++      case _DRM_AGP: {
++              struct drm_agp_mem *entry;
++              int valid = 0;
++
++              if (!drm_core_has_AGP(dev)) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EINVAL;
++              }
++#ifdef __alpha__
++              map->offset += dev->hose->mem_space->start;
++#endif
++              /* In some cases (i810 driver), user space may have already
++               * added the AGP base itself, because dev->agp->base previously
++               * only got set during AGP enable.  So, only add the base
++               * address if the map's offset isn't already within the
++               * aperture.
++               */
++              if (map->offset < dev->agp->base ||
++                  map->offset > dev->agp->base +
++                  dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
++                      map->offset += dev->agp->base;
++              }
++              map->mtrr = dev->agp->agp_mtrr; /* for getmap */
++
++              /* This assumes the DRM is in total control of AGP space.
++               * It's not always the case as AGP can be in the control
++               * of user space (i.e. i810 driver). So this loop will get
++               * skipped and we double check that dev->agp->memory is
++               * actually set as well as being invalid before EPERM'ing
++               */
++              list_for_each_entry(entry, &dev->agp->memory, head) {
++                      if ((map->offset >= entry->bound) &&
++                          (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
++                              valid = 1;
++                              break;
++                      }
++              }
++              if (!list_empty(&dev->agp->memory) && !valid) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EPERM;
++              }
++              DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
++              break;
++      }
++      case _DRM_SCATTER_GATHER:
++              if (!dev->sg) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -EINVAL;
++              }
++              map->offset += (unsigned long)dev->sg->virtual;
++              break;
++      case _DRM_CONSISTENT:
++              /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
++               * As we're limiting the address to 2^32-1 (or less),
++               * casting it down to 32 bits is no problem, but we
++               * need to point to a 64bit variable first. */
++              dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
++              if (!dmah) {
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++                      return -ENOMEM;
++              }
++              map->handle = dmah->vaddr;
++              map->offset = (unsigned long)dmah->busaddr;
++              kfree(dmah);
++              break;
++      default:
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++
++      list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
++      if (!list) {
++              if (map->type == _DRM_REGISTERS)
++                      iounmap(map->handle);
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              return -EINVAL;
++      }
++      memset(list, 0, sizeof(*list));
++      list->map = map;
++
++      mutex_lock(&dev->struct_mutex);
++      list_add(&list->head, &dev->maplist);
++
++      /* Assign a 32-bit handle */
++
++      user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
++              map->offset;
++      ret = drm_map_handle(dev, &list->hash, user_token, 0);
++
++      if (ret) {
++              if (map->type == _DRM_REGISTERS)
++                      iounmap(map->handle);
++              drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              drm_free(list, sizeof(*list), DRM_MEM_MAPS);
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      list->user_token = list->hash.key << PAGE_SHIFT;
++      mutex_unlock(&dev->struct_mutex);
++
++      *maplist = list;
++      return 0;
++}
++
++int drm_addmap(struct drm_device *dev, unsigned int offset,
++             unsigned int size, enum drm_map_type type,
++             enum drm_map_flags flags, drm_local_map_t ** map_ptr)
++{
++      struct drm_map_list *list;
++      int rc;
++
++      rc = drm_addmap_core(dev, offset, size, type, flags, &list);
++      if (!rc)
++              *map_ptr = list->map;
++      return rc;
++}
++
++EXPORT_SYMBOL(drm_addmap);
++
++int drm_addmap_ioctl(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_map *map = data;
++      struct drm_map_list *maplist;
++      int err;
++
++      if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
++              return -EPERM;
++
++      err = drm_addmap_core(dev, map->offset, map->size, map->type,
++                            map->flags, &maplist);
++
++      if (err)
++              return err;
++
++      /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
++      map->handle = (void *)(unsigned long)maplist->user_token;
++      return 0;
++}
++
++/**
++ * Remove a map private from list and deallocate resources if the mapping
++ * isn't in use.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a struct drm_map structure.
++ * \return zero on success or a negative value on error.
++ *
++ * Searches the map on drm_device::maplist, removes it from the list, see if
++ * its being used, and free any associate resource (such as MTRR's) if it's not
++ * being on use.
++ *
++ * \sa drm_addmap
++ */
++int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
++{
++      struct drm_map_list *r_list = NULL, *list_t;
++      drm_dma_handle_t dmah;
++      int found = 0;
++
++      /* Find the list entry for the map and remove it */
++      list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
++              if (r_list->map == map) {
++                      list_del(&r_list->head);
++                      drm_ht_remove_key(&dev->map_hash,
++                                        r_list->user_token >> PAGE_SHIFT);
++                      drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
++                      found = 1;
++                      break;
++              }
++      }
++
++      if (!found)
++              return -EINVAL;
++
++      /* List has wrapped around to the head pointer, or it's empty and we
++       * didn't find anything.
++       */
++
++      switch (map->type) {
++      case _DRM_REGISTERS:
++              iounmap(map->handle);
++              /* FALLTHROUGH */
++      case _DRM_FRAME_BUFFER:
++              if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
++                      int retcode;
++                      retcode = mtrr_del(map->mtrr, map->offset, map->size);
++                      DRM_DEBUG("mtrr_del=%d\n", retcode);
++              }
++              break;
++      case _DRM_SHM:
++              vfree(map->handle);
++              break;
++      case _DRM_AGP:
++      case _DRM_SCATTER_GATHER:
++              break;
++      case _DRM_CONSISTENT:
++              dmah.vaddr = map->handle;
++              dmah.busaddr = map->offset;
++              dmah.size = map->size;
++              __drm_pci_free(dev, &dmah);
++              break;
++      case _DRM_TTM:
++              BUG_ON(1);
++      }
++      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_rmmap_locked);
++
++int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
++{
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_rmmap_locked(dev, map);
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_rmmap);
++
++/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
++ * the last close of the device, and this is necessary for cleanup when things
++ * exit uncleanly.  Therefore, having userland manually remove mappings seems
++ * like a pointless exercise since they're going away anyway.
++ *
++ * One use case might be after addmap is allowed for normal users for SHM and
++ * gets used by drivers that the server doesn't need to care about.  This seems
++ * unlikely.
++ */
++int drm_rmmap_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_map *request = data;
++      drm_local_map_t *map = NULL;
++      struct drm_map_list *r_list;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              if (r_list->map &&
++                  r_list->user_token == (unsigned long)request->handle &&
++                  r_list->map->flags & _DRM_REMOVABLE) {
++                      map = r_list->map;
++                      break;
++              }
++      }
++
++      /* List has wrapped around to the head pointer, or its empty we didn't
++       * find anything.
++       */
++      if (list_empty(&dev->maplist) || !map) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      /* Register and framebuffer maps are permanent */
++      if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
++              mutex_unlock(&dev->struct_mutex);
++              return 0;
++      }
++
++      ret = drm_rmmap_locked(dev, map);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * Cleanup after an error on one of the addbufs() functions.
++ *
++ * \param dev DRM device.
++ * \param entry buffer entry where the error occurred.
++ *
++ * Frees any pages and buffers associated with the given entry.
++ */
++static void drm_cleanup_buf_error(struct drm_device *dev,
++                                struct drm_buf_entry *entry)
++{
++      int i;
++
++      if (entry->seg_count) {
++              for (i = 0; i < entry->seg_count; i++) {
++                      if (entry->seglist[i]) {
++                              drm_pci_free(dev, entry->seglist[i]);
++                      }
++              }
++              drm_free(entry->seglist,
++                       entry->seg_count *
++                       sizeof(*entry->seglist), DRM_MEM_SEGS);
++
++              entry->seg_count = 0;
++      }
++
++      if (entry->buf_count) {
++              for (i = 0; i < entry->buf_count; i++) {
++                      if (entry->buflist[i].dev_private) {
++                              drm_free(entry->buflist[i].dev_private,
++                                       entry->buflist[i].dev_priv_size,
++                                       DRM_MEM_BUFS);
++                      }
++              }
++              drm_free(entry->buflist,
++                       entry->buf_count *
++                       sizeof(*entry->buflist), DRM_MEM_BUFS);
++
++              entry->buf_count = 0;
++      }
++}
++
++#if __OS_HAS_AGP
++/**
++ * Add AGP buffers for DMA transfers.
++ *
++ * \param dev struct drm_device to which the buffers are to be added.
++ * \param request pointer to a struct drm_buf_desc describing the request.
++ * \return zero on success or a negative number on failure.
++ *
++ * After some sanity checks creates a drm_buf structure for each buffer and
++ * reallocates the buffer list of the same size order to accommodate the new
++ * buffers.
++ */
++int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_entry *entry;
++      struct drm_agp_mem *agp_entry;
++      struct drm_buf *buf;
++      unsigned long offset;
++      unsigned long agp_offset;
++      int count;
++      int order;
++      int size;
++      int alignment;
++      int page_order;
++      int total;
++      int byte_count;
++      int i, valid;
++      struct drm_buf **temp_buflist;
++
++      if (!dma)
++              return -EINVAL;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      byte_count = 0;
++      agp_offset = dev->agp->base + request->agp_start;
++
++      DRM_DEBUG("count:      %d\n", count);
++      DRM_DEBUG("order:      %d\n", order);
++      DRM_DEBUG("size:       %d\n", size);
++      DRM_DEBUG("agp_offset: %lx\n", agp_offset);
++      DRM_DEBUG("alignment:  %d\n", alignment);
++      DRM_DEBUG("page_order: %d\n", page_order);
++      DRM_DEBUG("total:      %d\n", total);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      /* Make sure buffers are located in AGP memory that we own */
++      valid = 0;
++      list_for_each_entry(agp_entry, &dev->agp->memory, head) {
++              if ((agp_offset >= agp_entry->bound) &&
++                  (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
++                      valid = 1;
++                      break;
++              }
++      }
++      if (!list_empty(&dev->agp->memory) && !valid) {
++              DRM_DEBUG("zone invalid\n");
++              return -EINVAL;
++      }
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++
++      offset = 0;
++
++      while (entry->buf_count < count) {
++              buf = &entry->buflist[entry->buf_count];
++              buf->idx = dma->buf_count + entry->buf_count;
++              buf->total = alignment;
++              buf->order = order;
++              buf->used = 0;
++
++              buf->offset = (dma->byte_count + offset);
++              buf->bus_address = agp_offset + offset;
++              buf->address = (void *)(agp_offset + offset);
++              buf->next = NULL;
++              buf->waiting = 0;
++              buf->pending = 0;
++              init_waitqueue_head(&buf->dma_wait);
++              buf->file_priv = NULL;
++
++              buf->dev_priv_size = dev->driver->dev_priv_size;
++              buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
++              if (!buf->dev_private) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++              memset(buf->dev_private, 0, buf->dev_priv_size);
++
++              DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++              offset += alignment;
++              entry->buf_count++;
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      DRM_DEBUG("byte_count: %d\n", byte_count);
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += byte_count >> PAGE_SHIFT;
++      dma->byte_count += byte_count;
++
++      DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++      DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      dma->flags = _DRM_DMA_USE_AGP;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++}
++EXPORT_SYMBOL(drm_addbufs_agp);
++#endif                                /* __OS_HAS_AGP */
++
++int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int count;
++      int order;
++      int size;
++      int total;
++      int page_order;
++      struct drm_buf_entry *entry;
++      drm_dma_handle_t *dmah;
++      struct drm_buf *buf;
++      int alignment;
++      unsigned long offset;
++      int i;
++      int byte_count;
++      int page_count;
++      unsigned long *temp_pagelist;
++      struct drm_buf **temp_buflist;
++
++      if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
++                request->count, request->size, size, order, dev->queue_count);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
++                                 DRM_MEM_SEGS);
++      if (!entry->seglist) {
++              drm_free(entry->buflist,
++                       count * sizeof(*entry->buflist), DRM_MEM_BUFS);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->seglist, 0, count * sizeof(*entry->seglist));
++
++      /* Keep the original pagelist until we know all the allocations
++       * have succeeded
++       */
++      temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
++                                * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++      if (!temp_pagelist) {
++              drm_free(entry->buflist,
++                       count * sizeof(*entry->buflist), DRM_MEM_BUFS);
++              drm_free(entry->seglist,
++                       count * sizeof(*entry->seglist), DRM_MEM_SEGS);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memcpy(temp_pagelist,
++             dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
++      DRM_DEBUG("pagelist: %d entries\n",
++                dma->page_count + (count << page_order));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++      byte_count = 0;
++      page_count = 0;
++
++      while (entry->buf_count < count) {
++
++              dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
++
++              if (!dmah) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      entry->seg_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      drm_free(temp_pagelist,
++                               (dma->page_count + (count << page_order))
++                               * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++              entry->seglist[entry->seg_count++] = dmah;
++              for (i = 0; i < (1 << page_order); i++) {
++                      DRM_DEBUG("page %d @ 0x%08lx\n",
++                                dma->page_count + page_count,
++                                (unsigned long)dmah->vaddr + PAGE_SIZE * i);
++                      temp_pagelist[dma->page_count + page_count++]
++                              = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
++              }
++              for (offset = 0;
++                   offset + size <= total && entry->buf_count < count;
++                   offset += alignment, ++entry->buf_count) {
++                      buf = &entry->buflist[entry->buf_count];
++                      buf->idx = dma->buf_count + entry->buf_count;
++                      buf->total = alignment;
++                      buf->order = order;
++                      buf->used = 0;
++                      buf->offset = (dma->byte_count + byte_count + offset);
++                      buf->address = (void *)(dmah->vaddr + offset);
++                      buf->bus_address = dmah->busaddr + offset;
++                      buf->next = NULL;
++                      buf->waiting = 0;
++                      buf->pending = 0;
++                      init_waitqueue_head(&buf->dma_wait);
++                      buf->file_priv = NULL;
++
++                      buf->dev_priv_size = dev->driver->dev_priv_size;
++                      buf->dev_private = drm_alloc(buf->dev_priv_size,
++                                                   DRM_MEM_BUFS);
++                      if (!buf->dev_private) {
++                              /* Set count correctly so we free the proper amount. */
++                              entry->buf_count = count;
++                              entry->seg_count = count;
++                              drm_cleanup_buf_error(dev, entry);
++                              drm_free(temp_pagelist,
++                                       (dma->page_count +
++                                        (count << page_order))
++                                       * sizeof(*dma->pagelist),
++                                       DRM_MEM_PAGES);
++                              mutex_unlock(&dev->struct_mutex);
++                              atomic_dec(&dev->buf_alloc);
++                              return -ENOMEM;
++                      }
++                      memset(buf->dev_private, 0, buf->dev_priv_size);
++
++                      DRM_DEBUG("buffer %d @ %p\n",
++                                entry->buf_count, buf->address);
++              }
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              drm_free(temp_pagelist,
++                       (dma->page_count + (count << page_order))
++                       * sizeof(*dma->pagelist), DRM_MEM_PAGES);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      /* No allocations failed, so now we can replace the orginal pagelist
++       * with the new one.
++       */
++      if (dma->page_count) {
++              drm_free(dma->pagelist,
++                       dma->page_count * sizeof(*dma->pagelist),
++                       DRM_MEM_PAGES);
++      }
++      dma->pagelist = temp_pagelist;
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += entry->seg_count << page_order;
++      dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      if (request->flags & _DRM_PCI_BUFFER_RO)
++              dma->flags = _DRM_DMA_USE_PCI_RO;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++
++}
++EXPORT_SYMBOL(drm_addbufs_pci);
++
++static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_entry *entry;
++      struct drm_buf *buf;
++      unsigned long offset;
++      unsigned long agp_offset;
++      int count;
++      int order;
++      int size;
++      int alignment;
++      int page_order;
++      int total;
++      int byte_count;
++      int i;
++      struct drm_buf **temp_buflist;
++
++      if (!drm_core_check_feature(dev, DRIVER_SG))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      byte_count = 0;
++      agp_offset = request->agp_start;
++
++      DRM_DEBUG("count:      %d\n", count);
++      DRM_DEBUG("order:      %d\n", order);
++      DRM_DEBUG("size:       %d\n", size);
++      DRM_DEBUG("agp_offset: %lu\n", agp_offset);
++      DRM_DEBUG("alignment:  %d\n", alignment);
++      DRM_DEBUG("page_order: %d\n", page_order);
++      DRM_DEBUG("total:      %d\n", total);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++
++      offset = 0;
++
++      while (entry->buf_count < count) {
++              buf = &entry->buflist[entry->buf_count];
++              buf->idx = dma->buf_count + entry->buf_count;
++              buf->total = alignment;
++              buf->order = order;
++              buf->used = 0;
++
++              buf->offset = (dma->byte_count + offset);
++              buf->bus_address = agp_offset + offset;
++              buf->address = (void *)(agp_offset + offset
++                                      + (unsigned long)dev->sg->virtual);
++              buf->next = NULL;
++              buf->waiting = 0;
++              buf->pending = 0;
++              init_waitqueue_head(&buf->dma_wait);
++              buf->file_priv = NULL;
++
++              buf->dev_priv_size = dev->driver->dev_priv_size;
++              buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
++              if (!buf->dev_private) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++
++              memset(buf->dev_private, 0, buf->dev_priv_size);
++
++              DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++              offset += alignment;
++              entry->buf_count++;
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      DRM_DEBUG("byte_count: %d\n", byte_count);
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += byte_count >> PAGE_SHIFT;
++      dma->byte_count += byte_count;
++
++      DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++      DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      dma->flags = _DRM_DMA_USE_SG;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++}
++
++int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_entry *entry;
++      struct drm_buf *buf;
++      unsigned long offset;
++      unsigned long agp_offset;
++      int count;
++      int order;
++      int size;
++      int alignment;
++      int page_order;
++      int total;
++      int byte_count;
++      int i;
++      struct drm_buf **temp_buflist;
++
++      if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN))
++              return -EPERM;
++
++      count = request->count;
++      order = drm_order(request->size);
++      size = 1 << order;
++
++      alignment = (request->flags & _DRM_PAGE_ALIGN)
++          ? PAGE_ALIGN(size) : size;
++      page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
++      total = PAGE_SIZE << page_order;
++
++      byte_count = 0;
++      agp_offset = request->agp_start;
++
++      DRM_DEBUG("count:      %d\n", count);
++      DRM_DEBUG("order:      %d\n", order);
++      DRM_DEBUG("size:       %d\n", size);
++      DRM_DEBUG("agp_offset: %lu\n", agp_offset);
++      DRM_DEBUG("alignment:  %d\n", alignment);
++      DRM_DEBUG("page_order: %d\n", page_order);
++      DRM_DEBUG("total:      %d\n", total);
++
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      if (dev->queue_count)
++              return -EBUSY;  /* Not while in use */
++
++      spin_lock(&dev->count_lock);
++      if (dev->buf_use) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      atomic_inc(&dev->buf_alloc);
++      spin_unlock(&dev->count_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      entry = &dma->bufs[order];
++      if (entry->buf_count) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM; /* May only call once for each order */
++      }
++
++      if (count < 0 || count > 4096) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -EINVAL;
++      }
++
++      entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
++                                 DRM_MEM_BUFS);
++      if (!entry->buflist) {
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      memset(entry->buflist, 0, count * sizeof(*entry->buflist));
++
++      entry->buf_size = size;
++      entry->page_order = page_order;
++
++      offset = 0;
++
++      while (entry->buf_count < count) {
++              buf = &entry->buflist[entry->buf_count];
++              buf->idx = dma->buf_count + entry->buf_count;
++              buf->total = alignment;
++              buf->order = order;
++              buf->used = 0;
++
++              buf->offset = (dma->byte_count + offset);
++              buf->bus_address = agp_offset + offset;
++              buf->address = (void *)(agp_offset + offset);
++              buf->next = NULL;
++              buf->waiting = 0;
++              buf->pending = 0;
++              init_waitqueue_head(&buf->dma_wait);
++              buf->file_priv = NULL;
++
++              buf->dev_priv_size = dev->driver->dev_priv_size;
++              buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
++              if (!buf->dev_private) {
++                      /* Set count correctly so we free the proper amount. */
++                      entry->buf_count = count;
++                      drm_cleanup_buf_error(dev, entry);
++                      mutex_unlock(&dev->struct_mutex);
++                      atomic_dec(&dev->buf_alloc);
++                      return -ENOMEM;
++              }
++              memset(buf->dev_private, 0, buf->dev_priv_size);
++
++              DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
++
++              offset += alignment;
++              entry->buf_count++;
++              byte_count += PAGE_SIZE << page_order;
++      }
++
++      DRM_DEBUG("byte_count: %d\n", byte_count);
++
++      temp_buflist = drm_realloc(dma->buflist,
++                                 dma->buf_count * sizeof(*dma->buflist),
++                                 (dma->buf_count + entry->buf_count)
++                                 * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      if (!temp_buflist) {
++              /* Free the entry because it isn't valid */
++              drm_cleanup_buf_error(dev, entry);
++              mutex_unlock(&dev->struct_mutex);
++              atomic_dec(&dev->buf_alloc);
++              return -ENOMEM;
++      }
++      dma->buflist = temp_buflist;
++
++      for (i = 0; i < entry->buf_count; i++) {
++              dma->buflist[i + dma->buf_count] = &entry->buflist[i];
++      }
++
++      dma->buf_count += entry->buf_count;
++      dma->seg_count += entry->seg_count;
++      dma->page_count += byte_count >> PAGE_SHIFT;
++      dma->byte_count += byte_count;
++
++      DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
++      DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->count = entry->buf_count;
++      request->size = size;
++
++      dma->flags = _DRM_DMA_USE_FB;
++
++      atomic_dec(&dev->buf_alloc);
++      return 0;
++}
++EXPORT_SYMBOL(drm_addbufs_fb);
++
++
++/**
++ * Add buffers for DMA transfers (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a struct drm_buf_desc request.
++ * \return zero on success or a negative number on failure.
++ *
++ * According with the memory type specified in drm_buf_desc::flags and the
++ * build options, it dispatches the call either to addbufs_agp(),
++ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
++ * PCI memory respectively.
++ */
++int drm_addbufs(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_buf_desc *request = data;
++      int ret;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++#if __OS_HAS_AGP
++      if (request->flags & _DRM_AGP_BUFFER)
++              ret = drm_addbufs_agp(dev, request);
++      else
++#endif
++      if (request->flags & _DRM_SG_BUFFER)
++              ret = drm_addbufs_sg(dev, request);
++      else if (request->flags & _DRM_FB_BUFFER)
++              ret = drm_addbufs_fb(dev, request);
++      else
++              ret = drm_addbufs_pci(dev, request);
++
++      return ret;
++}
++
++/**
++ * Get information about the buffer mappings.
++ *
++ * This was originally meant for debugging purposes, or by a sophisticated
++ * client library to determine how best to use the available buffers (e.g.,
++ * large buffers can be used for image transfer).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_info structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Increments drm_device::buf_use while holding the drm_device::count_lock
++ * lock, preventing of allocating more buffers after this call. Information
++ * about each requested buffer is then copied into user space.
++ */
++int drm_infobufs(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_info *request = data;
++      int i;
++      int count;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      spin_lock(&dev->count_lock);
++      if (atomic_read(&dev->buf_alloc)) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      ++dev->buf_use;         /* Can't allocate more after this call */
++      spin_unlock(&dev->count_lock);
++
++      for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
++              if (dma->bufs[i].buf_count)
++                      ++count;
++      }
++
++      DRM_DEBUG("count = %d\n", count);
++
++      if (request->count >= count) {
++              for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
++                      if (dma->bufs[i].buf_count) {
++                              struct drm_buf_desc __user *to =
++                                  &request->list[count];
++                              struct drm_buf_entry *from = &dma->bufs[i];
++                              struct drm_freelist *list = &dma->bufs[i].freelist;
++                              if (copy_to_user(&to->count,
++                                               &from->buf_count,
++                                               sizeof(from->buf_count)) ||
++                                  copy_to_user(&to->size,
++                                               &from->buf_size,
++                                               sizeof(from->buf_size)) ||
++                                  copy_to_user(&to->low_mark,
++                                               &list->low_mark,
++                                               sizeof(list->low_mark)) ||
++                                  copy_to_user(&to->high_mark,
++                                               &list->high_mark,
++                                               sizeof(list->high_mark)))
++                                      return -EFAULT;
++
++                              DRM_DEBUG("%d %d %d %d %d\n",
++                                        i,
++                                        dma->bufs[i].buf_count,
++                                        dma->bufs[i].buf_size,
++                                        dma->bufs[i].freelist.low_mark,
++                                        dma->bufs[i].freelist.high_mark);
++                              ++count;
++                      }
++              }
++      }
++      request->count = count;
++
++      return 0;
++}
++
++/**
++ * Specifies a low and high water mark for buffer allocation
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg a pointer to a drm_buf_desc structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies that the size order is bounded between the admissible orders and
++ * updates the respective drm_device_dma::bufs entry low and high water mark.
++ *
++ * \note This ioctl is deprecated and mostly never used.
++ */
++int drm_markbufs(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_desc *request = data;
++      int order;
++      struct drm_buf_entry *entry;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      DRM_DEBUG("%d, %d, %d\n",
++                request->size, request->low_mark, request->high_mark);
++      order = drm_order(request->size);
++      if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
++              return -EINVAL;
++      entry = &dma->bufs[order];
++
++      if (request->low_mark < 0 || request->low_mark > entry->buf_count)
++              return -EINVAL;
++      if (request->high_mark < 0 || request->high_mark > entry->buf_count)
++              return -EINVAL;
++
++      entry->freelist.low_mark = request->low_mark;
++      entry->freelist.high_mark = request->high_mark;
++
++      return 0;
++}
++
++/**
++ * Unreserve the buffers in list, previously reserved using drmDMA.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_free structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls free_buffer() for each used buffer.
++ * This function is primarily used for debugging.
++ */
++int drm_freebufs(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf_free *request = data;
++      int i;
++      int idx;
++      struct drm_buf *buf;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      DRM_DEBUG("%d\n", request->count);
++      for (i = 0; i < request->count; i++) {
++              if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
++                      return -EFAULT;
++              if (idx < 0 || idx >= dma->buf_count) {
++                      DRM_ERROR("Index %d (of %d max)\n",
++                                idx, dma->buf_count - 1);
++                      return -EINVAL;
++              }
++              buf = dma->buflist[idx];
++              if (buf->file_priv != file_priv) {
++                      DRM_ERROR("Process %d freeing buffer not owned\n",
++                                current->pid);
++                      return -EINVAL;
++              }
++              drm_free_buffer(dev, buf);
++      }
++
++      return 0;
++}
++
++/**
++ * Maps all of the DMA buffers into client-virtual space (ioctl).
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg pointer to a drm_buf_map structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
++ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
++ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
++ * drm_mmap_dma().
++ */
++int drm_mapbufs(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int retcode = 0;
++      const int zero = 0;
++      unsigned long virtual;
++      unsigned long address;
++      struct drm_buf_map *request = data;
++      int i;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              return -EINVAL;
++
++      if (!dma)
++              return -EINVAL;
++
++      spin_lock(&dev->count_lock);
++      if (atomic_read(&dev->buf_alloc)) {
++              spin_unlock(&dev->count_lock);
++              return -EBUSY;
++      }
++      dev->buf_use++;         /* Can't allocate more after this call */
++      spin_unlock(&dev->count_lock);
++
++      if (request->count >= dma->buf_count) {
++              if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
++                  || (drm_core_check_feature(dev, DRIVER_SG)
++                      && (dma->flags & _DRM_DMA_USE_SG))
++                  || (drm_core_check_feature(dev, DRIVER_FB_DMA)
++                      && (dma->flags & _DRM_DMA_USE_FB))) {
++                      struct drm_map *map = dev->agp_buffer_map;
++                      unsigned long token = dev->agp_buffer_token;
++
++                      if (!map) {
++                              retcode = -EINVAL;
++                              goto done;
++                      }
++                      down_write(&current->mm->mmap_sem);
++                      virtual = do_mmap(file_priv->filp, 0, map->size,
++                                        PROT_READ | PROT_WRITE,
++                                        MAP_SHARED,
++                                        token);
++                      up_write(&current->mm->mmap_sem);
++              } else {
++                      down_write(&current->mm->mmap_sem);
++                      virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
++                                        PROT_READ | PROT_WRITE,
++                                        MAP_SHARED, 0);
++                      up_write(&current->mm->mmap_sem);
++              }
++              if (virtual > -1024UL) {
++                      /* Real error */
++                      retcode = (signed long)virtual;
++                      goto done;
++              }
++              request->virtual = (void __user *)virtual;
++
++              for (i = 0; i < dma->buf_count; i++) {
++                      if (copy_to_user(&request->list[i].idx,
++                                       &dma->buflist[i]->idx,
++                                       sizeof(request->list[0].idx))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++                      if (copy_to_user(&request->list[i].total,
++                                       &dma->buflist[i]->total,
++                                       sizeof(request->list[0].total))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++                      if (copy_to_user(&request->list[i].used,
++                                       &zero, sizeof(zero))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++                      address = virtual + dma->buflist[i]->offset;    /* *** */
++                      if (copy_to_user(&request->list[i].address,
++                                       &address, sizeof(address))) {
++                              retcode = -EFAULT;
++                              goto done;
++                      }
++              }
++      }
++      done:
++      request->count = dma->buf_count;
++      DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
++
++      return retcode;
++}
++
++/**
++ * Compute size order.  Returns the exponent of the smaller power of two which
++ * is greater or equal to given number.
++ *
++ * \param size size.
++ * \return order.
++ *
++ * \todo Can be made faster.
++ */
++int drm_order(unsigned long size)
++{
++      int order;
++      unsigned long tmp;
++
++      for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
++
++      if (size & (size - 1))
++              ++order;
++
++      return order;
++}
++EXPORT_SYMBOL(drm_order);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_compat.c git-nokia/drivers/gpu/drm-tungsten/drm_compat.c
+--- git/drivers/gpu/drm-tungsten/drm_compat.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_compat.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,860 @@
++/**************************************************************************
++ *
++ * This kernel module is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ **************************************************************************/
++/*
++ * This code provides access to unexported mm kernel features. It is necessary
++ * to use the new DRM memory manager code with kernels that don't support it
++ * directly.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ *          Linux kernel mm subsystem authors.
++ *          (Most code taken from there).
++ */
++
++#include "drmP.h"
++
++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * These have bad performance in the AGP module for the indicated kernel versions.
++ */
++
++int drm_map_page_into_agp(struct page *page)
++{
++        int i;
++        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
++        /* Caller's responsibility to call global_flush_tlb() for
++         * performance reasons */
++        return i;
++}
++
++int drm_unmap_page_from_agp(struct page *page)
++{
++        int i;
++        i = change_page_attr(page, 1, PAGE_KERNEL);
++        /* Caller's responsibility to call global_flush_tlb() for
++         * performance reasons */
++        return i;
++}
++#endif
++
++
++#if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++
++/*
++ * The protection map was exported in 2.6.19
++ */
++
++pgprot_t vm_get_page_prot(unsigned long vm_flags)
++{
++#ifdef MODULE
++      static pgprot_t drm_protection_map[16] = {
++              __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
++              __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
++      };
++
++      return drm_protection_map[vm_flags & 0x0F];
++#else
++      extern pgprot_t protection_map[];
++      return protection_map[vm_flags & 0x0F];
++#endif
++};
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * vm code for kernels below 2.6.15 in which version a major vm write
++ * occurred. This implements a simple straightforward
++ * version similar to what's going to be
++ * in kernel 2.6.19+
++ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
++ * nopfn.
++ */
++
++static struct {
++      spinlock_t lock;
++      struct page *dummy_page;
++      atomic_t present;
++} drm_np_retry =
++{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
++
++
++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
++                                  struct fault_data *data);
++
++
++struct page * get_nopage_retry(void)
++{
++      if (atomic_read(&drm_np_retry.present) == 0) {
++              struct page *page = alloc_page(GFP_KERNEL);
++              if (!page)
++                      return NOPAGE_OOM;
++              spin_lock(&drm_np_retry.lock);
++              drm_np_retry.dummy_page = page;
++              atomic_set(&drm_np_retry.present,1);
++              spin_unlock(&drm_np_retry.lock);
++      }
++      get_page(drm_np_retry.dummy_page);
++      return drm_np_retry.dummy_page;
++}
++
++void free_nopage_retry(void)
++{
++      if (atomic_read(&drm_np_retry.present) == 1) {
++              spin_lock(&drm_np_retry.lock);
++              __free_page(drm_np_retry.dummy_page);
++              drm_np_retry.dummy_page = NULL;
++              atomic_set(&drm_np_retry.present, 0);
++              spin_unlock(&drm_np_retry.lock);
++      }
++}
++
++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++                             unsigned long address,
++                             int *type)
++{
++      struct fault_data data;
++
++      if (type)
++              *type = VM_FAULT_MINOR;
++
++      data.address = address;
++      data.vma = vma;
++      drm_bo_vm_fault(vma, &data);
++      switch (data.type) {
++      case VM_FAULT_OOM:
++              return NOPAGE_OOM;
++      case VM_FAULT_SIGBUS:
++              return NOPAGE_SIGBUS;
++      default:
++              break;
++      }
++
++      return NOPAGE_REFAULT;
++}
++
++#endif
++
++#if !defined(DRM_FULL_MM_COMPAT) && \
++  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
++   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
++
++static int drm_pte_is_clear(struct vm_area_struct *vma,
++                          unsigned long addr)
++{
++      struct mm_struct *mm = vma->vm_mm;
++      int ret = 1;
++      pte_t *pte;
++      pmd_t *pmd;
++      pud_t *pud;
++      pgd_t *pgd;
++
++      spin_lock(&mm->page_table_lock);
++      pgd = pgd_offset(mm, addr);
++      if (pgd_none(*pgd))
++              goto unlock;
++      pud = pud_offset(pgd, addr);
++        if (pud_none(*pud))
++              goto unlock;
++      pmd = pmd_offset(pud, addr);
++      if (pmd_none(*pmd))
++              goto unlock;
++      pte = pte_offset_map(pmd, addr);
++      if (!pte)
++              goto unlock;
++      ret = pte_none(*pte);
++      pte_unmap(pte);
++ unlock:
++      spin_unlock(&mm->page_table_lock);
++      return ret;
++}
++
++static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
++                unsigned long pfn)
++{
++      int ret;
++      if (!drm_pte_is_clear(vma, addr))
++              return -EBUSY;
++
++      ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
++      return ret;
++}
++
++
++static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
++                                  struct fault_data *data)
++{
++      unsigned long address = data->address;
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page = NULL;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++      unsigned long pfn;
++      int err;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++
++      dev = bo->dev;
++      drm_bo_read_lock(&dev->bm.bm_lock, 0);
++
++      mutex_lock(&bo->mutex);
++
++      err = drm_bo_wait(bo, 0, 1, 0);
++      if (err) {
++              data->type = (err == -EAGAIN) ?
++                      VM_FAULT_MINOR : VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++
++      /*
++       * If buffer happens to be in a non-mappable location,
++       * move it to a mappable.
++       */
++
++      if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
++              unsigned long _end = jiffies + 3*DRM_HZ;
++              uint32_t new_mask = bo->mem.proposed_flags |
++                      DRM_BO_FLAG_MAPPABLE |
++                      DRM_BO_FLAG_FORCE_MAPPABLE;
++
++              do {
++                      err = drm_bo_move_buffer(bo, new_mask, 0, 0);
++              } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
++
++              if (err) {
++                      DRM_ERROR("Timeout moving buffer to mappable location.\n");
++                      data->type = VM_FAULT_SIGBUS;
++                      goto out_unlock;
++              }
++      }
++
++      if (address > vma->vm_end) {
++              data->type = VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++      dev = bo->dev;
++      err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
++                              &bus_size);
++
++      if (err) {
++              data->type = VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++      page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
++
++              pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
++              vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
++      } else {
++              ttm = bo->ttm;
++
++              drm_ttm_fixup_caching(ttm);
++              page = drm_ttm_get_page(ttm, page_offset);
++              if (!page) {
++                      data->type = VM_FAULT_OOM;
++                      goto out_unlock;
++              }
++              pfn = page_to_pfn(page);
++              vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
++                      vm_get_page_prot(vma->vm_flags) :
++                      drm_io_prot(_DRM_TTM, vma);
++      }
++
++      err = vm_insert_pfn(vma, address, pfn);
++
++      if (!err || err == -EBUSY)
++              data->type = VM_FAULT_MINOR;
++      else
++              data->type = VM_FAULT_OOM;
++out_unlock:
++      mutex_unlock(&bo->mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return NULL;
++}
++
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
++  !defined(DRM_FULL_MM_COMPAT)
++
++/**
++ */
++
++unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
++                         unsigned long address)
++{
++      struct fault_data data;
++      data.address = address;
++
++      (void) drm_bo_vm_fault(vma, &data);
++      if (data.type == VM_FAULT_OOM)
++              return NOPFN_OOM;
++      else if (data.type == VM_FAULT_SIGBUS)
++              return NOPFN_SIGBUS;
++
++      /*
++       * pfn already set.
++       */
++
++      return 0;
++}
++#endif
++
++
++#ifdef DRM_ODD_MM_COMPAT
++
++/*
++ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
++ * workaround for a single BUG statement in do_no_page in these versions. The
++ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
++ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
++ * check first take the dev->struct_mutex, and then trylock all mmap_sems. If this
++ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
++ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
++ * phew.
++ */
++
++typedef struct p_mm_entry {
++      struct list_head head;
++      struct mm_struct *mm;
++      atomic_t refcount;
++        int locked;
++} p_mm_entry_t;
++
++typedef struct vma_entry {
++      struct list_head head;
++      struct vm_area_struct *vma;
++} vma_entry_t;
++
++
++struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++                             unsigned long address,
++                             int *type)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++
++      mutex_lock(&bo->mutex);
++
++      if (type)
++              *type = VM_FAULT_MINOR;
++
++      if (address > vma->vm_end) {
++              page = NOPAGE_SIGBUS;
++              goto out_unlock;
++      }
++
++      dev = bo->dev;
++
++      if (drm_mem_reg_is_pci(dev, &bo->mem)) {
++              DRM_ERROR("Invalid compat nopage.\n");
++              page = NOPAGE_SIGBUS;
++              goto out_unlock;
++      }
++
++      ttm = bo->ttm;
++      drm_ttm_fixup_caching(ttm);
++      page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
++      page = drm_ttm_get_page(ttm, page_offset);
++      if (!page) {
++              page = NOPAGE_OOM;
++              goto out_unlock;
++      }
++
++      get_page(page);
++out_unlock:
++      mutex_unlock(&bo->mutex);
++      return page;
++}
++
++
++
++
++int drm_bo_map_bound(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
++      int ret = 0;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++
++      ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
++                              &bus_offset, &bus_size);
++      BUG_ON(ret);
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
++              unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
++              pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
++              ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
++                                       vma->vm_end - vma->vm_start,
++                                       pgprot);
++      }
++
++      return ret;
++}
++
++
++int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
++{
++      p_mm_entry_t *entry, *n_entry;
++      vma_entry_t *v_entry;
++      struct mm_struct *mm = vma->vm_mm;
++
++      v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
++      if (!v_entry) {
++              DRM_ERROR("Allocation of vma pointer entry failed\n");
++              return -ENOMEM;
++      }
++      v_entry->vma = vma;
++
++      list_add_tail(&v_entry->head, &bo->vma_list);
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              if (mm == entry->mm) {
++                      atomic_inc(&entry->refcount);
++                      return 0;
++              } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
++      }
++
++      n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
++      if (!n_entry) {
++              DRM_ERROR("Allocation of process mm pointer entry failed\n");
++              return -ENOMEM;
++      }
++      INIT_LIST_HEAD(&n_entry->head);
++      n_entry->mm = mm;
++      n_entry->locked = 0;
++      atomic_set(&n_entry->refcount, 0);
++      list_add_tail(&n_entry->head, &entry->head);
++
++      return 0;
++}
++
++void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
++{
++      p_mm_entry_t *entry, *n;
++      vma_entry_t *v_entry, *v_n;
++      int found = 0;
++      struct mm_struct *mm = vma->vm_mm;
++
++      list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
++              if (v_entry->vma == vma) {
++                      found = 1;
++                      list_del(&v_entry->head);
++                      drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
++                      break;
++              }
++      }
++      BUG_ON(!found);
++
++      list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
++              if (mm == entry->mm) {
++                      if (atomic_add_negative(-1, &entry->refcount)) {
++                              list_del(&entry->head);
++                              BUG_ON(entry->locked);
++                              drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
++                      }
++                      return;
++              }
++      }
++      BUG_ON(1);
++}
++
++
++
++int drm_bo_lock_kmm(struct drm_buffer_object * bo)
++{
++      p_mm_entry_t *entry;
++      int lock_ok = 1;
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              BUG_ON(entry->locked);
++              if (!down_write_trylock(&entry->mm->mmap_sem)) {
++                      lock_ok = 0;
++                      break;
++              }
++              entry->locked = 1;
++      }
++
++      if (lock_ok)
++              return 0;
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              if (!entry->locked)
++                      break;
++              up_write(&entry->mm->mmap_sem);
++              entry->locked = 0;
++      }
++
++      /*
++       * Possible deadlock. Try again. Our callers should handle this
++       * and restart.
++       */
++
++      return -EAGAIN;
++}
++
++void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
++{
++      p_mm_entry_t *entry;
++
++      list_for_each_entry(entry, &bo->p_mm_list, head) {
++              BUG_ON(!entry->locked);
++              up_write(&entry->mm->mmap_sem);
++              entry->locked = 0;
++      }
++}
++
++int drm_bo_remap_bound(struct drm_buffer_object *bo)
++{
++      vma_entry_t *v_entry;
++      int ret = 0;
++
++      if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
++              list_for_each_entry(v_entry, &bo->vma_list, head) {
++                      ret = drm_bo_map_bound(v_entry->vma);
++                      if (ret)
++                              break;
++              }
++      }
++
++      return ret;
++}
++
++void drm_bo_finish_unmap(struct drm_buffer_object *bo)
++{
++      vma_entry_t *v_entry;
++
++      list_for_each_entry(v_entry, &bo->vma_list, head) {
++              v_entry->vma->vm_flags &= ~VM_PFNMAP;
++      }
++}
++
++#endif
++
++#ifdef DRM_IDR_COMPAT_FN
++/* only called when idp->lock is held */
++static void __free_layer(struct idr *idp, struct idr_layer *p)
++{
++      p->ary[0] = idp->id_free;
++      idp->id_free = p;
++      idp->id_free_cnt++;
++}
++
++static void free_layer(struct idr *idp, struct idr_layer *p)
++{
++      unsigned long flags;
++
++      /*
++       * Depends on the return element being zeroed.
++       */
++      spin_lock_irqsave(&idp->lock, flags);
++      __free_layer(idp, p);
++      spin_unlock_irqrestore(&idp->lock, flags);
++}
++
++/**
++ * idr_for_each - iterate through all stored pointers
++ * @idp: idr handle
++ * @fn: function to be called for each pointer
++ * @data: data passed back to callback function
++ *
++ * Iterate over the pointers registered with the given idr.  The
++ * callback function will be called for each pointer currently
++ * registered, passing the id, the pointer and the data pointer passed
++ * to this function.  It is not safe to modify the idr tree while in
++ * the callback, so functions such as idr_get_new and idr_remove are
++ * not allowed.
++ *
++ * We check the return of @fn each time. If it returns anything other
++ * than 0, we break out and return that value.
++ *
++ * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
++ */
++int idr_for_each(struct idr *idp,
++               int (*fn)(int id, void *p, void *data), void *data)
++{
++      int n, id, max, error = 0;
++      struct idr_layer *p;
++      struct idr_layer *pa[MAX_LEVEL];
++      struct idr_layer **paa = &pa[0];
++
++      n = idp->layers * IDR_BITS;
++      p = idp->top;
++      max = 1 << n;
++
++      id = 0;
++      while (id < max) {
++              while (n > 0 && p) {
++                      n -= IDR_BITS;
++                      *paa++ = p;
++                      p = p->ary[(id >> n) & IDR_MASK];
++              }
++
++              if (p) {
++                      error = fn(id, (void *)p, data);
++                      if (error)
++                              break;
++              }
++
++              id += 1 << n;
++              while (n < fls(id)) {
++                      n += IDR_BITS;
++                      p = *--paa;
++              }
++      }
++
++      return error;
++}
++EXPORT_SYMBOL(idr_for_each);
++
++/**
++ * idr_remove_all - remove all ids from the given idr tree
++ * @idp: idr handle
++ *
++ * idr_destroy() only frees up unused, cached idp_layers, but this
++ * function will remove all id mappings and leave all idp_layers
++ * unused.
++ *
++ * A typical clean-up sequence for objects stored in an idr tree, will
++ * use idr_for_each() to free all objects, if necessary, then
++ * idr_remove_all() to remove all ids, and idr_destroy() to free
++ * up the cached idr_layers.
++ */
++void idr_remove_all(struct idr *idp)
++{
++       int n, id, max, error = 0;
++       struct idr_layer *p;
++       struct idr_layer *pa[MAX_LEVEL];
++       struct idr_layer **paa = &pa[0];
++
++       n = idp->layers * IDR_BITS;
++       p = idp->top;
++       max = 1 << n;
++
++       id = 0;
++       while (id < max && !error) {
++               while (n > IDR_BITS && p) {
++                       n -= IDR_BITS;
++                       *paa++ = p;
++                       p = p->ary[(id >> n) & IDR_MASK];
++               }
++
++               id += 1 << n;
++               while (n < fls(id)) {
++                       if (p) {
++                               memset(p, 0, sizeof *p);
++                               free_layer(idp, p);
++                       }
++                       n += IDR_BITS;
++                       p = *--paa;
++               }
++       }
++       idp->top = NULL;
++       idp->layers = 0;
++}
++EXPORT_SYMBOL(idr_remove_all);
++
++#endif /* DRM_IDR_COMPAT_FN */
++
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++/**
++ * idr_replace - replace pointer for given id
++ * @idp: idr handle
++ * @ptr: pointer you want associated with the id
++ * @id: lookup key
++ *
++ * Replace the pointer registered with an id and return the old value.
++ * A -ENOENT return indicates that @id was not found.
++ * A -EINVAL return indicates that @id was not within valid constraints.
++ *
++ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
++ */
++void *idr_replace(struct idr *idp, void *ptr, int id)
++{
++      int n;
++      struct idr_layer *p, *old_p;
++
++      n = idp->layers * IDR_BITS;
++      p = idp->top;
++
++      id &= MAX_ID_MASK;
++
++      if (id >= (1 << n))
++              return ERR_PTR(-EINVAL);
++
++      n -= IDR_BITS;
++      while ((n > 0) && p) {
++              p = p->ary[(id >> n) & IDR_MASK];
++              n -= IDR_BITS;
++      }
++
++      n = id & IDR_MASK;
++      if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
++              return ERR_PTR(-ENOENT);
++
++      old_p = p->ary[n];
++      p->ary[n] = ptr;
++
++      return (void *)old_p;
++}
++EXPORT_SYMBOL(idr_replace);
++#endif
++
++#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
++#define drm_kmap_get_fixmap_pte(vaddr)                                        \
++      pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
++                         pgprot_t protection)
++{
++      enum fixed_addresses idx;
++      unsigned long vaddr;
++      static pte_t *km_pte;
++      static int initialized = 0;
++
++      if (unlikely(!initialized)) {
++              km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
++              initialized = 1;
++      }
++
++      pagefault_disable();
++      idx = type + KM_TYPE_NR*smp_processor_id();
++      vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++      set_pte(km_pte-idx, pfn_pte(pfn, protection));
++
++      return (void*) vaddr;
++}
++
++EXPORT_SYMBOL(kmap_atomic_prot_pfn);
++
++#endif
++
++#ifdef DRM_FULL_MM_COMPAT
++#ifdef DRM_NO_FAULT
++unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
++                            unsigned long address)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page = NULL;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++      unsigned long pfn;
++      int err;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long ret = NOPFN_REFAULT;
++
++      if (address > vma->vm_end)
++              return NOPFN_SIGBUS;
++
++      dev = bo->dev;
++      err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (err)
++              return NOPFN_REFAULT;
++
++      err = mutex_lock_interruptible(&bo->mutex);
++      if (err) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              return NOPFN_REFAULT;
++      }
++
++      err = drm_bo_wait(bo, 0, 1, 0, 1);
++      if (err) {
++              ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++              goto out_unlock;
++      }
++
++      bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++      /*
++       * If buffer happens to be in a non-mappable location,
++       * move it to a mappable.
++       */
++
++      if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
++              uint32_t new_flags = bo->mem.proposed_flags |
++                      DRM_BO_FLAG_MAPPABLE |
++                      DRM_BO_FLAG_FORCE_MAPPABLE;
++              err = drm_bo_move_buffer(bo, new_flags, 0, 0);
++              if (err) {
++                      ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
++                      goto out_unlock;
++              }
++      }
++
++      err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
++                              &bus_size);
++
++      if (err) {
++              ret = NOPFN_SIGBUS;
++              goto out_unlock;
++      }
++
++      page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
++
++              pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
++              vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
++      } else {
++              ttm = bo->ttm;
++
++              drm_ttm_fixup_caching(ttm);
++              page = drm_ttm_get_page(ttm, page_offset);
++              if (!page) {
++                      ret = NOPFN_OOM;
++                      goto out_unlock;
++              }
++              pfn = page_to_pfn(page);
++              vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
++                      vm_get_page_prot(vma->vm_flags) :
++                      drm_io_prot(_DRM_TTM, vma);
++      }
++
++      err = vm_insert_pfn(vma, address, pfn);
++      if (err) {
++              ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
++              goto out_unlock;
++      }
++out_unlock:
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++      mutex_unlock(&bo->mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return ret;
++}
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_compat.h git-nokia/drivers/gpu/drm-tungsten/drm_compat.h
+--- git/drivers/gpu/drm-tungsten/drm_compat.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_compat.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,380 @@
++/**
++ * \file drm_compat.h
++ * Backward compatibility definitions for Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_COMPAT_H_
++#define _DRM_COMPAT_H_
++
++#ifndef minor
++#define minor(x) MINOR((x))
++#endif
++
++#ifndef MODULE_LICENSE
++#define MODULE_LICENSE(x)
++#endif
++
++#ifndef preempt_disable
++#define preempt_disable()
++#define preempt_enable()
++#endif
++
++#ifndef pte_offset_map
++#define pte_offset_map pte_offset
++#define pte_unmap(pte)
++#endif
++
++#ifndef module_param
++#define module_param(name, type, perm)
++#endif
++
++/* older kernels had different irq args */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++#undef DRM_IRQ_ARGS
++#define DRM_IRQ_ARGS          int irq, void *arg, struct pt_regs *regs
++#endif
++
++#ifndef list_for_each_safe
++#define list_for_each_safe(pos, n, head)                              \
++      for (pos = (head)->next, n = pos->next; pos != (head);          \
++              pos = n, n = pos->next)
++#endif
++
++#ifndef list_for_each_entry
++#define list_for_each_entry(pos, head, member)                                \
++       for (pos = list_entry((head)->next, typeof(*pos), member),     \
++                    prefetch(pos->member.next);                               \
++            &pos->member != (head);                                   \
++            pos = list_entry(pos->member.next, typeof(*pos), member), \
++                    prefetch(pos->member.next))
++#endif
++
++#ifndef list_for_each_entry_safe
++#define list_for_each_entry_safe(pos, n, head, member)                  \
++        for (pos = list_entry((head)->next, typeof(*pos), member),      \
++                n = list_entry(pos->member.next, typeof(*pos), member); \
++             &pos->member != (head);                                    \
++             pos = n, n = list_entry(n->member.next, typeof(*n), member))
++#endif
++
++#ifndef __user
++#define __user
++#endif
++
++#if !defined(__put_page)
++#define __put_page(p)           atomic_dec(&(p)->count)
++#endif
++
++#if !defined(__GFP_COMP)
++#define __GFP_COMP 0
++#endif
++
++#if !defined(IRQF_SHARED)
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
++{
++  return remap_page_range(vma, from,
++                        pfn << PAGE_SHIFT,
++                        size,
++                        pgprot);
++}
++
++static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
++{
++      void *addr;
++
++      addr = kmalloc(size * nmemb, flags);
++      if (addr != NULL)
++              memset((void *)addr, 0, size * nmemb);
++
++      return addr;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++#define mutex_lock down
++#define mutex_unlock up
++
++#define mutex semaphore
++
++#define mutex_init(a) sema_init((a), 1)
++
++#endif
++
++#ifndef DEFINE_SPINLOCK
++#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
++#endif
++
++/* old architectures */
++#ifdef __AMD64__
++#define __x86_64__
++#endif
++
++/* sysfs __ATTR macro */
++#ifndef __ATTR
++#define __ATTR(_name,_mode,_show,_store) { \
++        .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE },     \
++        .show   = _show,                                        \
++        .store  = _store,                                       \
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++#define vmalloc_user(_size) ({void * tmp = vmalloc(_size);   \
++      if (tmp) memset(tmp, 0, size);                       \
++      (tmp);})
++#endif
++
++#ifndef list_for_each_entry_safe_reverse
++#define list_for_each_entry_safe_reverse(pos, n, head, member)          \
++        for (pos = list_entry((head)->prev, typeof(*pos), member),      \
++                n = list_entry(pos->member.prev, typeof(*pos), member); \
++             &pos->member != (head);                                    \
++             pos = n, n = list_entry(n->member.prev, typeof(*n), member))
++#endif
++
++#include <linux/mm.h>
++#include <asm/page.h>
++
++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
++     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
++#define DRM_ODD_MM_COMPAT
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
++#define DRM_FULL_MM_COMPAT
++#endif
++
++
++/*
++ * Flush relevant caches and clear a VMA structure so that page references
++ * will cause a page fault. Don't flush tlbs.
++ */
++
++extern void drm_clear_vma(struct vm_area_struct *vma,
++                        unsigned long addr, unsigned long end);
++
++/*
++ * Return the PTE protection map entries for the VMA flags given by
++ * flags. This is a functional interface to the kernel's protection map.
++ */
++
++extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
++
++#ifndef GFP_DMA32
++#define GFP_DMA32 GFP_KERNEL
++#endif
++#ifndef __GFP_DMA32
++#define __GFP_DMA32 GFP_KERNEL
++#endif
++
++#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++
++/*
++ * These are too slow in earlier kernels.
++ */
++
++extern int drm_unmap_page_from_agp(struct page *page);
++extern int drm_map_page_into_agp(struct page *page);
++
++#define map_page_into_agp drm_map_page_into_agp
++#define unmap_page_from_agp drm_unmap_page_from_agp
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++extern struct page *get_nopage_retry(void);
++extern void free_nopage_retry(void);
++
++#define NOPAGE_REFAULT get_nopage_retry()
++#endif
++
++
++#ifndef DRM_FULL_MM_COMPAT
++
++/*
++ * For now, just return a dummy page that we've allocated out of
++ * static space. The page will be put by do_nopage() since we've already
++ * filled out the pte.
++ */
++
++struct fault_data {
++      struct vm_area_struct *vma;
++      unsigned long address;
++      pgoff_t pgoff;
++      unsigned int flags;
++
++      int type;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
++                                   unsigned long address,
++                                   int *type);
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
++  !defined(DRM_FULL_MM_COMPAT)
++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
++                                   unsigned long address);
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
++#endif /* ndef DRM_FULL_MM_COMPAT */
++
++#ifdef DRM_ODD_MM_COMPAT
++
++struct drm_buffer_object;
++
++
++/*
++ * Add a vma to the ttm vma list, and the
++ * process mm pointer to the ttm mm list. Needs the ttm mutex.
++ */
++
++extern int drm_bo_add_vma(struct drm_buffer_object * bo,
++                         struct vm_area_struct *vma);
++/*
++ * Delete a vma and the corresponding mm pointer from the
++ * ttm lists. Needs the ttm mutex.
++ */
++extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
++                            struct vm_area_struct *vma);
++
++/*
++ * Attempts to lock all relevant mmap_sems for a ttm, while
++ * not releasing the ttm mutex. May return -EAGAIN to avoid
++ * deadlocks. In that case the caller shall release the ttm mutex,
++ * schedule() and try again.
++ */
++
++extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
++
++/*
++ * Unlock all relevant mmap_sems for a ttm.
++ */
++extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
++
++/*
++ * If the ttm was bound to the aperture, this function shall be called
++ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
++ * vmas mapping this ttm. This is needed just after unmapping the ptes of
++ * the vma, otherwise the do_nopage() function will bug :(. The function
++ * releases the mmap_sems for this ttm.
++ */
++
++extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
++
++/*
++ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
++ * fault these pfns in, because the first one will set the vma VM_PFNMAP
++ * flag, which will make the next fault bug in do_nopage(). The function
++ * releases the mmap_sems for this ttm.
++ */
++
++extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
++
++
++/*
++ * Remap a vma for a bound ttm. Call with the ttm mutex held and
++ * the relevant mmap_sem locked.
++ */
++extern int drm_bo_map_bound(struct vm_area_struct *vma);
++
++#endif
++
++/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
++#define DRM_IDR_COMPAT_FN
++#define DRM_NO_FAULT
++extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
++                                   unsigned long address);
++#endif
++#ifdef DRM_IDR_COMPAT_FN
++int idr_for_each(struct idr *idp,
++               int (*fn)(int id, void *p, void *data), void *data);
++void idr_remove_all(struct idr *idp);
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
++void *idr_replace(struct idr *idp, void *ptr, int id);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++typedef _Bool                   bool;
++#endif
++
++
++#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
++#define DRM_KMAP_ATOMIC_PROT_PFN
++extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
++                                pgprot_t protection);
++#endif
++
++#if !defined(flush_agp_mappings)
++#define flush_agp_mappings() do {} while(0)
++#endif
++
++#ifndef DMA_BIT_MASK
++#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
++#endif
++
++#ifndef VM_CAN_NONLINEAR
++#define DRM_VM_NOPAGE 1
++#endif
++
++#ifdef DRM_VM_NOPAGE
++
++extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
++                                unsigned long address, int *type);
++
++extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
++                                    unsigned long address, int *type);
++
++extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
++                                    unsigned long address, int *type);
++
++extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
++                                   unsigned long address, int *type);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
++#define drm_core_ioremap_wc drm_core_ioremap
++#endif
++
++#ifndef OS_HAS_GEM
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
++#define OS_HAS_GEM 1
++#else
++#define OS_HAS_GEM 0
++#endif
++#endif
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_context.c git-nokia/drivers/gpu/drm-tungsten/drm_context.c
+--- git/drivers/gpu/drm-tungsten/drm_context.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_context.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,472 @@
++/**
++ * \file drm_context.c
++ * IOCTLs for generic contexts
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * ChangeLog:
++ *  2001-11-16        Torsten Duwe <duwe@caldera.de>
++ *            added context constructor/destructor hooks,
++ *            needed by SiS driver's memory management.
++ */
++
++#include "drmP.h"
++
++/******************************************************************/
++/** \name Context bitmap support */
++/*@{*/
++
++/**
++ * Free a handle from the context bitmap.
++ *
++ * \param dev DRM device.
++ * \param ctx_handle context handle.
++ *
++ * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
++ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
++ * lock.
++ */
++void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
++{
++      mutex_lock(&dev->struct_mutex);
++      idr_remove(&dev->ctx_idr, ctx_handle);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Context bitmap allocation.
++ *
++ * \param dev DRM device.
++ * \return (non-negative) context handle on success or a negative number on failure.
++ *
++ * Allocate a new idr from drm_device::ctx_idr while holding the
++ * drm_device::struct_mutex lock.
++ */
++static int drm_ctxbitmap_next(struct drm_device *dev)
++{
++      int new_id;
++      int ret;
++
++again:
++      if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
++              DRM_ERROR("Out of memory expanding drawable idr\n");
++              return -ENOMEM;
++      }
++      mutex_lock(&dev->struct_mutex);
++      ret = idr_get_new_above(&dev->ctx_idr, NULL,
++                              DRM_RESERVED_CONTEXTS, &new_id);
++      if (ret == -EAGAIN) {
++              mutex_unlock(&dev->struct_mutex);
++              goto again;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++      return new_id;
++}
++
++/**
++ * Context bitmap initialization.
++ *
++ * \param dev DRM device.
++ *
++ * Initialise the drm_device::ctx_idr
++ */
++int drm_ctxbitmap_init(struct drm_device *dev)
++{
++      idr_init(&dev->ctx_idr);
++      return 0;
++}
++
++/**
++ * Context bitmap cleanup.
++ *
++ * \param dev DRM device.
++ *
++ * Free all idr members using drm_ctx_sarea_free helper function
++ * while holding the drm_device::struct_mutex lock.
++ */
++void drm_ctxbitmap_cleanup(struct drm_device *dev)
++{
++      mutex_lock(&dev->struct_mutex);
++      idr_remove_all(&dev->ctx_idr);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/*@}*/
++
++/******************************************************************/
++/** \name Per Context SAREA Support */
++/*@{*/
++
++/**
++ * Get per-context SAREA.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx_priv_map structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Gets the map from drm_device::ctx_idr with the handle specified and
++ * returns its handle.
++ */
++int drm_getsareactx(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_ctx_priv_map *request = data;
++      struct drm_map *map;
++      struct drm_map_list *_entry;
++
++      mutex_lock(&dev->struct_mutex);
++
++      map = idr_find(&dev->ctx_idr, request->ctx_id);
++      if (!map) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++
++      request->handle = NULL;
++      list_for_each_entry(_entry, &dev->maplist, head) {
++              if (_entry->map == map) {
++                      request->handle =
++                          (void *)(unsigned long)_entry->user_token;
++                      break;
++              }
++      }
++      if (request->handle == NULL)
++              return -EINVAL;
++
++      return 0;
++}
++
++/**
++ * Set per-context SAREA.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx_priv_map structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches the mapping specified in \p arg and update the entry in
++ * drm_device::ctx_idr with it.
++ */
++int drm_setsareactx(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_ctx_priv_map *request = data;
++      struct drm_map *map = NULL;
++      struct drm_map_list *r_list = NULL;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              if (r_list->map
++                  && r_list->user_token == (unsigned long) request->handle)
++                      goto found;
++      }
++      bad:
++      mutex_unlock(&dev->struct_mutex);
++      return -EINVAL;
++
++      found:
++      map = r_list->map;
++      if (!map)
++              goto bad;
++
++      if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
++              goto bad;
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/*@}*/
++
++/******************************************************************/
++/** \name The actual DRM context handling routines */
++/*@{*/
++
++/**
++ * Switch context.
++ *
++ * \param dev DRM device.
++ * \param old old context handle.
++ * \param new new context handle.
++ * \return zero on success or a negative number on failure.
++ *
++ * Attempt to set drm_device::context_flag.
++ */
++static int drm_context_switch(struct drm_device *dev, int old, int new)
++{
++      if (test_and_set_bit(0, &dev->context_flag)) {
++              DRM_ERROR("Reentering -- FIXME\n");
++              return -EBUSY;
++      }
++
++      DRM_DEBUG("Context switch from %d to %d\n", old, new);
++
++      if (new == dev->last_context) {
++              clear_bit(0, &dev->context_flag);
++              return 0;
++      }
++
++      return 0;
++}
++
++/**
++ * Complete context switch.
++ *
++ * \param dev DRM device.
++ * \param new new context handle.
++ * \return zero on success or a negative number on failure.
++ *
++ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
++ * hardware lock is held, clears the drm_device::context_flag and wakes up
++ * drm_device::context_wait.
++ */
++static int drm_context_switch_complete(struct drm_device *dev, int new)
++{
++      dev->last_context = new;        /* PRE/POST: This is the _only_ writer. */
++      dev->last_switch = jiffies;
++
++      if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
++              DRM_ERROR("Lock isn't held after context switch\n");
++      }
++
++      /* If a context switch is ever initiated
++         when the kernel holds the lock, release
++         that lock here. */
++      clear_bit(0, &dev->context_flag);
++      wake_up(&dev->context_wait);
++
++      return 0;
++}
++
++/**
++ * Reserve contexts.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx_res structure.
++ * \return zero on success or a negative number on failure.
++ */
++int drm_resctx(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_ctx_res *res = data;
++      struct drm_ctx ctx;
++      int i;
++
++      if (res->count >= DRM_RESERVED_CONTEXTS) {
++              memset(&ctx, 0, sizeof(ctx));
++              for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
++                      ctx.handle = i;
++                      if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
++                              return -EFAULT;
++              }
++      }
++      res->count = DRM_RESERVED_CONTEXTS;
++
++      return 0;
++}
++
++/**
++ * Add context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Get a new handle for the context and copy to userspace.
++ */
++int drm_addctx(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_ctx_list *ctx_entry;
++      struct drm_ctx *ctx = data;
++
++      ctx->handle = drm_ctxbitmap_next(dev);
++      if (ctx->handle == DRM_KERNEL_CONTEXT) {
++              /* Skip kernel's context and get a new one. */
++              ctx->handle = drm_ctxbitmap_next(dev);
++      }
++      DRM_DEBUG("%d\n", ctx->handle);
++      if (ctx->handle == -1) {
++              DRM_DEBUG("Not enough free contexts.\n");
++              /* Should this return -EBUSY instead? */
++              return -ENOMEM;
++      }
++
++      if (ctx->handle != DRM_KERNEL_CONTEXT) {
++              if (dev->driver->context_ctor)
++                      if (!dev->driver->context_ctor(dev, ctx->handle)) {
++                              DRM_DEBUG("Running out of ctxs or memory.\n");
++                              return -ENOMEM;
++                      }
++      }
++
++      ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
++      if (!ctx_entry) {
++              DRM_DEBUG("out of memory\n");
++              return -ENOMEM;
++      }
++
++      INIT_LIST_HEAD(&ctx_entry->head);
++      ctx_entry->handle = ctx->handle;
++      ctx_entry->tag = file_priv;
++
++      mutex_lock(&dev->ctxlist_mutex);
++      list_add(&ctx_entry->head, &dev->ctxlist);
++      ++dev->ctx_count;
++      mutex_unlock(&dev->ctxlist_mutex);
++
++      return 0;
++}
++
++int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      /* This does nothing */
++      return 0;
++}
++
++/**
++ * Get context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ */
++int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      /* This is 0, because we don't handle any context flags */
++      ctx->flags = 0;
++
++      return 0;
++}
++
++/**
++ * Switch context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls context_switch().
++ */
++int drm_switchctx(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      DRM_DEBUG("%d\n", ctx->handle);
++      return drm_context_switch(dev, dev->last_context, ctx->handle);
++}
++
++/**
++ * New context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls context_switch_complete().
++ */
++int drm_newctx(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      DRM_DEBUG("%d\n", ctx->handle);
++      drm_context_switch_complete(dev, ctx->handle);
++
++      return 0;
++}
++
++/**
++ * Remove context.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument pointing to a drm_ctx structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
++ */
++int drm_rmctx(struct drm_device *dev, void *data,
++            struct drm_file *file_priv)
++{
++      struct drm_ctx *ctx = data;
++
++      DRM_DEBUG("%d\n", ctx->handle);
++      if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
++              file_priv->remove_auth_on_close = 1;
++      }
++      if (ctx->handle != DRM_KERNEL_CONTEXT) {
++              if (dev->driver->context_dtor)
++                      dev->driver->context_dtor(dev, ctx->handle);
++              drm_ctxbitmap_free(dev, ctx->handle);
++      }
++
++      mutex_lock(&dev->ctxlist_mutex);
++      if (!list_empty(&dev->ctxlist)) {
++              struct drm_ctx_list *pos, *n;
++
++              list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
++                      if (pos->handle == ctx->handle) {
++                              list_del(&pos->head);
++                              drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
++                              --dev->ctx_count;
++                      }
++              }
++      }
++      mutex_unlock(&dev->ctxlist_mutex);
++
++      return 0;
++}
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_core.h git-nokia/drivers/gpu/drm-tungsten/drm_core.h
+--- git/drivers/gpu/drm-tungsten/drm_core.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_core.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,35 @@
++/*
++ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#define CORE_AUTHOR           "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
++
++#define CORE_NAME             "drm"
++#define CORE_DESC             "DRM shared core routines"
++#define CORE_DATE             "20060810"
++
++#define DRM_IF_MAJOR  1
++#define DRM_IF_MINOR  3
++
++#define CORE_MAJOR    1
++#define CORE_MINOR    1
++#define CORE_PATCHLEVEL 0
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_dma.c git-nokia/drivers/gpu/drm-tungsten/drm_dma.c
+--- git/drivers/gpu/drm-tungsten/drm_dma.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_dma.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,179 @@
++/**
++ * \file drm_dma.c
++ * DMA IOCTL and function support
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++/**
++ * Initialize the DMA data.
++ *
++ * \param dev DRM device.
++ * \return zero on success or a negative value on failure.
++ *
++ * Allocate and initialize a drm_device_dma structure.
++ */
++int drm_dma_setup(struct drm_device *dev)
++{
++      int i;
++
++      dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
++      if (!dev->dma)
++              return -ENOMEM;
++
++      memset(dev->dma, 0, sizeof(*dev->dma));
++
++      for (i = 0; i <= DRM_MAX_ORDER; i++)
++              memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
++
++      return 0;
++}
++
++/**
++ * Cleanup the DMA resources.
++ *
++ * \param dev DRM device.
++ *
++ * Free all pages associated with DMA buffers, the buffers and pages lists, and
++ * finally the drm_device::dma structure itself.
++ */
++void drm_dma_takedown(struct drm_device *dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i, j;
++
++      if (!dma)
++              return;
++
++      /* Clear dma buffers */
++      for (i = 0; i <= DRM_MAX_ORDER; i++) {
++              if (dma->bufs[i].seg_count) {
++                      DRM_DEBUG("order %d: buf_count = %d,"
++                                " seg_count = %d\n",
++                                i,
++                                dma->bufs[i].buf_count,
++                                dma->bufs[i].seg_count);
++                      for (j = 0; j < dma->bufs[i].seg_count; j++) {
++                              if (dma->bufs[i].seglist[j]) {
++                                      drm_pci_free(dev, dma->bufs[i].seglist[j]);
++                              }
++                      }
++                      drm_free(dma->bufs[i].seglist,
++                               dma->bufs[i].seg_count
++                               * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
++              }
++              if (dma->bufs[i].buf_count) {
++                      for (j = 0; j < dma->bufs[i].buf_count; j++) {
++                              if (dma->bufs[i].buflist[j].dev_private) {
++                                      drm_free(dma->bufs[i].buflist[j].
++                                               dev_private,
++                                               dma->bufs[i].buflist[j].
++                                               dev_priv_size, DRM_MEM_BUFS);
++                              }
++                      }
++                      drm_free(dma->bufs[i].buflist,
++                               dma->bufs[i].buf_count *
++                               sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
++              }
++      }
++
++      if (dma->buflist) {
++              drm_free(dma->buflist,
++                       dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
++      }
++
++      if (dma->pagelist) {
++              drm_free(dma->pagelist,
++                       dma->page_count * sizeof(*dma->pagelist),
++                       DRM_MEM_PAGES);
++      }
++      drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
++      dev->dma = NULL;
++}
++
++/**
++ * Free a buffer.
++ *
++ * \param dev DRM device.
++ * \param buf buffer to free.
++ *
++ * Resets the fields of \p buf.
++ */
++void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
++{
++      if (!buf)
++              return;
++
++      buf->waiting = 0;
++      buf->pending = 0;
++      buf->file_priv = NULL;
++      buf->used = 0;
++
++      if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
++          && waitqueue_active(&buf->dma_wait)) {
++              wake_up_interruptible(&buf->dma_wait);
++      }
++}
++
++/**
++ * Reclaim the buffers.
++ *
++ * \param file_priv DRM file private.
++ *
++ * Frees each buffer associated with \p file_priv not already on the hardware.
++ */
++void drm_core_reclaim_buffers(struct drm_device *dev,
++                            struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      if (!dma)
++              return;
++      for (i = 0; i < dma->buf_count; i++) {
++              if (dma->buflist[i]->file_priv == file_priv) {
++                      switch (dma->buflist[i]->list) {
++                      case DRM_LIST_NONE:
++                              drm_free_buffer(dev, dma->buflist[i]);
++                              break;
++                      case DRM_LIST_WAIT:
++                              dma->buflist[i]->list = DRM_LIST_RECLAIM;
++                              break;
++                      default:
++                              /* Buffer already on hardware. */
++                              break;
++                      }
++              }
++      }
++}
++EXPORT_SYMBOL(drm_core_reclaim_buffers);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_drawable.c git-nokia/drivers/gpu/drm-tungsten/drm_drawable.c
+--- git/drivers/gpu/drm-tungsten/drm_drawable.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_drawable.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,192 @@
++/**
++ * \file drm_drawable.c
++ * IOCTLs for drawables
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ * \author Michel Dänzer <michel@tungstengraphics.com>
++ */
++
++/*
++ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++/**
++ * Allocate drawable ID and memory to store information about it.
++ */
++int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      unsigned long irqflags;
++      struct drm_draw *draw = data;
++      int new_id = 0;
++      int ret;
++
++again:
++      if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
++              DRM_ERROR("Out of memory expanding drawable idr\n");
++              return -ENOMEM;
++      }
++
++      spin_lock_irqsave(&dev->drw_lock, irqflags);
++      ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
++      if (ret == -EAGAIN) {
++              spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++              goto again;
++      }
++
++      spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++
++      draw->handle = new_id;
++
++      DRM_DEBUG("%d\n", draw->handle);
++
++      return 0;
++}
++
++/**
++ * Free drawable ID and memory to store information about it.
++ */
++int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_draw *draw = data;
++      unsigned long irqflags;
++
++      spin_lock_irqsave(&dev->drw_lock, irqflags);
++
++      drm_free(drm_get_drawable_info(dev, draw->handle),
++               sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
++
++      idr_remove(&dev->drw_idr, draw->handle);
++
++      spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++      DRM_DEBUG("%d\n", draw->handle);
++      return 0;
++}
++
++int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_update_draw *update = data;
++      unsigned long irqflags;
++      struct drm_clip_rect *rects;
++      struct drm_drawable_info *info;
++      int err;
++
++      info = idr_find(&dev->drw_idr, update->handle);
++      if (!info) {
++              info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
++              if (!info)
++                      return -ENOMEM;
++              if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
++                      DRM_ERROR("No such drawable %d\n", update->handle);
++                      drm_free(info, sizeof(*info), DRM_MEM_BUFS);
++                      return -EINVAL;
++              }
++      }
++
++      switch (update->type) {
++      case DRM_DRAWABLE_CLIPRECTS:
++              if (update->num != info->num_rects) {
++                      rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
++                                       DRM_MEM_BUFS);
++              } else
++                      rects = info->rects;
++
++              if (update->num && !rects) {
++                      DRM_ERROR("Failed to allocate cliprect memory\n");
++                      err = -ENOMEM;
++                      goto error;
++              }
++
++              if (update->num && DRM_COPY_FROM_USER(rects,
++                                                   (struct drm_clip_rect __user *)
++                                                   (unsigned long)update->data,
++                                                   update->num *
++                                                   sizeof(*rects))) {
++                      DRM_ERROR("Failed to copy cliprects from userspace\n");
++                      err = -EFAULT;
++                      goto error;
++              }
++
++              spin_lock_irqsave(&dev->drw_lock, irqflags);
++
++              if (rects != info->rects) {
++                      drm_free(info->rects, info->num_rects *
++                               sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
++              }
++
++              info->rects = rects;
++              info->num_rects = update->num;
++
++              spin_unlock_irqrestore(&dev->drw_lock, irqflags);
++
++              DRM_DEBUG("Updated %d cliprects for drawable %d\n",
++                        info->num_rects, update->handle);
++              break;
++      default:
++              DRM_ERROR("Invalid update type %d\n", update->type);
++              return -EINVAL;
++      }
++
++      return 0;
++
++error:
++      if (rects != info->rects)
++              drm_free(rects, update->num * sizeof(struct drm_clip_rect),
++                       DRM_MEM_BUFS);
++
++      return err;
++}
++
++/**
++ * Caller must hold the drawable spinlock!
++ */
++struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
++{
++      return idr_find(&dev->drw_idr, id);
++}
++EXPORT_SYMBOL(drm_get_drawable_info);
++
++static int drm_drawable_free(int idr, void *p, void *data)
++{
++      struct drm_drawable_info *info = p;
++
++      if (info) {
++              drm_free(info->rects, info->num_rects *
++                       sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
++              drm_free(info, sizeof(*info), DRM_MEM_BUFS);
++      }
++
++      return 0;
++}
++
++void drm_drawable_free_all(struct drm_device *dev)
++{
++      idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
++      idr_remove_all(&dev->drw_idr);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_drv.c git-nokia/drivers/gpu/drm-tungsten/drm_drv.c
+--- git/drivers/gpu/drm-tungsten/drm_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,697 @@
++/**
++ * \file drm_drv.c
++ * Generic driver template
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ *
++ * To use this template, you must at least define the following (samples
++ * given for the MGA driver):
++ *
++ * \code
++ * #define DRIVER_AUTHOR      "VA Linux Systems, Inc."
++ *
++ * #define DRIVER_NAME                "mga"
++ * #define DRIVER_DESC                "Matrox G200/G400"
++ * #define DRIVER_DATE                "20001127"
++ *
++ * #define drm_x              mga_##x
++ * \endcode
++ */
++
++/*
++ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "drm_core.h"
++
++static void drm_cleanup(struct drm_device * dev);
++int drm_fb_loaded = 0;
++
++static int drm_version(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++
++/** Ioctl table */
++static struct drm_ioctl_desc drm_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
++      /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
++      DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++#if __OS_HAS_AGP
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++#endif
++
++      DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
++                    DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
++
++      DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
++
++#if OS_HAS_GEM
++      DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
++#endif
++};
++
++#define DRM_CORE_IOCTL_COUNT  ARRAY_SIZE( drm_ioctls )
++
++
++/**
++ * Take down the DRM device.
++ *
++ * \param dev DRM device structure.
++ *
++ * Frees every resource in \p dev.
++ *
++ * \sa drm_device
++ */
++int drm_lastclose(struct drm_device * dev)
++{
++      struct drm_magic_entry *pt, *next;
++      struct drm_map_list *r_list, *list_t;
++      struct drm_vma_entry *vma, *vma_temp;
++      int i;
++
++      DRM_DEBUG("\n");
++
++      /*
++       * We can't do much about this function failing.
++       */
++
++      drm_bo_driver_finish(dev);
++
++      if (dev->driver->lastclose)
++              dev->driver->lastclose(dev);
++      DRM_DEBUG("driver lastclose completed\n");
++
++      if (dev->unique) {
++              drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++              dev->unique = NULL;
++              dev->unique_len = 0;
++      }
++
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      /* Free drawable information memory */
++      mutex_lock(&dev->struct_mutex);
++
++      drm_drawable_free_all(dev);
++      del_timer(&dev->timer);
++
++      if (dev->unique) {
++              drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++              dev->unique = NULL;
++              dev->unique_len = 0;
++      }
++
++      if (dev->magicfree.next) {
++              list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
++                      list_del(&pt->head);
++                      drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
++                      drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
++              }
++              drm_ht_remove(&dev->magiclist);
++      }
++
++
++      /* Clear AGP information */
++      if (drm_core_has_AGP(dev) && dev->agp) {
++              struct drm_agp_mem *entry, *tempe;
++
++              /* Remove AGP resources, but leave dev->agp
++                 intact until drv_cleanup is called. */
++              list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
++                      if (entry->bound)
++                              drm_unbind_agp(entry->memory);
++                      drm_free_agp(entry->memory, entry->pages);
++                      drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
++              }
++              INIT_LIST_HEAD(&dev->agp->memory);
++
++              if (dev->agp->acquired)
++                      drm_agp_release(dev);
++
++              dev->agp->acquired = 0;
++              dev->agp->enabled = 0;
++      }
++      if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
++              drm_sg_cleanup(dev->sg);
++              dev->sg = NULL;
++      }
++
++      /* Clear vma list (only built for debugging) */
++      list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
++              list_del(&vma->head);
++              drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
++      }
++
++      list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
++              if (!(r_list->map->flags & _DRM_DRIVER)) {
++                      drm_rmmap_locked(dev, r_list->map);
++                      r_list = NULL;
++              }
++      }
++
++      if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
++              for (i = 0; i < dev->queue_count; i++) {
++
++                      if (dev->queuelist[i]) {
++                              drm_free(dev->queuelist[i],
++                                       sizeof(*dev->queuelist[0]),
++                                       DRM_MEM_QUEUES);
++                              dev->queuelist[i] = NULL;
++                      }
++              }
++              drm_free(dev->queuelist,
++                       dev->queue_slots * sizeof(*dev->queuelist),
++                       DRM_MEM_QUEUES);
++              dev->queuelist = NULL;
++      }
++      dev->queue_count = 0;
++
++      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
++              drm_dma_takedown(dev);
++
++      if (dev->lock.hw_lock) {
++              dev->sigdata.lock = dev->lock.hw_lock = NULL;   /* SHM removed */
++              dev->lock.file_priv = NULL;
++              wake_up_interruptible(&dev->lock.lock_queue);
++      }
++      dev->dev_mapping = NULL;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("lastclose completed\n");
++      return 0;
++}
++
++void drm_cleanup_pci(struct pci_dev *pdev)
++{
++      struct drm_device *dev = pci_get_drvdata(pdev);
++
++      pci_set_drvdata(pdev, NULL);
++      pci_release_regions(pdev);
++      if (dev)
++              drm_cleanup(dev);
++}
++EXPORT_SYMBOL(drm_cleanup_pci);
++
++/**
++ * Module initialization. Called via init_module at module load time, or via
++ * linux/init/main.c (this is not currently supported).
++ *
++ * \return zero on success or a negative number on failure.
++ *
++ * Initializes an array of drm_device structures, and attempts to
++ * initialize all available devices, using consecutive minors, registering the
++ * stubs and initializing the AGP device.
++ *
++ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
++ * after the initialization for driver customization.
++ */
++int drm_init(struct drm_driver *driver,
++                     struct pci_device_id *pciidlist)
++{
++      struct pci_dev *pdev;
++      struct pci_device_id *pid;
++      int rc, i;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) {
++              pid = &pciidlist[i];
++
++              pdev = NULL;
++              /* pass back in pdev to account for multiple identical cards */
++              while ((pdev =
++                      pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
++                                     pid->subdevice, pdev))) {
++                      /* Are there device class requirements? */
++                      if ((pid->class != 0)
++                              && ((pdev->class & pid->class_mask) != pid->class)) {
++                              continue;
++                      }
++                      /* is there already a driver loaded, or (short circuit saves work) */
++                      /* does something like VesaFB have control of the memory region? */
++                      if (
++#ifdef CONFIG_PCI
++                          pci_dev_driver(pdev) ||
++#endif
++                          pci_request_regions(pdev, "DRM scan")) {
++                              /* go into stealth mode */
++                              drm_fb_loaded = 1;
++                              pci_dev_put(pdev);
++                              break;
++                      }
++                      /* no fbdev or vesadev, put things back and wait for normal probe */
++                      pci_release_regions(pdev);
++              }
++      }
++
++      if (!drm_fb_loaded)
++              return pci_register_driver(&driver->pci_driver);
++      else {
++              for (i = 0; pciidlist[i].vendor != 0; i++) {
++                      pid = &pciidlist[i];
++
++                      pdev = NULL;
++                      /* pass back in pdev to account for multiple identical cards */
++                      while ((pdev =
++                              pci_get_subsys(pid->vendor, pid->device,
++                                             pid->subvendor, pid->subdevice,
++                                             pdev))) {
++                              /* Are there device class requirements? */
++                              if ((pid->class != 0)
++                                      && ((pdev->class & pid->class_mask) != pid->class)) {
++                                      continue;
++                              }
++#ifdef CONFIG_PCI
++                              /* stealth mode requires a manual probe */
++                              pci_dev_get(pdev);
++#endif
++                              if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
++                                      pci_dev_put(pdev);
++                                      return rc;
++                              }
++                      }
++              }
++              DRM_INFO("Used old pci detect: framebuffer loaded\n");
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_init);
++
++/**
++ * Called via cleanup_module() at module unload time.
++ *
++ * Cleans up all DRM device, calling drm_lastclose().
++ *
++ * \sa drm_init
++ */
++static void drm_cleanup(struct drm_device * dev)
++{
++
++      DRM_DEBUG("\n");
++      if (!dev) {
++              DRM_ERROR("cleanup called no dev\n");
++              return;
++      }
++
++      drm_lastclose(dev);
++      drm_fence_manager_takedown(dev);
++
++      if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
++          && dev->agp->agp_mtrr >= 0) {
++              int retval;
++              retval = mtrr_del(dev->agp->agp_mtrr,
++                                dev->agp->agp_info.aper_base,
++                                dev->agp->agp_info.aper_size * 1024 * 1024);
++              DRM_DEBUG("mtrr_del=%d\n", retval);
++      }
++
++      if (drm_core_has_AGP(dev) && dev->agp) {
++              drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
++              dev->agp = NULL;
++      }
++      if (dev->driver->unload)
++              dev->driver->unload(dev);
++
++      if (!drm_fb_loaded)
++              pci_disable_device(dev->pdev);
++
++      drm_ctxbitmap_cleanup(dev);
++      drm_ht_remove(&dev->map_hash);
++      drm_mm_takedown(&dev->offset_manager);
++      drm_ht_remove(&dev->object_hash);
++
++      drm_put_minor(dev);
++      if (drm_put_dev(dev))
++              DRM_ERROR("Cannot unload module\n");
++}
++
++int drm_minors_cleanup(int id, void *ptr, void *data)
++{
++      struct drm_minor *minor = ptr;
++      struct drm_device *dev;
++      struct drm_driver *driver = data;
++
++      dev = minor->dev;
++      if (minor->dev->driver != driver)
++              return 0;
++
++      if (minor->type != DRM_MINOR_LEGACY)
++              return 0;
++
++      if (dev)
++              pci_dev_put(dev->pdev);
++      drm_cleanup(dev);
++      return 1;
++}
++
++void drm_exit(struct drm_driver *driver)
++{
++      DRM_DEBUG("\n");
++      if (drm_fb_loaded) {
++              idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
++      } else
++              pci_unregister_driver(&driver->pci_driver);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      free_nopage_retry();
++#endif
++      DRM_INFO("Module unloaded\n");
++}
++EXPORT_SYMBOL(drm_exit);
++
++/** File operations structure */
++static const struct file_operations drm_stub_fops = {
++      .owner = THIS_MODULE,
++      .open = drm_stub_open
++};
++
++static int __init drm_core_init(void)
++{
++      int ret;
++      struct sysinfo si;
++      unsigned long avail_memctl_mem;
++      unsigned long max_memctl_mem;
++
++      idr_init(&drm_minors_idr);
++      si_meminfo(&si);
++
++      /*
++       * AGP only allows low / DMA32 memory ATM.
++       */
++
++      avail_memctl_mem = si.totalram - si.totalhigh;
++
++      /*
++       * Avoid overflows
++       */
++
++      max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
++      max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
++
++      if (avail_memctl_mem >= max_memctl_mem)
++              avail_memctl_mem = max_memctl_mem;
++
++      drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
++
++      ret = -ENOMEM;
++
++      if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
++              goto err_p1;
++
++      drm_class = drm_sysfs_create(THIS_MODULE, "drm");
++      if (IS_ERR(drm_class)) {
++              printk(KERN_ERR "DRM: Error creating drm class.\n");
++              ret = PTR_ERR(drm_class);
++              goto err_p2;
++      }
++
++      drm_proc_root = proc_mkdir("dri", NULL);
++      if (!drm_proc_root) {
++              DRM_ERROR("Cannot create /proc/dri\n");
++              ret = -1;
++              goto err_p3;
++      }
++
++      drm_mem_init();
++
++      DRM_INFO("Initialized %s %d.%d.%d %s\n",
++               CORE_NAME,
++               CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++      return 0;
++err_p3:
++      drm_sysfs_destroy();
++err_p2:
++      unregister_chrdev(DRM_MAJOR, "drm");
++
++      idr_destroy(&drm_minors_idr);
++err_p1:
++      return ret;
++}
++
++static void __exit drm_core_exit(void)
++{
++      remove_proc_entry("dri", NULL);
++      drm_sysfs_destroy();
++
++      unregister_chrdev(DRM_MAJOR, "drm");
++
++      idr_destroy(&drm_minors_idr);
++}
++
++module_init(drm_core_init);
++module_exit(drm_core_exit);
++
++/**
++ * Get version information
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_version structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Fills in the version information in \p arg.
++ */
++static int drm_version(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_version *version = data;
++      int len;
++
++      version->version_major = dev->driver->major;
++      version->version_minor = dev->driver->minor;
++      version->version_patchlevel = dev->driver->patchlevel;
++      DRM_COPY(version->name, dev->driver->name);
++      DRM_COPY(version->date, dev->driver->date);
++      DRM_COPY(version->desc, dev->driver->desc);
++
++      return 0;
++}
++
++/**
++ * Called whenever a process performs an ioctl on /dev/drm.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ *
++ * Looks up the ioctl function in the ::ioctls table, checking for root
++ * previleges if so required, and dispatches to the respective function.
++ *
++ * Copies data in and out according to the size and direction given in cmd,
++ * which must match the ioctl cmd known by the kernel.  The kernel uses a 512
++ * byte stack buffer to store the ioctl arguments in kernel space.  Should we
++ * ever need much larger ioctl arguments, we may need to allocate memory.
++ */
++int drm_ioctl(struct inode *inode, struct file *filp,
++            unsigned int cmd, unsigned long arg)
++{
++      return drm_unlocked_ioctl(filp, cmd, arg);
++}
++EXPORT_SYMBOL(drm_ioctl);
++
++long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      struct drm_file *file_priv = filp->private_data;
++      struct drm_device *dev = file_priv->minor->dev;
++      struct drm_ioctl_desc *ioctl;
++      drm_ioctl_t *func;
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      int retcode = -EINVAL;
++      char kdata[512];
++
++      atomic_inc(&dev->ioctl_count);
++      atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++      ++file_priv->ioctl_count;
++
++      DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
++                current->pid, cmd, nr, (long)old_encode_dev(file_priv->minor->device),
++                file_priv->authenticated);
++
++      if ((nr >= DRM_CORE_IOCTL_COUNT) &&
++          ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
++              goto err_i1;
++      if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++              && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
++              ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
++      else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
++              ioctl = &drm_ioctls[nr];
++              cmd = ioctl->cmd;
++      } else {
++              retcode = -EINVAL;
++              goto err_i1;
++      }
++#if 0
++      /*
++       * This check is disabled, because driver private ioctl->cmd
++       * are not the ioctl commands with size and direction bits but
++       * just the indices. The DRM core ioctl->cmd are the proper ioctl
++       * commands. The drivers' ioctl tables need to be fixed.
++       */
++      if (ioctl->cmd != cmd) {
++              retcode = -EINVAL;
++              goto err_i1;
++      }
++#endif
++
++      func = ioctl->func;
++      /* is there a local override? */
++      if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
++              func = dev->driver->dma_ioctl;
++
++      if (cmd & IOC_IN) {
++              if (copy_from_user(kdata, (void __user *)arg,
++                                 _IOC_SIZE(cmd)) != 0) {
++                      retcode = -EACCES;
++                      goto err_i1;
++              }
++      }
++
++      if (!func) {
++              DRM_DEBUG("no function\n");
++              retcode = -EINVAL;
++      } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
++                 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
++                 ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
++              retcode = -EACCES;
++      } else {
++              retcode = func(dev, kdata, file_priv);
++      }
++
++      if (cmd & IOC_OUT) {
++              if (copy_to_user((void __user *)arg, kdata,
++                               _IOC_SIZE(cmd)) != 0)
++                      retcode = -EACCES;
++      }
++
++err_i1:
++      atomic_dec(&dev->ioctl_count);
++      if (retcode)
++              DRM_DEBUG("ret = %d\n", retcode);
++      return retcode;
++}
++EXPORT_SYMBOL(drm_unlocked_ioctl);
++
++drm_local_map_t *drm_getsarea(struct drm_device *dev)
++{
++      struct drm_map_list *entry;
++
++      list_for_each_entry(entry, &dev->maplist, head) {
++              if (entry->map && entry->map->type == _DRM_SHM &&
++                  (entry->map->flags & _DRM_CONTAINS_LOCK)) {
++                      return entry->map;
++              }
++      }
++      return NULL;
++}
++EXPORT_SYMBOL(drm_getsarea);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_fence.c git-nokia/drivers/gpu/drm-tungsten/drm_fence.c
+--- git/drivers/gpu/drm-tungsten/drm_fence.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_fence.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,829 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++
++/*
++ * Convenience function to be called by fence::wait methods that
++ * need polling.
++ */
++
++int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
++                         int interruptible, uint32_t mask, 
++                         unsigned long end_jiffies)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      uint32_t count = 0;
++      int ret;
++
++      DECLARE_WAITQUEUE(entry, current);
++      add_wait_queue(&fc->fence_queue, &entry);
++
++      ret = 0;
++      
++      for (;;) {
++              __set_current_state((interruptible) ? 
++                                  TASK_INTERRUPTIBLE :
++                                  TASK_UNINTERRUPTIBLE);
++              if (drm_fence_object_signaled(fence, mask))
++                      break;
++              if (time_after_eq(jiffies, end_jiffies)) {
++                      ret = -EBUSY;
++                      break;
++              }
++              if (lazy)
++                      schedule_timeout(1);
++              else if ((++count & 0x0F) == 0){
++                      __set_current_state(TASK_RUNNING);
++                      schedule();
++                      __set_current_state((interruptible) ? 
++                                          TASK_INTERRUPTIBLE :
++                                          TASK_UNINTERRUPTIBLE);
++              }                       
++              if (interruptible && signal_pending(current)) {
++                      ret = -EAGAIN;
++                      break;
++              }
++      }
++      __set_current_state(TASK_RUNNING);
++      remove_wait_queue(&fc->fence_queue, &entry);
++      return ret;
++}
++EXPORT_SYMBOL(drm_fence_wait_polling);
++
++/*
++ * Typically called by the IRQ handler.
++ */
++
++void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
++                     uint32_t sequence, uint32_t type, uint32_t error)
++{
++      int wake = 0;
++      uint32_t diff;
++      uint32_t relevant_type;
++      uint32_t new_type;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      struct list_head *head;
++      struct drm_fence_object *fence, *next;
++      int found = 0;
++
++      if (list_empty(&fc->ring))
++              return;
++
++      list_for_each_entry(fence, &fc->ring, ring) {
++              diff = (sequence - fence->sequence) & driver->sequence_mask;
++              if (diff > driver->wrap_diff) {
++                      found = 1;
++                      break;
++              }
++      }
++
++      fc->waiting_types &= ~type;
++      head = (found) ? &fence->ring : &fc->ring;
++
++      list_for_each_entry_safe_reverse(fence, next, head, ring) {
++              if (&fence->ring == &fc->ring)
++                      break;
++
++              if (error) {
++                      fence->error = error;
++                      fence->signaled_types = fence->type;
++                      list_del_init(&fence->ring);
++                      wake = 1;
++                      break;
++              }
++
++              if (type & DRM_FENCE_TYPE_EXE)
++                      type |= fence->native_types;
++
++              relevant_type = type & fence->type;
++              new_type = (fence->signaled_types | relevant_type) ^
++                      fence->signaled_types;
++
++              if (new_type) {
++                      fence->signaled_types |= new_type;
++                      DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
++                                fence->base.hash.key, fence->signaled_types);
++
++                      if (driver->needed_flush)
++                              fc->pending_flush |= driver->needed_flush(fence);
++
++                      if (new_type & fence->waiting_types)
++                              wake = 1;
++              }
++
++              fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
++
++              if (!(fence->type & ~fence->signaled_types)) {
++                      DRM_DEBUG("Fence completely signaled 0x%08lx\n",
++                                fence->base.hash.key);
++                      list_del_init(&fence->ring);
++              }
++      }
++
++      /*
++       * Reinstate lost waiting types.
++       */
++
++      if ((fc->waiting_types & type) != type) {
++              head = head->prev;
++              list_for_each_entry(fence, head, ring) {
++                      if (&fence->ring == &fc->ring)
++                              break;
++                      diff = (fc->highest_waiting_sequence - fence->sequence) &
++                              driver->sequence_mask;
++                      if (diff > driver->wrap_diff)
++                              break;
++                      
++                      fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
++              }
++      }
++
++      if (wake) 
++              wake_up_all(&fc->fence_queue);
++}
++EXPORT_SYMBOL(drm_fence_handler);
++
++static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      unsigned long flags;
++
++      write_lock_irqsave(&fm->lock, flags);
++      list_del_init(ring);
++      write_unlock_irqrestore(&fm->lock, flags);
++}
++
++void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
++{
++      struct drm_fence_object *tmp_fence = *fence;
++      struct drm_device *dev = tmp_fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++      *fence = NULL;
++      if (atomic_dec_and_test(&tmp_fence->usage)) {
++              drm_fence_unring(dev, &tmp_fence->ring);
++              DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
++                        tmp_fence->base.hash.key);
++              atomic_dec(&fm->count);
++              BUG_ON(!list_empty(&tmp_fence->base.list));
++              drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
++      }
++}
++EXPORT_SYMBOL(drm_fence_usage_deref_locked);
++
++void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
++{
++      struct drm_fence_object *tmp_fence = *fence;
++      struct drm_device *dev = tmp_fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      *fence = NULL;
++      if (atomic_dec_and_test(&tmp_fence->usage)) {
++              mutex_lock(&dev->struct_mutex);
++              if (atomic_read(&tmp_fence->usage) == 0) {
++                      drm_fence_unring(dev, &tmp_fence->ring);
++                      atomic_dec(&fm->count);
++                      BUG_ON(!list_empty(&tmp_fence->base.list));
++                      drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
++              }
++              mutex_unlock(&dev->struct_mutex);
++      }
++}
++EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
++
++struct drm_fence_object
++*drm_fence_reference_locked(struct drm_fence_object *src)
++{
++      DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
++
++      atomic_inc(&src->usage);
++      return src;
++}
++
++void drm_fence_reference_unlocked(struct drm_fence_object **dst,
++                                struct drm_fence_object *src)
++{
++      mutex_lock(&src->dev->struct_mutex);
++      *dst = src;
++      atomic_inc(&src->usage);
++      mutex_unlock(&src->dev->struct_mutex);
++}
++EXPORT_SYMBOL(drm_fence_reference_unlocked);
++
++static void drm_fence_object_destroy(struct drm_file *priv,
++                                   struct drm_user_object *base)
++{
++      struct drm_fence_object *fence =
++          drm_user_object_entry(base, struct drm_fence_object, base);
++
++      drm_fence_usage_deref_locked(&fence);
++}
++
++int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
++{
++      unsigned long flags;
++      int signaled;
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      
++      mask &= fence->type;
++      read_lock_irqsave(&fm->lock, flags);
++      signaled = (mask & fence->signaled_types) == mask;
++      read_unlock_irqrestore(&fm->lock, flags);
++      if (!signaled && driver->poll) {
++              write_lock_irqsave(&fm->lock, flags);
++              driver->poll(dev, fence->fence_class, mask);
++              signaled = (mask & fence->signaled_types) == mask;
++              write_unlock_irqrestore(&fm->lock, flags);
++      }
++      return signaled;
++}
++EXPORT_SYMBOL(drm_fence_object_signaled);
++
++
++int drm_fence_object_flush(struct drm_fence_object *fence,
++                         uint32_t type)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      unsigned long irq_flags;
++      uint32_t saved_pending_flush;
++      uint32_t diff;
++      int call_flush;
++
++      if (type & ~fence->type) {
++              DRM_ERROR("Flush trying to extend fence type, "
++                        "0x%x, 0x%x\n", type, fence->type);
++              return -EINVAL;
++      }
++
++      write_lock_irqsave(&fm->lock, irq_flags);
++      fence->waiting_types |= type;
++      fc->waiting_types |= fence->waiting_types;
++      diff = (fence->sequence - fc->highest_waiting_sequence) & 
++              driver->sequence_mask;
++
++      if (diff < driver->wrap_diff)
++              fc->highest_waiting_sequence = fence->sequence;
++
++      /*
++       * fence->waiting_types has changed. Determine whether
++       * we need to initiate some kind of flush as a result of this.
++       */
++
++      saved_pending_flush = fc->pending_flush;
++      if (driver->needed_flush) 
++              fc->pending_flush |= driver->needed_flush(fence);
++
++      if (driver->poll)
++              driver->poll(dev, fence->fence_class, fence->waiting_types);
++
++      call_flush = fc->pending_flush;
++      write_unlock_irqrestore(&fm->lock, irq_flags);
++
++      if (call_flush && driver->flush)
++              driver->flush(dev, fence->fence_class);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_flush);
++
++/*
++ * Make sure old fence objects are signaled before their fence sequences are
++ * wrapped around and reused.
++ */
++
++void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
++                       uint32_t sequence)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
++      struct drm_fence_object *fence;
++      unsigned long irq_flags;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      int call_flush;
++
++      uint32_t diff;
++
++      write_lock_irqsave(&fm->lock, irq_flags);
++
++      list_for_each_entry_reverse(fence, &fc->ring, ring) {
++              diff = (sequence - fence->sequence) & driver->sequence_mask;
++              if (diff <= driver->flush_diff)
++                      break;
++      
++              fence->waiting_types = fence->type;
++              fc->waiting_types |= fence->type;
++
++              if (driver->needed_flush)
++                      fc->pending_flush |= driver->needed_flush(fence);
++      }       
++      
++      if (driver->poll)
++              driver->poll(dev, fence_class, fc->waiting_types);
++
++      call_flush = fc->pending_flush;
++      write_unlock_irqrestore(&fm->lock, irq_flags);
++
++      /* Use the fence_class argument here: if the loop above ran to
++       * completion, "fence" is the invalid container_of() of the list
++       * head and must not be dereferenced. */
++      if (call_flush && driver->flush)
++              driver->flush(dev, fence_class);
++
++      /*
++       * FIXME: Should we implement a wait here for really old fences?
++       */
++
++}
++EXPORT_SYMBOL(drm_fence_flush_old);
++
++int drm_fence_object_wait(struct drm_fence_object *fence,
++                        int lazy, int ignore_signals, uint32_t mask)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      int ret = 0;
++      unsigned long _end = 3 * DRM_HZ;
++
++      if (mask & ~fence->type) {
++              DRM_ERROR("Wait trying to extend fence type"
++                        " 0x%08x 0x%08x\n", mask, fence->type);
++              BUG();
++              return -EINVAL;
++      }
++
++      if (driver->wait)
++              return driver->wait(fence, lazy, !ignore_signals, mask);
++
++
++      drm_fence_object_flush(fence, mask);
++      if (driver->has_irq(dev, fence->fence_class, mask)) {
++              if (!ignore_signals)
++                      ret = wait_event_interruptible_timeout
++                              (fc->fence_queue, 
++                               drm_fence_object_signaled(fence, mask), 
++                               3 * DRM_HZ);
++              else 
++                      ret = wait_event_timeout
++                              (fc->fence_queue, 
++                               drm_fence_object_signaled(fence, mask), 
++                               3 * DRM_HZ);
++
++              if (unlikely(ret == -ERESTARTSYS))
++                      return -EAGAIN;
++
++              if (unlikely(ret == 0))
++                      return -EBUSY;
++
++              return 0;
++      }
++
++      return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
++                                    _end);
++}
++EXPORT_SYMBOL(drm_fence_object_wait);
++
++
++
++int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
++                        uint32_t fence_class, uint32_t type)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_driver *driver = dev->driver->fence_driver;
++      struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
++      unsigned long flags;
++      uint32_t sequence;
++      uint32_t native_types;
++      int ret;
++
++      drm_fence_unring(dev, &fence->ring);
++      ret = driver->emit(dev, fence_class, fence_flags, &sequence,
++                         &native_types);
++      if (ret)
++              return ret;
++
++      write_lock_irqsave(&fm->lock, flags);
++      fence->fence_class = fence_class;
++      fence->type = type;
++      fence->waiting_types = 0;
++      fence->signaled_types = 0;
++      fence->error = 0;
++      fence->sequence = sequence;
++      fence->native_types = native_types;
++      if (list_empty(&fc->ring))
++              fc->highest_waiting_sequence = sequence - 1;
++      list_add_tail(&fence->ring, &fc->ring);
++      fc->latest_queued_sequence = sequence;
++      write_unlock_irqrestore(&fm->lock, flags);
++      return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_emit);
++
++static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
++                               uint32_t type,
++                               uint32_t fence_flags,
++                               struct drm_fence_object *fence)
++{
++      int ret = 0;
++      unsigned long flags;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      mutex_lock(&dev->struct_mutex);
++      atomic_set(&fence->usage, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      write_lock_irqsave(&fm->lock, flags);
++      INIT_LIST_HEAD(&fence->ring);
++
++      /*
++       *  Avoid hitting BUG() for kernel-only fence objects.
++       */
++
++      INIT_LIST_HEAD(&fence->base.list);
++      fence->fence_class = fence_class;
++      fence->type = type;
++      fence->signaled_types = 0;
++      fence->waiting_types = 0;
++      fence->sequence = 0;
++      fence->error = 0;
++      fence->dev = dev;
++      write_unlock_irqrestore(&fm->lock, flags);
++      if (fence_flags & DRM_FENCE_FLAG_EMIT) {
++              ret = drm_fence_object_emit(fence, fence_flags,
++                                          fence->fence_class, type);
++      }
++      return ret;
++}
++
++int drm_fence_add_user_object(struct drm_file *priv,
++                            struct drm_fence_object *fence, int shareable)
++{
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_add_user_object(priv, &fence->base, shareable);
++      if (ret)
++              goto out;
++      atomic_inc(&fence->usage);
++      fence->base.type = drm_fence_type;
++      fence->base.remove = &drm_fence_object_destroy;
++      DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
++out:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++EXPORT_SYMBOL(drm_fence_add_user_object);
++
++int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
++                          uint32_t type, unsigned flags,
++                          struct drm_fence_object **c_fence)
++{
++      struct drm_fence_object *fence;
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++
++      fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
++      if (!fence) {
++              DRM_ERROR("Out of memory creating fence object\n");
++              return -ENOMEM;
++      }
++      ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
++      if (ret) {
++              drm_fence_usage_deref_unlocked(&fence);
++              return ret;
++      }
++      *c_fence = fence;
++      atomic_inc(&fm->count);
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_fence_object_create);
++
++void drm_fence_manager_init(struct drm_device *dev)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fence_class;
++      struct drm_fence_driver *fed = dev->driver->fence_driver;
++      int i;
++      unsigned long flags;
++
++      rwlock_init(&fm->lock);
++      write_lock_irqsave(&fm->lock, flags);
++      fm->initialized = 0;
++      if (!fed)
++          goto out_unlock;
++
++      fm->initialized = 1;
++      fm->num_classes = fed->num_classes;
++      BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
++
++      for (i = 0; i < fm->num_classes; ++i) {
++          fence_class = &fm->fence_class[i];
++
++          memset(fence_class, 0, sizeof(*fence_class));
++          INIT_LIST_HEAD(&fence_class->ring);
++          DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
++      }
++
++      atomic_set(&fm->count, 0);
++ out_unlock:
++      write_unlock_irqrestore(&fm->lock, flags);
++}
++
++void drm_fence_fill_arg(struct drm_fence_object *fence,
++                      struct drm_fence_arg *arg)
++{
++      struct drm_device *dev = fence->dev;
++      struct drm_fence_manager *fm = &dev->fm;
++      unsigned long irq_flags;
++
++      read_lock_irqsave(&fm->lock, irq_flags);
++      arg->handle = fence->base.hash.key;
++      arg->fence_class = fence->fence_class;
++      arg->type = fence->type;
++      arg->signaled = fence->signaled_types;
++      arg->error = fence->error;
++      arg->sequence = fence->sequence;
++      read_unlock_irqrestore(&fm->lock, irq_flags);
++}
++EXPORT_SYMBOL(drm_fence_fill_arg);
++
++void drm_fence_manager_takedown(struct drm_device *dev)
++{
++}
++
++struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
++                                               uint32_t handle)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_user_object *uo;
++      struct drm_fence_object *fence;
++
++      mutex_lock(&dev->struct_mutex);
++      uo = drm_lookup_user_object(priv, handle);
++      if (!uo || (uo->type != drm_fence_type)) {
++              mutex_unlock(&dev->struct_mutex);
++              return NULL;
++      }
++      fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
++      mutex_unlock(&dev->struct_mutex);
++      return fence;
++}
++
++int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      if (arg->flags & DRM_FENCE_FLAG_EMIT)
++              LOCK_TEST_WITH_RETURN(dev, file_priv);
++      ret = drm_fence_object_create(dev, arg->fence_class,
++                                    arg->type, arg->flags, &fence);
++      if (ret)
++              return ret;
++      ret = drm_fence_add_user_object(file_priv, fence,
++                                      arg->flags &
++                                      DRM_FENCE_FLAG_SHAREABLE);
++      if (ret) {
++              drm_fence_usage_deref_unlocked(&fence);
++              return ret;
++      }
++
++      /*
++       * usage > 0. No need to lock dev->struct_mutex;
++       */
++
++      arg->handle = fence->base.hash.key;
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      struct drm_user_object *uo;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
++      if (ret)
++              return ret;
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++
++int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
++}
++
++int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++      ret = drm_fence_object_flush(fence, arg->type);
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++
++int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++      ret = drm_fence_object_wait(fence,
++                                  arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
++                                  0, arg->type);
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++
++int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      fence = drm_lookup_fence_object(file_priv, arg->handle);
++      if (!fence)
++              return -EINVAL;
++      ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
++                                  arg->type);
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
++
++int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      int ret;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_arg *arg = data;
++      struct drm_fence_object *fence;
++      ret = 0;
++
++      if (!fm->initialized) {
++              DRM_ERROR("The DRM driver does not support fencing.\n");
++              return -EINVAL;
++      }
++
++      if (!dev->bm.initialized) {
++              DRM_ERROR("Buffer object manager is not initialized\n");
++              return -EINVAL;
++      }
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
++                                     NULL, &fence);
++      if (ret)
++              return ret;
++
++      if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
++              ret = drm_fence_add_user_object(file_priv, fence,
++                                              arg->flags &
++                                              DRM_FENCE_FLAG_SHAREABLE);
++              if (ret)
++                      return ret;
++      }
++
++      arg->handle = fence->base.hash.key;
++
++      drm_fence_fill_arg(fence, arg);
++      drm_fence_usage_deref_unlocked(&fence);
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_fops.c git-nokia/drivers/gpu/drm-tungsten/drm_fops.c
+--- git/drivers/gpu/drm-tungsten/drm_fops.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_fops.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,532 @@
++/**
++ * \file drm_fops.c
++ * File operations for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Daryll Strauss <daryll@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_sarea.h"
++#include <linux/poll.h>
++
++static int drm_open_helper(struct inode *inode, struct file *filp,
++                         struct drm_device * dev);
++
++static int drm_setup(struct drm_device * dev)
++{
++      drm_local_map_t *map;
++      int i;
++      int ret;
++      int sareapage;
++
++      if (dev->driver->firstopen) {
++              ret = dev->driver->firstopen(dev);
++              if (ret != 0)
++                      return ret;
++      }
++
++      dev->magicfree.next = NULL;
++
++      /* prebuild the SAREA */
++      sareapage = max(SAREA_MAX, PAGE_SIZE);
++      i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
++      if (i != 0)
++              return i;
++
++      atomic_set(&dev->ioctl_count, 0);
++      atomic_set(&dev->vma_count, 0);
++      dev->buf_use = 0;
++      atomic_set(&dev->buf_alloc, 0);
++
++      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
++              i = drm_dma_setup(dev);
++              if (i < 0)
++                      return i;
++      }
++
++      for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
++              atomic_set(&dev->counts[i], 0);
++
++      drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
++      INIT_LIST_HEAD(&dev->magicfree);
++
++      dev->sigdata.lock = NULL;
++      init_waitqueue_head(&dev->lock.lock_queue);
++      dev->queue_count = 0;
++      dev->queue_reserved = 0;
++      dev->queue_slots = 0;
++      dev->queuelist = NULL;
++      dev->context_flag = 0;
++      dev->interrupt_flag = 0;
++      dev->dma_flag = 0;
++      dev->last_context = 0;
++      dev->last_switch = 0;
++      dev->last_checked = 0;
++      init_waitqueue_head(&dev->context_wait);
++      dev->if_version = 0;
++
++      dev->ctx_start = 0;
++      dev->lck_start = 0;
++
++      dev->buf_async = NULL;
++      init_waitqueue_head(&dev->buf_readers);
++      init_waitqueue_head(&dev->buf_writers);
++
++      DRM_DEBUG("\n");
++
++      /*
++       * The kernel's context could be created here, but is now created
++       * in drm_dma_enqueue.  This is more resource-efficient for
++       * hardware that does not do DMA, but may mean that
++       * drm_select_queue fails between the time the interrupt is
++       * initialized and the time the queues are initialized.
++       */
++
++      return 0;
++}
++
++/**
++ * Open file.
++ *
++ * \param inode device inode
++ * \param filp file pointer.
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches the DRM device with the same minor number, calls open_helper(), and
++ * increments the device open count. If the open count was previous at zero,
++ * i.e., it's the first that the device is open, then calls setup().
++ */
++int drm_open(struct inode *inode, struct file *filp)
++{
++      struct drm_device *dev = NULL;
++      int minor_id = iminor(inode);
++      struct drm_minor *minor;
++      int retcode = 0;
++
++      minor = idr_find(&drm_minors_idr, minor_id);
++      if (!minor)
++              return -ENODEV;
++
++      if (!(dev = minor->dev))
++              return -ENODEV;
++
++      retcode = drm_open_helper(inode, filp, dev);
++      if (!retcode) {
++              atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
++              spin_lock(&dev->count_lock);
++              if (!dev->open_count++) {
++                      spin_unlock(&dev->count_lock);
++                      retcode = drm_setup(dev);
++                      goto out;
++              }
++              spin_unlock(&dev->count_lock);
++      }
++
++out:
++      mutex_lock(&dev->struct_mutex);
++      BUG_ON((dev->dev_mapping != NULL) &&
++             (dev->dev_mapping != inode->i_mapping));
++      if (dev->dev_mapping == NULL)
++              dev->dev_mapping = inode->i_mapping;
++      mutex_unlock(&dev->struct_mutex);
++
++      return retcode;
++}
++EXPORT_SYMBOL(drm_open);
++
++/**
++ * File \c open operation.
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ *
++ * Puts the dev->fops corresponding to the device minor number into
++ * \p filp, call the \c open method, and restore the file operations.
++ */
++int drm_stub_open(struct inode *inode, struct file *filp)
++{
++      struct drm_device *dev = NULL;
++      struct drm_minor *minor;
++      int minor_id = iminor(inode);
++      int err = -ENODEV;
++      const struct file_operations *old_fops;
++
++      DRM_DEBUG("\n");
++
++      minor = idr_find(&drm_minors_idr, minor_id);
++      if (!minor)
++              return -ENODEV;
++      
++      if (!(dev = minor->dev))
++              return -ENODEV;
++
++      old_fops = filp->f_op;
++      filp->f_op = fops_get(&dev->driver->fops);
++      if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
++              fops_put(filp->f_op);
++              filp->f_op = fops_get(old_fops);
++      }
++      fops_put(old_fops);
++
++      return err;
++}
++
++/**
++ * Check whether DRI will run on this CPU.
++ *
++ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
++ */
++static int drm_cpu_valid(void)
++{
++#if defined(__i386__)
++      if (boot_cpu_data.x86 == 3)
++              return 0;       /* No cmpxchg on a 386 */
++#endif
++#if defined(__sparc__) && !defined(__sparc_v9__)
++      return 0;               /* No cmpxchg before v9 sparc. */
++#endif
++      return 1;
++}
++
++/**
++ * Called whenever a process opens /dev/drm.
++ *
++ * \param inode device inode.
++ * \param filp file pointer.
++ * \param dev device.
++ * \return zero on success or a negative number on failure.
++ *
++ * Creates and initializes a drm_file structure for the file private data in \p
++ * filp and add it into the double linked list in \p dev.
++ */
++static int drm_open_helper(struct inode *inode, struct file *filp,
++                         struct drm_device * dev)
++{
++      int minor_id = iminor(inode);
++      struct drm_file *priv;
++      int ret;
++      int i, j;
++
++      if (filp->f_flags & O_EXCL)
++              return -EBUSY;  /* No exclusive opens */
++      if (!drm_cpu_valid())
++              return -EINVAL;
++
++      DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor_id);
++
++      priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
++      if (!priv)
++              return -ENOMEM;
++
++      memset(priv, 0, sizeof(*priv));
++      filp->private_data = priv;
++      priv->filp = filp;
++      priv->uid = current->euid;
++      priv->pid = current->pid;
++      priv->minor = idr_find(&drm_minors_idr, minor_id);
++      priv->ioctl_count = 0;
++      /* for compatibility root is always authenticated */
++      priv->authenticated = capable(CAP_SYS_ADMIN);
++      priv->lock_count = 0;
++
++      INIT_LIST_HEAD(&priv->lhead);
++      INIT_LIST_HEAD(&priv->refd_objects);
++
++      for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
++              ret = drm_ht_create(&priv->refd_object_hash[i],
++                                  DRM_FILE_HASH_ORDER);
++              if (ret)
++                      break;
++      }
++
++      if (ret) {
++              for (j = 0; j < i; ++j)
++                      drm_ht_remove(&priv->refd_object_hash[j]);
++              goto out_free;
++      }
++
++      if (dev->driver->driver_features & DRIVER_GEM)
++              drm_gem_open(dev, priv);
++
++      if (dev->driver->open) {
++              ret = dev->driver->open(dev, priv);
++              if (ret < 0)
++                      goto out_free;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      if (list_empty(&dev->filelist))
++              priv->master = 1;
++
++      list_add(&priv->lhead, &dev->filelist);
++      mutex_unlock(&dev->struct_mutex);
++
++#ifdef __alpha__
++      /*
++       * Default the hose
++       */
++      if (!dev->hose) {
++              struct pci_dev *pci_dev;
++              pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
++              if (pci_dev) {
++                      dev->hose = pci_dev->sysdata;
++                      pci_dev_put(pci_dev);
++              }
++              if (!dev->hose) {
++                      struct pci_bus *b = pci_bus_b(pci_root_buses.next);
++                      if (b)
++                              dev->hose = b->sysdata;
++              }
++      }
++#endif
++
++      return 0;
++      out_free:
++      drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
++      filp->private_data = NULL;
++      return ret;
++}
++
++/** No-op. */
++int drm_fasync(int fd, struct file *filp, int on)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      int retcode;
++
++      DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
++                (long)old_encode_dev(priv->minor->device));
++      retcode = fasync_helper(fd, filp, on, &dev->buf_async);
++      if (retcode < 0)
++              return retcode;
++      return 0;
++}
++EXPORT_SYMBOL(drm_fasync);
++
++static void drm_object_release(struct file *filp)
++{
++      struct drm_file *priv = filp->private_data;
++      struct list_head *head;
++      struct drm_ref_object *ref_object;
++      int i;
++
++      /*
++       * Free leftover ref objects created by me. Note that we cannot use
++       * list_for_each() here, as the struct_mutex may be temporarily
++       * released by the remove_() functions, and thus the lists may be
++       * altered.
++       * Also, a drm_remove_ref_object() will not remove it
++       * from the list unless its refcount is 1.
++       */
++
++      head = &priv->refd_objects;
++      while (head->next != head) {
++              ref_object = list_entry(head->next, struct drm_ref_object, list);
++              drm_remove_ref_object(priv, ref_object);
++              head = &priv->refd_objects;
++      }
++
++      for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
++              drm_ht_remove(&priv->refd_object_hash[i]);
++}
++
++/**
++ * Release file.
++ *
++ * \param inode device inode
++ * \param file_priv DRM file private.
++ * \return zero on success or a negative number on failure.
++ *
++ * If the hardware lock is held then free it, and take it again for the kernel
++ * context since it's necessary to reclaim buffers. Unlink the file private
++ * data from its list and free it. Decreases the open count and if it reaches
++ * zero calls drm_lastclose().
++ */
++int drm_release(struct inode *inode, struct file *filp)
++{
++      struct drm_file *file_priv = filp->private_data;
++      struct drm_device *dev = file_priv->minor->dev;
++      int retcode = 0;
++
++      lock_kernel();
++
++      DRM_DEBUG("open_count = %d\n", dev->open_count);
++
++      if (dev->driver->preclose)
++              dev->driver->preclose(dev, file_priv);
++
++      /* ========================================================
++       * Begin inline drm_release
++       */
++
++      DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
++                current->pid, (long)old_encode_dev(file_priv->minor->device),
++                dev->open_count);
++
++      if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
++              if (drm_i_have_hw_lock(dev, file_priv)) {
++                      dev->driver->reclaim_buffers_locked(dev, file_priv);
++              } else {
++                      unsigned long _end=jiffies + 3*DRM_HZ;
++                      int locked = 0;
++
++                      drm_idlelock_take(&dev->lock);
++
++                      /*
++                       * Wait for a while.
++                       */
++
++                      do{
++                              spin_lock_bh(&dev->lock.spinlock);
++                              locked = dev->lock.idle_has_lock;
++                              spin_unlock_bh(&dev->lock.spinlock);
++                              if (locked)
++                                      break;
++                              schedule();
++                      } while (!time_after_eq(jiffies, _end));
++
++                      if (!locked) {
++                              DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
++                                        "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
++                                        "\tI will go on reclaiming the buffers anyway.\n");
++                      }
++
++                      dev->driver->reclaim_buffers_locked(dev, file_priv);
++                      drm_idlelock_release(&dev->lock);
++              }
++      }
++
++      if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
++
++              drm_idlelock_take(&dev->lock);
++              dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
++              drm_idlelock_release(&dev->lock);
++
++      }
++
++      if (drm_i_have_hw_lock(dev, file_priv)) {
++              DRM_DEBUG("File %p released, freeing lock for context %d\n",
++                        filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
++
++              drm_lock_free(&dev->lock,
++                            _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
++      }
++
++
++      if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
++          !dev->driver->reclaim_buffers_locked) {
++              dev->driver->reclaim_buffers(dev, file_priv);
++      }
++
++      if (dev->driver->driver_features & DRIVER_GEM)
++              drm_gem_release(dev, file_priv);
++
++      drm_fasync(-1, filp, 0);
++
++      mutex_lock(&dev->ctxlist_mutex);
++
++      if (!list_empty(&dev->ctxlist)) {
++              struct drm_ctx_list *pos, *n;
++
++              list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
++                      if (pos->tag == file_priv &&
++                          pos->handle != DRM_KERNEL_CONTEXT) {
++                              if (dev->driver->context_dtor)
++                                      dev->driver->context_dtor(dev,
++                                                                pos->handle);
++
++                              drm_ctxbitmap_free(dev, pos->handle);
++
++                              list_del(&pos->head);
++                              drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
++                              --dev->ctx_count;
++                      }
++              }
++      }
++      mutex_unlock(&dev->ctxlist_mutex);
++
++      mutex_lock(&dev->struct_mutex);
++      drm_object_release(filp);
++      if (file_priv->remove_auth_on_close == 1) {
++              struct drm_file *temp;
++
++              list_for_each_entry(temp, &dev->filelist, lhead)
++                      temp->authenticated = 0;
++      }
++      list_del(&file_priv->lhead);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (dev->driver->postclose)
++              dev->driver->postclose(dev, file_priv);
++      drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
++
++      /* ========================================================
++       * End inline drm_release
++       */
++
++      atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
++      spin_lock(&dev->count_lock);
++      if (!--dev->open_count) {
++              if (atomic_read(&dev->ioctl_count) || dev->blocked) {
++                      DRM_ERROR("Device busy: %d %d\n",
++                                atomic_read(&dev->ioctl_count), dev->blocked);
++                      spin_unlock(&dev->count_lock);
++                      unlock_kernel();
++                      return -EBUSY;
++              }
++              spin_unlock(&dev->count_lock);
++              unlock_kernel();
++              return drm_lastclose(dev);
++      }
++      spin_unlock(&dev->count_lock);
++
++      unlock_kernel();
++
++      return retcode;
++}
++EXPORT_SYMBOL(drm_release);
++
++/** No-op. */
++/* This is to deal with older X servers that believe 0 means data is
++ * available which is not the correct return for a poll function.
++ * This cannot be fixed until the Xserver is fixed. Xserver will need
++ * to set a newer interface version to avoid breaking older Xservers.
++ * Without fixing the Xserver you get: "WaitForSomething(): select: errno=22"
++ * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try
++ * to return the correct response.
++ */
++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
++{
++      /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */
++      return 0;
++}
++EXPORT_SYMBOL(drm_poll);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_gem.c git-nokia/drivers/gpu/drm-tungsten/drm_gem.c
+--- git/drivers/gpu/drm-tungsten/drm_gem.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_gem.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,444 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include <linux/version.h>
++
++#include "drmP.h"
++
++#if OS_HAS_GEM
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <linux/mm.h>
++#include <linux/uaccess.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/pagemap.h>
++
++/** @file drm_gem.c
++ *
++ * This file provides some of the base ioctls and library routines for
++ * the graphics memory manager implemented by each device driver.
++ *
++ * Because various devices have different requirements in terms of
++ * synchronization and migration strategies, implementing that is left up to
++ * the driver, and all that the general API provides should be generic --
++ * allocating objects, reading/writing data with the cpu, freeing objects.
++ * Even there, platform-dependent optimizations for reading/writing data with
++ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
++ * the DRI2 implementation wants to have at least allocate/mmap be generic.
++ *
++ * The goal was to have swap-backed object allocation managed through
++ * struct file.  However, file descriptors as handles to a struct file have
++ * two major failings:
++ * - Process limits prevent more than 1024 or so being used at a time by
++ *   default.
++ * - Inability to allocate high fds will aggravate the X Server's select()
++ *   handling, and likely that of many GL client applications as well.
++ *
++ * This led to a plan of using our own integer IDs (called handles, following
++ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
++ * ioctls.  The objects themselves will still include the struct file so
++ * that we can transition to fds if the required kernel infrastructure shows
++ * up at a later date, and as our interface with shmfs for memory allocation.
++ */
++
++/**
++ * Initialize the GEM device fields
++ */
++
++int
++drm_gem_init(struct drm_device *dev)
++{
++      spin_lock_init(&dev->object_name_lock);
++      idr_init(&dev->object_name_idr);
++      atomic_set(&dev->object_count, 0);
++      atomic_set(&dev->object_memory, 0);
++      atomic_set(&dev->pin_count, 0);
++      atomic_set(&dev->pin_memory, 0);
++      atomic_set(&dev->gtt_count, 0);
++      atomic_set(&dev->gtt_memory, 0);
++      return 0;
++}
++
++/**
++ * Allocate a GEM object of the specified size with shmfs backing store
++ */
++struct drm_gem_object *
++drm_gem_object_alloc(struct drm_device *dev, size_t size)
++{
++      struct drm_gem_object *obj;
++
++      BUG_ON((size & (PAGE_SIZE - 1)) != 0);
++
++      obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
++
++      obj->dev = dev;
++      obj->filp = shmem_file_setup("drm mm object", size, 0);
++      if (IS_ERR(obj->filp)) {
++              kfree(obj);
++              return NULL;
++      }
++
++      kref_init(&obj->refcount);
++      kref_init(&obj->handlecount);
++      obj->size = size;
++      if (dev->driver->gem_init_object != NULL &&
++          dev->driver->gem_init_object(obj) != 0) {
++              fput(obj->filp);
++              kfree(obj);
++              return NULL;
++      }
++      atomic_inc(&dev->object_count);
++      atomic_add(obj->size, &dev->object_memory);
++      return obj;
++}
++EXPORT_SYMBOL(drm_gem_object_alloc);
++
++/**
++ * Removes the mapping from handle to filp for this object.
++ */
++static int
++drm_gem_handle_delete(struct drm_file *filp, int handle)
++{
++      struct drm_device *dev;
++      struct drm_gem_object *obj;
++
++      /* This is gross. The idr system doesn't let us try a delete and
++       * return an error code.  It just spews if you fail at deleting.
++       * So, we have to grab a lock around finding the object and then
++       * doing the delete on it and dropping the refcount, or the user
++       * could race us to double-decrement the refcount and cause a
++       * use-after-free later.  Given the frequency of our handle lookups,
++       * we may want to use ida for number allocation and a hash table
++       * for the pointers, anyway.
++       */
++      spin_lock(&filp->table_lock);
++
++      /* Check if we currently have a reference on the object */
++      obj = idr_find(&filp->object_idr, handle);
++      if (obj == NULL) {
++              spin_unlock(&filp->table_lock);
++              return -EINVAL;
++      }
++      dev = obj->dev;
++
++      /* Release reference and decrement refcount. */
++      idr_remove(&filp->object_idr, handle);
++      spin_unlock(&filp->table_lock);
++
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_handle_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Create a handle for this object. This adds a handle reference
++ * to the object, which includes a regular reference count. Callers
++ * will likely want to dereference the object afterwards.
++ */
++int
++drm_gem_handle_create(struct drm_file *file_priv,
++                     struct drm_gem_object *obj,
++                     int *handlep)
++{
++      int     ret;
++
++      /*
++       * Get the user-visible handle using idr.
++       */
++again:
++      /* ensure there is space available to allocate a handle */
++      if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
++              return -ENOMEM;
++
++      /* do the allocation under our spinlock */
++      spin_lock(&file_priv->table_lock);
++      ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
++      spin_unlock(&file_priv->table_lock);
++      if (ret == -EAGAIN)
++              goto again;
++
++      if (ret != 0)
++              return ret;
++
++      drm_gem_object_handle_reference(obj);
++      return 0;
++}
++EXPORT_SYMBOL(drm_gem_handle_create);
++
++/** Returns a reference to the object named by the handle. */
++struct drm_gem_object *
++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
++                    int handle)
++{
++      struct drm_gem_object *obj;
++
++      spin_lock(&filp->table_lock);
++
++      /* Check if we currently have a reference on the object */
++      obj = idr_find(&filp->object_idr, handle);
++      if (obj == NULL) {
++              spin_unlock(&filp->table_lock);
++              return NULL;
++      }
++
++      drm_gem_object_reference(obj);
++
++      spin_unlock(&filp->table_lock);
++
++      return obj;
++}
++EXPORT_SYMBOL(drm_gem_object_lookup);
++
++/**
++ * Releases the handle to an mm object.
++ */
++int
++drm_gem_close_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_gem_close *args = data;
++      int ret;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      ret = drm_gem_handle_delete(file_priv, args->handle);
++
++      return ret;
++}
++
++/**
++ * Create a global name for an object, returning the name.
++ *
++ * Note that the name does not hold a reference; when the object
++ * is freed, the name goes away.
++ */
++int
++drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_gem_flink *args = data;
++      struct drm_gem_object *obj;
++      int ret;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EINVAL;
++
++again:
++      if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
++              return -ENOMEM;
++
++      spin_lock(&dev->object_name_lock);
++      if (obj->name) {
++              spin_unlock(&dev->object_name_lock);
++              return -EEXIST;
++      }
++      ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
++                               &obj->name);
++      spin_unlock(&dev->object_name_lock);
++      if (ret == -EAGAIN)
++              goto again;
++
++      if (ret != 0) {
++              mutex_lock(&dev->struct_mutex);
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      /*
++       * Leave the reference from the lookup around as the
++       * name table now holds one
++       */
++      args->name = (uint64_t) obj->name;
++
++      return 0;
++}
++
++/**
++ * Open an object using the global name, returning a handle and the size.
++ *
++ * This handle (of course) holds a reference to the object, so the object
++ * will not go away until the handle is deleted.
++ */
++int
++drm_gem_open_ioctl(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_gem_open *args = data;
++      struct drm_gem_object *obj;
++      int ret;
++      int handle;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      spin_lock(&dev->object_name_lock);
++      obj = idr_find(&dev->object_name_idr, (int) args->name);
++      if (obj)
++              drm_gem_object_reference(obj);
++      spin_unlock(&dev->object_name_lock);
++      if (!obj)
++              return -ENOENT;
++
++      ret = drm_gem_handle_create(file_priv, obj, &handle);
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret)
++              return ret;
++
++      args->handle = handle;
++      args->size = obj->size;
++
++      return 0;
++}
++
++/**
++ * Called at device open time, sets up the structure for handling refcounting
++ * of mm objects.
++ */
++void
++drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
++{
++      idr_init(&file_private->object_idr);
++      spin_lock_init(&file_private->table_lock);
++}
++
++/**
++ * Called at device close to release the file's
++ * handle references on objects.
++ */
++static int
++drm_gem_object_release_handle(int id, void *ptr, void *data)
++{
++      struct drm_gem_object *obj = ptr;
++
++      drm_gem_object_handle_unreference(obj);
++
++      return 0;
++}
++
++/**
++ * Called at close time when the filp is going away.
++ *
++ * Releases any remaining references on objects by this filp.
++ */
++void
++drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
++{
++      mutex_lock(&dev->struct_mutex);
++      idr_for_each(&file_private->object_idr,
++                   &drm_gem_object_release_handle, NULL);
++
++      idr_destroy(&file_private->object_idr);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Called after the last reference to the object has been lost.
++ *
++ * Frees the object
++ */
++void
++drm_gem_object_free(struct kref *kref)
++{
++      struct drm_gem_object *obj = (struct drm_gem_object *) kref;
++      struct drm_device *dev = obj->dev;
++
++      BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++
++      if (dev->driver->gem_free_object != NULL)
++              dev->driver->gem_free_object(obj);
++
++      fput(obj->filp);
++      atomic_dec(&dev->object_count);
++      atomic_sub(obj->size, &dev->object_memory);
++      kfree(obj);
++}
++EXPORT_SYMBOL(drm_gem_object_free);
++
++/**
++ * Called after the last handle to the object has been closed
++ *
++ * Removes any name for the object. Note that this must be
++ * called before drm_gem_object_free or we'll be touching
++ * freed memory
++ */
++void
++drm_gem_object_handle_free(struct kref *kref)
++{
++      struct drm_gem_object *obj = container_of(kref,
++                                                struct drm_gem_object,
++                                                handlecount);
++      struct drm_device *dev = obj->dev;
++
++      /* Remove any name for this object */
++      spin_lock(&dev->object_name_lock);
++      if (obj->name) {
++              idr_remove(&dev->object_name_idr, obj->name);
++              spin_unlock(&dev->object_name_lock);
++              /*
++               * The object name held a reference to this object, drop
++               * that now.
++               */
++              drm_gem_object_unreference(obj);
++      } else
++              spin_unlock(&dev->object_name_lock);
++
++}
++EXPORT_SYMBOL(drm_gem_object_handle_free);
++
++#else
++
++int drm_gem_init(struct drm_device *dev)
++{
++      return 0;
++}
++
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
++{
++
++}
++
++void
++drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
++{
++
++}
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm.h git-nokia/drivers/gpu/drm-tungsten/drm.h
+--- git/drivers/gpu/drm-tungsten/drm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1145 @@
++/**
++ * \file drm.h
++ * Header for the Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ *
++ * \par Acknowledgments:
++ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/**
++ * \mainpage
++ *
++ * The Direct Rendering Manager (DRM) is a device-independent kernel-level
++ * device driver that provides support for the XFree86 Direct Rendering
++ * Infrastructure (DRI).
++ *
++ * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
++ * ways:
++ *     -# The DRM provides synchronized access to the graphics hardware via
++ *        the use of an optimized two-tiered lock.
++ *     -# The DRM enforces the DRI security policy for access to the graphics
++ *        hardware by only allowing authenticated X11 clients access to
++ *        restricted regions of memory.
++ *     -# The DRM provides a generic DMA engine, complete with multiple
++ *        queues and the ability to detect the need for an OpenGL context
++ *        switch.
++ *     -# The DRM is extensible via the use of small device-specific modules
++ *        that rely extensively on the API exported by the DRM module.
++ *
++ */
++
++#ifndef _DRM_H_
++#define _DRM_H_
++
++#ifndef __user
++#define __user
++#endif
++#ifndef __iomem
++#define __iomem
++#endif
++
++#ifdef __GNUC__
++# define DEPRECATED  __attribute__ ((deprecated))
++#else
++# define DEPRECATED
++#endif
++
++#if defined(__linux__)
++#include <asm/ioctl.h>                /* For _IO* macros */
++#define DRM_IOCTL_NR(n)               _IOC_NR(n)
++#define DRM_IOC_VOID          _IOC_NONE
++#define DRM_IOC_READ          _IOC_READ
++#define DRM_IOC_WRITE         _IOC_WRITE
++#define DRM_IOC_READWRITE     _IOC_READ|_IOC_WRITE
++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
++#include <sys/ioccom.h>
++#define DRM_IOCTL_NR(n)               ((n) & 0xff)
++#define DRM_IOC_VOID          IOC_VOID
++#define DRM_IOC_READ          IOC_OUT
++#define DRM_IOC_WRITE         IOC_IN
++#define DRM_IOC_READWRITE     IOC_INOUT
++#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#endif
++
++#ifdef __OpenBSD__
++#define DRM_MAJOR       81
++#endif
++#if defined(__linux__) || defined(__NetBSD__)
++#define DRM_MAJOR       226
++#endif
++#define DRM_MAX_MINOR   15
++
++#define DRM_NAME      "drm"     /**< Name in kernel, /dev, and /proc */
++#define DRM_MIN_ORDER 5         /**< At least 2^5 bytes = 32 bytes */
++#define DRM_MAX_ORDER 22        /**< Up to 2^22 bytes = 4MB */
++#define DRM_RAM_PERCENT 10      /**< How much system ram can we lock? */
++
++#define _DRM_LOCK_HELD        0x80000000U /**< Hardware lock is held */
++#define _DRM_LOCK_CONT        0x40000000U /**< Hardware lock is contended */
++#define _DRM_LOCK_IS_HELD(lock)          ((lock) & _DRM_LOCK_HELD)
++#define _DRM_LOCK_IS_CONT(lock)          ((lock) & _DRM_LOCK_CONT)
++#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
++
++#if defined(__linux__)
++typedef unsigned int drm_handle_t;
++#else
++#include <sys/types.h>
++typedef unsigned long drm_handle_t;   /**< To mapped regions */
++#endif
++typedef unsigned int drm_context_t;   /**< GLXContext handle */
++typedef unsigned int drm_drawable_t;
++typedef unsigned int drm_magic_t;     /**< Magic for authentication */
++
++/**
++ * Cliprect.
++ *
++ * \warning If you change this structure, make sure you change
++ * XF86DRIClipRectRec in the server as well
++ *
++ * \note KW: Actually it's illegal to change either for
++ * backwards-compatibility reasons.
++ */
++struct drm_clip_rect {
++      unsigned short x1;
++      unsigned short y1;
++      unsigned short x2;
++      unsigned short y2;
++};
++
++/**
++ * Texture region,
++ */
++struct drm_tex_region {
++      unsigned char next;
++      unsigned char prev;
++      unsigned char in_use;
++      unsigned char padding;
++      unsigned int age;
++};
++
++/**
++ * Hardware lock.
++ *
++ * The lock structure is a simple cache-line aligned integer.  To avoid
++ * processor bus contention on a multiprocessor system, there should not be any
++ * other data stored in the same cache line.
++ */
++struct drm_hw_lock {
++      __volatile__ unsigned int lock;         /**< lock variable */
++      char padding[60];                       /**< Pad to cache line */
++};
++
++/* This is beyond ugly, and only works on GCC.  However, it allows me to use
++ * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
++ * fix is to use uint32_t instead of size_t, but that fix will break existing
++ * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
++ * eventually happen, though.  I chose 'unsigned long' to be the fallback type
++ * because that works on all the platforms I know about.  Hopefully, the
++ * real fix will happen before that bites us.
++ */
++
++#ifdef __SIZE_TYPE__
++# define DRM_SIZE_T __SIZE_TYPE__
++#else
++# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
++# define DRM_SIZE_T unsigned long
++#endif
++
++/**
++ * DRM_IOCTL_VERSION ioctl argument type.
++ *
++ * \sa drmGetVersion().
++ */
++struct drm_version {
++      int version_major;        /**< Major version */
++      int version_minor;        /**< Minor version */
++      int version_patchlevel;   /**< Patch level */
++      DRM_SIZE_T name_len;      /**< Length of name buffer */
++      char __user *name;                /**< Name of driver */
++      DRM_SIZE_T date_len;      /**< Length of date buffer */
++      char __user *date;                /**< User-space buffer to hold date */
++      DRM_SIZE_T desc_len;      /**< Length of desc buffer */
++      char __user *desc;                /**< User-space buffer to hold desc */
++};
++
++/**
++ * DRM_IOCTL_GET_UNIQUE ioctl argument type.
++ *
++ * \sa drmGetBusid() and drmSetBusId().
++ */
++struct drm_unique {
++      DRM_SIZE_T unique_len;    /**< Length of unique */
++      char __user *unique;              /**< Unique name for driver instantiation */
++};
++
++#undef DRM_SIZE_T
++
++struct drm_list {
++      int count;                /**< Length of user-space structures */
++      struct drm_version __user *version;
++};
++
++struct drm_block {
++      int unused;
++};
++
++/**
++ * DRM_IOCTL_CONTROL ioctl argument type.
++ *
++ * \sa drmCtlInstHandler() and drmCtlUninstHandler().
++ */
++struct drm_control {
++      enum {
++              DRM_ADD_COMMAND,
++              DRM_RM_COMMAND,
++              DRM_INST_HANDLER,
++              DRM_UNINST_HANDLER
++      } func;
++      int irq;
++};
++
++/**
++ * Type of memory to map.
++ */
++enum drm_map_type {
++      _DRM_FRAME_BUFFER = 0,    /**< WC (no caching), no core dump */
++      _DRM_REGISTERS = 1,       /**< no caching, no core dump */
++      _DRM_SHM = 2,             /**< shared, cached */
++      _DRM_AGP = 3,             /**< AGP/GART */
++      _DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
++      _DRM_CONSISTENT = 5,      /**< Consistent memory for PCI DMA */
++      _DRM_TTM = 6
++};
++
++/**
++ * Memory mapping flags.
++ */
++enum drm_map_flags {
++      _DRM_RESTRICTED = 0x01,      /**< Cannot be mapped to user-virtual */
++      _DRM_READ_ONLY = 0x02,
++      _DRM_LOCKED = 0x04,          /**< shared, cached, locked */
++      _DRM_KERNEL = 0x08,          /**< kernel requires access */
++      _DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
++      _DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
++      _DRM_REMOVABLE = 0x40,       /**< Removable mapping */
++      _DRM_DRIVER = 0x80           /**< Managed by driver */
++};
++
++struct drm_ctx_priv_map {
++      unsigned int ctx_id;     /**< Context requesting private mapping */
++      void *handle;            /**< Handle of map */
++};
++
++/**
++ * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
++ * argument type.
++ *
++ * \sa drmAddMap().
++ */
++struct drm_map {
++      unsigned long offset;    /**< Requested physical address (0 for SAREA)*/
++      unsigned long size;      /**< Requested physical size (bytes) */
++      enum drm_map_type type;  /**< Type of memory to map */
++      enum drm_map_flags flags;        /**< Flags */
++      void *handle;            /**< User-space: "Handle" to pass to mmap() */
++                               /**< Kernel-space: kernel-virtual address */
++      int mtrr;                /**< MTRR slot used */
++      /*   Private data */
++};
++
++/**
++ * DRM_IOCTL_GET_CLIENT ioctl argument type.
++ */
++struct drm_client {
++      int idx;                /**< Which client desired? */
++      int auth;               /**< Is client authenticated? */
++      unsigned long pid;      /**< Process ID */
++      unsigned long uid;      /**< User ID */
++      unsigned long magic;    /**< Magic */
++      unsigned long iocs;     /**< Ioctl count */
++};
++
++enum drm_stat_type {
++      _DRM_STAT_LOCK,
++      _DRM_STAT_OPENS,
++      _DRM_STAT_CLOSES,
++      _DRM_STAT_IOCTLS,
++      _DRM_STAT_LOCKS,
++      _DRM_STAT_UNLOCKS,
++      _DRM_STAT_VALUE,        /**< Generic value */
++      _DRM_STAT_BYTE,         /**< Generic byte counter (1024bytes/K) */
++      _DRM_STAT_COUNT,        /**< Generic non-byte counter (1000/k) */
++
++      _DRM_STAT_IRQ,          /**< IRQ */
++      _DRM_STAT_PRIMARY,      /**< Primary DMA bytes */
++      _DRM_STAT_SECONDARY,    /**< Secondary DMA bytes */
++      _DRM_STAT_DMA,          /**< DMA */
++      _DRM_STAT_SPECIAL,      /**< Special DMA (e.g., priority or polled) */
++      _DRM_STAT_MISSED        /**< Missed DMA opportunity */
++          /* Add to the *END* of the list */
++};
++
++/**
++ * DRM_IOCTL_GET_STATS ioctl argument type.
++ */
++struct drm_stats {
++      unsigned long count;
++      struct {
++              unsigned long value;
++              enum drm_stat_type type;
++      } data[15];
++};
++
++/**
++ * Hardware locking flags.
++ */
++enum drm_lock_flags {
++      _DRM_LOCK_READY = 0x01,      /**< Wait until hardware is ready for DMA */
++      _DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
++      _DRM_LOCK_FLUSH = 0x04,      /**< Flush this context's DMA queue first */
++      _DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
++      /* These *HALT* flags aren't supported yet
++         -- they will be used to support the
++         full-screen DGA-like mode. */
++      _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
++      _DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
++};
++
++/**
++ * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
++ *
++ * \sa drmGetLock() and drmUnlock().
++ */
++struct drm_lock {
++      int context;
++      enum drm_lock_flags flags;
++};
++
++/**
++ * DMA flags
++ *
++ * \warning
++ * These values \e must match xf86drm.h.
++ *
++ * \sa drm_dma.
++ */
++enum drm_dma_flags {
++      /* Flags for DMA buffer dispatch */
++      _DRM_DMA_BLOCK = 0x01,        /**<
++                                     * Block until buffer dispatched.
++                                     *
++                                     * \note The buffer may not yet have
++                                     * been processed by the hardware --
++                                     * getting a hardware lock with the
++                                     * hardware quiescent will ensure
++                                     * that the buffer has been
++                                     * processed.
++                                     */
++      _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
++      _DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */
++
++      /* Flags for DMA buffer request */
++      _DRM_DMA_WAIT = 0x10,         /**< Wait for free buffers */
++      _DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
++      _DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
++};
++
++/**
++ * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
++ *
++ * \sa drmAddBufs().
++ */
++struct drm_buf_desc {
++      int count;               /**< Number of buffers of this size */
++      int size;                /**< Size in bytes */
++      int low_mark;            /**< Low water mark */
++      int high_mark;           /**< High water mark */
++      enum {
++              _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
++              _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
++              _DRM_SG_BUFFER  = 0x04, /**< Scatter/gather memory buffer */
++              _DRM_FB_BUFFER  = 0x08, /**< Buffer is in frame buffer */
++              _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
++      } flags;
++      unsigned long agp_start; /**<
++                                * Start address of where the AGP buffers are
++                                * in the AGP aperture
++                                */
++};
++
++/**
++ * DRM_IOCTL_INFO_BUFS ioctl argument type.
++ */
++struct drm_buf_info {
++      int count;                /**< Number of buffers described in list */
++      struct drm_buf_desc __user *list; /**< List of buffer descriptions */
++};
++
++/**
++ * DRM_IOCTL_FREE_BUFS ioctl argument type.
++ */
++struct drm_buf_free {
++      int count;
++      int __user *list;
++};
++
++/**
++ * Buffer information
++ *
++ * \sa drm_buf_map.
++ */
++struct drm_buf_pub {
++      int idx;                       /**< Index into the master buffer list */
++      int total;                     /**< Buffer size */
++      int used;                      /**< Amount of buffer in use (for DMA) */
++      void __user *address;          /**< Address of buffer */
++};
++
++/**
++ * DRM_IOCTL_MAP_BUFS ioctl argument type.
++ */
++struct drm_buf_map {
++      int count;              /**< Length of the buffer list */
++#if defined(__cplusplus)
++      void __user *c_virtual;
++#else
++      void __user *virtual;           /**< Mmap'd area in user-virtual */
++#endif
++      struct drm_buf_pub __user *list;        /**< Buffer information */
++};
++
++/**
++ * DRM_IOCTL_DMA ioctl argument type.
++ *
++ * Indices here refer to the offset into the buffer list in drm_buf_get.
++ *
++ * \sa drmDMA().
++ */
++struct drm_dma {
++      int context;                      /**< Context handle */
++      int send_count;                   /**< Number of buffers to send */
++      int __user *send_indices;         /**< List of handles to buffers */
++      int __user *send_sizes;           /**< Lengths of data to send */
++      enum drm_dma_flags flags;         /**< Flags */
++      int request_count;                /**< Number of buffers requested */
++      int request_size;                 /**< Desired size for buffers */
++      int __user *request_indices;     /**< Buffer information */
++      int __user *request_sizes;
++      int granted_count;                /**< Number of buffers granted */
++};
++
++enum drm_ctx_flags {
++      _DRM_CONTEXT_PRESERVED = 0x01,
++      _DRM_CONTEXT_2DONLY = 0x02
++};
++
++/**
++ * DRM_IOCTL_ADD_CTX ioctl argument type.
++ *
++ * \sa drmCreateContext() and drmDestroyContext().
++ */
++struct drm_ctx {
++      drm_context_t handle;
++      enum drm_ctx_flags flags;
++};
++
++/**
++ * DRM_IOCTL_RES_CTX ioctl argument type.
++ */
++struct drm_ctx_res {
++      int count;
++      struct drm_ctx __user *contexts;
++};
++
++/**
++ * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
++ */
++struct drm_draw {
++      drm_drawable_t handle;
++};
++
++/**
++ * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
++ */
++typedef enum {
++      DRM_DRAWABLE_CLIPRECTS,
++} drm_drawable_info_type_t;
++
++struct drm_update_draw {
++      drm_drawable_t handle;
++      unsigned int type;
++      unsigned int num;
++      unsigned long long data;
++};
++
++/**
++ * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
++ */
++struct drm_auth {
++      drm_magic_t magic;
++};
++
++/**
++ * DRM_IOCTL_IRQ_BUSID ioctl argument type.
++ *
++ * \sa drmGetInterruptFromBusID().
++ */
++struct drm_irq_busid {
++      int irq;        /**< IRQ number */
++      int busnum;     /**< bus number */
++      int devnum;     /**< device number */
++      int funcnum;    /**< function number */
++};
++
++enum drm_vblank_seq_type {
++      _DRM_VBLANK_ABSOLUTE = 0x0,     /**< Wait for specific vblank sequence number */
++      _DRM_VBLANK_RELATIVE = 0x1,     /**< Wait for given number of vblanks */
++      _DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
++      _DRM_VBLANK_NEXTONMISS = 0x10000000,    /**< If missed, wait for next vblank */
++      _DRM_VBLANK_SECONDARY = 0x20000000,     /**< Secondary display controller */
++      _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
++};
++
++#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
++#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
++                              _DRM_VBLANK_NEXTONMISS)
++
++struct drm_wait_vblank_request {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      unsigned long signal;
++};
++
++struct drm_wait_vblank_reply {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      long tval_sec;
++      long tval_usec;
++};
++
++/**
++ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
++ *
++ * \sa drmWaitVBlank().
++ */
++union drm_wait_vblank {
++      struct drm_wait_vblank_request request;
++      struct drm_wait_vblank_reply reply;
++};
++
++
++#define _DRM_PRE_MODESET 1
++#define _DRM_POST_MODESET 2
++
++/**
++ * DRM_IOCTL_MODESET_CTL ioctl argument type
++ *
++ * \sa drmModesetCtl().
++ */
++struct drm_modeset_ctl {
++      uint32_t crtc;
++      uint32_t cmd;
++};
++
++/**
++ * DRM_IOCTL_AGP_ENABLE ioctl argument type.
++ *
++ * \sa drmAgpEnable().
++ */
++struct drm_agp_mode {
++      unsigned long mode;     /**< AGP mode */
++};
++
++/**
++ * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
++ *
++ * \sa drmAgpAlloc() and drmAgpFree().
++ */
++struct drm_agp_buffer {
++      unsigned long size;     /**< In bytes -- will round to page boundary */
++      unsigned long handle;   /**< Used for binding / unbinding */
++      unsigned long type;     /**< Type of memory to allocate */
++      unsigned long physical; /**< Physical used by i810 */
++};
++
++/**
++ * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
++ *
++ * \sa drmAgpBind() and drmAgpUnbind().
++ */
++struct drm_agp_binding {
++      unsigned long handle;   /**< From drm_agp_buffer */
++      unsigned long offset;   /**< In bytes -- will round to page boundary */
++};
++
++/**
++ * DRM_IOCTL_AGP_INFO ioctl argument type.
++ *
++ * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
++ * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
++ * drmAgpVendorId() and drmAgpDeviceId().
++ */
++struct drm_agp_info {
++      int agp_version_major;
++      int agp_version_minor;
++      unsigned long mode;
++      unsigned long aperture_base;   /**< physical address */
++      unsigned long aperture_size;   /**< bytes */
++      unsigned long memory_allowed;  /**< bytes */
++      unsigned long memory_used;
++
++      /** \name PCI information */
++      /*@{ */
++      unsigned short id_vendor;
++      unsigned short id_device;
++      /*@} */
++};
++
++/**
++ * DRM_IOCTL_SG_ALLOC ioctl argument type.
++ */
++struct drm_scatter_gather {
++      unsigned long size;     /**< In bytes -- will round to page boundary */
++      unsigned long handle;   /**< Used for mapping / unmapping */
++};
++
++/**
++ * DRM_IOCTL_SET_VERSION ioctl argument type.
++ */
++struct drm_set_version {
++      int drm_di_major;
++      int drm_di_minor;
++      int drm_dd_major;
++      int drm_dd_minor;
++};
++
++
++#define DRM_FENCE_FLAG_EMIT                0x00000001
++#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
++/**
++ * On hardware with no interrupt events for operation completion,
++ * indicates that the kernel should sleep while waiting for any blocking
++ * operation to complete rather than spinning.
++ *
++ * Has no effect otherwise.
++ */
++#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
++#define DRM_FENCE_FLAG_NO_USER             0x00000010
++
++/* Reserved for driver use */
++#define DRM_FENCE_MASK_DRIVER              0xFF000000
++
++#define DRM_FENCE_TYPE_EXE                 0x00000001
++
++struct drm_fence_arg {
++      unsigned int handle;
++      unsigned int fence_class;
++      unsigned int type;
++      unsigned int flags;
++      unsigned int signaled;
++      unsigned int error;
++      unsigned int sequence;
++      unsigned int pad64;
++      uint64_t expand_pad[2]; /*Future expansion */
++};
++
++/* Buffer permissions, referring to how the GPU uses the buffers.
++ * these translate to fence types used for the buffers.
++ * Typically a texture buffer is read, A destination buffer is write and
++ *  a command (batch-) buffer is exe. Can be or-ed together.
++ */
++
++#define DRM_BO_FLAG_READ        (1ULL << 0)
++#define DRM_BO_FLAG_WRITE       (1ULL << 1)
++#define DRM_BO_FLAG_EXE         (1ULL << 2)
++
++/*
++ * All of the bits related to access mode
++ */
++#define DRM_BO_MASK_ACCESS    (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
++/*
++ * Status flags. Can be read to determine the actual state of a buffer.
++ * Can also be set in the buffer mask before validation.
++ */
++
++/*
++ * Mask: Never evict this buffer. Not even with force. This type of buffer is only
++ * available to root and must be manually removed before buffer manager shutdown
++ * or lock.
++ * Flags: Acknowledge
++ */
++#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)
++
++/*
++ * Mask: Require that the buffer is placed in mappable memory when validated.
++ *       If not set the buffer may or may not be in mappable memory when validated.
++ * Flags: If set, the buffer is in mappable memory.
++ */
++#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)
++
++/* Mask: The buffer should be shareable with other processes.
++ * Flags: The buffer is shareable with other processes.
++ */
++#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)
++
++/* Mask: If set, place the buffer in cache-coherent memory if available.
++ *       If clear, never place the buffer in cache coherent memory if validated.
++ * Flags: The buffer is currently in cache-coherent memory.
++ */
++#define DRM_BO_FLAG_CACHED      (1ULL << 7)
++
++/* Mask: Make sure that every time this buffer is validated,
++ *       it ends up on the same location provided that the memory mask is the same.
++ *       The buffer will also not be evicted when claiming space for
++ *       other buffers. Basically a pinned buffer but it may be thrown out as
++ *       part of buffer manager shutdown or locking.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
++
++/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
++ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
++ * with unsnooped PTEs instead of snooped, by using chipset-specific cache
++ * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
++ * as the eviction to local memory (TTM unbind) on map is just a side effect
++ * to prevent aggressive cache prefetch from the GPU disturbing the cache
++ * management that the DRM is doing.
++ *
++ * Flags: Acknowledge.
++ * Buffers allocated with this flag should not be used for suballocators
++ * This type may have issues on CPUs with over-aggressive caching
++ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
++ */
++#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
++
++
++/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)
++
++/*
++ * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
++ * Flags: Acknowledge.
++ */
++#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
++#define DRM_BO_FLAG_TILE           (1ULL << 15)
++
++/*
++ * Memory type flags that can be or'ed together in the mask, but only
++ * one appears in flags.
++ */
++
++/* System memory */
++#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
++/* Translation table memory */
++#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
++/* Vram memory */
++#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
++/* Up to the driver to define. */
++#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
++#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
++#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
++#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
++#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
++/* We can add more of these now with a 64-bit flag type */
++
++/*
++ * This is a mask covering all of the memory type flags; easier to just
++ * use a single constant than a bunch of | values. It covers
++ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
++ */
++#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
++/*
++ * This adds all of the CPU-mapping options in with the memory
++ * type to label all bits which change how the page gets mapped
++ */
++#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
++                               DRM_BO_FLAG_CACHED_MAPPED | \
++                               DRM_BO_FLAG_CACHED | \
++                               DRM_BO_FLAG_MAPPABLE)
++                               
++/* Driver-private flags */
++#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
++
++/*
++ * Don't block on validate and map. Instead, return EBUSY.
++ */
++#define DRM_BO_HINT_DONT_BLOCK  0x00000002
++/*
++ * Don't place this buffer on the unfenced list. This means
++ * that the buffer will not end up having a fence associated
++ * with it as a result of this operation
++ */
++#define DRM_BO_HINT_DONT_FENCE  0x00000004
++/**
++ * On hardware with no interrupt events for operation completion,
++ * indicates that the kernel should sleep while waiting for any blocking
++ * operation to complete rather than spinning.
++ *
++ * Has no effect otherwise.
++ */
++#define DRM_BO_HINT_WAIT_LAZY   0x00000008
++/*
++ * The client has compute relocations refering to this buffer using the
++ * offset in the presumed_offset field. If that offset ends up matching
++ * where this buffer lands, the kernel is free to skip executing those
++ * relocations
++ */
++#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
++
++#define DRM_BO_INIT_MAGIC 0xfe769812
++#define DRM_BO_INIT_MAJOR 1
++#define DRM_BO_INIT_MINOR 0
++#define DRM_BO_INIT_PATCH 0
++
++
++struct drm_bo_info_req {
++      uint64_t mask;
++      uint64_t flags;
++      unsigned int handle;
++      unsigned int hint;
++      unsigned int fence_class;
++      unsigned int desired_tile_stride;
++      unsigned int tile_info;
++      unsigned int pad64;
++      uint64_t presumed_offset;
++};
++
++struct drm_bo_create_req {
++      uint64_t flags;
++      uint64_t size;
++      uint64_t buffer_start;
++      unsigned int hint;
++      unsigned int page_alignment;
++};
++
++
++/*
++ * Reply flags
++ */
++
++#define DRM_BO_REP_BUSY 0x00000001
++
++struct drm_bo_info_rep {
++      uint64_t flags;
++      uint64_t proposed_flags;
++      uint64_t size;
++      uint64_t offset;
++      uint64_t arg_handle;
++      uint64_t buffer_start;
++      unsigned int handle;
++      unsigned int fence_flags;
++      unsigned int rep_flags;
++      unsigned int page_alignment;
++      unsigned int desired_tile_stride;
++      unsigned int hw_tile_stride;
++      unsigned int tile_info;
++      unsigned int pad64;
++      uint64_t expand_pad[4]; /*Future expansion */
++};
++
++struct drm_bo_arg_rep {
++      struct drm_bo_info_rep bo_info;
++      int ret;
++      unsigned int pad64;
++};
++
++struct drm_bo_create_arg {
++      union {
++              struct drm_bo_create_req req;
++              struct drm_bo_info_rep rep;
++      } d;
++};
++
++struct drm_bo_handle_arg {
++      unsigned int handle;
++};
++
++struct drm_bo_reference_info_arg {
++      union {
++              struct drm_bo_handle_arg req;
++              struct drm_bo_info_rep rep;
++      } d;
++};
++
++struct drm_bo_map_wait_idle_arg {
++      union {
++              struct drm_bo_info_req req;
++              struct drm_bo_info_rep rep;
++      } d;
++};
++
++struct drm_bo_op_req {
++      enum {
++              drm_bo_validate,
++              drm_bo_fence,
++              drm_bo_ref_fence,
++      } op;
++      unsigned int arg_handle;
++      struct drm_bo_info_req bo_req;
++};
++
++
++struct drm_bo_op_arg {
++      uint64_t next;
++      union {
++              struct drm_bo_op_req req;
++              struct drm_bo_arg_rep rep;
++      } d;
++      int handled;
++      unsigned int pad64;
++};
++
++
++#define DRM_BO_MEM_LOCAL 0
++#define DRM_BO_MEM_TT 1
++#define DRM_BO_MEM_VRAM 2
++#define DRM_BO_MEM_PRIV0 3
++#define DRM_BO_MEM_PRIV1 4
++#define DRM_BO_MEM_PRIV2 5
++#define DRM_BO_MEM_PRIV3 6
++#define DRM_BO_MEM_PRIV4 7
++
++#define DRM_BO_MEM_TYPES 8 /* For now. */
++
++#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
++#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
++
++struct drm_bo_version_arg {
++      uint32_t major;
++      uint32_t minor;
++      uint32_t patchlevel;
++};
++
++struct drm_mm_type_arg {
++      unsigned int mem_type;
++      unsigned int lock_flags;
++};
++
++struct drm_mm_init_arg {
++      unsigned int magic;
++      unsigned int major;
++      unsigned int minor;
++      unsigned int mem_type;
++      uint64_t p_offset;
++      uint64_t p_size;
++};
++
++struct drm_mm_info_arg {
++      unsigned int mem_type;
++      uint64_t p_size;
++};
++
++struct drm_gem_close {
++      /** Handle of the object to be closed. */
++      uint32_t handle;
++      uint32_t pad;
++};
++
++struct drm_gem_flink {
++      /** Handle for the object being named */
++      uint32_t handle;
++
++      /** Returned global name */
++      uint32_t name;
++};
++
++struct drm_gem_open {
++      /** Name of object being opened */
++      uint32_t name;
++
++      /** Returned handle for the object */
++      uint32_t handle;
++      
++      /** Returned size of the object */
++      uint64_t size;
++};
++
++/**
++ * \name Ioctls Definitions
++ */
++/*@{*/
++
++#define DRM_IOCTL_BASE                        'd'
++#define DRM_IO(nr)                    _IO(DRM_IOCTL_BASE,nr)
++#define DRM_IOR(nr,type)              _IOR(DRM_IOCTL_BASE,nr,type)
++#define DRM_IOW(nr,type)              _IOW(DRM_IOCTL_BASE,nr,type)
++#define DRM_IOWR(nr,type)             _IOWR(DRM_IOCTL_BASE,nr,type)
++
++#define DRM_IOCTL_VERSION             DRM_IOWR(0x00, struct drm_version)
++#define DRM_IOCTL_GET_UNIQUE          DRM_IOWR(0x01, struct drm_unique)
++#define DRM_IOCTL_GET_MAGIC           DRM_IOR( 0x02, struct drm_auth)
++#define DRM_IOCTL_IRQ_BUSID           DRM_IOWR(0x03, struct drm_irq_busid)
++#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
++#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
++#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
++#define DRM_IOCTL_SET_VERSION         DRM_IOWR(0x07, struct drm_set_version)
++#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08,  struct drm_modeset_ctl)
++
++#define DRM_IOCTL_GEM_CLOSE           DRM_IOW (0x09, struct drm_gem_close)
++#define DRM_IOCTL_GEM_FLINK           DRM_IOWR(0x0a, struct drm_gem_flink)
++#define DRM_IOCTL_GEM_OPEN            DRM_IOWR(0x0b, struct drm_gem_open)
++
++#define DRM_IOCTL_SET_UNIQUE          DRM_IOW( 0x10, struct drm_unique)
++#define DRM_IOCTL_AUTH_MAGIC          DRM_IOW( 0x11, struct drm_auth)
++#define DRM_IOCTL_BLOCK                       DRM_IOWR(0x12, struct drm_block)
++#define DRM_IOCTL_UNBLOCK             DRM_IOWR(0x13, struct drm_block)
++#define DRM_IOCTL_CONTROL             DRM_IOW( 0x14, struct drm_control)
++#define DRM_IOCTL_ADD_MAP             DRM_IOWR(0x15, struct drm_map)
++#define DRM_IOCTL_ADD_BUFS            DRM_IOWR(0x16, struct drm_buf_desc)
++#define DRM_IOCTL_MARK_BUFS           DRM_IOW( 0x17, struct drm_buf_desc)
++#define DRM_IOCTL_INFO_BUFS           DRM_IOWR(0x18, struct drm_buf_info)
++#define DRM_IOCTL_MAP_BUFS            DRM_IOWR(0x19, struct drm_buf_map)
++#define DRM_IOCTL_FREE_BUFS           DRM_IOW( 0x1a, struct drm_buf_free)
++
++#define DRM_IOCTL_RM_MAP              DRM_IOW( 0x1b, struct drm_map)
++
++#define DRM_IOCTL_SET_SAREA_CTX               DRM_IOW( 0x1c, struct drm_ctx_priv_map)
++#define DRM_IOCTL_GET_SAREA_CTX               DRM_IOWR(0x1d, struct drm_ctx_priv_map)
++
++#define DRM_IOCTL_ADD_CTX             DRM_IOWR(0x20, struct drm_ctx)
++#define DRM_IOCTL_RM_CTX              DRM_IOWR(0x21, struct drm_ctx)
++#define DRM_IOCTL_MOD_CTX             DRM_IOW( 0x22, struct drm_ctx)
++#define DRM_IOCTL_GET_CTX             DRM_IOWR(0x23, struct drm_ctx)
++#define DRM_IOCTL_SWITCH_CTX          DRM_IOW( 0x24, struct drm_ctx)
++#define DRM_IOCTL_NEW_CTX             DRM_IOW( 0x25, struct drm_ctx)
++#define DRM_IOCTL_RES_CTX             DRM_IOWR(0x26, struct drm_ctx_res)
++#define DRM_IOCTL_ADD_DRAW            DRM_IOWR(0x27, struct drm_draw)
++#define DRM_IOCTL_RM_DRAW             DRM_IOWR(0x28, struct drm_draw)
++#define DRM_IOCTL_DMA                 DRM_IOWR(0x29, struct drm_dma)
++#define DRM_IOCTL_LOCK                        DRM_IOW( 0x2a, struct drm_lock)
++#define DRM_IOCTL_UNLOCK              DRM_IOW( 0x2b, struct drm_lock)
++#define DRM_IOCTL_FINISH              DRM_IOW( 0x2c, struct drm_lock)
++
++#define DRM_IOCTL_AGP_ACQUIRE         DRM_IO(  0x30)
++#define DRM_IOCTL_AGP_RELEASE         DRM_IO(  0x31)
++#define DRM_IOCTL_AGP_ENABLE          DRM_IOW( 0x32, struct drm_agp_mode)
++#define DRM_IOCTL_AGP_INFO            DRM_IOR( 0x33, struct drm_agp_info)
++#define DRM_IOCTL_AGP_ALLOC           DRM_IOWR(0x34, struct drm_agp_buffer)
++#define DRM_IOCTL_AGP_FREE            DRM_IOW( 0x35, struct drm_agp_buffer)
++#define DRM_IOCTL_AGP_BIND            DRM_IOW( 0x36, struct drm_agp_binding)
++#define DRM_IOCTL_AGP_UNBIND          DRM_IOW( 0x37, struct drm_agp_binding)
++
++#define DRM_IOCTL_SG_ALLOC            DRM_IOWR(0x38, struct drm_scatter_gather)
++#define DRM_IOCTL_SG_FREE             DRM_IOW( 0x39, struct drm_scatter_gather)
++
++#define DRM_IOCTL_WAIT_VBLANK         DRM_IOWR(0x3a, union drm_wait_vblank)
++
++#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)
++
++#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
++#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
++#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
++#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)
++
++#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
++#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)
++
++#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
++#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
++#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
++#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
++#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
++#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
++#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
++#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
++#define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
++#define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)
++
++/*@}*/
++
++/**
++ * Device specific ioctls should only be in their respective headers
++ * The device specific ioctl range is from 0x40 to 0x99.
++ * Generic IOCTLS restart at 0xA0.
++ *
++ * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
++ * drmCommandReadWrite().
++ */
++#define DRM_COMMAND_BASE                0x40
++#define DRM_COMMAND_END                 0xA0
++
++/* typedef area */
++#ifndef __KERNEL__
++typedef struct drm_clip_rect drm_clip_rect_t;
++typedef struct drm_tex_region drm_tex_region_t;
++typedef struct drm_hw_lock drm_hw_lock_t;
++typedef struct drm_version drm_version_t;
++typedef struct drm_unique drm_unique_t;
++typedef struct drm_list drm_list_t;
++typedef struct drm_block drm_block_t;
++typedef struct drm_control drm_control_t;
++typedef enum drm_map_type drm_map_type_t;
++typedef enum drm_map_flags drm_map_flags_t;
++typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
++typedef struct drm_map drm_map_t;
++typedef struct drm_client drm_client_t;
++typedef enum drm_stat_type drm_stat_type_t;
++typedef struct drm_stats drm_stats_t;
++typedef enum drm_lock_flags drm_lock_flags_t;
++typedef struct drm_lock drm_lock_t;
++typedef enum drm_dma_flags drm_dma_flags_t;
++typedef struct drm_buf_desc drm_buf_desc_t;
++typedef struct drm_buf_info drm_buf_info_t;
++typedef struct drm_buf_free drm_buf_free_t;
++typedef struct drm_buf_pub drm_buf_pub_t;
++typedef struct drm_buf_map drm_buf_map_t;
++typedef struct drm_dma drm_dma_t;
++typedef union drm_wait_vblank drm_wait_vblank_t;
++typedef struct drm_agp_mode drm_agp_mode_t;
++typedef enum drm_ctx_flags drm_ctx_flags_t;
++typedef struct drm_ctx drm_ctx_t;
++typedef struct drm_ctx_res drm_ctx_res_t;
++typedef struct drm_draw drm_draw_t;
++typedef struct drm_update_draw drm_update_draw_t;
++typedef struct drm_auth drm_auth_t;
++typedef struct drm_irq_busid drm_irq_busid_t;
++typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
++typedef struct drm_agp_buffer drm_agp_buffer_t;
++typedef struct drm_agp_binding drm_agp_binding_t;
++typedef struct drm_agp_info drm_agp_info_t;
++typedef struct drm_scatter_gather drm_scatter_gather_t;
++typedef struct drm_set_version drm_set_version_t;
++
++typedef struct drm_fence_arg drm_fence_arg_t;
++typedef struct drm_mm_type_arg drm_mm_type_arg_t;
++typedef struct drm_mm_init_arg drm_mm_init_arg_t;
++typedef enum drm_bo_type drm_bo_type_t;
++#endif
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_hashtab.c git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.c
+--- git/drivers/gpu/drm-tungsten/drm_hashtab.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,207 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple open hash tab implementation.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "drm_hashtab.h"
++#include <linux/hash.h>
++
++int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
++{
++      unsigned int i;
++
++      ht->size = 1 << order;
++      ht->order = order;
++      ht->fill = 0;
++      ht->table = NULL;
++      ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
++      if (!ht->use_vmalloc) {
++              ht->table = drm_calloc(ht->size, sizeof(*ht->table),
++                                     DRM_MEM_HASHTAB);
++      }
++      if (!ht->table) {
++              ht->use_vmalloc = 1;
++              ht->table = vmalloc(ht->size * sizeof(*ht->table));
++      }
++      if (!ht->table) {
++              DRM_ERROR("Out of memory for hash table\n");
++              return -ENOMEM;
++      }
++      for (i = 0; i < ht->size; ++i) {
++              INIT_HLIST_HEAD(&ht->table[i]);
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_create);
++
++void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
++{
++      struct drm_hash_item *entry;
++      struct hlist_head *h_list;
++      struct hlist_node *list;
++      unsigned int hashed_key;
++      int count = 0;
++
++      hashed_key = hash_long(key, ht->order);
++      DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
++      h_list = &ht->table[hashed_key];
++      hlist_for_each(list, h_list) {
++              entry = hlist_entry(list, struct drm_hash_item, head);
++              DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
++      }
++}
++
++static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
++                                        unsigned long key)
++{
++      struct drm_hash_item *entry;
++      struct hlist_head *h_list;
++      struct hlist_node *list;
++      unsigned int hashed_key;
++
++      hashed_key = hash_long(key, ht->order);
++      h_list = &ht->table[hashed_key];
++      hlist_for_each(list, h_list) {
++              entry = hlist_entry(list, struct drm_hash_item, head);
++              if (entry->key == key)
++                      return list;
++              if (entry->key > key)
++                      break;
++      }
++      return NULL;
++}
++
++int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
++{
++      struct drm_hash_item *entry;
++      struct hlist_head *h_list;
++      struct hlist_node *list, *parent;
++      unsigned int hashed_key;
++      unsigned long key = item->key;
++
++      hashed_key = hash_long(key, ht->order);
++      h_list = &ht->table[hashed_key];
++      parent = NULL;
++      hlist_for_each(list, h_list) {
++              entry = hlist_entry(list, struct drm_hash_item, head);
++              if (entry->key == key)
++                      return -EINVAL;
++              if (entry->key > key)
++                      break;
++              parent = list;
++      }
++      if (parent) {
++              hlist_add_after(parent, &item->head);
++      } else {
++              hlist_add_head(&item->head, h_list);
++      }
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_insert_item);
++
++/*
++ * Just insert an item and return any "bits" bit key that hasn't been
++ * used before.
++ */
++int drm_ht_just_insert_please(struct drm_open_hash *ht,
++                            struct drm_hash_item *item,
++                            unsigned long seed, int bits, int shift,
++                            unsigned long add)
++{
++      int ret;
++      unsigned long mask = (1 << bits) - 1;
++      unsigned long first, unshifted_key;
++
++      unshifted_key = hash_long(seed, bits);
++      first = unshifted_key;
++      do {
++              item->key = (unshifted_key << shift) + add;
++              ret = drm_ht_insert_item(ht, item);
++              if (ret)
++                      unshifted_key = (unshifted_key + 1) & mask;
++      } while (ret && (unshifted_key != first));
++
++      if (ret) {
++              DRM_ERROR("Available key bit space exhausted\n");
++              return -EINVAL;
++      }
++      return 0;
++}
++
++int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
++                   struct drm_hash_item **item)
++{
++      struct hlist_node *list;
++
++      list = drm_ht_find_key(ht, key);
++      if (!list)
++              return -EINVAL;
++
++      *item = hlist_entry(list, struct drm_hash_item, head);
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_find_item);
++
++int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
++{
++      struct hlist_node *list;
++
++      list = drm_ht_find_key(ht, key);
++      if (list) {
++              hlist_del_init(list);
++              ht->fill--;
++              return 0;
++      }
++      return -EINVAL;
++}
++
++int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
++{
++      hlist_del_init(&item->head);
++      ht->fill--;
++      return 0;
++}
++EXPORT_SYMBOL(drm_ht_remove_item);
++
++void drm_ht_remove(struct drm_open_hash *ht)
++{
++      if (ht->table) {
++              if (ht->use_vmalloc)
++                      vfree(ht->table);
++              else
++                      drm_free(ht->table, ht->size * sizeof(*ht->table),
++                               DRM_MEM_HASHTAB);
++              ht->table = NULL;
++      }
++}
++EXPORT_SYMBOL(drm_ht_remove);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_hashtab.h git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.h
+--- git/drivers/gpu/drm-tungsten/drm_hashtab.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_hashtab.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple open hash tab implementation.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef DRM_HASHTAB_H
++#define DRM_HASHTAB_H
++
++#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
++
++struct drm_hash_item {
++      struct hlist_node head;
++      unsigned long key;
++};
++
++struct drm_open_hash {
++      unsigned int size;
++      unsigned int order;
++      unsigned int fill;
++      struct hlist_head *table;
++      int use_vmalloc;
++};
++
++
++extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
++extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
++extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
++                                   unsigned long seed, int bits, int shift,
++                                   unsigned long add);
++extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
++
++extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
++extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
++extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
++extern void drm_ht_remove(struct drm_open_hash *ht);
++
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_internal.h git-nokia/drivers/gpu/drm-tungsten/drm_internal.h
+--- git/drivers/gpu/drm-tungsten/drm_internal.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_internal.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,40 @@
++/*
++ * Copyright 2007 Red Hat, Inc
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* This header file holds function prototypes and data types that are
++ * internal to the drm (not exported to user space) but shared across
++ * drivers and platforms */
++
++#ifndef __DRM_INTERNAL_H__
++#define __DRM_INTERNAL_H__
++
++/**
++ * Drawable information.
++ */
++struct drm_drawable_info {
++      unsigned int num_rects;
++      struct drm_clip_rect *rects;
++};
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_ioc32.c git-nokia/drivers/gpu/drm-tungsten/drm_ioc32.c
+--- git/drivers/gpu/drm-tungsten/drm_ioc32.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_ioc32.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1073 @@
++/**
++ * \file drm_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the DRM.
++ *
++ * \author Paul Mackerras <paulus@samba.org>
++ *
++ * Copyright (C) Paul Mackerras 2005.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm_core.h"
++
++#define DRM_IOCTL_VERSION32           DRM_IOWR(0x00, drm_version32_t)
++#define DRM_IOCTL_GET_UNIQUE32                DRM_IOWR(0x01, drm_unique32_t)
++#define DRM_IOCTL_GET_MAP32           DRM_IOWR(0x04, drm_map32_t)
++#define DRM_IOCTL_GET_CLIENT32                DRM_IOWR(0x05, drm_client32_t)
++#define DRM_IOCTL_GET_STATS32         DRM_IOR( 0x06, drm_stats32_t)
++
++#define DRM_IOCTL_SET_UNIQUE32                DRM_IOW( 0x10, drm_unique32_t)
++#define DRM_IOCTL_ADD_MAP32           DRM_IOWR(0x15, drm_map32_t)
++#define DRM_IOCTL_ADD_BUFS32          DRM_IOWR(0x16, drm_buf_desc32_t)
++#define DRM_IOCTL_MARK_BUFS32         DRM_IOW( 0x17, drm_buf_desc32_t)
++#define DRM_IOCTL_INFO_BUFS32         DRM_IOWR(0x18, drm_buf_info32_t)
++#define DRM_IOCTL_MAP_BUFS32          DRM_IOWR(0x19, drm_buf_map32_t)
++#define DRM_IOCTL_FREE_BUFS32         DRM_IOW( 0x1a, drm_buf_free32_t)
++
++#define DRM_IOCTL_RM_MAP32            DRM_IOW( 0x1b, drm_map32_t)
++
++#define DRM_IOCTL_SET_SAREA_CTX32     DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
++#define DRM_IOCTL_GET_SAREA_CTX32     DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
++
++#define DRM_IOCTL_RES_CTX32           DRM_IOWR(0x26, drm_ctx_res32_t)
++#define DRM_IOCTL_DMA32                       DRM_IOWR(0x29, drm_dma32_t)
++
++#define DRM_IOCTL_AGP_ENABLE32                DRM_IOW( 0x32, drm_agp_mode32_t)
++#define DRM_IOCTL_AGP_INFO32          DRM_IOR( 0x33, drm_agp_info32_t)
++#define DRM_IOCTL_AGP_ALLOC32         DRM_IOWR(0x34, drm_agp_buffer32_t)
++#define DRM_IOCTL_AGP_FREE32          DRM_IOW( 0x35, drm_agp_buffer32_t)
++#define DRM_IOCTL_AGP_BIND32          DRM_IOW( 0x36, drm_agp_binding32_t)
++#define DRM_IOCTL_AGP_UNBIND32                DRM_IOW( 0x37, drm_agp_binding32_t)
++
++#define DRM_IOCTL_SG_ALLOC32          DRM_IOW( 0x38, drm_scatter_gather32_t)
++#define DRM_IOCTL_SG_FREE32           DRM_IOW( 0x39, drm_scatter_gather32_t)
++
++#define DRM_IOCTL_WAIT_VBLANK32               DRM_IOWR(0x3a, drm_wait_vblank32_t)
++
++typedef struct drm_version_32 {
++      int version_major;        /**< Major version */
++      int version_minor;        /**< Minor version */
++      int version_patchlevel;   /**< Patch level */
++      u32 name_len;             /**< Length of name buffer */
++      u32 name;                 /**< Name of driver */
++      u32 date_len;             /**< Length of date buffer */
++      u32 date;                 /**< User-space buffer to hold date */
++      u32 desc_len;             /**< Length of desc buffer */
++      u32 desc;                 /**< User-space buffer to hold desc */
++} drm_version32_t;
++
++static int compat_drm_version(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_version32_t v32;
++      struct drm_version __user *version;
++      int err;
++
++      if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
++              return -EFAULT;
++
++      version = compat_alloc_user_space(sizeof(*version));
++      if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
++              return -EFAULT;
++      if (__put_user(v32.name_len, &version->name_len)
++          || __put_user((void __user *)(unsigned long)v32.name,
++                        &version->name)
++          || __put_user(v32.date_len, &version->date_len)
++          || __put_user((void __user *)(unsigned long)v32.date,
++                        &version->date)
++          || __put_user(v32.desc_len, &version->desc_len)
++          || __put_user((void __user *)(unsigned long)v32.desc,
++                        &version->desc))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_VERSION, (unsigned long)version);
++      if (err)
++              return err;
++
++      if (__get_user(v32.version_major, &version->version_major)
++          || __get_user(v32.version_minor, &version->version_minor)
++          || __get_user(v32.version_patchlevel, &version->version_patchlevel)
++          || __get_user(v32.name_len, &version->name_len)
++          || __get_user(v32.date_len, &version->date_len)
++          || __get_user(v32.desc_len, &version->desc_len))
++              return -EFAULT;
++
++      if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
++              return -EFAULT;
++      return 0;
++}
++
++typedef struct drm_unique32 {
++      u32 unique_len; /**< Length of unique */
++      u32 unique;     /**< Unique name for driver instantiation */
++} drm_unique32_t;
++
++static int compat_drm_getunique(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_unique32_t uq32;
++      struct drm_unique __user *u;
++      int err;
++
++      if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
++              return -EFAULT;
++
++      u = compat_alloc_user_space(sizeof(*u));
++      if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
++              return -EFAULT;
++      if (__put_user(uq32.unique_len, &u->unique_len)
++          || __put_user((void __user *)(unsigned long)uq32.unique,
++                        &u->unique))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
++      if (err)
++              return err;
++
++      if (__get_user(uq32.unique_len, &u->unique_len))
++              return -EFAULT;
++      if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
++              return -EFAULT;
++      return 0;
++}
++
++static int compat_drm_setunique(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_unique32_t uq32;
++      struct drm_unique __user *u;
++
++      if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
++              return -EFAULT;
++
++      u = compat_alloc_user_space(sizeof(*u));
++      if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
++              return -EFAULT;
++      if (__put_user(uq32.unique_len, &u->unique_len)
++          || __put_user((void __user *)(unsigned long)uq32.unique,
++                        &u->unique))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
++}
++
++typedef struct drm_map32 {
++      u32 offset;             /**< Requested physical address (0 for SAREA)*/
++      u32 size;               /**< Requested physical size (bytes) */
++      enum drm_map_type type; /**< Type of memory to map */
++      enum drm_map_flags flags;       /**< Flags */
++      u32 handle;             /**< User-space: "Handle" to pass to mmap() */
++      int mtrr;               /**< MTRR slot used */
++} drm_map32_t;
++
++static int compat_drm_getmap(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_map32_t __user *argp = (void __user *)arg;
++      drm_map32_t m32;
++      struct drm_map __user *map;
++      int idx, err;
++      void *handle;
++
++      if (get_user(idx, &argp->offset))
++              return -EFAULT;
++
++      map = compat_alloc_user_space(sizeof(*map));
++      if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
++              return -EFAULT;
++      if (__put_user(idx, &map->offset))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_MAP, (unsigned long)map);
++      if (err)
++              return err;
++
++      if (__get_user(m32.offset, &map->offset)
++          || __get_user(m32.size, &map->size)
++          || __get_user(m32.type, &map->type)
++          || __get_user(m32.flags, &map->flags)
++          || __get_user(handle, &map->handle)
++          || __get_user(m32.mtrr, &map->mtrr))
++              return -EFAULT;
++
++      m32.handle = (unsigned long)handle;
++      if (copy_to_user(argp, &m32, sizeof(m32)))
++              return -EFAULT;
++      return 0;
++
++}
++
++static int compat_drm_addmap(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_map32_t __user *argp = (void __user *)arg;
++      drm_map32_t m32;
++      struct drm_map __user *map;
++      int err;
++      void *handle;
++
++      if (copy_from_user(&m32, argp, sizeof(m32)))
++              return -EFAULT;
++
++      map = compat_alloc_user_space(sizeof(*map));
++      if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
++              return -EFAULT;
++      if (__put_user(m32.offset, &map->offset)
++          || __put_user(m32.size, &map->size)
++          || __put_user(m32.type, &map->type)
++          || __put_user(m32.flags, &map->flags))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_ADD_MAP, (unsigned long)map);
++      if (err)
++              return err;
++
++      if (__get_user(m32.offset, &map->offset)
++          || __get_user(m32.mtrr, &map->mtrr)
++          || __get_user(handle, &map->handle))
++              return -EFAULT;
++
++      m32.handle = (unsigned long)handle;
++      if (m32.handle != (unsigned long)handle && printk_ratelimit())
++              printk(KERN_ERR "compat_drm_addmap truncated handle"
++                     " %p for type %d offset %x\n",
++                     handle, m32.type, m32.offset);
++
++      if (copy_to_user(argp, &m32, sizeof(m32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int compat_drm_rmmap(struct file *file, unsigned int cmd,
++                          unsigned long arg)
++{
++      drm_map32_t __user *argp = (void __user *)arg;
++      struct drm_map __user *map;
++      u32 handle;
++
++      if (get_user(handle, &argp->handle))
++              return -EFAULT;
++
++      map = compat_alloc_user_space(sizeof(*map));
++      if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
++              return -EFAULT;
++      if (__put_user((void *)(unsigned long)handle, &map->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RM_MAP, (unsigned long)map);
++}
++
++typedef struct drm_client32 {
++      int idx;        /**< Which client desired? */
++      int auth;       /**< Is client authenticated? */
++      u32 pid;        /**< Process ID */
++      u32 uid;        /**< User ID */
++      u32 magic;      /**< Magic */
++      u32 iocs;       /**< Ioctl count */
++} drm_client32_t;
++
++static int compat_drm_getclient(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_client32_t c32;
++      drm_client32_t __user *argp = (void __user *)arg;
++      struct drm_client __user *client;
++      int idx, err;
++
++      if (get_user(idx, &argp->idx))
++              return -EFAULT;
++
++      client = compat_alloc_user_space(sizeof(*client));
++      if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
++              return -EFAULT;
++      if (__put_user(idx, &client->idx))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_CLIENT, (unsigned long)client);
++      if (err)
++              return err;
++
++      if (__get_user(c32.auth, &client->auth)
++          || __get_user(c32.pid, &client->pid)
++          || __get_user(c32.uid, &client->uid)
++          || __get_user(c32.magic, &client->magic)
++          || __get_user(c32.iocs, &client->iocs))
++              return -EFAULT;
++
++      if (copy_to_user(argp, &c32, sizeof(c32)))
++              return -EFAULT;
++      return 0;
++}
++
++typedef struct drm_stats32 {
++      u32 count;
++      struct {
++              u32 value;
++              enum drm_stat_type type;
++      } data[15];
++} drm_stats32_t;
++
++static int compat_drm_getstats(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_stats32_t s32;
++      drm_stats32_t __user *argp = (void __user *)arg;
++      struct drm_stats __user *stats;
++      int i, err;
++
++      stats = compat_alloc_user_space(sizeof(*stats));
++      if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_STATS, (unsigned long)stats);
++      if (err)
++              return err;
++
++      if (__get_user(s32.count, &stats->count))
++              return -EFAULT;
++      for (i = 0; i < 15; ++i)
++              if (__get_user(s32.data[i].value, &stats->data[i].value)
++                  || __get_user(s32.data[i].type, &stats->data[i].type))
++                      return -EFAULT;
++
++      if (copy_to_user(argp, &s32, sizeof(s32)))
++              return -EFAULT;
++      return 0;
++}
++
++typedef struct drm_buf_desc32 {
++      int count;               /**< Number of buffers of this size */
++      int size;                /**< Size in bytes */
++      int low_mark;            /**< Low water mark */
++      int high_mark;           /**< High water mark */
++      int flags;
++      u32 agp_start;           /**< Start address in the AGP aperture */
++} drm_buf_desc32_t;
++
++static int compat_drm_addbufs(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_buf_desc32_t __user *argp = (void __user *)arg;
++      struct drm_buf_desc __user *buf;
++      int err;
++      unsigned long agp_start;
++
++      buf = compat_alloc_user_space(sizeof(*buf));
++      if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
++          || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
++              return -EFAULT;
++
++      if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
++          || __get_user(agp_start, &argp->agp_start)
++          || __put_user(agp_start, &buf->agp_start))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
++      if (err)
++              return err;
++
++      if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
++          || __get_user(agp_start, &buf->agp_start)
++          || __put_user(agp_start, &argp->agp_start))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int compat_drm_markbufs(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_buf_desc32_t b32;
++      drm_buf_desc32_t __user *argp = (void __user *)arg;
++      struct drm_buf_desc __user *buf;
++
++      if (copy_from_user(&b32, argp, sizeof(b32)))
++              return -EFAULT;
++
++      buf = compat_alloc_user_space(sizeof(*buf));
++      if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
++              return -EFAULT;
++
++      if (__put_user(b32.size, &buf->size)
++          || __put_user(b32.low_mark, &buf->low_mark)
++          || __put_user(b32.high_mark, &buf->high_mark))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
++}
++
++typedef struct drm_buf_info32 {
++      int count;              /**< Entries in list */
++      u32 list;
++} drm_buf_info32_t;
++
++static int compat_drm_infobufs(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_buf_info32_t req32;
++      drm_buf_info32_t __user *argp = (void __user *)arg;
++      drm_buf_desc32_t __user *to;
++      struct drm_buf_info __user *request;
++      struct drm_buf_desc __user *list;
++      size_t nbytes;
++      int i, err;
++      int count, actual;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      count = req32.count;
++      to = (drm_buf_desc32_t __user *)(unsigned long)req32.list;
++      if (count < 0)
++              count = 0;
++      if (count > 0
++          && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
++              return -EFAULT;
++
++      nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
++      request = compat_alloc_user_space(nbytes);
++      if (!access_ok(VERIFY_WRITE, request, nbytes))
++              return -EFAULT;
++      list = (struct drm_buf_desc *) (request + 1);
++
++      if (__put_user(count, &request->count)
++          || __put_user(list, &request->list))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_INFO_BUFS, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(actual, &request->count))
++              return -EFAULT;
++      if (count >= actual)
++              for (i = 0; i < actual; ++i)
++                      if (__copy_in_user(&to[i], &list[i],
++                                         offsetof(struct drm_buf_desc, flags)))
++                              return -EFAULT;
++
++      if (__put_user(actual, &argp->count))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_buf_pub32 {
++      int idx;                /**< Index into the master buffer list */
++      int total;              /**< Buffer size */
++      int used;               /**< Amount of buffer in use (for DMA) */
++      u32 address;            /**< Address of buffer */
++} drm_buf_pub32_t;
++
++typedef struct drm_buf_map32 {
++      int count;              /**< Length of the buffer list */
++      u32 virtual;            /**< Mmap'd area in user-virtual */
++      u32 list;               /**< Buffer information */
++} drm_buf_map32_t;
++
++static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_buf_map32_t __user *argp = (void __user *)arg;
++      drm_buf_map32_t req32;
++      drm_buf_pub32_t __user *list32;
++      struct drm_buf_map __user *request;
++      struct drm_buf_pub __user *list;
++      int i, err;
++      int count, actual;
++      size_t nbytes;
++      void __user *addr;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++      count = req32.count;
++      list32 = (void __user *)(unsigned long)req32.list;
++
++      if (count < 0)
++              return -EINVAL;
++      nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
++      request = compat_alloc_user_space(nbytes);
++      if (!access_ok(VERIFY_WRITE, request, nbytes))
++              return -EFAULT;
++      list = (struct drm_buf_pub *) (request + 1);
++
++      if (__put_user(count, &request->count)
++          || __put_user(list, &request->list))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_MAP_BUFS, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(actual, &request->count))
++              return -EFAULT;
++      if (count >= actual)
++              for (i = 0; i < actual; ++i)
++                      if (__copy_in_user(&list32[i], &list[i],
++                                         offsetof(struct drm_buf_pub, address))
++                          || __get_user(addr, &list[i].address)
++                          || __put_user((unsigned long)addr,
++                                        &list32[i].address))
++                              return -EFAULT;
++
++      if (__put_user(actual, &argp->count)
++          || __get_user(addr, &request->virtual)
++          || __put_user((unsigned long)addr, &argp->virtual))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_buf_free32 {
++      int count;
++      u32 list;
++} drm_buf_free32_t;
++
++static int compat_drm_freebufs(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_buf_free32_t req32;
++      struct drm_buf_free __user *request;
++      drm_buf_free32_t __user *argp = (void __user *)arg;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
++              return -EFAULT;
++      if (__put_user(req32.count, &request->count)
++          || __put_user((int __user *)(unsigned long)req32.list,
++                        &request->list))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_FREE_BUFS, (unsigned long)request);
++}
++
++typedef struct drm_ctx_priv_map32 {
++      unsigned int ctx_id;     /**< Context requesting private mapping */
++      u32 handle;             /**< Handle of map */
++} drm_ctx_priv_map32_t;
++
++static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_ctx_priv_map32_t req32;
++      struct drm_ctx_priv_map __user *request;
++      drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
++              return -EFAULT;
++      if (__put_user(req32.ctx_id, &request->ctx_id)
++          || __put_user((void *)(unsigned long)req32.handle,
++                        &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
++}
++
++static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      struct drm_ctx_priv_map __user *request;
++      drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
++      int err;
++      unsigned int ctx_id;
++      void *handle;
++
++      if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
++          || __get_user(ctx_id, &argp->ctx_id))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
++              return -EFAULT;
++      if (__put_user(ctx_id, &request->ctx_id))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(handle, &request->handle)
++          || __put_user((unsigned long)handle, &argp->handle))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_ctx_res32 {
++      int count;
++      u32 contexts;
++} drm_ctx_res32_t;
++
++static int compat_drm_resctx(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_ctx_res32_t __user *argp = (void __user *)arg;
++      drm_ctx_res32_t res32;
++      struct drm_ctx_res __user *res;
++      int err;
++
++      if (copy_from_user(&res32, argp, sizeof(res32)))
++              return -EFAULT;
++
++      res = compat_alloc_user_space(sizeof(*res));
++      if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
++              return -EFAULT;
++      if (__put_user(res32.count, &res->count)
++          || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
++                        &res->contexts))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_RES_CTX, (unsigned long)res);
++      if (err)
++              return err;
++
++      if (__get_user(res32.count, &res->count)
++          || __put_user(res32.count, &argp->count))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_dma32 {
++      int context;              /**< Context handle */
++      int send_count;           /**< Number of buffers to send */
++      u32 send_indices;         /**< List of handles to buffers */
++      u32 send_sizes;           /**< Lengths of data to send */
++      enum drm_dma_flags flags;                 /**< Flags */
++      int request_count;        /**< Number of buffers requested */
++      int request_size;         /**< Desired size for buffers */
++      u32 request_indices;      /**< Buffer information */
++      u32 request_sizes;
++      int granted_count;        /**< Number of buffers granted */
++} drm_dma32_t;
++
++static int compat_drm_dma(struct file *file, unsigned int cmd,
++                        unsigned long arg)
++{
++      drm_dma32_t d32;
++      drm_dma32_t __user *argp = (void __user *)arg;
++      struct drm_dma __user *d;
++      int err;
++
++      if (copy_from_user(&d32, argp, sizeof(d32)))
++              return -EFAULT;
++
++      d = compat_alloc_user_space(sizeof(*d));
++      if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
++              return -EFAULT;
++
++      if (__put_user(d32.context, &d->context)
++          || __put_user(d32.send_count, &d->send_count)
++          || __put_user((int __user *)(unsigned long)d32.send_indices,
++                        &d->send_indices)
++          || __put_user((int __user *)(unsigned long)d32.send_sizes,
++                        &d->send_sizes)
++          || __put_user(d32.flags, &d->flags)
++          || __put_user(d32.request_count, &d->request_count)
++          || __put_user((int __user *)(unsigned long)d32.request_indices,
++                        &d->request_indices)
++          || __put_user((int __user *)(unsigned long)d32.request_sizes,
++                        &d->request_sizes))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_DMA, (unsigned long)d);
++      if (err)
++              return err;
++
++      if (__get_user(d32.request_size, &d->request_size)
++          || __get_user(d32.granted_count, &d->granted_count)
++          || __put_user(d32.request_size, &argp->request_size)
++          || __put_user(d32.granted_count, &argp->granted_count))
++              return -EFAULT;
++
++      return 0;
++}
++
++#if __OS_HAS_AGP
++typedef struct drm_agp_mode32 {
++      u32 mode;       /**< AGP mode */
++} drm_agp_mode32_t;
++
++static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_agp_mode32_t __user *argp = (void __user *)arg;
++      drm_agp_mode32_t m32;
++      struct drm_agp_mode __user *mode;
++
++      if (get_user(m32.mode, &argp->mode))
++              return -EFAULT;
++
++      mode = compat_alloc_user_space(sizeof(*mode));
++      if (put_user(m32.mode, &mode->mode))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
++}
++
++typedef struct drm_agp_info32 {
++      int agp_version_major;
++      int agp_version_minor;
++      u32 mode;
++      u32 aperture_base;      /* physical address */
++      u32 aperture_size;      /* bytes */
++      u32 memory_allowed;     /* bytes */
++      u32 memory_used;
++
++      /* PCI information */
++      unsigned short id_vendor;
++      unsigned short id_device;
++} drm_agp_info32_t;
++
++static int compat_drm_agp_info(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_agp_info32_t __user *argp = (void __user *)arg;
++      drm_agp_info32_t i32;
++      struct drm_agp_info __user *info;
++      int err;
++
++      info = compat_alloc_user_space(sizeof(*info));
++      if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_AGP_INFO, (unsigned long)info);
++      if (err)
++              return err;
++
++      if (__get_user(i32.agp_version_major, &info->agp_version_major)
++          || __get_user(i32.agp_version_minor, &info->agp_version_minor)
++          || __get_user(i32.mode, &info->mode)
++          || __get_user(i32.aperture_base, &info->aperture_base)
++          || __get_user(i32.aperture_size, &info->aperture_size)
++          || __get_user(i32.memory_allowed, &info->memory_allowed)
++          || __get_user(i32.memory_used, &info->memory_used)
++          || __get_user(i32.id_vendor, &info->id_vendor)
++          || __get_user(i32.id_device, &info->id_device))
++              return -EFAULT;
++
++      if (copy_to_user(argp, &i32, sizeof(i32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++typedef struct drm_agp_buffer32 {
++      u32 size;       /**< In bytes -- will round to page boundary */
++      u32 handle;     /**< Used for binding / unbinding */
++      u32 type;       /**< Type of memory to allocate */
++      u32 physical;   /**< Physical used by i810 */
++} drm_agp_buffer32_t;
++
++static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_agp_buffer32_t __user *argp = (void __user *)arg;
++      drm_agp_buffer32_t req32;
++      struct drm_agp_buffer __user *request;
++      int err;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.size, &request->size)
++          || __put_user(req32.type, &request->type))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(req32.handle, &request->handle)
++          || __get_user(req32.physical, &request->physical)
++          || copy_to_user(argp, &req32, sizeof(req32))) {
++              drm_ioctl(file->f_dentry->d_inode, file,
++                        DRM_IOCTL_AGP_FREE, (unsigned long)request);
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int compat_drm_agp_free(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_agp_buffer32_t __user *argp = (void __user *)arg;
++      struct drm_agp_buffer __user *request;
++      u32 handle;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || get_user(handle, &argp->handle)
++          || __put_user(handle, &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_FREE, (unsigned long)request);
++}
++
++typedef struct drm_agp_binding32 {
++      u32 handle;     /**< From drm_agp_buffer */
++      u32 offset;     /**< In bytes -- will round to page boundary */
++} drm_agp_binding32_t;
++
++static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_agp_binding32_t __user *argp = (void __user *)arg;
++      drm_agp_binding32_t req32;
++      struct drm_agp_binding __user *request;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.handle, &request->handle)
++          || __put_user(req32.offset, &request->offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_BIND, (unsigned long)request);
++}
++
++static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_agp_binding32_t __user *argp = (void __user *)arg;
++      struct drm_agp_binding __user *request;
++      u32 handle;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || get_user(handle, &argp->handle)
++          || __put_user(handle, &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
++}
++#endif                                /* __OS_HAS_AGP */
++
++typedef struct drm_scatter_gather32 {
++      u32 size;       /**< In bytes -- will round to page boundary */
++      u32 handle;     /**< Used for mapping / unmapping */
++} drm_scatter_gather32_t;
++
++static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_scatter_gather32_t __user *argp = (void __user *)arg;
++      struct drm_scatter_gather __user *request;
++      int err;
++      unsigned long x;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
++          || __get_user(x, &argp->size)
++          || __put_user(x, &request->size))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_SG_ALLOC, (unsigned long)request);
++      if (err)
++              return err;
++
++      /* XXX not sure about the handle conversion here... */
++      if (__get_user(x, &request->handle)
++          || __put_user(x >> PAGE_SHIFT, &argp->handle))
++              return -EFAULT;
++
++      return 0;
++}
++
++static int compat_drm_sg_free(struct file *file, unsigned int cmd,
++                            unsigned long arg)
++{
++      drm_scatter_gather32_t __user *argp = (void __user *)arg;
++      struct drm_scatter_gather __user *request;
++      unsigned long x;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
++          || __get_user(x, &argp->handle)
++          || __put_user(x << PAGE_SHIFT, &request->handle))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_SG_FREE, (unsigned long)request);
++}
++
++struct drm_wait_vblank_request32 {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      u32 signal;
++};
++
++struct drm_wait_vblank_reply32 {
++      enum drm_vblank_seq_type type;
++      unsigned int sequence;
++      s32 tval_sec;
++      s32 tval_usec;
++};
++
++typedef union drm_wait_vblank32 {
++      struct drm_wait_vblank_request32 request;
++      struct drm_wait_vblank_reply32 reply;
++} drm_wait_vblank32_t;
++
++static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_wait_vblank32_t __user *argp = (void __user *)arg;
++      drm_wait_vblank32_t req32;
++      union drm_wait_vblank __user *request;
++      int err;
++
++      if (copy_from_user(&req32, argp, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.request.type, &request->request.type)
++          || __put_user(req32.request.sequence, &request->request.sequence)
++          || __put_user(req32.request.signal, &request->request.signal))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
++      if (err)
++              return err;
++
++      if (__get_user(req32.reply.type, &request->reply.type)
++          || __get_user(req32.reply.sequence, &request->reply.sequence)
++          || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
++          || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
++              return -EFAULT;
++
++      if (copy_to_user(argp, &req32, sizeof(req32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++drm_ioctl_compat_t *drm_compat_ioctls[] = {
++      [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
++      [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
++      [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
++      [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
++      [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
++      [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
++      [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
++      [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
++      [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
++#if __OS_HAS_AGP
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
++      [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
++#endif
++      [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
++      [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
++      [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/drm.
++ *
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn;
++      int ret;
++
++
++      /* Assume that ioctls without an explicit compat routine will "just
++       * work".  This may not always be a good assumption, but it's better
++       * than always failing.
++       */
++      if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
++              return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++
++      fn = drm_compat_ioctls[nr];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_compat_ioctl);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_ioctl.c git-nokia/drivers/gpu/drm-tungsten/drm_ioctl.c
+--- git/drivers/gpu/drm-tungsten/drm_ioctl.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_ioctl.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,351 @@
++/**
++ * \file drm_ioctl.c
++ * IOCTL processing for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_core.h"
++
++#include "linux/pci.h"
++
++/**
++ * Get the bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_unique structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Copies the bus id from drm_device::unique into user space.
++ */
++int drm_getunique(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_unique *u = data;
++
++      if (u->unique_len >= dev->unique_len) {
++              if (copy_to_user(u->unique, dev->unique, dev->unique_len))
++                      return -EFAULT;
++      }
++      u->unique_len = dev->unique_len;
++
++      return 0;
++}
++
++/**
++ * Set the bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_unique structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Copies the bus id from userspace into drm_device::unique, and verifies that
++ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
++ * in interface version 1.1 and will return EBUSY when setversion has requested
++ * version 1.1 or greater.
++ */
++int drm_setunique(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_unique *u = data;
++      int domain, bus, slot, func, ret;
++
++      if (dev->unique_len || dev->unique)
++              return -EBUSY;
++
++      if (!u->unique_len || u->unique_len > 1024)
++              return -EINVAL;
++
++      dev->unique_len = u->unique_len;
++      dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
++      if (!dev->unique)
++              return -ENOMEM;
++      if (copy_from_user(dev->unique, u->unique, dev->unique_len))
++              return -EFAULT;
++
++      dev->unique[dev->unique_len] = '\0';
++
++      dev->devname =
++          drm_alloc(strlen(dev->driver->pci_driver.name) +
++                    strlen(dev->unique) + 2, DRM_MEM_DRIVER);
++      if (!dev->devname)
++              return -ENOMEM;
++
++      sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
++              dev->unique);
++
++      /* Return error if the busid submitted doesn't match the device's actual
++       * busid.
++       */
++      ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
++      if (ret != 3)
++              return -EINVAL;
++      domain = bus >> 8;
++      bus &= 0xff;
++
++      if ((domain != drm_get_pci_domain(dev)) ||
++          (bus != dev->pdev->bus->number) ||
++          (slot != PCI_SLOT(dev->pdev->devfn)) ||
++          (func != PCI_FUNC(dev->pdev->devfn)))
++              return -EINVAL;
++
++      return 0;
++}
++
++static int drm_set_busid(struct drm_device * dev)
++{
++      int len;
++      if (dev->unique != NULL)
++              return -EBUSY;
++
++      dev->unique_len = 40;
++      dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
++      if (dev->unique == NULL)
++              return -ENOMEM;
++
++      len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
++                     drm_get_pci_domain(dev),
++                     dev->pdev->bus->number,
++                     PCI_SLOT(dev->pdev->devfn),
++                     PCI_FUNC(dev->pdev->devfn));
++      if (len > dev->unique_len)
++              DRM_ERROR("buffer overflow");
++
++      dev->devname =
++          drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
++                    2, DRM_MEM_DRIVER);
++      if (dev->devname == NULL)
++              return -ENOMEM;
++
++      sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
++              dev->unique);
++
++      return 0;
++}
++
++/**
++ * Get a mapping information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_map structure.
++ *
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches for the mapping with the specified offset and copies its information
++ * into userspace
++ */
++int drm_getmap(struct drm_device *dev, void *data,
++             struct drm_file *file_priv)
++{
++      struct drm_map *map = data;
++      struct drm_map_list *r_list = NULL;
++      struct list_head *list;
++      int idx;
++      int i;
++
++      idx = map->offset;
++
++      mutex_lock(&dev->struct_mutex);
++      if (idx < 0) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      i = 0;
++      list_for_each(list, &dev->maplist) {
++              if (i == idx) {
++                      r_list = list_entry(list, struct drm_map_list, head);
++                      break;
++              }
++              i++;
++      }
++      if (!r_list || !r_list->map) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      map->offset = r_list->map->offset;
++      map->size = r_list->map->size;
++      map->type = r_list->map->type;
++      map->flags = r_list->map->flags;
++      map->handle = (void *)(unsigned long) r_list->user_token;
++      map->mtrr = r_list->map->mtrr;
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Get client information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_client structure.
++ *
++ * \return zero on success or a negative number on failure.
++ *
++ * Searches for the client with the specified index and copies its information
++ * into userspace
++ */
++int drm_getclient(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      struct drm_client *client = data;
++      struct drm_file *pt;
++      int idx;
++      int i;
++
++      idx = client->idx;
++      mutex_lock(&dev->struct_mutex);
++
++      i = 0;
++      list_for_each_entry(pt, &dev->filelist, lhead) {
++              if (i++ >= idx) {
++                      client->auth = pt->authenticated;
++                      client->pid = pt->pid;
++                      client->uid = pt->uid;
++                      client->magic = pt->magic;
++                      client->iocs = pt->ioctl_count;
++                      mutex_unlock(&dev->struct_mutex);
++
++                      return 0;
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++      return -EINVAL;
++}
++
++/**
++ * Get statistics information.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_stats structure.
++ *
++ * \return zero on success or a negative number on failure.
++ */
++int drm_getstats(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      struct drm_stats *stats = data;
++      int i;
++
++      memset(stats, 0, sizeof(*stats));
++
++      mutex_lock(&dev->struct_mutex);
++
++      for (i = 0; i < dev->counters; i++) {
++              if (dev->types[i] == _DRM_STAT_LOCK)
++                      stats->data[i].value =
++                          (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
++              else
++                      stats->data[i].value = atomic_read(&dev->counts[i]);
++              stats->data[i].type = dev->types[i];
++      }
++
++      stats->count = dev->counters;
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Setversion ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Sets the requested interface version
++ */
++int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_set_version *sv = data;
++      int if_version, retcode = 0;
++
++      if (sv->drm_di_major != -1) {
++              if (sv->drm_di_major != DRM_IF_MAJOR ||
++                  sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
++                      retcode = -EINVAL;
++                      goto done;
++              }
++              if_version = DRM_IF_VERSION(sv->drm_di_major,
++                                          sv->drm_di_minor);
++              dev->if_version = max(if_version, dev->if_version);
++              if (sv->drm_di_minor >= 1) {
++                      /*
++                       * Version 1.1 includes tying of DRM to specific device
++                       */
++                      drm_set_busid(dev);
++              }
++      }
++
++      if (sv->drm_dd_major != -1) {
++              if (sv->drm_dd_major != dev->driver->major ||
++                  sv->drm_dd_minor < 0 || sv->drm_dd_minor >
++                  dev->driver->minor) {
++                      retcode = -EINVAL;
++                      goto done;
++              }
++
++              if (dev->driver->set_version)
++                      dev->driver->set_version(dev, sv);
++      }
++
++done:
++      sv->drm_di_major = DRM_IF_MAJOR;
++      sv->drm_di_minor = DRM_IF_MINOR;
++      sv->drm_dd_major = dev->driver->major;
++      sv->drm_dd_minor = dev->driver->minor;
++
++      return retcode;
++}
++
++/** No-op ioctl. */
++int drm_noop(struct drm_device *dev, void *data,
++           struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_irq.c git-nokia/drivers/gpu/drm-tungsten/drm_irq.c
+--- git/drivers/gpu/drm-tungsten/drm_irq.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_irq.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,771 @@
++/**
++ * \file drm_irq.c
++ * IRQ support
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
++ *
++ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#include <linux/interrupt.h>  /* For task queue support */
++
++/**
++ * Get interrupt from bus id.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_irq_busid structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Finds the PCI device with the specified bus id and gets its IRQ number.
++ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
++ * to that of the device that this DRM instance attached to.
++ */
++int drm_irq_by_busid(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_irq_busid *p = data;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++              return -EINVAL;
++
++      if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
++          (p->busnum & 0xff) != dev->pdev->bus->number ||
++          p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
++              return -EINVAL;
++
++      p->irq = dev->pdev->irq;
++
++      DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
++                p->irq);
++
++      return 0;
++}
++
++static void vblank_disable_fn(unsigned long arg)
++{
++      struct drm_device *dev = (struct drm_device *)arg;
++      unsigned long irqflags;
++      int i;
++
++      if (!dev->vblank_disable_allowed)
++              return;
++
++      for (i = 0; i < dev->num_crtcs; i++) {
++              spin_lock_irqsave(&dev->vbl_lock, irqflags);
++              if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
++                  dev->vblank_enabled[i]) {
++                      DRM_DEBUG("disabling vblank on crtc %d\n", i);
++                      dev->last_vblank[i] =
++                              dev->driver->get_vblank_counter(dev, i);
++                      dev->driver->disable_vblank(dev, i);
++                      dev->vblank_enabled[i] = 0;
++              }
++              spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++      }
++}
++
++static void drm_vblank_cleanup(struct drm_device *dev)
++{
++      /* Bail if the driver didn't call drm_vblank_init() */
++      if (dev->num_crtcs == 0)
++              return;
++
++      del_timer(&dev->vblank_disable_timer);
++
++      vblank_disable_fn((unsigned long)dev);
++
++      drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
++               DRM_MEM_DRIVER);
++      drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
++               DRM_MEM_DRIVER);
++      drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++      drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++      drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++      drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
++               DRM_MEM_DRIVER);
++      drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
++               dev->num_crtcs, DRM_MEM_DRIVER);
++
++      dev->num_crtcs = 0;
++}
++
++int drm_vblank_init(struct drm_device *dev, int num_crtcs)
++{
++      int i, ret = -ENOMEM;
++
++      setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
++                  (unsigned long)dev);
++      init_timer_deferrable(&dev->vblank_disable_timer);
++      spin_lock_init(&dev->vbl_lock);
++      atomic_set(&dev->vbl_signal_pending, 0);
++      dev->num_crtcs = num_crtcs;
++
++      dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
++                                 DRM_MEM_DRIVER);
++      if (!dev->vbl_queue)
++              goto err;
++
++      dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
++                                DRM_MEM_DRIVER);
++      if (!dev->vbl_sigs)
++              goto err;
++
++      dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
++                                    DRM_MEM_DRIVER);
++      if (!dev->_vblank_count)
++              goto err;
++
++      dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
++                                       DRM_MEM_DRIVER);
++      if (!dev->vblank_refcount)
++              goto err;
++
++      dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
++                                       DRM_MEM_DRIVER);
++      if (!dev->vblank_enabled)
++              goto err;
++
++      dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
++      if (!dev->last_vblank)
++              goto err;
++
++      dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
++                                       DRM_MEM_DRIVER);
++      if (!dev->vblank_inmodeset)
++              goto err;
++
++      /* Zero per-crtc vblank stuff */
++      for (i = 0; i < num_crtcs; i++) {
++              init_waitqueue_head(&dev->vbl_queue[i]);
++              INIT_LIST_HEAD(&dev->vbl_sigs[i]);
++              atomic_set(&dev->_vblank_count[i], 0);
++              atomic_set(&dev->vblank_refcount[i], 0);
++      }
++
++      dev->vblank_disable_allowed = 0;
++
++      return 0;
++
++err:
++      drm_vblank_cleanup(dev);
++      return ret;
++}
++EXPORT_SYMBOL(drm_vblank_init);
++
++/**
++ * Install IRQ handler.
++ *
++ * \param dev DRM device.
++ *
++ * Initializes the IRQ related data. Installs the handler, calling the driver
++ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
++ * before and after the installation.
++ */
++int drm_irq_install(struct drm_device * dev)
++{
++      int ret = 0;
++      unsigned long sh_flags = 0;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++              return -EINVAL;
++
++      if (dev->pdev->irq == 0)
++              return -EINVAL;
++
++      mutex_lock(&dev->struct_mutex);
++
++      /* Driver must have been initialized */
++      if (!dev->dev_private) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      if (dev->irq_enabled) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EBUSY;
++      }
++      dev->irq_enabled = 1;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("irq=%d\n", dev->pdev->irq);
++
++      /* Before installing handler */
++      dev->driver->irq_preinstall(dev);
++
++      /* Install handler */
++      if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
++              sh_flags = IRQF_SHARED;
++
++      ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
++                        sh_flags, dev->devname, dev);
++      if (ret < 0) {
++              mutex_lock(&dev->struct_mutex);
++              dev->irq_enabled = 0;
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++      /* Expose the device irq to device drivers that want to export it for
++       * whatever reason.
++       */
++      dev->irq = dev->pdev->irq;
++
++      /* After installing handler */
++      ret = dev->driver->irq_postinstall(dev);
++      if (ret < 0) {
++              mutex_lock(&dev->struct_mutex);
++              dev->irq_enabled = 0;
++              mutex_unlock(&dev->struct_mutex);
++      }
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_irq_install);
++
++/**
++ * Uninstall the IRQ handler.
++ *
++ * \param dev DRM device.
++ *
++ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
++ */
++int drm_irq_uninstall(struct drm_device * dev)
++{
++      int irq_enabled;
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++              return -EINVAL;
++
++      mutex_lock(&dev->struct_mutex);
++      irq_enabled = dev->irq_enabled;
++      dev->irq_enabled = 0;
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!irq_enabled)
++              return -EINVAL;
++
++      DRM_DEBUG("irq=%d\n", dev->pdev->irq);
++
++      dev->driver->irq_uninstall(dev);
++
++      free_irq(dev->pdev->irq, dev);
++
++      drm_vblank_cleanup(dev);
++
++      dev->locked_tasklet_func = NULL;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_irq_uninstall);
++
++/**
++ * IRQ control ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_control structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Calls irq_install() or irq_uninstall() according to \p arg.
++ */
++int drm_control(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_control *ctl = data;
++
++      /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
++
++
++      switch (ctl->func) {
++      case DRM_INST_HANDLER:
++              if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++                      return 0;
++              if (dev->if_version < DRM_IF_VERSION(1, 2) &&
++                  ctl->irq != dev->pdev->irq)
++                      return -EINVAL;
++              return drm_irq_install(dev);
++      case DRM_UNINST_HANDLER:
++              if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
++                      return 0;
++              return drm_irq_uninstall(dev);
++      default:
++              return -EINVAL;
++      }
++}
++
++/**
++ * drm_vblank_count - retrieve "cooked" vblank counter value
++ * @dev: DRM device
++ * @crtc: which counter to retrieve
++ *
++ * Fetches the "cooked" vblank count value that represents the number of
++ * vblank events since the system was booted, including lost events due to
++ * modesetting activity.
++ */
++u32 drm_vblank_count(struct drm_device *dev, int crtc)
++{
++      return atomic_read(&dev->_vblank_count[crtc]);
++}
++EXPORT_SYMBOL(drm_vblank_count);
++
++/**
++ * drm_update_vblank_count - update the master vblank counter
++ * @dev: DRM device
++ * @crtc: counter to update
++ *
++ * Call back into the driver to update the appropriate vblank counter
++ * (specified by @crtc).  Deal with wraparound, if it occurred, and
++ * update the last read value so we can deal with wraparound on the next
++ * call if necessary.
++ *
++ * Only necessary when going from off->on, to account for frames we
++ * didn't get an interrupt for.
++ *
++ * Note: caller must hold dev->vbl_lock since this reads & writes
++ * device vblank fields.
++ */
++static void drm_update_vblank_count(struct drm_device *dev, int crtc)
++{
++      u32 cur_vblank, diff;
++
++      /*
++       * Interrupts were disabled prior to this call, so deal with counter
++       * wrap if needed.
++       * NOTE!  It's possible we lost a full dev->max_vblank_count events
++       * here if the register is small or we had vblank interrupts off for
++       * a long time.
++       */
++      cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
++      diff = cur_vblank - dev->last_vblank[crtc];
++      if (cur_vblank < dev->last_vblank[crtc]) {
++              diff += dev->max_vblank_count;
++
++              DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
++                        crtc, dev->last_vblank[crtc], cur_vblank, diff);
++      }
++
++      DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
++                crtc, diff);
++
++      atomic_add(diff, &dev->_vblank_count[crtc]);
++}
++
++/**
++ * drm_vblank_get - get a reference count on vblank events
++ * @dev: DRM device
++ * @crtc: which CRTC to own
++ *
++ * Acquire a reference count on vblank events to avoid having them disabled
++ * while in use.
++ *
++ * RETURNS
++ * Zero on success, nonzero on failure.
++ */
++int drm_vblank_get(struct drm_device *dev, int crtc)
++{
++      unsigned long irqflags;
++      int ret = 0;
++
++      spin_lock_irqsave(&dev->vbl_lock, irqflags);
++      /* Going from 0->1 means we have to enable interrupts again */
++      if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
++          !dev->vblank_enabled[crtc]) {
++              ret = dev->driver->enable_vblank(dev, crtc);
++              if (ret)
++                      atomic_dec(&dev->vblank_refcount[crtc]);
++              else {
++                      dev->vblank_enabled[crtc] = 1;
++                      drm_update_vblank_count(dev, crtc);
++              }
++      }
++      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_vblank_get);
++
++/**
++ * drm_vblank_put - give up ownership of vblank events
++ * @dev: DRM device
++ * @crtc: which counter to give up
++ *
++ * Release ownership of a given vblank counter, turning off interrupts
++ * if possible.
++ */
++void drm_vblank_put(struct drm_device *dev, int crtc)
++{
++      /* Last user schedules interrupt disable */
++      if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
++          mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
++}
++EXPORT_SYMBOL(drm_vblank_put);
++
++/**
++ * drm_modeset_ctl - handle vblank event counter changes across mode switch
++ * @DRM_IOCTL_ARGS: standard ioctl arguments
++ *
++ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
++ * ioctls around modesetting so that any lost vblank events are accounted for.
++ *
++ * Generally the counter will reset across mode sets.  If interrupts are
++ * enabled around this call, we don't have to do anything since the counter
++ * will have already been incremented.
++ */
++int drm_modeset_ctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_modeset_ctl *modeset = data;
++      unsigned long irqflags;
++      int crtc, ret = 0;
++
++      /* If drm_vblank_init() hasn't been called yet, just no-op */
++      if (!dev->num_crtcs)
++              goto out;
++
++      crtc = modeset->crtc;
++      if (crtc >= dev->num_crtcs) {
++              ret = -EINVAL;
++              goto out;
++      }
++
++      /*
++       * To avoid all the problems that might happen if interrupts
++       * were enabled/disabled around or between these calls, we just
++       * have the kernel take a reference on the CRTC (just once though
++ * to avoid corrupting the count if multiple, mismatched calls occur),
++       * so that interrupts remain enabled in the interim.
++       */
++      switch (modeset->cmd) {
++      case _DRM_PRE_MODESET:
++              if (!dev->vblank_inmodeset[crtc]) {
++                      dev->vblank_inmodeset[crtc] = 1;
++                      drm_vblank_get(dev, crtc);
++              }
++              break;
++      case _DRM_POST_MODESET:
++              if (dev->vblank_inmodeset[crtc]) {
++                      spin_lock_irqsave(&dev->vbl_lock, irqflags);
++                      dev->vblank_disable_allowed = 1;
++                      dev->vblank_inmodeset[crtc] = 0;
++                      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++                      drm_vblank_put(dev, crtc);
++              }
++              break;
++      default:
++              ret = -EINVAL;
++              break;
++      }
++
++out:
++      return ret;
++}
++
++/**
++ * Wait for VBLANK.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param data user argument, pointing to a drm_wait_vblank structure.
++ * \return zero on success or a negative number on failure.
++ *
++ * Verifies the IRQ is installed.
++ *
++ * If a signal is requested checks if this task has already scheduled the same signal
++ * for the same vblank sequence number - nothing to be done in
++ * that case. If the number of tasks waiting for the interrupt exceeds 100 the
++ * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
++ * task.
++ *
++ * If a signal is not requested, then calls vblank_wait().
++ */
++int drm_wait_vblank(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      union drm_wait_vblank *vblwait = data;
++      int ret = 0;
++      unsigned int flags, seq, crtc;
++
++      if ((!dev->pdev->irq) || (!dev->irq_enabled))
++              return -EINVAL;
++
++      if (vblwait->request.type &
++          ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
++              DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
++                        vblwait->request.type,
++                        (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
++              return -EINVAL;
++      }
++
++      flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
++      crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
++
++      if (crtc >= dev->num_crtcs)
++              return -EINVAL;
++
++      ret = drm_vblank_get(dev, crtc);
++      if (ret)
++              return ret;
++      seq = drm_vblank_count(dev, crtc);
++
++      switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
++      case _DRM_VBLANK_RELATIVE:
++              vblwait->request.sequence += seq;
++              vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
++      case _DRM_VBLANK_ABSOLUTE:
++              break;
++      default:
++              ret = -EINVAL;
++              goto done;
++      }
++
++      if ((flags & _DRM_VBLANK_NEXTONMISS) &&
++          (seq - vblwait->request.sequence) <= (1<<23)) {
++              vblwait->request.sequence = seq + 1;
++      }
++
++      if (flags & _DRM_VBLANK_SIGNAL) {
++              unsigned long irqflags;
++              struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
++              struct drm_vbl_sig *vbl_sig;
++
++              spin_lock_irqsave(&dev->vbl_lock, irqflags);
++
++              /* Check if this task has already scheduled the same signal
++               * for the same vblank sequence number; nothing to be done in
++               * that case
++               */
++              list_for_each_entry(vbl_sig, vbl_sigs, head) {
++                      if (vbl_sig->sequence == vblwait->request.sequence
++                          && vbl_sig->info.si_signo ==
++                          vblwait->request.signal
++                          && vbl_sig->task == current) {
++                              spin_unlock_irqrestore(&dev->vbl_lock,
++                                                     irqflags);
++                              vblwait->reply.sequence = seq;
++                              goto done;
++                      }
++              }
++
++              if (atomic_read(&dev->vbl_signal_pending) >= 100) {
++                      spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++                      ret = -EBUSY;
++                      goto done;
++              }
++
++              spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++              vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
++                                   DRM_MEM_DRIVER);
++              if (!vbl_sig) {
++                      ret = -ENOMEM;
++                      goto done;
++              }
++
++              ret = drm_vblank_get(dev, crtc);
++              if (ret) {
++                      drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
++                               DRM_MEM_DRIVER);
++                      return ret;
++              }
++
++              atomic_inc(&dev->vbl_signal_pending);
++
++              vbl_sig->sequence = vblwait->request.sequence;
++              vbl_sig->info.si_signo = vblwait->request.signal;
++              vbl_sig->task = current;
++
++              spin_lock_irqsave(&dev->vbl_lock, irqflags);
++
++              list_add_tail(&vbl_sig->head, vbl_sigs);
++
++              spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
++
++              vblwait->reply.sequence = seq;
++      } else {
++              DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
++                          ((drm_vblank_count(dev, crtc)
++                            - vblwait->request.sequence) <= (1 << 23)));
++
++              if (ret != -EINTR) {
++                      struct timeval now;
++
++                      do_gettimeofday(&now);
++
++                      vblwait->reply.tval_sec = now.tv_sec;
++                      vblwait->reply.tval_usec = now.tv_usec;
++                      vblwait->reply.sequence = drm_vblank_count(dev, crtc);
++              }
++      }
++
++done:
++      drm_vblank_put(dev, crtc);
++      return ret;
++}
++
++/**
++ * Send the VBLANK signals.
++ *
++ * \param dev DRM device.
++ * \param crtc CRTC where the vblank event occurred
++ *
++ * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
++ *
++ * Called from drm_handle_vblank() in the vblank interrupt path.
++ */
++static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
++{
++      struct drm_vbl_sig *vbl_sig, *tmp;
++      struct list_head *vbl_sigs;
++      unsigned int vbl_seq;
++      unsigned long flags;
++
++      spin_lock_irqsave(&dev->vbl_lock, flags);
++
++      vbl_sigs = &dev->vbl_sigs[crtc];
++      vbl_seq = drm_vblank_count(dev, crtc);
++
++      list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
++          if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
++              vbl_sig->info.si_code = vbl_seq;
++              send_sig_info(vbl_sig->info.si_signo,
++                            &vbl_sig->info, vbl_sig->task);
++
++              list_del(&vbl_sig->head);
++
++              drm_free(vbl_sig, sizeof(*vbl_sig),
++                       DRM_MEM_DRIVER);
++              atomic_dec(&dev->vbl_signal_pending);
++              drm_vblank_put(dev, crtc);
++          }
++      }
++
++      spin_unlock_irqrestore(&dev->vbl_lock, flags);
++}
++
++/**
++ * drm_handle_vblank - handle a vblank event
++ * @dev: DRM device
++ * @crtc: where this event occurred
++ *
++ * Drivers should call this routine in their vblank interrupt handlers to
++ * update the vblank counter and send any signals that may be pending.
++ */
++void drm_handle_vblank(struct drm_device *dev, int crtc)
++{
++      atomic_inc(&dev->_vblank_count[crtc]);
++      DRM_WAKEUP(&dev->vbl_queue[crtc]);
++      drm_vbl_send_signals(dev, crtc);
++}
++EXPORT_SYMBOL(drm_handle_vblank);
++
++/**
++ * Tasklet wrapper function.
++ *
++ * \param data DRM device in disguise.
++ *
++ * Attempts to grab the HW lock and calls the driver callback on success. On
++ * failure, leave the lock marked as contended so the callback can be called
++ * from drm_unlock().
++ */
++static void drm_locked_tasklet_func(unsigned long data)
++{
++      struct drm_device *dev = (struct drm_device *)data;
++      unsigned long irqflags;
++      void (*tasklet_func)(struct drm_device *);
++      
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++      tasklet_func = dev->locked_tasklet_func;
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++
++      if (!tasklet_func ||
++          !drm_lock_take(&dev->lock,
++                         DRM_KERNEL_CONTEXT)) {
++              return;
++      }
++
++      dev->lock.lock_time = jiffies;
++      atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++      tasklet_func = dev->locked_tasklet_func;
++      dev->locked_tasklet_func = NULL;
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++      
++      if (tasklet_func != NULL)
++              tasklet_func(dev);
++
++      drm_lock_free(&dev->lock,
++                    DRM_KERNEL_CONTEXT);
++}
++
++/**
++ * Schedule a tasklet to call back a driver hook with the HW lock held.
++ *
++ * \param dev DRM device.
++ * \param func Driver callback.
++ *
++ * This is intended for triggering actions that require the HW lock from an
++ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
++ * completes. Note that the callback may be called from interrupt or process
++ * context, it must not make any assumptions about this. Also, the HW lock will
++ * be held with the kernel context or any client context.
++ */
++void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
++{
++      unsigned long irqflags;
++      static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
++
++      if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
++          test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
++              return;
++
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++
++      if (dev->locked_tasklet_func) {
++              spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++              return;
++      }
++
++      dev->locked_tasklet_func = func;
++
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++
++      drm_tasklet.data = (unsigned long)dev;
++
++      tasklet_hi_schedule(&drm_tasklet);
++}
++EXPORT_SYMBOL(drm_locked_tasklet);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_lock.c git-nokia/drivers/gpu/drm-tungsten/drm_lock.c
+--- git/drivers/gpu/drm-tungsten/drm_lock.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_lock.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,389 @@
++/**
++ * \file drm_lock.c
++ * IOCTLs for locking
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++static int drm_notifier(void *priv);
++
++/**
++ * Lock ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Add the current task to the lock wait queue, and attempt to take the lock.
++ */
++int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      DECLARE_WAITQUEUE(entry, current);
++      struct drm_lock *lock = data;
++      int ret = 0;
++
++      ++file_priv->lock_count;
++
++      if (lock->context == DRM_KERNEL_CONTEXT) {
++              DRM_ERROR("Process %d using kernel context %d\n",
++                        current->pid, lock->context);
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
++                lock->context, current->pid,
++                dev->lock.hw_lock->lock, lock->flags);
++
++      if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
++              if (lock->context < 0)
++                      return -EINVAL;
++
++      add_wait_queue(&dev->lock.lock_queue, &entry);
++      spin_lock_bh(&dev->lock.spinlock);
++      dev->lock.user_waiters++;
++      spin_unlock_bh(&dev->lock.spinlock);
++      for (;;) {
++              __set_current_state(TASK_INTERRUPTIBLE);
++              if (!dev->lock.hw_lock) {
++                      /* Device has been unregistered */
++                      ret = -EINTR;
++                      break;
++              }
++              if (drm_lock_take(&dev->lock, lock->context)) {
++                      dev->lock.file_priv = file_priv;
++                      dev->lock.lock_time = jiffies;
++                      atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
++                      break;  /* Got lock */
++              }
++
++              /* Contention */
++              schedule();
++              if (signal_pending(current)) {
++                      ret = -ERESTARTSYS;
++                      break;
++              }
++      }
++      spin_lock_bh(&dev->lock.spinlock);
++      dev->lock.user_waiters--;
++      spin_unlock_bh(&dev->lock.spinlock);
++      __set_current_state(TASK_RUNNING);
++      remove_wait_queue(&dev->lock.lock_queue, &entry);
++
++      DRM_DEBUG("%d %s\n", lock->context,
++                ret ? "interrupted" : "has lock");
++      if (ret) return ret;
++
++      /* don't set the block all signals on the master process for now 
++       * really probably not the correct answer but lets us debug xkb
++       * xserver for now */
++      if (!file_priv->master) {
++              sigemptyset(&dev->sigmask);
++              sigaddset(&dev->sigmask, SIGSTOP);
++              sigaddset(&dev->sigmask, SIGTSTP);
++              sigaddset(&dev->sigmask, SIGTTIN);
++              sigaddset(&dev->sigmask, SIGTTOU);
++              dev->sigdata.context = lock->context;
++              dev->sigdata.lock = dev->lock.hw_lock;
++              block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
++      }
++
++      if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
++              dev->driver->dma_ready(dev);
++
++      if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
++      {
++              if (dev->driver->dma_quiescent(dev)) {
++                      DRM_DEBUG("%d waiting for DMA quiescent\n",
++                                lock->context);
++                      return -EBUSY;
++              }
++      }
++
++      if (dev->driver->kernel_context_switch &&
++          dev->last_context != lock->context) {
++              dev->driver->kernel_context_switch(dev, dev->last_context,
++                                                 lock->context);
++      }
++
++      return 0;
++}
++
++/**
++ * Unlock ioctl.
++ *
++ * \param inode device inode.
++ * \param file_priv DRM file private.
++ * \param cmd command.
++ * \param arg user argument, pointing to a drm_lock structure.
++ * \return zero on success or negative number on failure.
++ *
++ * Transfer and free the lock.
++ */
++int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_lock *lock = data;
++      unsigned long irqflags;
++      void (*tasklet_func)(struct drm_device *);
++
++      if (lock->context == DRM_KERNEL_CONTEXT) {
++              DRM_ERROR("Process %d using kernel context %d\n",
++                        current->pid, lock->context);
++              return -EINVAL;
++      }
++
++      spin_lock_irqsave(&dev->tasklet_lock, irqflags);
++      tasklet_func = dev->locked_tasklet_func;
++      dev->locked_tasklet_func = NULL;
++      spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
++      if (tasklet_func != NULL)
++              tasklet_func(dev);
++
++      atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
++
++      /* kernel_context_switch isn't used by any of the x86 drm
++       * modules but is required by the Sparc driver.
++       */
++      if (dev->driver->kernel_context_switch_unlock)
++              dev->driver->kernel_context_switch_unlock(dev);
++      else {
++              if (drm_lock_free(&dev->lock,lock->context)) {
++                      /* FIXME: Should really bail out here. */
++              }
++      }
++
++      unblock_all_signals();
++      return 0;
++}
++
++/**
++ * Take the heavyweight lock.
++ *
++ * \param lock lock pointer.
++ * \param context locking context.
++ * \return one if the lock is held, or zero otherwise.
++ *
++ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
++ */
++int drm_lock_take(struct drm_lock_data *lock_data,
++                unsigned int context)
++{
++      unsigned int old, new, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      spin_lock_bh(&lock_data->spinlock);
++      do {
++              old = *lock;
++              if (old & _DRM_LOCK_HELD)
++                      new = old | _DRM_LOCK_CONT;
++              else {
++                      new = context | _DRM_LOCK_HELD |
++                              ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
++                               _DRM_LOCK_CONT : 0);
++              }
++              prev = cmpxchg(lock, old, new);
++      } while (prev != old);
++      spin_unlock_bh(&lock_data->spinlock);
++
++      /* Warn on recursive locking of user contexts. */
++      if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
++              if (context != DRM_KERNEL_CONTEXT) {
++                      DRM_ERROR("%d holds heavyweight lock\n",
++                                context);
++              }
++              return 0;
++      }
++
++      return !_DRM_LOCK_IS_HELD(old);
++}
++
++/**
++ * This takes a lock forcibly and hands it to context.        Should ONLY be used
++ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
++ *
++ * \param dev DRM device.
++ * \param lock lock pointer.
++ * \param context locking context.
++ * \return always one.
++ *
++ * Resets the lock file pointer.
++ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
++ */
++static int drm_lock_transfer(struct drm_lock_data *lock_data,
++                           unsigned int context)
++{
++      unsigned int old, new, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      lock_data->file_priv = NULL;
++      do {
++              old = *lock;
++              new = context | _DRM_LOCK_HELD;
++              prev = cmpxchg(lock, old, new);
++      } while (prev != old);
++      return 1;
++}
++
++/**
++ * Free lock.
++ *
++ * \param dev DRM device.
++ * \param lock lock.
++ * \param context context.
++ *
++ * Resets the lock file pointer.
++ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
++ * waiting on the lock queue.
++ */
++int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
++{
++      unsigned int old, new, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      spin_lock_bh(&lock_data->spinlock);
++      if (lock_data->kernel_waiters != 0) {
++              drm_lock_transfer(lock_data, 0);
++              lock_data->idle_has_lock = 1;
++              spin_unlock_bh(&lock_data->spinlock);
++              return 1;
++      }
++      spin_unlock_bh(&lock_data->spinlock);
++
++      do {
++              old = *lock;
++              new = _DRM_LOCKING_CONTEXT(old);
++              prev = cmpxchg(lock, old, new);
++      } while (prev != old);
++
++      if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
++              DRM_ERROR("%d freed heavyweight lock held by %d\n",
++                        context, _DRM_LOCKING_CONTEXT(old));
++              return 1;
++      }
++      wake_up_interruptible(&lock_data->lock_queue);
++      return 0;
++}
++
++/**
++ * If we get here, it means that the process has called DRM_IOCTL_LOCK
++ * without calling DRM_IOCTL_UNLOCK.
++ *
++ * If the lock is not held, then let the signal proceed as usual.  If the lock
++ * is held, then set the contended flag and keep the signal blocked.
++ *
++ * \param priv pointer to a drm_sigdata structure.
++ * \return one if the signal should be delivered normally, or zero if the
++ * signal should be blocked.
++ */
++static int drm_notifier(void *priv)
++{
++      struct drm_sigdata *s = (struct drm_sigdata *) priv;
++      unsigned int old, new, prev;
++
++      /* Allow signal delivery if lock isn't held */
++      if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
++          || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
++              return 1;
++
++      /* Otherwise, set flag to force call to
++         drmUnlock */
++      do {
++              old = s->lock->lock;
++              new = old | _DRM_LOCK_CONT;
++              prev = cmpxchg(&s->lock->lock, old, new);
++      } while (prev != old);
++      return 0;
++}
++
++/**
++ * This function returns immediately and takes the hw lock
++ * with the kernel context if it is free, otherwise it gets the highest priority when and if
++ * it is eventually released.
++ *
++ * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
++ * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
++ * a deadlock, which is why the "idlelock" was invented).
++ *
++ * This should be sufficient to wait for GPU idle without
++ * having to worry about starvation.
++ */
++
++void drm_idlelock_take(struct drm_lock_data *lock_data)
++{
++      int ret = 0;
++
++      spin_lock_bh(&lock_data->spinlock);
++      lock_data->kernel_waiters++;
++      if (!lock_data->idle_has_lock) {
++
++              spin_unlock_bh(&lock_data->spinlock);
++              ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
++              spin_lock_bh(&lock_data->spinlock);
++
++              if (ret == 1)
++                      lock_data->idle_has_lock = 1;
++      }
++      spin_unlock_bh(&lock_data->spinlock);
++}
++EXPORT_SYMBOL(drm_idlelock_take);
++
++void drm_idlelock_release(struct drm_lock_data *lock_data)
++{
++      unsigned int old, prev;
++      volatile unsigned int *lock = &lock_data->hw_lock->lock;
++
++      spin_lock_bh(&lock_data->spinlock);
++      if (--lock_data->kernel_waiters == 0) {
++              if (lock_data->idle_has_lock) {
++                      do {
++                              old = *lock;
++                              prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
++                      } while (prev != old);
++                      wake_up_interruptible(&lock_data->lock_queue);
++                      lock_data->idle_has_lock = 0;
++              }
++      }
++      spin_unlock_bh(&lock_data->spinlock);
++}
++EXPORT_SYMBOL(drm_idlelock_release);
++
++int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
++{
++
++      return (file_priv->lock_count && dev->lock.hw_lock &&
++              _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
++              dev->lock.file_priv == file_priv);
++}
++
++EXPORT_SYMBOL(drm_i_have_hw_lock);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory.c git-nokia/drivers/gpu/drm-tungsten/drm_memory.c
+--- git/drivers/gpu/drm-tungsten/drm_memory.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,374 @@
++/**
++ * \file drm_memory.c
++ * Memory management wrappers for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/highmem.h>
++#include <asm-generic/iomap.h>
++#include "drmP.h"
++
++static struct {
++      spinlock_t lock;
++      uint64_t cur_used;
++      uint64_t emer_used;
++      uint64_t low_threshold;
++      uint64_t high_threshold;
++      uint64_t emer_threshold;
++} drm_memctl = {
++      .lock = SPIN_LOCK_UNLOCKED
++};
++
++static inline size_t drm_size_align(size_t size)
++{
++      size_t tmpSize = 4;
++      if (size > PAGE_SIZE)
++              return PAGE_ALIGN(size);
++
++      while (tmpSize < size)
++              tmpSize <<= 1;
++
++      return (size_t) tmpSize;
++}
++
++int drm_alloc_memctl(size_t size)
++{
++        int ret = 0;
++      unsigned long a_size = drm_size_align(size);
++      unsigned long new_used;
++
++      spin_lock(&drm_memctl.lock);
++      new_used = drm_memctl.cur_used + a_size;
++      if (likely(new_used < drm_memctl.high_threshold)) {
++              drm_memctl.cur_used = new_used;
++              goto out;
++      }
++
++      /*
++       * Allow small allocations from root-only processes to
++       * succeed until the emergency threshold is reached.
++       */
++
++      new_used += drm_memctl.emer_used;
++      if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
++                   (a_size > 16*PAGE_SIZE) ||
++                   (new_used > drm_memctl.emer_threshold))) {
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      drm_memctl.cur_used = drm_memctl.high_threshold;
++      drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
++out:
++      spin_unlock(&drm_memctl.lock);
++      return ret;
++}
++EXPORT_SYMBOL(drm_alloc_memctl);
++
++
++void drm_free_memctl(size_t size)
++{
++      unsigned long a_size = drm_size_align(size);
++
++      spin_lock(&drm_memctl.lock);
++      if (likely(a_size >= drm_memctl.emer_used)) {
++              a_size -= drm_memctl.emer_used;
++              drm_memctl.emer_used = 0;
++      } else {
++              drm_memctl.emer_used -= a_size;
++              a_size = 0;
++      }
++      drm_memctl.cur_used -= a_size;
++      spin_unlock(&drm_memctl.lock);
++}
++EXPORT_SYMBOL(drm_free_memctl);
++
++void drm_query_memctl(uint64_t *cur_used,
++                    uint64_t *emer_used,
++                    uint64_t *low_threshold,
++                    uint64_t *high_threshold,
++                    uint64_t *emer_threshold)
++{
++      spin_lock(&drm_memctl.lock);
++      *cur_used = drm_memctl.cur_used;
++      *emer_used = drm_memctl.emer_used;
++      *low_threshold = drm_memctl.low_threshold;
++      *high_threshold = drm_memctl.high_threshold;
++      *emer_threshold = drm_memctl.emer_threshold;
++      spin_unlock(&drm_memctl.lock);
++}
++EXPORT_SYMBOL(drm_query_memctl);
++
++void drm_init_memctl(size_t p_low_threshold,
++                   size_t p_high_threshold,
++                   size_t unit_size)
++{
++      spin_lock(&drm_memctl.lock);
++      drm_memctl.emer_used = 0;
++      drm_memctl.cur_used = 0;
++      drm_memctl.low_threshold = p_low_threshold * unit_size;
++      drm_memctl.high_threshold = p_high_threshold * unit_size;
++      drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
++              drm_memctl.high_threshold;
++      spin_unlock(&drm_memctl.lock);
++}
++
++
++#ifndef DEBUG_MEMORY
++
++/** No-op. */
++void drm_mem_init(void)
++{
++}
++
++/**
++ * Called when "/proc/dri/%dev%/mem" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param len requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * No-op.
++ */
++int drm_mem_info(char *buf, char **start, off_t offset,
++               int len, int *eof, void *data)
++{
++      return 0;
++}
++
++/** Wrapper around kmalloc() */
++void *drm_calloc(size_t nmemb, size_t size, int area)
++{
++      return kcalloc(nmemb, size, GFP_KERNEL);
++}
++EXPORT_SYMBOL(drm_calloc);
++
++/** Wrapper around kmalloc() and kfree() */
++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
++{
++      void *pt;
++
++      if (!(pt = kmalloc(size, GFP_KERNEL)))
++              return NULL;
++      if (oldpt && oldsize) {
++              memcpy(pt, oldpt, DRM_MIN(oldsize,size));
++              kfree(oldpt);
++      }
++      return pt;
++}
++
++/**
++ * Allocate pages.
++ *
++ * \param order size order.
++ * \param area memory area. (Not used.)
++ * \return page address on success, or zero on failure.
++ *
++ * Allocate and reserve free pages.
++ */
++unsigned long drm_alloc_pages(int order, int area)
++{
++      unsigned long address;
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      address = __get_free_pages(GFP_KERNEL, order);
++      if (!address)
++              return 0;
++
++      /* Zero */
++      memset((void *)address, 0, bytes);
++
++      /* Reserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return address;
++}
++
++/**
++ * Free pages.
++ *
++ * \param address address of the pages to free.
++ * \param order size order.
++ * \param area memory area. (Not used.)
++ *
++ * Unreserve and free pages allocated by alloc_pages().
++ */
++void drm_free_pages(unsigned long address, int order, int area)
++{
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      if (!address)
++              return;
++
++      /* Unreserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              ClearPageReserved(virt_to_page(addr));
++      }
++
++      free_pages(address, order);
++}
++
++#if __OS_HAS_AGP
++static void *agp_remap(unsigned long offset, unsigned long size,
++                            struct drm_device * dev)
++{
++      unsigned long *phys_addr_map, i, num_pages =
++          PAGE_ALIGN(size) / PAGE_SIZE;
++      struct drm_agp_mem *agpmem;
++      struct page **page_map;
++      void *addr;
++
++      size = PAGE_ALIGN(size);
++
++#ifdef __alpha__
++      offset -= dev->hose->mem_space->start;
++#endif
++
++      list_for_each_entry(agpmem, &dev->agp->memory, head)
++              if (agpmem->bound <= offset
++                  && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
++                  (offset + size))
++                      break;
++      if (!agpmem)
++              return NULL;
++
++      /*
++       * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
++       * the CPU do not get remapped by the GART.  We fix this by using the kernel's
++       * page-table instead (that's probably faster anyhow...).
++       */
++      /* note: use vmalloc() because num_pages could be large... */
++      page_map = vmalloc(num_pages * sizeof(struct page *));
++      if (!page_map)
++              return NULL;
++
++      phys_addr_map =
++          agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
++      for (i = 0; i < num_pages; ++i)
++              page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
++      addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
++      vfree(page_map);
++
++      return addr;
++}
++
++/** Wrapper around agp_allocate_memory() */
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++      return drm_agp_allocate_memory(pages, type);
++}
++#else
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++      return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
++}
++#endif
++
++/** Wrapper around agp_free_memory() */
++int drm_free_agp(DRM_AGP_MEM * handle, int pages)
++{
++      return drm_agp_free_memory(handle) ? 0 : -EINVAL;
++}
++EXPORT_SYMBOL(drm_free_agp);
++
++/** Wrapper around agp_bind_memory() */
++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
++{
++      return drm_agp_bind_memory(handle, start);
++}
++
++/** Wrapper around agp_unbind_memory() */
++int drm_unbind_agp(DRM_AGP_MEM * handle)
++{
++      return drm_agp_unbind_memory(handle);
++}
++EXPORT_SYMBOL(drm_unbind_agp);
++
++#else  /* __OS_HAS_AGP*/
++static void *agp_remap(unsigned long offset, unsigned long size,
++                     struct drm_device * dev)
++{
++      return NULL;
++}
++#endif                                /* agp */
++#else
++static void *agp_remap(unsigned long offset, unsigned long size,
++                     struct drm_device * dev)
++{
++      return NULL;
++}
++#endif                                /* debug_memory */
++
++void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
++{
++      if (drm_core_has_AGP(dev) &&
++          dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
++              map->handle = agp_remap(map->offset, map->size, dev);
++      else
++              map->handle = ioremap(map->offset, map->size);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremap);
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
++{
++      map->handle = ioremap_wc(map->offset, map->size);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremap_wc);
++#endif
++
++void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
++{
++      if (!map->handle || !map->size)
++              return;
++
++      if (drm_core_has_AGP(dev) &&
++          dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
++              vunmap(map->handle);
++      else
++              iounmap(map->handle);
++}
++EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory_debug.c git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.c
+--- git/drivers/gpu/drm-tungsten/drm_memory_debug.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,403 @@
++/**
++ * \file drm_memory_debug.c
++ * Memory management wrappers for DRM.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#ifdef DEBUG_MEMORY
++
++typedef struct drm_mem_stats {
++      const char *name;
++      int succeed_count;
++      int free_count;
++      int fail_count;
++      unsigned long bytes_allocated;
++      unsigned long bytes_freed;
++} drm_mem_stats_t;
++
++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
++static unsigned long drm_ram_available = 0;   /* In pages */
++static unsigned long drm_ram_used = 0;
++static drm_mem_stats_t drm_mem_stats[] = {
++      [DRM_MEM_DMA] = {"dmabufs"},
++      [DRM_MEM_SAREA] = {"sareas"},
++      [DRM_MEM_DRIVER] = {"driver"},
++      [DRM_MEM_MAGIC] = {"magic"},
++      [DRM_MEM_IOCTLS] = {"ioctltab"},
++      [DRM_MEM_MAPS] = {"maplist"},
++      [DRM_MEM_VMAS] = {"vmalist"},
++      [DRM_MEM_BUFS] = {"buflist"},
++      [DRM_MEM_SEGS] = {"seglist"},
++      [DRM_MEM_PAGES] = {"pagelist"},
++      [DRM_MEM_FILES] = {"files"},
++      [DRM_MEM_QUEUES] = {"queues"},
++      [DRM_MEM_CMDS] = {"commands"},
++      [DRM_MEM_MAPPINGS] = {"mappings"},
++      [DRM_MEM_BUFLISTS] = {"buflists"},
++      [DRM_MEM_AGPLISTS] = {"agplist"},
++      [DRM_MEM_SGLISTS] = {"sglist"},
++      [DRM_MEM_TOTALAGP] = {"totalagp"},
++      [DRM_MEM_BOUNDAGP] = {"boundagp"},
++      [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
++      [DRM_MEM_CTXLIST] = {"ctxlist"},
++      [DRM_MEM_STUB] = {"stub"},
++      {NULL, 0,}              /* Last entry must be null */
++};
++
++void drm_mem_init(void)
++{
++      drm_mem_stats_t *mem;
++      struct sysinfo si;
++
++      for (mem = drm_mem_stats; mem->name; ++mem) {
++              mem->succeed_count = 0;
++              mem->free_count = 0;
++              mem->fail_count = 0;
++              mem->bytes_allocated = 0;
++              mem->bytes_freed = 0;
++      }
++
++      si_meminfo(&si);
++      drm_ram_available = si.totalram;
++      drm_ram_used = 0;
++}
++
++/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
++
++static int drm__mem_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data)
++{
++      drm_mem_stats_t *pt;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *eof = 0;
++      *start = &buf[offset];
++
++      DRM_PROC_PRINT("                  total counts                  "
++                     " |    outstanding  \n");
++      DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
++                     " | allocs      bytes\n\n");
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "system", 0, 0, 0,
++                     drm_ram_available << (PAGE_SHIFT - 10));
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "locked", 0, 0, 0, drm_ram_used >> 10);
++      DRM_PROC_PRINT("\n");
++      for (pt = drm_mem_stats; pt->name; pt++) {
++              DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
++                             pt->name,
++                             pt->succeed_count,
++                             pt->free_count,
++                             pt->fail_count,
++                             pt->bytes_allocated,
++                             pt->bytes_freed,
++                             pt->succeed_count - pt->free_count,
++                             (long)pt->bytes_allocated
++                             - (long)pt->bytes_freed);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++int drm_mem_info(char *buf, char **start, off_t offset,
++               int len, int *eof, void *data)
++{
++      int ret;
++
++      spin_lock(&drm_mem_lock);
++      ret = drm__mem_info(buf, start, offset, len, eof, data);
++      spin_unlock(&drm_mem_lock);
++      return ret;
++}
++
++void *drm_alloc(size_t size, int area)
++{
++      void *pt;
++
++      if (!size) {
++              DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
++              return NULL;
++      }
++
++      if (!(pt = kmalloc(size, GFP_KERNEL))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return NULL;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += size;
++      spin_unlock(&drm_mem_lock);
++      return pt;
++}
++EXPORT_SYMBOL(drm_alloc);
++
++void *drm_calloc(size_t nmemb, size_t size, int area)
++{
++      void *addr;
++
++      addr = drm_alloc(nmemb * size, area);
++      if (addr != NULL)
++              memset((void *)addr, 0, size * nmemb);
++
++      return addr;
++}
++EXPORT_SYMBOL(drm_calloc);
++
++void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
++{
++      void *pt;
++
++      if (!(pt = drm_alloc(size, area)))
++              return NULL;
++      if (oldpt && oldsize) {
++              memcpy(pt, oldpt, oldsize);
++              drm_free(oldpt, oldsize, area);
++      }
++      return pt;
++}
++EXPORT_SYMBOL(drm_realloc);
++
++void drm_free(void *pt, size_t size, int area)
++{
++      int alloc_count;
++      int free_count;
++
++      if (!pt)
++              DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
++      else
++              kfree(pt);
++      spin_lock(&drm_mem_lock);
++      drm_mem_stats[area].bytes_freed += size;
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++EXPORT_SYMBOL(drm_free);
++
++unsigned long drm_alloc_pages(int order, int area)
++{
++      unsigned long address;
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      spin_lock(&drm_mem_lock);
++      if ((drm_ram_used >> PAGE_SHIFT)
++          > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_unlock(&drm_mem_lock);
++
++      address = __get_free_pages(GFP_KERNEL, order);
++      if (!address) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += bytes;
++      drm_ram_used += bytes;
++      spin_unlock(&drm_mem_lock);
++
++      /* Zero outside the lock */
++      memset((void *)address, 0, bytes);
++
++      /* Reserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return address;
++}
++
++void drm_free_pages(unsigned long address, int order, int area)
++{
++      unsigned long bytes = PAGE_SIZE << order;
++      int alloc_count;
++      int free_count;
++      unsigned long addr;
++      unsigned int sz;
++
++      if (!address) {
++              DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++      } else {
++              /* Unreserve */
++              for (addr = address, sz = bytes;
++                   sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++                      ClearPageReserved(virt_to_page(addr));
++              }
++              free_pages(address, order);
++      }
++
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_freed += bytes;
++      drm_ram_used -= bytes;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++
++#if __OS_HAS_AGP
++
++DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
++{
++      DRM_AGP_MEM *handle;
++
++      if (!pages) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
++              return NULL;
++      }
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++      if ((handle = drm_agp_allocate_memory(pages, type))) {
++#else
++      if ((handle = drm_agp_allocate_memory(dev->agp->bridge, pages, type))) {
++#endif
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return handle;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return NULL;
++}
++
++int drm_free_agp(DRM_AGP_MEM * handle, int pages)
++{
++      int alloc_count;
++      int free_count;
++      int retval = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                            "Attempt to free NULL AGP handle\n");
++              return retval;
++      }
++
++      if (drm_agp_free_memory(handle)) {
++              spin_lock(&drm_mem_lock);
++              free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
++              alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              if (free_count > alloc_count) {
++                      DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                                    "Excess frees: %d frees, %d allocs\n",
++                                    free_count, alloc_count);
++              }
++              return 0;
++      }
++      return retval;
++}
++
++int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
++{
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to bind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if (!(retcode = drm_agp_bind_memory(handle, start))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
++                  += handle->page_count << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return retcode;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return retcode;
++}
++
++int drm_unbind_agp(DRM_AGP_MEM * handle)
++{
++      int alloc_count;
++      int free_count;
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to unbind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if ((retcode = drm_agp_unbind_memory(handle)))
++              return retcode;
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
++      alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++      drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
++          += handle->page_count << PAGE_SHIFT;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++      return retcode;
++}
++
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory_debug.h git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.h
+--- git/drivers/gpu/drm-tungsten/drm_memory_debug.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory_debug.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,379 @@
++/**
++ * \file drm_memory_debug.h
++ * Memory management wrappers for DRM.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++typedef struct drm_mem_stats {
++      const char *name;
++      int succeed_count;
++      int free_count;
++      int fail_count;
++      unsigned long bytes_allocated;
++      unsigned long bytes_freed;
++} drm_mem_stats_t;
++
++static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
++static unsigned long drm_ram_available = 0;   /* In pages */
++static unsigned long drm_ram_used = 0;
++static drm_mem_stats_t drm_mem_stats[] =
++{
++      [DRM_MEM_DMA] = {"dmabufs"},
++      [DRM_MEM_SAREA] = {"sareas"},
++      [DRM_MEM_DRIVER] = {"driver"},
++      [DRM_MEM_MAGIC] = {"magic"},
++      [DRM_MEM_IOCTLS] = {"ioctltab"},
++      [DRM_MEM_MAPS] = {"maplist"},
++      [DRM_MEM_VMAS] = {"vmalist"},
++      [DRM_MEM_BUFS] = {"buflist"},
++      [DRM_MEM_SEGS] = {"seglist"},
++      [DRM_MEM_PAGES] = {"pagelist"},
++      [DRM_MEM_FILES] = {"files"},
++      [DRM_MEM_QUEUES] = {"queues"},
++      [DRM_MEM_CMDS] = {"commands"},
++      [DRM_MEM_MAPPINGS] = {"mappings"},
++      [DRM_MEM_BUFLISTS] = {"buflists"},
++      [DRM_MEM_AGPLISTS] = {"agplist"},
++      [DRM_MEM_SGLISTS] = {"sglist"},
++      [DRM_MEM_TOTALAGP] = {"totalagp"},
++      [DRM_MEM_BOUNDAGP] = {"boundagp"},
++      [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
++      [DRM_MEM_CTXLIST] = {"ctxlist"},
++      [DRM_MEM_STUB] = {"stub"},
++      {NULL, 0,}              /* Last entry must be null */
++};
++
++void drm_mem_init (void) {
++      drm_mem_stats_t *mem;
++      struct sysinfo si;
++
++      for (mem = drm_mem_stats; mem->name; ++mem) {
++              mem->succeed_count = 0;
++              mem->free_count = 0;
++              mem->fail_count = 0;
++              mem->bytes_allocated = 0;
++              mem->bytes_freed = 0;
++      }
++
++      si_meminfo(&si);
++      drm_ram_available = si.totalram;
++      drm_ram_used = 0;
++}
++
++/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
++
++static int drm__mem_info (char *buf, char **start, off_t offset,
++                         int request, int *eof, void *data) {
++      drm_mem_stats_t *pt;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *eof = 0;
++      *start = &buf[offset];
++
++      DRM_PROC_PRINT("                  total counts                  "
++                     " |    outstanding  \n");
++      DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
++                     " | allocs      bytes\n\n");
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "system", 0, 0, 0,
++                     drm_ram_available << (PAGE_SHIFT - 10));
++      DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
++                     "locked", 0, 0, 0, drm_ram_used >> 10);
++      DRM_PROC_PRINT("\n");
++      for (pt = drm_mem_stats; pt->name; pt++) {
++              DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
++                             pt->name,
++                             pt->succeed_count,
++                             pt->free_count,
++                             pt->fail_count,
++                             pt->bytes_allocated,
++                             pt->bytes_freed,
++                             pt->succeed_count - pt->free_count,
++                             (long)pt->bytes_allocated
++                             - (long)pt->bytes_freed);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++int drm_mem_info (char *buf, char **start, off_t offset,
++                 int len, int *eof, void *data) {
++      int ret;
++
++      spin_lock(&drm_mem_lock);
++      ret = drm__mem_info (buf, start, offset, len, eof, data);
++      spin_unlock(&drm_mem_lock);
++      return ret;
++}
++
++void *drm_alloc (size_t size, int area) {
++      void *pt;
++
++      if (!size) {
++              DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
++              return NULL;
++      }
++
++      if (!(pt = kmalloc(size, GFP_KERNEL))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return NULL;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += size;
++      spin_unlock(&drm_mem_lock);
++      return pt;
++}
++
++void *drm_calloc (size_t nmemb, size_t size, int area) {
++      void *addr;
++
++      addr = drm_alloc (nmemb * size, area);
++      if (addr != NULL)
++              memset((void *)addr, 0, size * nmemb);
++
++      return addr;
++}
++
++void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
++      void *pt;
++
++      if (!(pt = drm_alloc (size, area)))
++              return NULL;
++      if (oldpt && oldsize) {
++              memcpy(pt, oldpt, oldsize);
++              drm_free (oldpt, oldsize, area);
++      }
++      return pt;
++}
++
++void drm_free (void *pt, size_t size, int area) {
++      int alloc_count;
++      int free_count;
++
++      if (!pt)
++              DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
++      else
++              kfree(pt);
++      spin_lock(&drm_mem_lock);
++      drm_mem_stats[area].bytes_freed += size;
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++
++unsigned long drm_alloc_pages (int order, int area) {
++      unsigned long address;
++      unsigned long bytes = PAGE_SIZE << order;
++      unsigned long addr;
++      unsigned int sz;
++
++      spin_lock(&drm_mem_lock);
++      if ((drm_ram_used >> PAGE_SHIFT)
++          > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_unlock(&drm_mem_lock);
++
++      address = __get_free_pages(GFP_KERNEL, order);
++      if (!address) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += bytes;
++      drm_ram_used += bytes;
++      spin_unlock(&drm_mem_lock);
++
++      /* Zero outside the lock */
++      memset((void *)address, 0, bytes);
++
++      /* Reserve */
++      for (addr = address, sz = bytes;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return address;
++}
++
++void drm_free_pages (unsigned long address, int order, int area) {
++      unsigned long bytes = PAGE_SIZE << order;
++      int alloc_count;
++      int free_count;
++      unsigned long addr;
++      unsigned int sz;
++
++      if (!address) {
++              DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++      } else {
++              /* Unreserve */
++              for (addr = address, sz = bytes;
++                   sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++                      ClearPageReserved(virt_to_page(addr));
++              }
++              free_pages(address, order);
++      }
++
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_freed += bytes;
++      drm_ram_used -= bytes;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++}
++
++#if __OS_HAS_AGP
++
++DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) {
++      DRM_AGP_MEM *handle;
++
++      if (!pages) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
++              return NULL;
++      }
++
++      if ((handle = drm_agp_allocate_memory (pages, type))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return handle;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return NULL;
++}
++
++int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
++      int alloc_count;
++      int free_count;
++      int retval = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                            "Attempt to free NULL AGP handle\n");
++              return retval;
++      }
++
++      if (drm_agp_free_memory (handle)) {
++              spin_lock(&drm_mem_lock);
++              free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
++              alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
++                  += pages << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              if (free_count > alloc_count) {
++                      DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
++                                    "Excess frees: %d frees, %d allocs\n",
++                                    free_count, alloc_count);
++              }
++              return 0;
++      }
++      return retval;
++}
++
++int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to bind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if (!(retcode = drm_agp_bind_memory (handle, start))) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++              drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
++                  += handle->page_count << PAGE_SHIFT;
++              spin_unlock(&drm_mem_lock);
++              return retcode;
++      }
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
++      spin_unlock(&drm_mem_lock);
++      return retcode;
++}
++
++int drm_unbind_agp (DRM_AGP_MEM * handle) {
++      int alloc_count;
++      int free_count;
++      int retcode = -EINVAL;
++
++      if (!handle) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Attempt to unbind NULL AGP handle\n");
++              return retcode;
++      }
++
++      if ((retcode = drm_agp_unbind_memory (handle)))
++              return retcode;
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
++      alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
++      drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
++          += handle->page_count << PAGE_SHIFT;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++      return retcode;
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_memory.h git-nokia/drivers/gpu/drm-tungsten/drm_memory.h
+--- git/drivers/gpu/drm-tungsten/drm_memory.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_memory.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,61 @@
++/**
++ * \file drm_memory.h
++ * Memory management wrappers for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++/**
++ * Cut down version of drm_memory_debug.h, which used to be called
++ * drm_memory.h.
++ */
++
++#if __OS_HAS_AGP
++
++#include <linux/vmalloc.h>
++
++#ifdef HAVE_PAGE_AGP
++#include <asm/agp.h>
++#else
++# ifdef __powerpc__
++#  define PAGE_AGP    __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
++# else
++#  define PAGE_AGP    PAGE_KERNEL
++# endif
++#endif
++
++#else                         /* __OS_HAS_AGP */
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_mm.c git-nokia/drivers/gpu/drm-tungsten/drm_mm.c
+--- git/drivers/gpu/drm-tungsten/drm_mm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_mm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,298 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++
++/*
++ * Generic simple memory manager implementation. Intended to be used as a base
++ * class implementation for more advanced memory managers.
++ *
++ * Note that the algorithm used is quite simple and there might be substantial
++ * performance gains if a smarter free list is implemented. Currently it is just an
++ * unordered stack of free regions. This could easily be improved if an RB-tree
++ * is used instead. At least if we expect heavy fragmentation.
++ *
++ * Aligned allocations can also see improvement.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include <linux/slab.h>
++
++unsigned long drm_mm_tail_space(struct drm_mm *mm)
++{
++      struct list_head *tail_node;
++      struct drm_mm_node *entry;
++
++      tail_node = mm->ml_entry.prev;
++      entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++      if (!entry->free)
++              return 0;
++
++      return entry->size;
++}
++
++int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
++{
++      struct list_head *tail_node;
++      struct drm_mm_node *entry;
++
++      tail_node = mm->ml_entry.prev;
++      entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++      if (!entry->free)
++              return -ENOMEM;
++
++      if (entry->size <= size)
++              return -ENOMEM;
++
++      entry->size -= size;
++      return 0;
++}
++
++
++static int drm_mm_create_tail_node(struct drm_mm *mm,
++                          unsigned long start,
++                          unsigned long size)
++{
++      struct drm_mm_node *child;
++
++      child = (struct drm_mm_node *)
++              drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
++      if (!child)
++              return -ENOMEM;
++
++      child->free = 1;
++      child->size = size;
++      child->start = start;
++      child->mm = mm;
++
++      list_add_tail(&child->ml_entry, &mm->ml_entry);
++      list_add_tail(&child->fl_entry, &mm->fl_entry);
++
++      return 0;
++}
++
++
++int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
++{
++      struct list_head *tail_node;
++      struct drm_mm_node *entry;
++
++      tail_node = mm->ml_entry.prev;
++      entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
++      if (!entry->free) {
++              return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
++      }
++      entry->size += size;
++      return 0;
++}
++
++static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
++                                          unsigned long size)
++{
++      struct drm_mm_node *child;
++
++      child = (struct drm_mm_node *)
++              drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
++      if (!child)
++              return NULL;
++
++      INIT_LIST_HEAD(&child->fl_entry);
++
++      child->free = 0;
++      child->size = size;
++      child->start = parent->start;
++      child->mm = parent->mm;
++
++      list_add_tail(&child->ml_entry, &parent->ml_entry);
++      INIT_LIST_HEAD(&child->fl_entry);
++
++      parent->size -= size;
++      parent->start += size;
++      return child;
++}
++
++struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
++                              unsigned long size, unsigned alignment)
++{
++
++      struct drm_mm_node *align_splitoff = NULL;
++      struct drm_mm_node *child;
++      unsigned tmp = 0;
++
++      if (alignment)
++              tmp = parent->start % alignment;
++
++      if (tmp) {
++              align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
++              if (!align_splitoff)
++                      return NULL;
++      }
++
++      if (parent->size == size) {
++              list_del_init(&parent->fl_entry);
++              parent->free = 0;
++              return parent;
++      } else {
++              child = drm_mm_split_at_start(parent, size);
++      }
++
++      if (align_splitoff)
++              drm_mm_put_block(align_splitoff);
++
++      return child;
++}
++EXPORT_SYMBOL(drm_mm_get_block);
++
++/*
++ * Put a block. Merge with the previous and / or next block if they are free.
++ * Otherwise add to the free stack.
++ */
++
++void drm_mm_put_block(struct drm_mm_node * cur)
++{
++
++      struct drm_mm *mm = cur->mm;
++      struct list_head *cur_head = &cur->ml_entry;
++      struct list_head *root_head = &mm->ml_entry;
++      struct drm_mm_node *prev_node = NULL;
++      struct drm_mm_node *next_node;
++
++      int merged = 0;
++
++      if (cur_head->prev != root_head) {
++              prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
++              if (prev_node->free) {
++                      prev_node->size += cur->size;
++                      merged = 1;
++              }
++      }
++      if (cur_head->next != root_head) {
++              next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
++              if (next_node->free) {
++                      if (merged) {
++                              prev_node->size += next_node->size;
++                              list_del(&next_node->ml_entry);
++                              list_del(&next_node->fl_entry);
++                              drm_ctl_free(next_node, sizeof(*next_node),
++                                           DRM_MEM_MM);
++                      } else {
++                              next_node->size += cur->size;
++                              next_node->start = cur->start;
++                              merged = 1;
++                      }
++              }
++      }
++      if (!merged) {
++              cur->free = 1;
++              list_add(&cur->fl_entry, &mm->fl_entry);
++      } else {
++              list_del(&cur->ml_entry);
++              drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
++      }
++}
++EXPORT_SYMBOL(drm_mm_put_block);
++
++struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
++                                unsigned long size,
++                                unsigned alignment, int best_match)
++{
++      struct list_head *list;
++      const struct list_head *free_stack = &mm->fl_entry;
++      struct drm_mm_node *entry;
++      struct drm_mm_node *best;
++      unsigned long best_size;
++      unsigned wasted;
++
++      best = NULL;
++      best_size = ~0UL;
++
++      list_for_each(list, free_stack) {
++              entry = list_entry(list, struct drm_mm_node, fl_entry);
++              wasted = 0;
++
++              if (entry->size < size)
++                      continue;
++
++              if (alignment) {
++                      register unsigned tmp = entry->start % alignment;
++                      if (tmp)
++                              wasted += alignment - tmp;
++              }
++
++
++              if (entry->size >= size + wasted) {
++                      if (!best_match)
++                              return entry;
++			if (entry->size < best_size) {
++                              best = entry;
++                              best_size = entry->size;
++                      }
++              }
++      }
++
++      return best;
++}
++EXPORT_SYMBOL(drm_mm_search_free);
++
++int drm_mm_clean(struct drm_mm * mm)
++{
++      struct list_head *head = &mm->ml_entry;
++
++      return (head->next->next == head);
++}
++
++int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
++{
++      INIT_LIST_HEAD(&mm->ml_entry);
++      INIT_LIST_HEAD(&mm->fl_entry);
++
++      return drm_mm_create_tail_node(mm, start, size);
++}
++
++EXPORT_SYMBOL(drm_mm_init);
++
++void drm_mm_takedown(struct drm_mm * mm)
++{
++      struct list_head *bnode = mm->fl_entry.next;
++      struct drm_mm_node *entry;
++
++      entry = list_entry(bnode, struct drm_mm_node, fl_entry);
++
++      if (entry->ml_entry.next != &mm->ml_entry ||
++          entry->fl_entry.next != &mm->fl_entry) {
++              DRM_ERROR("Memory manager not clean. Delaying takedown\n");
++              return;
++      }
++
++      list_del(&entry->fl_entry);
++      list_del(&entry->ml_entry);
++      drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
++}
++
++EXPORT_SYMBOL(drm_mm_takedown);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_object.c git-nokia/drivers/gpu/drm-tungsten/drm_object.c
+--- git/drivers/gpu/drm-tungsten/drm_object.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_object.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,294 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
++                      int shareable)
++{
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      /* The refcount will be bumped to 1 when we add the ref object below. */
++      atomic_set(&item->refcount, 0);
++      item->shareable = shareable;
++      item->owner = priv;
++
++      ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
++                                      (unsigned long)item, 31, 0, 0);
++      if (ret)
++              return ret;
++
++      ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
++      if (ret)
++              ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_add_user_object);
++
++struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_hash_item *hash;
++      int ret;
++      struct drm_user_object *item;
++
++      DRM_ASSERT_LOCKED(&dev->struct_mutex);
++
++      ret = drm_ht_find_item(&dev->object_hash, key, &hash);
++      if (ret)
++              return NULL;
++
++      item = drm_hash_entry(hash, struct drm_user_object, hash);
++
++      if (priv != item->owner) {
++              struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
++              ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
++              if (ret) {
++                      DRM_ERROR("Object not registered for usage\n");
++                      return NULL;
++              }
++      }
++      return item;
++}
++EXPORT_SYMBOL(drm_lookup_user_object);
++
++static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
++{
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      if (atomic_dec_and_test(&item->refcount)) {
++              ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
++              BUG_ON(ret);
++              item->remove(priv, item);
++      }
++}
++
++static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
++                               enum drm_ref_type action)
++{
++      int ret = 0;
++
++      switch (action) {
++      case _DRM_REF_USE:
++              atomic_inc(&ro->refcount);
++              break;
++      default:
++              if (!ro->ref_struct_locked) {
++                      break;
++              } else {
++                      ro->ref_struct_locked(priv, ro, action);
++              }
++      }
++      return ret;
++}
++
++int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
++                     enum drm_ref_type ref_action)
++{
++      int ret = 0;
++      struct drm_ref_object *item;
++      struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
++
++      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
++      if (!referenced_object->shareable && priv != referenced_object->owner) {
++              DRM_ERROR("Not allowed to reference this object\n");
++              return -EINVAL;
++      }
++
++      /*
++       * If this is not a usage reference, Check that usage has been registered
++       * first. Otherwise strange things may happen on destruction.
++       */
++
++      if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
++              item =
++                  drm_lookup_ref_object(priv, referenced_object,
++                                        _DRM_REF_USE);
++              if (!item) {
++                      DRM_ERROR
++                          ("Object not registered for usage by this client\n");
++                      return -EINVAL;
++              }
++      }
++
++      if (NULL !=
++          (item =
++           drm_lookup_ref_object(priv, referenced_object, ref_action))) {
++              atomic_inc(&item->refcount);
++              return drm_object_ref_action(priv, referenced_object,
++                                           ref_action);
++      }
++
++      item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
++      if (item == NULL) {
++              DRM_ERROR("Could not allocate reference object\n");
++              return -ENOMEM;
++      }
++
++      atomic_set(&item->refcount, 1);
++      item->hash.key = (unsigned long)referenced_object;
++      ret = drm_ht_insert_item(ht, &item->hash);
++      item->unref_action = ref_action;
++
++      if (ret)
++              goto out;
++
++      list_add(&item->list, &priv->refd_objects);
++      ret = drm_object_ref_action(priv, referenced_object, ref_action);
++out:
++      return ret;
++}
++
++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
++                                      struct drm_user_object *referenced_object,
++                                      enum drm_ref_type ref_action)
++{
++      struct drm_hash_item *hash;
++      int ret;
++
++      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
++      ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
++                             (unsigned long)referenced_object, &hash);
++      if (ret)
++              return NULL;
++
++      return drm_hash_entry(hash, struct drm_ref_object, hash);
++}
++EXPORT_SYMBOL(drm_lookup_ref_object);
++
++static void drm_remove_other_references(struct drm_file *priv,
++                                      struct drm_user_object *ro)
++{
++      int i;
++      struct drm_open_hash *ht;
++      struct drm_hash_item *hash;
++      struct drm_ref_object *item;
++
++      for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
++              ht = &priv->refd_object_hash[i];
++              while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
++                      item = drm_hash_entry(hash, struct drm_ref_object, hash);
++                      drm_remove_ref_object(priv, item);
++              }
++      }
++}
++
++void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
++{
++      int ret;
++      struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
++      struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
++      enum drm_ref_type unref_action;
++
++      DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
++      unref_action = item->unref_action;
++      if (atomic_dec_and_test(&item->refcount)) {
++              ret = drm_ht_remove_item(ht, &item->hash);
++              BUG_ON(ret);
++              list_del_init(&item->list);
++              if (unref_action == _DRM_REF_USE)
++                      drm_remove_other_references(priv, user_object);
++              drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
++      }
++
++      switch (unref_action) {
++      case _DRM_REF_USE:
++              drm_deref_user_object(priv, user_object);
++              break;
++      default:
++              BUG_ON(!user_object->unref);
++              user_object->unref(priv, user_object, unref_action);
++              break;
++      }
++
++}
++EXPORT_SYMBOL(drm_remove_ref_object);
++
++int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
++                      enum drm_object_type type, struct drm_user_object **object)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_user_object *uo;
++      struct drm_hash_item *hash;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
++      if (ret) {
++              DRM_ERROR("Could not find user object to reference.\n");
++              goto out_err;
++      }
++      uo = drm_hash_entry(hash, struct drm_user_object, hash);
++      if (uo->type != type) {
++              ret = -EINVAL;
++              goto out_err;
++      }
++      ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
++      if (ret)
++              goto out_err;
++      mutex_unlock(&dev->struct_mutex);
++      *object = uo;
++      return 0;
++out_err:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
++                        enum drm_object_type type)
++{
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_user_object *uo;
++      struct drm_ref_object *ro;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      uo = drm_lookup_user_object(priv, user_token);
++      if (!uo || (uo->type != type)) {
++              ret = -EINVAL;
++              goto out_err;
++      }
++      ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
++      if (!ro) {
++              ret = -EINVAL;
++              goto out_err;
++      }
++      drm_remove_ref_object(priv, ro);
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++out_err:
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_objects.h git-nokia/drivers/gpu/drm-tungsten/drm_objects.h
+--- git/drivers/gpu/drm-tungsten/drm_objects.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_objects.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,832 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef _DRM_OBJECTS_H
++#define _DRM_OBJECTS_H
++
++struct drm_device;
++struct drm_bo_mem_reg;
++
++/***************************************************
++ * User space objects. (drm_object.c)
++ */
++
++#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
++
++enum drm_object_type {
++      drm_fence_type,
++      drm_buffer_type,
++      drm_lock_type,
++          /*
++           * Add other user space object types here.
++           */
++      drm_driver_type0 = 256,
++      drm_driver_type1,
++      drm_driver_type2,
++      drm_driver_type3,
++      drm_driver_type4
++};
++
++/*
++ * A user object is a structure that helps the drm give out user handles
++ * to kernel internal objects and to keep track of these objects so that
++ * they can be destroyed, for example when the user space process exits.
++ * Designed to be accessible using a user space 32-bit handle.
++ */
++
++struct drm_user_object {
++      struct drm_hash_item hash;
++      struct list_head list;
++      enum drm_object_type type;
++      atomic_t refcount;
++      int shareable;
++      struct drm_file *owner;
++      void (*ref_struct_locked) (struct drm_file *priv,
++                                 struct drm_user_object *obj,
++                                 enum drm_ref_type ref_action);
++      void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
++                     enum drm_ref_type unref_action);
++      void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
++};
++
++/*
++ * A ref object is a structure which is used to
++ * keep track of references to user objects and to keep track of these
++ * references so that they can be destroyed for example when the user space
++ * process exits. Designed to be accessible using a pointer to the _user_ object.
++ */
++
++struct drm_ref_object {
++      struct drm_hash_item hash;
++      struct list_head list;
++      atomic_t refcount;
++      enum drm_ref_type unref_action;
++};
++
++/**
++ * Must be called with the struct_mutex held.
++ */
++
++extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
++                             int shareable);
++/**
++ * Must be called with the struct_mutex held.
++ */
++
++extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
++                                               uint32_t key);
++
++/*
++ * Must be called with the struct_mutex held. May temporarily release it.
++ */
++
++extern int drm_add_ref_object(struct drm_file *priv,
++                            struct drm_user_object *referenced_object,
++                            enum drm_ref_type ref_action);
++
++/*
++ * Must be called with the struct_mutex held.
++ */
++
++struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
++                                      struct drm_user_object *referenced_object,
++                                      enum drm_ref_type ref_action);
++/*
++ * Must be called with the struct_mutex held.
++ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
++ * release the struct_mutex before calling drm_remove_ref_object.
++ * This function may temporarily release the struct_mutex.
++ */
++
++extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
++extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
++                             enum drm_object_type type,
++                             struct drm_user_object **object);
++extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
++                               enum drm_object_type type);
++
++/***************************************************
++ * Fence objects. (drm_fence.c)
++ */
++
++struct drm_fence_object {
++      struct drm_user_object base;
++      struct drm_device *dev;
++      atomic_t usage;
++
++      /*
++       * The below three fields are protected by the fence manager spinlock.
++       */
++
++      struct list_head ring;
++      int fence_class;
++      uint32_t native_types;
++      uint32_t type;
++      uint32_t signaled_types;
++      uint32_t sequence;
++      uint32_t waiting_types;
++      uint32_t error;
++};
++
++#define _DRM_FENCE_CLASSES 8
++#define _DRM_FENCE_TYPE_EXE 0x00
++
++struct drm_fence_class_manager {
++      struct list_head ring;
++      uint32_t pending_flush;
++      uint32_t waiting_types;
++      wait_queue_head_t fence_queue;
++      uint32_t highest_waiting_sequence;
++        uint32_t latest_queued_sequence;
++};
++
++struct drm_fence_manager {
++      int initialized;
++      rwlock_t lock;
++      struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
++      uint32_t num_classes;
++      atomic_t count;
++};
++
++struct drm_fence_driver {
++      unsigned long *waiting_jiffies;
++      uint32_t num_classes;
++      uint32_t wrap_diff;
++      uint32_t flush_diff;
++      uint32_t sequence_mask;
++
++      /*
++       * Driver implemented functions:
++       * has_irq() : 1 if the hardware can update the indicated type_flags using an
++       * irq handler. 0 if polling is required.
++       *
++       * emit() : Emit a sequence number to the command stream.
++       * Return the sequence number.
++       *
++       * flush() : Make sure the flags indicated in fc->pending_flush will eventually
++       * signal for fc->highest_received_sequence and all preceding sequences.
++       * Acknowledge by clearing the flags fc->pending_flush.
++       *
++       * poll() : Call drm_fence_handler with any new information.
++       *
++       * needed_flush() : Given the current state of the fence->type flags and previously
++       * executed or queued flushes, return the type_flags that need flushing.
++       *
++       * wait(): Wait for the "mask" flags to signal on a given fence, performing
++       * whatever's necessary to make this happen.
++       */
++
++      int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
++                      uint32_t flags);
++      int (*emit) (struct drm_device *dev, uint32_t fence_class,
++                   uint32_t flags, uint32_t *breadcrumb,
++                   uint32_t *native_type);
++      void (*flush) (struct drm_device *dev, uint32_t fence_class);
++      void (*poll) (struct drm_device *dev, uint32_t fence_class,
++              uint32_t types);
++      uint32_t (*needed_flush) (struct drm_fence_object *fence);
++      int (*wait) (struct drm_fence_object *fence, int lazy,
++                   int interruptible, uint32_t mask);
++};
++
++extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
++                                int interruptible, uint32_t mask,
++                                unsigned long end_jiffies);
++extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
++                            uint32_t sequence, uint32_t type,
++                            uint32_t error);
++extern void drm_fence_manager_init(struct drm_device *dev);
++extern void drm_fence_manager_takedown(struct drm_device *dev);
++extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
++                              uint32_t sequence);
++extern int drm_fence_object_flush(struct drm_fence_object *fence,
++                                uint32_t type);
++extern int drm_fence_object_signaled(struct drm_fence_object *fence,
++                                   uint32_t type);
++extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
++extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
++extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
++extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
++                                       struct drm_fence_object *src);
++extern int drm_fence_object_wait(struct drm_fence_object *fence,
++                               int lazy, int ignore_signals, uint32_t mask);
++extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
++                                 uint32_t fence_flags, uint32_t fence_class,
++                                 struct drm_fence_object **c_fence);
++extern int drm_fence_object_emit(struct drm_fence_object *fence,
++                               uint32_t fence_flags, uint32_t class,
++                               uint32_t type);
++extern void drm_fence_fill_arg(struct drm_fence_object *fence,
++                             struct drm_fence_arg *arg);
++
++extern int drm_fence_add_user_object(struct drm_file *priv,
++                                   struct drm_fence_object *fence,
++                                   int shareable);
++
++extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
++                                struct drm_file *file_priv);
++extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv);
++extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
++                                   struct drm_file *file_priv);
++extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
++                                     struct drm_file *file_priv);
++extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
++                                  struct drm_file *file_priv);
++extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv);
++/**************************************************
++ *TTMs
++ */
++
++/*
++ * The ttm backend GTT interface. (In our case AGP).
++ * Any similar type of device (PCIE?)
++ * needs only to implement these functions to be usable with the TTM interface.
++ * The AGP backend implementation lives in drm_agpsupport.c
++ * basically maps these calls to available functions in agpgart.
++ * Each drm device driver gets an
++ * additional function pointer that creates these types,
++ * so that the device can choose the correct aperture.
++ * (Multiple AGP apertures, etc.)
++ * Most device drivers will let this point to the standard AGP implementation.
++ */
++
++#define DRM_BE_FLAG_NEEDS_FREE     0x00000001
++#define DRM_BE_FLAG_BOUND_CACHED   0x00000002
++
++struct drm_ttm_backend;
++struct drm_ttm_backend_func {
++      int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
++      int (*populate) (struct drm_ttm_backend *backend,
++                       unsigned long num_pages, struct page **pages,
++                       struct page *dummy_read_page);
++      void (*clear) (struct drm_ttm_backend *backend);
++      int (*bind) (struct drm_ttm_backend *backend,
++                   struct drm_bo_mem_reg *bo_mem);
++      int (*unbind) (struct drm_ttm_backend *backend);
++      void (*destroy) (struct drm_ttm_backend *backend);
++};
++
++/**
++ * This structure associates a set of flags and methods with a drm_ttm
++ * object, and will also be subclassed by the particular backend.
++ *
++ * \sa #drm_agp_ttm_backend
++ */
++struct drm_ttm_backend {
++      struct drm_device *dev;
++      uint32_t flags;
++      struct drm_ttm_backend_func *func;
++};
++
++struct drm_ttm {
++      struct page *dummy_read_page;
++      struct page **pages;
++      long first_himem_page;
++      long last_lomem_page;
++      uint32_t page_flags;
++      unsigned long num_pages;
++      atomic_t vma_count;
++      struct drm_device *dev;
++      int destroy;
++      uint32_t mapping_offset;
++      struct drm_ttm_backend *be;
++      unsigned long highest_lomem_entry;
++      unsigned long lowest_himem_entry;
++      enum {
++              ttm_bound,
++              ttm_evicted,
++              ttm_unbound,
++              ttm_unpopulated,
++      } state;
++
++};
++
++extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
++                                    uint32_t page_flags,
++                                    struct page *dummy_read_page);
++extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
++extern void drm_ttm_unbind(struct drm_ttm *ttm);
++extern void drm_ttm_evict(struct drm_ttm *ttm);
++extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
++extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
++extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
++extern int drm_ttm_populate(struct drm_ttm *ttm);
++extern int drm_ttm_set_user(struct drm_ttm *ttm,
++                          struct task_struct *tsk,
++                          unsigned long start,
++                          unsigned long num_pages);
++
++/*
++ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
++ * this which calls this function iff there are no vmas referencing it anymore.
++ * Otherwise it is called when the last vma exits.
++ */
++
++extern int drm_ttm_destroy(struct drm_ttm *ttm);
++
++#define DRM_FLAG_MASKED(_old, _new, _mask) {\
++(_old) ^= (((_old) ^ (_new)) & (_mask)); \
++}
++
++#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
++#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
++
++/*
++ * Page flags.
++ */
++
++/*
++ * This ttm should not be cached by the CPU
++ */
++#define DRM_TTM_PAGE_UNCACHED   (1 << 0)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_USED       (1 << 1)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_BOUND      (1 << 2)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was
++ */
++#define DRM_TTM_PAGE_PRESENT    (1 << 3)
++/*
++ * The array of page pointers was allocated with vmalloc
++ * instead of drm_calloc.
++ */
++#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4)
++/*
++ * This ttm is mapped from user space
++ */
++#define DRM_TTM_PAGE_USER       (1 << 5)
++/*
++ * This ttm will be written to by the GPU
++ */
++#define DRM_TTM_PAGE_WRITE    (1 << 6)
++/*
++ * This ttm was mapped to the GPU, and so the contents may have
++ * been modified
++ */
++#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
++/*
++ * This flag is not used at this time; I don't know what the
++ * intent was.
++ */
++#define DRM_TTM_PAGE_USER_DMA   (1 << 8)
++
++/***************************************************
++ * Buffer objects. (drm_bo.c, drm_bo_move.c)
++ */
++
++struct drm_bo_mem_reg {
++      struct drm_mm_node *mm_node;
++      unsigned long size;
++      unsigned long num_pages;
++      uint32_t page_alignment;
++      uint32_t mem_type;
++      /*
++       * Current buffer status flags, indicating
++       * where the buffer is located and which
++       * access modes are in effect
++       */
++      uint64_t flags;
++      /**
++       * These are the flags proposed for
++       * a validate operation. If the
++       * validate succeeds, they'll get moved
++       * into the flags field
++       */
++      uint64_t proposed_flags;
++      
++      uint32_t desired_tile_stride;
++      uint32_t hw_tile_stride;
++};
++
++enum drm_bo_type {
++      /*
++       * drm_bo_type_device are 'normal' drm allocations,
++       * pages are allocated from within the kernel automatically
++       * and the objects can be mmap'd from the drm device. Each
++       * drm_bo_type_device object has a unique name which can be
++       * used by other processes to share access to the underlying
++       * buffer.
++       */
++      drm_bo_type_device,
++      /*
++       * drm_bo_type_user are buffers of pages that already exist
++       * in the process address space. They are more limited than
++       * drm_bo_type_device buffers in that they must always
++       * remain cached (as we assume the user pages are mapped cached),
++       * and they are not sharable to other processes through DRM
++       * (although, regular shared memory should still work fine).
++       */
++      drm_bo_type_user,
++      /*
++       * drm_bo_type_kernel are buffers that exist solely for use
++       * within the kernel. The pages cannot be mapped into the
++       * process. One obvious use would be for the ring
++       * buffer where user access would not (ideally) be required.
++       */
++      drm_bo_type_kernel,
++};
++
++struct drm_buffer_object {
++      struct drm_device *dev;
++      struct drm_user_object base;
++
++      /*
++       * If there is a possibility that the usage variable is zero,
++       * then dev->struct_mutex should be locked before incrementing it.
++       */
++
++      atomic_t usage;
++      unsigned long buffer_start;
++      enum drm_bo_type type;
++      unsigned long offset;
++      atomic_t mapped;
++      struct drm_bo_mem_reg mem;
++
++      struct list_head lru;
++      struct list_head ddestroy;
++
++      uint32_t fence_type;
++      uint32_t fence_class;
++      uint32_t new_fence_type;
++      uint32_t new_fence_class;
++      struct drm_fence_object *fence;
++      uint32_t priv_flags;
++      wait_queue_head_t event_queue;
++      struct mutex mutex;
++      unsigned long num_pages;
++
++      /* For pinned buffers */
++      struct drm_mm_node *pinned_node;
++      uint32_t pinned_mem_type;
++      struct list_head pinned_lru;
++
++      /* For vm */
++      struct drm_ttm *ttm;
++      struct drm_map_list map_list;
++      uint32_t memory_type;
++      unsigned long bus_offset;
++      uint32_t vm_flags;
++      void *iomap;
++
++#ifdef DRM_ODD_MM_COMPAT
++      /* dev->struct_mutex only protected. */
++      struct list_head vma_list;
++      struct list_head p_mm_list;
++#endif
++
++};
++
++#define _DRM_BO_FLAG_UNFENCED 0x00000001
++#define _DRM_BO_FLAG_EVICTED  0x00000002
++
++/*
++ * This flag indicates that a function called with bo->mutex held has
++ * temporarily released the buffer object mutex (usually to wait for something),
++ * and thus any post-lock validation needs to be rerun.
++ */
++
++#define _DRM_BO_FLAG_UNLOCKED 0x00000004
++
++struct drm_mem_type_manager {
++      int has_type;
++      int use_type;
++      int kern_init_type;
++      struct drm_mm manager;
++      struct list_head lru;
++      struct list_head pinned;
++      uint32_t flags;
++      uint32_t drm_bus_maptype;
++      unsigned long gpu_offset;
++      unsigned long io_offset;
++      unsigned long io_size;
++      void *io_addr;
++      uint64_t size; /* size of managed area for reporting to userspace */
++};
++
++struct drm_bo_lock {
++      struct drm_user_object base;
++      wait_queue_head_t queue;
++      atomic_t write_lock_pending;
++      atomic_t readers;
++};
++
++#define _DRM_FLAG_MEMTYPE_FIXED     0x00000001        /* Fixed (on-card) PCI memory */
++#define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002        /* Memory mappable */
++#define _DRM_FLAG_MEMTYPE_CACHED    0x00000004        /* Cached binding */
++#define _DRM_FLAG_NEEDS_IOREMAP     0x00000008        /* Fixed memory needs ioremap
++                                                 before kernel access. */
++#define _DRM_FLAG_MEMTYPE_CMA       0x00000010        /* Can't map aperture */
++#define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020        /* Select caching */
++
++struct drm_buffer_manager {
++      struct drm_bo_lock bm_lock;
++      struct mutex evict_mutex;
++      int nice_mode;
++      int initialized;
++      struct drm_file *last_to_validate;
++      struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
++      struct list_head unfenced;
++      struct list_head ddestroy;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      struct work_struct wq;
++#else
++      struct delayed_work wq;
++#endif
++      uint32_t fence_type;
++      unsigned long cur_pages;
++      atomic_t count;
++      struct page *dummy_read_page;
++};
++
++struct drm_bo_driver {
++      const uint32_t *mem_type_prio;
++      const uint32_t *mem_busy_prio;
++      uint32_t num_mem_type_prio;
++      uint32_t num_mem_busy_prio;
++      struct drm_ttm_backend *(*create_ttm_backend_entry)
++       (struct drm_device *dev);
++      int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
++                         uint32_t *type);
++      int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
++      int (*init_mem_type) (struct drm_device *dev, uint32_t type,
++                            struct drm_mem_type_manager *man);
++      /*
++       * evict_flags:
++       *
++       * @bo: the buffer object to be evicted
++       *
++       * Return the bo flags for a buffer which is not mapped to the hardware.
++       * These will be placed in proposed_flags so that when the move is
++       * finished, they'll end up in bo->mem.flags
++       */
++      uint64_t(*evict_flags) (struct drm_buffer_object *bo);
++      /*
++       * move:
++       *
++       * @bo: the buffer to move
++       *
++       * @evict: whether this motion is evicting the buffer from
++       * the graphics address space
++       *
++       * @no_wait: whether this should give up and return -EBUSY
++       * if this move would require sleeping
++       *
++       * @new_mem: the new memory region receiving the buffer
++       *
++       * Move a buffer between two memory regions.
++       */
++      int (*move) (struct drm_buffer_object *bo,
++                   int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
++      /*
++       * ttm_cache_flush
++       */
++      void (*ttm_cache_flush)(struct drm_ttm *ttm);
++
++      /*
++       * command_stream_barrier
++       *
++       * @dev: The drm device.
++       *
++       * @bo: The buffer object to validate.
++       *
++       * @new_fence_class: The new fence class for the buffer object.
++       *
++       * @new_fence_type: The new fence type for the buffer object.
++       *
++       * @no_wait: whether this should give up and return -EBUSY
++       * if this operation would require sleeping
++       *
++       * Insert a command stream barrier that makes sure that the
++       * buffer is idle once the commands associated with the
++       * current validation are starting to execute. If an error
++       * condition is returned, or the function pointer is NULL,
++       * the drm core will force buffer idle
++       * during validation.
++       */
++
++      int (*command_stream_barrier) (struct drm_buffer_object *bo,
++                                     uint32_t new_fence_class,
++                                     uint32_t new_fence_type,
++                                     int no_wait);                                   
++};
++
++/*
++ * buffer objects (drm_bo.c)
++ */
++
++extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int drm_bo_driver_finish(struct drm_device *dev);
++extern int drm_bo_driver_init(struct drm_device *dev);
++extern int drm_bo_pci_offset(struct drm_device *dev,
++                           struct drm_bo_mem_reg *mem,
++                           unsigned long *bus_base,
++                           unsigned long *bus_offset,
++                           unsigned long *bus_size);
++extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
++
++extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
++extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
++extern void drm_putback_buffer_objects(struct drm_device *dev);
++extern int drm_fence_buffer_objects(struct drm_device *dev,
++                                  struct list_head *list,
++                                  uint32_t fence_flags,
++                                  struct drm_fence_object *fence,
++                                  struct drm_fence_object **used_fence);
++extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
++extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
++                                  enum drm_bo_type type, uint64_t flags,
++                                  uint32_t hint, uint32_t page_alignment,
++                                  unsigned long buffer_start,
++                                  struct drm_buffer_object **bo);
++extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
++                     int no_wait, int check_unfenced);
++extern int drm_bo_mem_space(struct drm_buffer_object *bo,
++                          struct drm_bo_mem_reg *mem, int no_wait);
++extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
++                            uint64_t new_mem_flags,
++                            int no_wait, int move_unfenced);
++extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean);
++extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
++                        unsigned long p_offset, unsigned long p_size,
++                        int kern_init);
++extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
++                                uint64_t flags, uint64_t mask, uint32_t hint,
++                                uint32_t fence_class,
++                                struct drm_bo_info_rep *rep,
++                                struct drm_buffer_object **bo_rep);
++extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
++                                                        uint32_t handle,
++                                                        int check_owner);
++extern int drm_bo_do_validate(struct drm_buffer_object *bo,
++                            uint64_t flags, uint64_t mask, uint32_t hint,
++                            uint32_t fence_class,
++                            struct drm_bo_info_rep *rep);
++extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
++/*
++ * Buffer object memory move- and map helpers.
++ * drm_bo_move.c
++ */
++
++extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
++                         int evict, int no_wait,
++                         struct drm_bo_mem_reg *new_mem);
++extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
++                            int evict,
++                            int no_wait, struct drm_bo_mem_reg *new_mem);
++extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
++                                   int evict, int no_wait,
++                                   uint32_t fence_class, uint32_t fence_type,
++                                   uint32_t fence_flags,
++                                   struct drm_bo_mem_reg *new_mem);
++extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
++extern unsigned long drm_bo_offset_end(unsigned long offset,
++                                     unsigned long end);
++
++struct drm_bo_kmap_obj {
++      void *virtual;
++      struct page *page;
++      enum {
++              bo_map_iomap,
++              bo_map_vmap,
++              bo_map_kmap,
++              bo_map_premapped,
++      } bo_kmap_type;
++};
++
++static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
++{
++      *is_iomem = (map->bo_kmap_type == bo_map_iomap ||
++                   map->bo_kmap_type == bo_map_premapped);
++      return map->virtual;
++}
++extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
++extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
++                     unsigned long num_pages, struct drm_bo_kmap_obj *map);
++extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
++                         unsigned long dst_offset,
++                         unsigned long *pfn,
++                         pgprot_t *prot);
++extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
++                              struct drm_bo_info_rep *rep);
++
++
++/*
++ * drm_regman.c
++ */
++
++struct drm_reg {
++      struct list_head head;
++      struct drm_fence_object *fence;
++      uint32_t fence_type;
++      uint32_t new_fence_type;
++};
++
++struct drm_reg_manager {
++      struct list_head free;
++      struct list_head lru;
++      struct list_head unfenced;
++
++      int (*reg_reusable)(const struct drm_reg *reg, const void *data);
++      void (*reg_destroy)(struct drm_reg *reg);
++};
++
++extern int drm_regs_alloc(struct drm_reg_manager *manager,
++                        const void *data,
++                        uint32_t fence_class,
++                        uint32_t fence_type,
++                        int interruptible,
++                        int no_wait,
++                        struct drm_reg **reg);
++
++extern void drm_regs_fence(struct drm_reg_manager *regs,
++                         struct drm_fence_object *fence);
++
++extern void drm_regs_free(struct drm_reg_manager *manager);
++extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
++extern void drm_regs_init(struct drm_reg_manager *manager,
++                        int (*reg_reusable)(const struct drm_reg *,
++                                            const void *),
++                        void (*reg_destroy)(struct drm_reg *));
++
++/*
++ * drm_bo_lock.c
++ * Simple replacement for the hardware lock on buffer manager init and clean.
++ */
++
++
++extern void drm_bo_init_lock(struct drm_bo_lock *lock);
++extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
++extern int drm_bo_read_lock(struct drm_bo_lock *lock,
++                          int interruptible);
++extern int drm_bo_write_lock(struct drm_bo_lock *lock,
++                           int interruptible,
++                           struct drm_file *file_priv);
++
++extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
++                             struct drm_file *file_priv);
++
++#ifdef CONFIG_DEBUG_MUTEXES
++#define DRM_ASSERT_LOCKED(_mutex)                                     \
++      BUG_ON(!mutex_is_locked(_mutex) ||                              \
++             ((_mutex)->owner != current_thread_info()))
++#else
++#define DRM_ASSERT_LOCKED(_mutex)
++#endif
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_os_linux.h git-nokia/drivers/gpu/drm-tungsten/drm_os_linux.h
+--- git/drivers/gpu/drm-tungsten/drm_os_linux.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_os_linux.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,145 @@
++/**
++ * \file drm_os_linux.h
++ * OS abstraction macros.
++ */
++
++#include <linux/interrupt.h>  /* For task queue support */
++#include <linux/delay.h>
++
++/** Current process ID */
++#define DRM_CURRENTPID                        current->pid
++#define DRM_SUSER(p)                  capable(CAP_SYS_ADMIN)
++#define DRM_UDELAY(d)                 udelay(d)
++#if LINUX_VERSION_CODE <= 0x020608    /* KERNEL_VERSION(2,6,8) */
++#ifndef __iomem
++#define __iomem
++#endif
++/** Read a byte from a MMIO region */
++#define DRM_READ8(map, offset)                readb(((void __iomem *)(map)->handle) + (offset))
++/** Read a word from a MMIO region */
++#define DRM_READ16(map, offset)               readw(((void __iomem *)(map)->handle) + (offset))
++/** Read a dword from a MMIO region */
++#define DRM_READ32(map, offset)               readl(((void __iomem *)(map)->handle) + (offset))
++/** Write a byte into a MMIO region */
++#define DRM_WRITE8(map, offset, val)  writeb(val, ((void __iomem *)(map)->handle) + (offset))
++/** Write a word into a MMIO region */
++#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
++/** Write a dword into a MMIO region */
++#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
++#else
++/** Read a byte from a MMIO region */
++#define DRM_READ8(map, offset)                readb((map)->handle + (offset))
++/** Read a word from a MMIO region */
++#define DRM_READ16(map, offset)               readw((map)->handle + (offset))
++/** Read a dword from a MMIO region */
++#define DRM_READ32(map, offset)               readl((map)->handle + (offset))
++/** Write a byte into a MMIO region */
++#define DRM_WRITE8(map, offset, val)  writeb(val, (map)->handle + (offset))
++/** Write a word into a MMIO region */
++#define DRM_WRITE16(map, offset, val) writew(val, (map)->handle + (offset))
++/** Write a dword into a MMIO region */
++#define DRM_WRITE32(map, offset, val) writel(val, (map)->handle + (offset))
++#endif
++/** Read memory barrier */
++#define DRM_READMEMORYBARRIER()               rmb()
++/** Write memory barrier */
++#define DRM_WRITEMEMORYBARRIER()      wmb()
++/** Read/write memory barrier */
++#define DRM_MEMORYBARRIER()           mb()
++
++/** IRQ handler arguments and return type and values */
++#define DRM_IRQ_ARGS          int irq, void *arg
++/** backwards compatibility with old irq return values */
++#ifndef IRQ_HANDLED
++typedef void irqreturn_t;
++#define IRQ_HANDLED           /* nothing */
++#define IRQ_NONE              /* nothing */
++#endif
++
++/** AGP types */
++#if __OS_HAS_AGP
++#define DRM_AGP_MEM           struct agp_memory
++#define DRM_AGP_KERN          struct agp_kern_info
++#else
++/* define some dummy types for non AGP supporting kernels */
++struct no_agp_kern {
++      unsigned long aper_base;
++      unsigned long aper_size;
++};
++#define DRM_AGP_MEM           int
++#define DRM_AGP_KERN          struct no_agp_kern
++#endif
++
++#if !(__OS_HAS_MTRR)
++static __inline__ int mtrr_add(unsigned long base, unsigned long size,
++                             unsigned int type, char increment)
++{
++      return -ENODEV;
++}
++
++static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++      return -ENODEV;
++}
++
++#define MTRR_TYPE_WRCOMB     1
++#endif
++
++/** Other copying of data to kernel space */
++#define DRM_COPY_FROM_USER(arg1, arg2, arg3)          \
++      copy_from_user(arg1, arg2, arg3)
++/** Other copying of data from kernel space */
++#define DRM_COPY_TO_USER(arg1, arg2, arg3)            \
++      copy_to_user(arg1, arg2, arg3)
++/* Macros for copyfrom user, but checking readability only once */
++#define DRM_VERIFYAREA_READ( uaddr, size )            \
++      (access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
++#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)        \
++      __copy_from_user(arg1, arg2, arg3)
++#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)  \
++      __copy_to_user(arg1, arg2, arg3)
++#define DRM_GET_USER_UNCHECKED(val, uaddr)            \
++      __get_user(val, uaddr)
++
++#define DRM_HZ HZ
++
++#define DRM_WAIT_ON( ret, queue, timeout, condition )         \
++do {                                                          \
++      DECLARE_WAITQUEUE(entry, current);                      \
++      unsigned long end = jiffies + (timeout);                \
++      add_wait_queue(&(queue), &entry);                       \
++                                                              \
++      for (;;) {                                              \
++              __set_current_state(TASK_INTERRUPTIBLE);        \
++              if (condition)                                  \
++                      break;                                  \
++              if (time_after_eq(jiffies, end)) {              \
++                      ret = -EBUSY;                           \
++                      break;                                  \
++              }                                               \
++              schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);    \
++              if (signal_pending(current)) {                  \
++                      ret = -EINTR;                           \
++                      break;                                  \
++              }                                               \
++      }                                                       \
++      __set_current_state(TASK_RUNNING);                      \
++      remove_wait_queue(&(queue), &entry);                    \
++} while (0)
++
++#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
++#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
++
++/** Type for the OS's non-sleepable mutex lock */
++#define DRM_SPINTYPE          spinlock_t
++/**
++ * Initialize the lock for use.  name is an optional string describing the
++ * lock
++ */
++#define DRM_SPININIT(l,name)  spin_lock_init(l)
++#define DRM_SPINUNINIT(l)
++#define DRM_SPINLOCK(l)               spin_lock(l)
++#define DRM_SPINUNLOCK(l)     spin_unlock(l)
++#define DRM_SPINLOCK_IRQSAVE(l, _flags)       spin_lock_irqsave(l, _flags);
++#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
++#define DRM_SPINLOCK_ASSERT(l)                do {} while (0)
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_pci.c git-nokia/drivers/gpu/drm-tungsten/drm_pci.c
+--- git/drivers/gpu/drm-tungsten/drm_pci.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_pci.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,177 @@
++/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
++/**
++ * \file drm_pci.c
++ * \brief Functions and ioctls to manage PCI memory
++ *
++ * \warning These interfaces aren't stable yet.
++ *
++ * \todo Implement the remaining ioctl's for the PCI pools.
++ * \todo The wrappers here are so thin that they would be better off inlined..
++ *
++ * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
++ * \author Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++/*
++ * Copyright 2003 José Fonseca.
++ * Copyright 2003 Leif Delgass.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
++ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/pci.h>
++#include <linux/dma-mapping.h>
++#include "drmP.h"
++
++/**********************************************************************/
++/** \name PCI memory */
++/*@{*/
++
++/**
++ * \brief Allocate a PCI consistent memory block, for DMA.
++ */
++drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
++                              dma_addr_t maxaddr)
++{
++      drm_dma_handle_t *dmah;
++      unsigned long addr;
++      size_t sz;
++#ifdef DRM_DEBUG_MEMORY
++      int area = DRM_MEM_DMA;
++
++      spin_lock(&drm_mem_lock);
++      if ((drm_ram_used >> PAGE_SHIFT)
++          > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
++              spin_unlock(&drm_mem_lock);
++              return 0;
++      }
++      spin_unlock(&drm_mem_lock);
++#endif
++
++      /* pci_alloc_consistent only guarantees alignment to the smallest
++       * PAGE_SIZE order which is greater than or equal to the requested size.
++       * Return NULL here for now to make sure nobody tries for larger alignment
++       */
++      if (align > size)
++              return NULL;
++
++      if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
++              DRM_ERROR("Setting pci dma mask failed\n");
++              return NULL;
++      }
++
++      dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
++      if (!dmah)
++              return NULL;
++
++      dmah->size = size;
++      dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
++
++#ifdef DRM_DEBUG_MEMORY
++      if (dmah->vaddr == NULL) {
++              spin_lock(&drm_mem_lock);
++              ++drm_mem_stats[area].fail_count;
++              spin_unlock(&drm_mem_lock);
++              kfree(dmah);
++              return NULL;
++      }
++
++      spin_lock(&drm_mem_lock);
++      ++drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_allocated += size;
++      drm_ram_used += size;
++      spin_unlock(&drm_mem_lock);
++#else
++      if (dmah->vaddr == NULL) {
++              kfree(dmah);
++              return NULL;
++      }
++#endif
++
++      memset(dmah->vaddr, 0, size);
++
++      /* XXX - Is virt_to_page() legal for consistent mem? */
++      /* Reserve */
++      for (addr = (unsigned long)dmah->vaddr, sz = size;
++           sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++              SetPageReserved(virt_to_page(addr));
++      }
++
++      return dmah;
++}
++EXPORT_SYMBOL(drm_pci_alloc);
++
++/**
++ * \brief Free a PCI consistent memory block without freeing its descriptor.
++ *
++ * This function is for internal use in the Linux-specific DRM core code.
++ */
++void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
++{
++      unsigned long addr;
++      size_t sz;
++#ifdef DRM_DEBUG_MEMORY
++      int area = DRM_MEM_DMA;
++      int alloc_count;
++      int free_count;
++#endif
++
++      if (!dmah->vaddr) {
++#ifdef DRM_DEBUG_MEMORY
++              DRM_MEM_ERROR(area, "Attempt to free address 0\n");
++#endif
++      } else {
++              /* XXX - Is virt_to_page() legal for consistent mem? */
++              /* Unreserve */
++              for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
++                   sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
++                      ClearPageReserved(virt_to_page(addr));
++              }
++              dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
++                                dmah->busaddr);
++      }
++
++#ifdef DRM_DEBUG_MEMORY
++      spin_lock(&drm_mem_lock);
++      free_count = ++drm_mem_stats[area].free_count;
++      alloc_count = drm_mem_stats[area].succeed_count;
++      drm_mem_stats[area].bytes_freed += size;
++      drm_ram_used -= size;
++      spin_unlock(&drm_mem_lock);
++      if (free_count > alloc_count) {
++              DRM_MEM_ERROR(area,
++                            "Excess frees: %d frees, %d allocs\n",
++                            free_count, alloc_count);
++      }
++#endif
++
++}
++
++/**
++ * \brief Free a PCI consistent memory block
++ */
++void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
++{
++      __drm_pci_free(dev, dmah);
++      kfree(dmah);
++}
++EXPORT_SYMBOL(drm_pci_free);
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_pciids.h git-nokia/drivers/gpu/drm-tungsten/drm_pciids.h
+--- git/drivers/gpu/drm-tungsten/drm_pciids.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_pciids.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,614 @@
++/*
++   This file is auto-generated from the drm_pciids.txt in the DRM CVS
++   Please contact dri-devel@lists.sf.net to add new cards to this list
++*/
++#define radeon_PCI_IDS \
++      {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
++      {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
++      {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
++      {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
++      {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
++      {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
++      {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
++      {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
++      {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
++      {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
++      {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
++      {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
++      {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
++      {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
++      {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
++      {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
++      {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
++      {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
++      {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
++      {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
++      {0, 0, 0}
++
++#define r128_PCI_IDS \
++      {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define mga_PCI_IDS \
++      {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
++      {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
++      {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
++      {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
++      {0, 0, 0}
++
++#define mach64_PCI_IDS \
++      {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define sis_PCI_IDS \
++      {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
++      {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
++      {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
++      {0, 0, 0}
++
++#define pvr2d_PCI_IDS \
++      {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define tdfx_PCI_IDS \
++      {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define viadrv_PCI_IDS \
++      {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
++      {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
++      {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
++      {0x1106, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
++      {0, 0, 0}
++
++#define i810_PCI_IDS \
++      {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define i830_PCI_IDS \
++      {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define gamma_PCI_IDS \
++      {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
++
++#define savage_PCI_IDS \
++      {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
++      {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
++      {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
++      {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
++      {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
++      {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
++      {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
++      {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
++      {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
++      {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
++      {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
++      {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
++      {0, 0, 0}
++
++#define ffb_PCI_IDS \
++      {0, 0, 0}
++
++#define i915_PCI_IDS \
++      {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I8XX}, \
++      {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x27A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x27AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x29A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2A02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2A12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x29C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x29B2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x29D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I915}, \
++      {0x8086, 0x2A42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2E02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2E12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0x8086, 0x2E22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_I9XX|CHIP_I965}, \
++      {0, 0, 0}
++
++#define imagine_PCI_IDS \
++      {0x105d, 0x2309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128}, \
++      {0x105d, 0x2339, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_128_2}, \
++      {0x105d, 0x493d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_T2R}, \
++      {0x105d, 0x5348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, IMAGINE_REV4}, \
++      {0, 0, 0}
++
++#define nv_PCI_IDS \
++      {0x10DE, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x002A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x002C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x0029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x002D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x00A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV04}, \
++      {0x10DE, 0x0100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0171, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0172, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0173, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0174, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0175, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0176, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0177, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0178, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0179, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x017A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x017C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x017D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0189, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x018D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x01A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x01F0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV10}, \
++      {0x10DE, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0203, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0251, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0252, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0253, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0258, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0259, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x025B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0282, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x028C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV20}, \
++      {0x10DE, 0x0301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0308, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x031F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0322, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0323, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0327, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0328, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0329, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x032F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0331, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0332, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0333, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x033F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0334, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0338, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0342, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0345, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0348, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x034F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV30}, \
++      {0x10DE, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x004E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x00CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10de, 0x00f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10de, 0x00f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x014F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0161, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0163, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0164, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0165, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0166, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0167, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x016E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0212, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0215, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0228, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0091, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0092, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0094, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0098, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x0099, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x009C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x009D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0x10DE, 0x009E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NV40}, \
++      {0, 0, 0}
++
++#define xgi_PCI_IDS \
++      {0x18ca, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0x18ca, 0x0047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
++      {0, 0, 0}
+diff -Nurd git/drivers/gpu/drm-tungsten/drmP.h git-nokia/drivers/gpu/drm-tungsten/drmP.h
+--- git/drivers/gpu/drm-tungsten/drmP.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drmP.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1507 @@
++/**
++ * \file drmP.h
++ * Private header for Direct Rendering Manager
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_P_H_
++#define _DRM_P_H_
++
++#ifdef __KERNEL__
++#ifdef __alpha__
++/* add include of current.h so that "current" is defined
++ * before static inline funcs in wait.h. Doing this so we
++ * can build the DRM (part of PI DRI). 4/21/2000 S + B */
++#include <asm/current.h>
++#endif                                /* __alpha__ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/miscdevice.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#include <linux/init.h>
++#include <linux/file.h>
++#include <linux/pci.h>
++#include <linux/version.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>   /* For (un)lock_kernel */
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <linux/kref.h>
++#include <linux/pagemap.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++#include <linux/mutex.h>
++#endif
++#if defined(__alpha__) || defined(__powerpc__)
++#include <asm/pgtable.h>      /* For pte_wrprotect */
++#endif
++#include <asm/io.h>
++#include <asm/mman.h>
++#include <asm/uaccess.h>
++#ifdef CONFIG_MTRR
++#include <asm/mtrr.h>
++#endif
++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
++#include <asm/agp.h>
++#include <linux/types.h>
++#include <linux/agp_backend.h>
++#endif
++#include <linux/workqueue.h>
++#include <linux/poll.h>
++#include <asm/pgalloc.h>
++#include "drm.h"
++#include <linux/slab.h>
++#include <linux/idr.h>
++
++#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
++#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
++
++#include "drm_os_linux.h"
++#include "drm_hashtab.h"
++#include "drm_internal.h"
++
++struct drm_device;
++struct drm_file;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++typedef unsigned long uintptr_t;
++#endif
++
++/* If you want the memory alloc debug functionality, change define below */
++/* #define DEBUG_MEMORY */
++
++/***********************************************************************/
++/** \name DRM template customization defaults */
++/*@{*/
++
++/* driver capabilities and requirements mask */
++#define DRIVER_USE_AGP     0x1
++#define DRIVER_REQUIRE_AGP 0x2
++#define DRIVER_USE_MTRR    0x4
++#define DRIVER_PCI_DMA     0x8
++#define DRIVER_SG          0x10
++#define DRIVER_HAVE_DMA    0x20
++#define DRIVER_HAVE_IRQ    0x40
++#define DRIVER_IRQ_SHARED  0x80
++#define DRIVER_DMA_QUEUE   0x100
++#define DRIVER_FB_DMA      0x200
++#define DRIVER_GEM       0x400
++
++/*@}*/
++
++/***********************************************************************/
++/** \name Begin the DRM... */
++/*@{*/
++
++#define DRM_DEBUG_CODE 2        /**< Include debugging code if > 1, then
++                                   also include looping detection. */
++
++#define DRM_MAGIC_HASH_ORDER  4 /**< Size of key hash table. Must be power of 2. */
++#define DRM_KERNEL_CONTEXT    0        /**< Change drm_resctx if changed */
++#define DRM_RESERVED_CONTEXTS 1        /**< Change drm_resctx if changed */
++#define DRM_LOOPING_LIMIT     5000000
++#define DRM_TIME_SLICE              (HZ/20)  /**< Time slice for GLXContexts */
++#define DRM_LOCK_SLICE              1 /**< Time slice for lock, in jiffies */
++
++#define DRM_FLAG_DEBUG          0x01
++
++#define DRM_MEM_DMA      0
++#define DRM_MEM_SAREA    1
++#define DRM_MEM_DRIVER           2
++#define DRM_MEM_MAGIC    3
++#define DRM_MEM_IOCTLS           4
++#define DRM_MEM_MAPS     5
++#define DRM_MEM_VMAS     6
++#define DRM_MEM_BUFS     7
++#define DRM_MEM_SEGS     8
++#define DRM_MEM_PAGES    9
++#define DRM_MEM_FILES   10
++#define DRM_MEM_QUEUES          11
++#define DRM_MEM_CMDS    12
++#define DRM_MEM_MAPPINGS  13
++#define DRM_MEM_BUFLISTS  14
++#define DRM_MEM_AGPLISTS  15
++#define DRM_MEM_TOTALAGP  16
++#define DRM_MEM_BOUNDAGP  17
++#define DRM_MEM_CTXBITMAP 18
++#define DRM_MEM_STUB      19
++#define DRM_MEM_SGLISTS   20
++#define DRM_MEM_CTXLIST   21
++#define DRM_MEM_MM        22
++#define DRM_MEM_HASHTAB   23
++#define DRM_MEM_OBJECTS   24
++#define DRM_MEM_FENCE     25
++#define DRM_MEM_TTM       26
++#define DRM_MEM_BUFOBJ    27
++
++#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
++#define DRM_MAP_HASH_OFFSET 0x10000000
++#define DRM_MAP_HASH_ORDER 12
++#define DRM_OBJECT_HASH_ORDER 12
++#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
++#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
++/*
++ * This should be small enough to allow the use of kmalloc for hash tables
++ * instead of vmalloc.
++ */
++
++#define DRM_FILE_HASH_ORDER 8
++#define DRM_MM_INIT_MAX_PAGES 256
++
++/*@}*/
++
++#include "drm_compat.h"
++
++/***********************************************************************/
++/** \name Macros to make printk easier */
++/*@{*/
++
++/**
++ * Error output.
++ *
++ * \param fmt printf() like format string.
++ * \param arg arguments
++ */
++#define DRM_ERROR(fmt, arg...) \
++      printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg)
++
++/**
++ * Memory error output.
++ *
++ * \param area memory area where the error occurred.
++ * \param fmt printf() like format string.
++ * \param arg arguments
++ */
++#define DRM_MEM_ERROR(area, fmt, arg...) \
++      printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \
++             drm_mem_stats[area].name , ##arg)
++#define DRM_INFO(fmt, arg...)  printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
++
++/**
++ * Debug output.
++ *
++ * \param fmt printf() like format string.
++ * \param arg arguments
++ */
++#if DRM_DEBUG_CODE
++#define DRM_DEBUG(fmt, arg...)                                                \
++      do {                                                            \
++              if ( drm_debug )                                        \
++                      printk(KERN_DEBUG                               \
++                             "[" DRM_NAME ":%s] " fmt ,               \
++                             __FUNCTION__ , ##arg);                   \
++      } while (0)
++#else
++#define DRM_DEBUG(fmt, arg...)                 do { } while (0)
++#endif
++
++#define DRM_PROC_LIMIT (PAGE_SIZE-80)
++
++#define DRM_PROC_PRINT(fmt, arg...)                                   \
++   len += sprintf(&buf[len], fmt , ##arg);                            \
++   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
++
++#define DRM_PROC_PRINT_RET(ret, fmt, arg...)                          \
++   len += sprintf(&buf[len], fmt , ##arg);                            \
++   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
++
++/*@}*/
++
++/***********************************************************************/
++/** \name Internal types and structures */
++/*@{*/
++
++#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
++#define DRM_MIN(a,b) min(a,b)
++#define DRM_MAX(a,b) max(a,b)
++
++#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
++#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
++#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
++
++#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
++/**
++ * Get the private SAREA mapping.
++ *
++ * \param _dev DRM device.
++ * \param _ctx context number.
++ * \param _map output mapping.
++ */
++#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {     \
++      (_map) = (_dev)->context_sareas[_ctx];          \
++} while(0)
++
++/**
++ * Test that the hardware lock is held by the caller, returning otherwise.
++ *
++ * \param dev DRM device.
++ * \param file_priv DRM file private pointer of the caller.
++ */
++#define LOCK_TEST_WITH_RETURN( dev, file_priv )                               \
++do {                                                                  \
++      if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||           \
++           dev->lock.file_priv != file_priv ) {                       \
++              DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
++                         __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
++                         dev->lock.file_priv, file_priv );            \
++              return -EINVAL;                                         \
++      }                                                               \
++} while (0)
++
++/**
++ * Copy and IOCTL return string to user space
++ */
++#define DRM_COPY( name, value )                                               \
++      len = strlen( value );                                          \
++      if ( len > name##_len ) len = name##_len;                       \
++      name##_len = strlen( value );                                   \
++      if ( len && name ) {                                            \
++              if ( copy_to_user( name, value, len ) )                 \
++                      return -EFAULT;                                 \
++      }
++
++/**
++ * Ioctl function type.
++ *
++ * \param dev DRM device structure
++ * \param data pointer to kernel-space stored data, copied in and out according
++ *           to ioctl description.
++ * \param file_priv DRM file private pointer.
++ */
++typedef int drm_ioctl_t(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++
++typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
++                             unsigned long arg);
++
++#define DRM_AUTH        0x1
++#define DRM_MASTER      0x2
++#define DRM_ROOT_ONLY   0x4
++
++struct drm_ioctl_desc {
++      unsigned int cmd;
++      drm_ioctl_t *func;
++      int flags;
++};
++/**
++ * Creates a driver or general drm_ioctl_desc array entry for the given
++ * ioctl, for use by drm_ioctl().
++ */
++#define DRM_IOCTL_DEF(ioctl, func, flags) \
++      [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
++
++struct drm_magic_entry {
++      struct list_head head;
++      struct drm_hash_item hash_item;
++      struct drm_file *priv;
++};
++
++struct drm_vma_entry {
++      struct list_head head;
++      struct vm_area_struct *vma;
++      pid_t pid;
++};
++
++/**
++ * DMA buffer.
++ */
++struct drm_buf {
++      int idx;                       /**< Index into master buflist */
++      int total;                     /**< Buffer size */
++      int order;                     /**< log-base-2(total) */
++      int used;                      /**< Amount of buffer in use (for DMA) */
++      unsigned long offset;          /**< Byte offset (used internally) */
++      void *address;                 /**< Address of buffer */
++      unsigned long bus_address;     /**< Bus address of buffer */
++      struct drm_buf *next;          /**< Kernel-only: used for free list */
++      __volatile__ int waiting;      /**< On kernel DMA queue */
++      __volatile__ int pending;      /**< On hardware DMA queue */
++      wait_queue_head_t dma_wait;    /**< Processes waiting */
++      struct drm_file *file_priv;    /**< Private of holding file descr */
++      int context;                   /**< Kernel queue for this buffer */
++      int while_locked;              /**< Dispatch this buffer while locked */
++      enum {
++              DRM_LIST_NONE = 0,
++              DRM_LIST_FREE = 1,
++              DRM_LIST_WAIT = 2,
++              DRM_LIST_PEND = 3,
++              DRM_LIST_PRIO = 4,
++              DRM_LIST_RECLAIM = 5
++      } list;                        /**< Which list we're on */
++
++      int dev_priv_size;              /**< Size of buffer private storage */
++      void *dev_private;              /**< Per-buffer private storage */
++};
++
++/** bufs is one longer than it has to be */
++struct drm_waitlist {
++      int count;                      /**< Number of possible buffers */
++      struct drm_buf **bufs;          /**< List of pointers to buffers */
++      struct drm_buf **rp;                    /**< Read pointer */
++      struct drm_buf **wp;                    /**< Write pointer */
++      struct drm_buf **end;           /**< End pointer */
++      spinlock_t read_lock;
++      spinlock_t write_lock;
++};
++
++struct drm_freelist {
++      int initialized;               /**< Freelist in use */
++      atomic_t count;                /**< Number of free buffers */
++      struct drm_buf *next;          /**< End pointer */
++
++      wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
++      int low_mark;                  /**< Low water mark */
++      int high_mark;                 /**< High water mark */
++      atomic_t wfh;                  /**< If waiting for high mark */
++      spinlock_t lock;
++};
++
++typedef struct drm_dma_handle {
++      dma_addr_t busaddr;
++      void *vaddr;
++      size_t size;
++} drm_dma_handle_t;
++
++/**
++ * Buffer entry.  There is one of this for each buffer size order.
++ */
++struct drm_buf_entry {
++      int buf_size;                   /**< size */
++      int buf_count;                  /**< number of buffers */
++      struct drm_buf *buflist;                /**< buffer list */
++      int seg_count;
++      int page_order;
++      struct drm_dma_handle **seglist;
++      struct drm_freelist freelist;
++};
++
++
++enum drm_ref_type {
++      _DRM_REF_USE = 0,
++      _DRM_REF_TYPE1,
++      _DRM_NO_REF_TYPES
++};
++
++
++/** File private data */
++struct drm_file {
++      int authenticated;
++      int master;
++      pid_t pid;
++      uid_t uid;
++      drm_magic_t magic;
++      unsigned long ioctl_count;
++      struct list_head lhead;
++      struct drm_minor *minor;
++      int remove_auth_on_close;
++      unsigned long lock_count;
++
++      /*
++       * The user object hash table is global and resides in the
++       * drm_device structure. We protect the lists and hash tables with the
++       * device struct_mutex. A bit coarse-grained but probably the best
++       * option.
++       */
++
++      struct list_head refd_objects;
++
++      /** Mapping of mm object handles to object pointers. */
++      struct idr object_idr;
++      /** Lock for synchronization of access to object_idr. */
++      spinlock_t table_lock;
++
++      struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
++      struct file *filp;
++      void *driver_priv;
++};
++
++/** Wait queue */
++struct drm_queue {
++      atomic_t use_count;             /**< Outstanding uses (+1) */
++      atomic_t finalization;          /**< Finalization in progress */
++      atomic_t block_count;           /**< Count of processes waiting */
++      atomic_t block_read;            /**< Queue blocked for reads */
++      wait_queue_head_t read_queue;   /**< Processes waiting on block_read */
++      atomic_t block_write;           /**< Queue blocked for writes */
++      wait_queue_head_t write_queue;  /**< Processes waiting on block_write */
++#if 1
++      atomic_t total_queued;          /**< Total queued statistic */
++      atomic_t total_flushed;         /**< Total flushes statistic */
++      atomic_t total_locks;           /**< Total locks statistics */
++#endif
++      enum drm_ctx_flags flags;       /**< Context preserving and 2D-only */
++      struct drm_waitlist waitlist;   /**< Pending buffers */
++      wait_queue_head_t flush_queue;  /**< Processes waiting until flush */
++};
++
++/**
++ * Lock data.
++ */
++struct drm_lock_data {
++      struct drm_hw_lock *hw_lock;            /**< Hardware lock */
++      /** Private of lock holder's file (NULL=kernel) */
++      struct drm_file *file_priv;
++      wait_queue_head_t lock_queue;   /**< Queue of blocked processes */
++      unsigned long lock_time;        /**< Time of last lock in jiffies */
++      spinlock_t spinlock;
++      uint32_t kernel_waiters;
++      uint32_t user_waiters;
++      int idle_has_lock;
++};
++
++/**
++ * DMA data.
++ */
++struct drm_device_dma {
++
++      struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];   /**< buffers, grouped by their size order */
++      int buf_count;                  /**< total number of buffers */
++      struct drm_buf **buflist;               /**< Vector of pointers into drm_device_dma::bufs */
++      int seg_count;
++      int page_count;                 /**< number of pages */
++      unsigned long *pagelist;        /**< page list */
++      unsigned long byte_count;
++      enum {
++              _DRM_DMA_USE_AGP = 0x01,
++              _DRM_DMA_USE_SG = 0x02,
++              _DRM_DMA_USE_FB = 0x04,
++              _DRM_DMA_USE_PCI_RO = 0x08
++      } flags;
++
++};
++
++/**
++ * AGP memory entry.  Stored as a doubly linked list.
++ */
++struct drm_agp_mem {
++      unsigned long handle;           /**< handle */
++      DRM_AGP_MEM *memory;
++      unsigned long bound;            /**< address */
++      int pages;
++      struct list_head head;
++};
++
++/**
++ * AGP data.
++ *
++ * \sa drm_agp_init() and drm_device::agp.
++ */
++struct drm_agp_head {
++      DRM_AGP_KERN agp_info;          /**< AGP device information */
++      struct list_head memory;
++      unsigned long mode;             /**< AGP mode */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
++      struct agp_bridge_data *bridge;
++#endif
++      int enabled;                    /**< whether the AGP bus as been enabled */
++      int acquired;                   /**< whether the AGP device has been acquired */
++      unsigned long base;
++      int agp_mtrr;
++      int cant_use_aperture;
++      unsigned long page_mask;
++};
++
++/**
++ * Scatter-gather memory.
++ */
++struct drm_sg_mem {
++      unsigned long handle;
++      void *virtual;
++      int pages;
++      struct page **pagelist;
++      dma_addr_t *busaddr;
++};
++
++struct drm_sigdata {
++      int context;
++      struct drm_hw_lock *lock;
++};
++
++
++/*
++ * Generic memory manager structs
++ */
++
++struct drm_mm_node {
++      struct list_head fl_entry;
++      struct list_head ml_entry;
++      int free;
++      unsigned long start;
++      unsigned long size;
++      struct drm_mm *mm;
++      void *private;
++};
++
++struct drm_mm {
++      struct list_head fl_entry;
++      struct list_head ml_entry;
++};
++
++
++/**
++ * Mappings list
++ */
++struct drm_map_list {
++      struct list_head head;          /**< list head */
++      struct drm_hash_item hash;
++      struct drm_map *map;                    /**< mapping */
++      uint64_t user_token;
++      struct drm_mm_node *file_offset_node;
++};
++
++typedef struct drm_map drm_local_map_t;
++
++/**
++ * Context handle list
++ */
++struct drm_ctx_list {
++      struct list_head head;          /**< list head */
++      drm_context_t handle;           /**< context handle */
++      struct drm_file *tag;           /**< associated fd private data */
++};
++
++struct drm_vbl_sig {
++      struct list_head head;
++      unsigned int sequence;
++      struct siginfo info;
++      struct task_struct *task;
++};
++
++/* location of GART table */
++#define DRM_ATI_GART_MAIN 1
++#define DRM_ATI_GART_FB   2
++
++#define DRM_ATI_GART_PCI 1
++#define DRM_ATI_GART_PCIE 2
++#define DRM_ATI_GART_IGP 3
++
++struct drm_ati_pcigart_info {
++      int gart_table_location;
++      int gart_reg_if;
++      void *addr;
++      dma_addr_t bus_addr;
++      dma_addr_t table_mask;
++      dma_addr_t member_mask;
++      struct drm_dma_handle *table_handle;
++      drm_local_map_t mapping;
++      int table_size;
++};
++
++/**
++ * This structure defines the drm_mm memory object, which will be used by the
++ * DRM for its buffer objects.
++ */
++struct drm_gem_object {
++      /** Reference count of this object */
++      struct kref refcount;
++
++      /** Handle count of this object. Each handle also holds a reference */
++      struct kref handlecount;
++
++      /** Related drm device */
++      struct drm_device *dev;
++      
++      /** File representing the shmem storage */
++      struct file *filp;
++
++      /**
++       * Size of the object, in bytes.  Immutable over the object's
++       * lifetime.
++       */
++      size_t size;
++
++      /**
++       * Global name for this object, starts at 1. 0 means unnamed.
++       * Access is covered by the object_name_lock in the related drm_device
++       */
++      int name;
++
++      /**
++       * Memory domains. These monitor which caches contain read/write data
++       * related to the object. When transitioning from one set of domains
++       * to another, the driver is called to ensure that caches are suitably
++       * flushed and invalidated
++       */
++      uint32_t        read_domains;
++      uint32_t        write_domain;
++
++      /**
++       * While validating an exec operation, the
++       * new read/write domain values are computed here.
++       * They will be transferred to the above values
++       * at the point that any cache flushing occurs
++       */
++      uint32_t        pending_read_domains;
++      uint32_t        pending_write_domain;
++
++      void *driver_private;
++};
++
++#include "drm_objects.h"
++
++/**
++ * DRM driver structure. This structure represent the common code for
++ * a family of cards. There will one drm_device for each card present
++ * in this family
++ */
++
++struct drm_driver {
++      int (*load) (struct drm_device *, unsigned long flags);
++      int (*firstopen) (struct drm_device *);
++      int (*open) (struct drm_device *, struct drm_file *);
++      void (*preclose) (struct drm_device *, struct drm_file *file_priv);
++      void (*postclose) (struct drm_device *, struct drm_file *);
++      void (*lastclose) (struct drm_device *);
++      int (*unload) (struct drm_device *);
++      int (*suspend) (struct drm_device *, pm_message_t state);
++      int (*resume) (struct drm_device *);
++      int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
++      void (*dma_ready) (struct drm_device *);
++      int (*dma_quiescent) (struct drm_device *);
++      int (*context_ctor) (struct drm_device *dev, int context);
++      int (*context_dtor) (struct drm_device *dev, int context);
++      int (*kernel_context_switch) (struct drm_device *dev, int old,
++                                    int new);
++      void (*kernel_context_switch_unlock) (struct drm_device * dev);
++      /**
++       * get_vblank_counter - get raw hardware vblank counter
++       * @dev: DRM device
++       * @crtc: counter to fetch
++       *
++       * Driver callback for fetching a raw hardware vblank counter
++       * for @crtc.  If a device doesn't have a hardware counter, the
++       * driver can simply return the value of drm_vblank_count and
++       * make the enable_vblank() and disable_vblank() hooks into no-ops,
++       * leaving interrupts enabled at all times.
++       *
++       * Wraparound handling and loss of events due to modesetting is dealt
++       * with in the DRM core code.
++       *
++       * RETURNS
++       * Raw vblank counter value.
++       */
++      u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
++
++      /**
++       * enable_vblank - enable vblank interrupt events
++       * @dev: DRM device
++       * @crtc: which irq to enable
++       *
++       * Enable vblank interrupts for @crtc.  If the device doesn't have
++       * a hardware vblank counter, this routine should be a no-op, since
++       * interrupts will have to stay on to keep the count accurate.
++       *
++       * RETURNS
++       * Zero on success, appropriate errno if the given @crtc's vblank
++       * interrupt cannot be enabled.
++       */
++      int (*enable_vblank) (struct drm_device *dev, int crtc);
++
++      /**
++       * disable_vblank - disable vblank interrupt events
++       * @dev: DRM device
++       * @crtc: which irq to enable
++       *
++       * Disable vblank interrupts for @crtc.  If the device doesn't have
++       * a hardware vblank counter, this routine should be a no-op, since
++       * interrupts will have to stay on to keep the count accurate.
++       */
++      void (*disable_vblank) (struct drm_device *dev, int crtc);
++      int (*dri_library_name) (struct drm_device *dev, char * buf);
++
++      /**
++       * Called by \c drm_device_is_agp.  Typically used to determine if a
++       * card is really attached to AGP or not.
++       *
++       * \param dev  DRM device handle
++       *
++       * \returns
++       * One of three values is returned depending on whether or not the
++       * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
++       * (return of 1), or may or may not be AGP (return of 2).
++       */
++      int (*device_is_agp) (struct drm_device *dev);
++
++/* these have to be filled in */
++       irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
++      void (*irq_preinstall) (struct drm_device *dev);
++      int (*irq_postinstall) (struct drm_device *dev);
++      void (*irq_uninstall) (struct drm_device *dev);
++      void (*reclaim_buffers) (struct drm_device *dev,
++                               struct drm_file *file_priv);
++      void (*reclaim_buffers_locked) (struct drm_device *dev,
++                                      struct drm_file *file_priv);
++      void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
++                                          struct drm_file *file_priv);
++      unsigned long (*get_map_ofs) (struct drm_map *map);
++      unsigned long (*get_reg_ofs) (struct drm_device *dev);
++      void (*set_version) (struct drm_device *dev,
++                           struct drm_set_version *sv);
++
++      int (*proc_init)(struct drm_minor *minor);
++      void (*proc_cleanup)(struct drm_minor *minor);
++
++      /**
++       * Driver-specific constructor for drm_gem_objects, to set up
++       * obj->driver_private.
++       *
++       * Returns 0 on success.
++       */
++      int (*gem_init_object) (struct drm_gem_object *obj);
++      void (*gem_free_object) (struct drm_gem_object *obj);
++
++      struct drm_fence_driver *fence_driver;
++      struct drm_bo_driver *bo_driver;
++
++      int major;
++      int minor;
++      int patchlevel;
++      char *name;
++      char *desc;
++      char *date;
++
++/* variables */
++      u32 driver_features;
++      int dev_priv_size;
++      struct drm_ioctl_desc *ioctls;
++      int num_ioctls;
++      struct file_operations fops;
++      struct pci_driver pci_driver;
++};
++
++#define DRM_MINOR_UNASSIGNED 0
++#define DRM_MINOR_LEGACY 1
++
++/**
++ * DRM minor structure. This structure represents a drm minor number.
++ */
++struct drm_minor {
++      int index;                      /**< Minor device number */
++      int type;                       /**< Control or render */
++      dev_t device;                   /**< Device number for mknod */
++      struct device kdev;             /**< Linux device */
++      struct drm_device *dev;
++      struct proc_dir_entry *dev_root;  /**< proc directory entry */
++      struct class_device *dev_class;
++};
++
++
++/**
++ * DRM device structure. This structure represent a complete card that
++ * may contain multiple heads.
++ */
++struct drm_device {
++      char *unique;                   /**< Unique identifier: e.g., busid */
++      int unique_len;                 /**< Length of unique field */
++      char *devname;                  /**< For /proc/interrupts */
++      int if_version;                 /**< Highest interface version set */
++
++      int blocked;                    /**< Blocked due to VC switch? */
++
++      /** \name Locks */
++      /*@{ */
++      spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
++      struct mutex struct_mutex;      /**< For others */
++      /*@} */
++
++      /** \name Usage Counters */
++      /*@{ */
++      int open_count;                 /**< Outstanding files open */
++      atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
++      atomic_t vma_count;             /**< Outstanding vma areas open */
++      int buf_use;                    /**< Buffers in use -- cannot alloc */
++      atomic_t buf_alloc;             /**< Buffer allocation in progress */
++      /*@} */
++
++      /** \name Performance counters */
++      /*@{ */
++      unsigned long counters;
++      enum drm_stat_type types[15];
++      atomic_t counts[15];
++      /*@} */
++
++      /** \name Authentication */
++      /*@{ */
++      struct list_head filelist;
++      struct drm_open_hash magiclist;
++      struct list_head magicfree;
++      /*@} */
++
++      /** \name Memory management */
++      /*@{ */
++      struct list_head maplist;       /**< Linked list of regions */
++      int map_count;                  /**< Number of mappable regions */
++      struct drm_open_hash map_hash;       /**< User token hash table for maps */
++      struct drm_mm offset_manager;        /**< User token manager */
++      struct drm_open_hash object_hash;    /**< User token hash table for objects */
++      struct address_space *dev_mapping;  /**< For unmap_mapping_range() */
++      struct page *ttm_dummy_page;
++
++      /** \name Context handle management */
++      /*@{ */
++      struct list_head ctxlist;       /**< Linked list of context handles */
++      int ctx_count;                  /**< Number of context handles */
++      struct mutex ctxlist_mutex;     /**< For ctxlist */
++
++      struct idr ctx_idr;
++
++      struct list_head vmalist;       /**< List of vmas (for debugging) */
++      struct drm_lock_data lock;              /**< Information on hardware lock */
++      /*@} */
++
++      /** \name DMA queues (contexts) */
++      /*@{ */
++      int queue_count;                /**< Number of active DMA queues */
++      int queue_reserved;             /**< Number of reserved DMA queues */
++      int queue_slots;                /**< Actual length of queuelist */
++      struct drm_queue **queuelist;   /**< Vector of pointers to DMA queues */
++      struct drm_device_dma *dma;             /**< Optional pointer for DMA support */
++      /*@} */
++
++      /** \name Context support */
++      /*@{ */
++      int irq;                        /**< Interrupt used by board */
++      int irq_enabled;                /**< True if irq handler is enabled */
++      __volatile__ long context_flag; /**< Context swapping flag */
++      __volatile__ long interrupt_flag; /**< Interruption handler flag */
++      __volatile__ long dma_flag;     /**< DMA dispatch flag */
++      struct timer_list timer;        /**< Timer for delaying ctx switch */
++      wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
++      int last_checked;               /**< Last context checked for DMA */
++      int last_context;               /**< Last current context */
++      unsigned long last_switch;      /**< jiffies at last context switch */
++      /*@} */
++
++      struct work_struct work;
++
++      /** \name VBLANK IRQ support */
++      /*@{ */
++
++      /*
++       * At load time, disabling the vblank interrupt won't be allowed since
++       * old clients may not call the modeset ioctl and therefore misbehave.
++       * Once the modeset ioctl *has* been called though, we can safely
++       * disable them when unused.
++       */
++      int vblank_disable_allowed;
++
++      wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
++      atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
++      spinlock_t vbl_lock;
++      struct list_head *vbl_sigs;             /**< signal list to send on VBLANK */
++      atomic_t vbl_signal_pending;    /* number of signals pending on all crtcs*/
++      atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
++      u32 *last_vblank;               /* protected by dev->vbl_lock, used */
++                                      /* for wraparound handling */
++      int *vblank_enabled;            /* so we don't call enable more than
++                                         once per disable */
++      int *vblank_inmodeset;          /* Display driver is setting mode */
++      struct timer_list vblank_disable_timer;
++
++      u32 max_vblank_count;           /**< size of vblank counter register */
++      spinlock_t tasklet_lock;        /**< For drm_locked_tasklet */
++      void (*locked_tasklet_func)(struct drm_device *dev);
++
++      /*@} */
++      cycles_t ctx_start;
++      cycles_t lck_start;
++
++      struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
++      wait_queue_head_t buf_readers;  /**< Processes waiting to read */
++      wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */
++
++      struct drm_agp_head *agp;               /**< AGP data */
++
++      struct pci_dev *pdev;           /**< PCI device structure */
++      int pci_vendor;                 /**< PCI vendor id */
++      int pci_device;                 /**< PCI device id */
++#ifdef __alpha__
++      struct pci_controller *hose;
++#endif
++      int num_crtcs;                  /**< Number of CRTCs on this device */
++      struct drm_sg_mem *sg;          /**< Scatter gather memory */
++      void *dev_private;              /**< device private data */
++      struct drm_sigdata sigdata;             /**< For block_all_signals */
++      sigset_t sigmask;
++
++      struct drm_driver *driver;
++      drm_local_map_t *agp_buffer_map;
++      unsigned int agp_buffer_token;
++      struct drm_minor *primary;              /**< render type primary screen head */
++
++      struct drm_fence_manager fm;
++      struct drm_buffer_manager bm;
++
++      /** \name Drawable information */
++      /*@{ */
++      spinlock_t drw_lock;
++      struct idr drw_idr;
++      /*@} */
++
++      /** \name GEM information */
++      /*@{ */
++      spinlock_t object_name_lock;
++      struct idr object_name_idr;
++      atomic_t object_count;
++      atomic_t object_memory;
++      atomic_t pin_count;
++      atomic_t pin_memory;
++      atomic_t gtt_count;
++      atomic_t gtt_memory;
++      uint32_t gtt_total;
++      uint32_t invalidate_domains;    /* domains pending invalidation */
++      uint32_t flush_domains;         /* domains pending flush */
++      /*@} */
++};
++
++#if __OS_HAS_AGP
++struct drm_agp_ttm_backend {
++      struct drm_ttm_backend backend;
++      DRM_AGP_MEM *mem;
++      struct agp_bridge_data *bridge;
++      int populated;
++};
++#endif
++
++
++static __inline__ int drm_core_check_feature(struct drm_device *dev,
++                                           int feature)
++{
++      return ((dev->driver->driver_features & feature) ? 1 : 0);
++}
++
++#ifdef __alpha__
++#define drm_get_pci_domain(dev) dev->hose->index
++#else
++#define drm_get_pci_domain(dev) 0
++#endif
++
++#if __OS_HAS_AGP
++static inline int drm_core_has_AGP(struct drm_device *dev)
++{
++      return drm_core_check_feature(dev, DRIVER_USE_AGP);
++}
++#else
++#define drm_core_has_AGP(dev) (0)
++#endif
++
++#if __OS_HAS_MTRR
++static inline int drm_core_has_MTRR(struct drm_device *dev)
++{
++      return drm_core_check_feature(dev, DRIVER_USE_MTRR);
++}
++
++#define DRM_MTRR_WC           MTRR_TYPE_WRCOMB
++
++static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
++                             unsigned int flags)
++{
++      return mtrr_add(offset, size, flags, 1);
++}
++
++static inline int drm_mtrr_del(int handle, unsigned long offset,
++                             unsigned long size, unsigned int flags)
++{
++      return mtrr_del(handle, offset, size);
++}
++
++#else
++static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
++                             unsigned int flags)
++{
++      return -ENODEV;
++}
++
++static inline int drm_mtrr_del(int handle, unsigned long offset,
++                             unsigned long size, unsigned int flags)
++{
++      return -ENODEV;
++}
++
++#define drm_core_has_MTRR(dev) (0)
++#define DRM_MTRR_WC           0
++#endif
++
++
++/******************************************************************/
++/** \name Internal function definitions */
++/*@{*/
++
++                              /* Driver support (drm_drv.h) */
++extern int drm_fb_loaded;
++extern int drm_init(struct drm_driver *driver,
++                            struct pci_device_id *pciidlist);
++extern void drm_exit(struct drm_driver *driver);
++extern void drm_cleanup_pci(struct pci_dev *pdev);
++extern int drm_ioctl(struct inode *inode, struct file *filp,
++                   unsigned int cmd, unsigned long arg);
++extern long drm_unlocked_ioctl(struct file *filp,
++                             unsigned int cmd, unsigned long arg);
++extern long drm_compat_ioctl(struct file *filp,
++                           unsigned int cmd, unsigned long arg);
++
++extern int drm_lastclose(struct drm_device *dev);
++
++                              /* Device support (drm_fops.h) */
++extern int drm_open(struct inode *inode, struct file *filp);
++extern int drm_stub_open(struct inode *inode, struct file *filp);
++extern int drm_fasync(int fd, struct file *filp, int on);
++extern int drm_release(struct inode *inode, struct file *filp);
++unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
++
++                              /* Mapping support (drm_vm.h) */
++extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
++extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
++extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
++extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
++
++                              /* Memory management support (drm_memory.h) */
++#include "drm_memory.h"
++extern void drm_mem_init(void);
++extern int drm_mem_info(char *buf, char **start, off_t offset,
++                      int request, int *eof, void *data);
++extern void *drm_calloc(size_t nmemb, size_t size, int area);
++extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
++extern unsigned long drm_alloc_pages(int order, int area);
++extern void drm_free_pages(unsigned long address, int order, int area);
++extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
++extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
++extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
++extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
++                                            struct page **pages,
++                                            unsigned long num_pages,
++                                            uint32_t gtt_offset);
++extern int drm_unbind_agp(DRM_AGP_MEM * handle);
++
++extern void drm_free_memctl(size_t size);
++extern int drm_alloc_memctl(size_t size);
++extern void drm_query_memctl(uint64_t *cur_used,
++                           uint64_t *emer_used,
++                           uint64_t *low_threshold,
++                           uint64_t *high_threshold,
++                           uint64_t *emer_threshold);
++extern void drm_init_memctl(size_t low_threshold,
++                          size_t high_threshold,
++                          size_t unit_size);
++
++                              /* Misc. IOCTL support (drm_ioctl.h) */
++extern int drm_irq_by_busid(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int drm_getunique(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_setunique(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_getmap(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_getclient(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_getstats(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_setversion(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++extern int drm_noop(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv);
++
++                              /* Context IOCTL support (drm_context.h) */
++extern int drm_resctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_addctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_modctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_getctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_switchctx(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_newctx(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_rmctx(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv);
++
++extern int drm_ctxbitmap_init(struct drm_device *dev);
++extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
++extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
++
++extern int drm_setsareactx(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int drm_getsareactx(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++
++                              /* Drawable IOCTL support (drm_drawable.h) */
++extern int drm_adddraw(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern int drm_rmdraw(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_update_drawable_info(struct drm_device *dev, void *data,
++                                  struct drm_file *file_priv);
++extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
++                                                     drm_drawable_t id);
++extern void drm_drawable_free_all(struct drm_device *dev);
++
++                              /* Authentication IOCTL support (drm_auth.h) */
++extern int drm_getmagic(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_authmagic(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++
++                              /* Locking IOCTL support (drm_lock.h) */
++extern int drm_lock(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv);
++extern int drm_unlock(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv);
++extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
++extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
++extern void drm_idlelock_take(struct drm_lock_data *lock_data);
++extern void drm_idlelock_release(struct drm_lock_data *lock_data);
++
++/*
++ * These are exported to drivers so that they can implement fencing using
++ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
++ */
++
++extern int drm_i_have_hw_lock(struct drm_device *dev,
++                            struct drm_file *file_priv);
++
++                              /* Buffer management support (drm_bufs.h) */
++extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request);
++extern int drm_addmap(struct drm_device *dev, unsigned int offset,
++                    unsigned int size, enum drm_map_type type,
++                    enum drm_map_flags flags, drm_local_map_t ** map_ptr);
++extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
++extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
++extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int drm_addbufs(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern int drm_infobufs(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_markbufs(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_freebufs(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_mapbufs(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern int drm_order(unsigned long size);
++extern unsigned long drm_get_resource_start(struct drm_device *dev,
++                                          unsigned int resource);
++extern unsigned long drm_get_resource_len(struct drm_device *dev,
++                                        unsigned int resource);
++extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
++                                                drm_local_map_t *map);
++
++
++                              /* DMA support (drm_dma.h) */
++extern int drm_dma_setup(struct drm_device *dev);
++extern void drm_dma_takedown(struct drm_device *dev);
++extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
++extern void drm_core_reclaim_buffers(struct drm_device *dev,
++                                   struct drm_file *filp);
++
++                              /* IRQ support (drm_irq.h) */
++extern int drm_control(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
++extern int drm_irq_install(struct drm_device *dev);
++extern int drm_irq_uninstall(struct drm_device *dev);
++extern void drm_driver_irq_preinstall(struct drm_device *dev);
++extern void drm_driver_irq_postinstall(struct drm_device *dev);
++extern void drm_driver_irq_uninstall(struct drm_device *dev);
++
++extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
++extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
++extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
++extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
++extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
++extern void drm_handle_vblank(struct drm_device *dev, int crtc);
++extern int drm_vblank_get(struct drm_device *dev, int crtc);
++extern void drm_vblank_put(struct drm_device *dev, int crtc);
++
++                              /* Modesetting support */
++extern int drm_modeset_ctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++
++                              /* AGP/GART support (drm_agpsupport.h) */
++extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
++extern int drm_agp_acquire(struct drm_device *dev);
++extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern int drm_agp_release(struct drm_device *dev);
++extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
++extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
++extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
++extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
++extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
++extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
++extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
++extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type);
++#else
++extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
++#endif
++extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
++extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
++extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
++extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
++extern void drm_agp_chipset_flush(struct drm_device *dev);
++                              /* Stub support (drm_stub.h) */
++extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
++                   struct drm_driver *driver);
++extern int drm_put_dev(struct drm_device *dev);
++extern int drm_put_minor(struct drm_device *dev);
++extern unsigned int drm_debug; /* 1 to enable debug output */
++
++extern struct class *drm_class;
++extern struct proc_dir_entry *drm_proc_root;
++
++extern struct idr drm_minors_idr;
++
++extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
++
++                              /* Proc support (drm_proc.h) */
++int drm_proc_init(struct drm_minor *minor, int minor_id,
++                struct proc_dir_entry *root);
++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
++
++                              /* Scatter Gather Support (drm_scatter.h) */
++extern void drm_sg_cleanup(struct drm_sg_mem * entry);
++extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
++extern int drm_sg_free(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++
++                             /* ATI PCIGART support (ati_pcigart.h) */
++extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
++extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
++
++extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
++                         size_t align, dma_addr_t maxaddr);
++extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
++extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
++
++                             /* sysfs support (drm_sysfs.c) */
++struct drm_sysfs_class;
++extern struct class *drm_sysfs_create(struct module *owner, char *name);
++extern void drm_sysfs_destroy(void);
++extern int drm_sysfs_device_add(struct drm_minor *minor);
++extern void drm_sysfs_device_remove(struct drm_minor *minor);
++
++/*
++ * Basic memory manager support (drm_mm.c)
++ */
++
++extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
++                                             unsigned alignment);
++extern void drm_mm_put_block(struct drm_mm_node *cur);
++extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
++                                              unsigned alignment, int best_match);
++extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
++extern void drm_mm_takedown(struct drm_mm *mm);
++extern int drm_mm_clean(struct drm_mm *mm);
++extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
++extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
++extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
++
++static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
++{
++      return block->mm;
++}
++
++/* Graphics Execution Manager library functions (drm_gem.c) */
++int
++drm_gem_init (struct drm_device *dev);
++
++void
++drm_gem_object_free (struct kref *kref);
++
++struct drm_gem_object *
++drm_gem_object_alloc(struct drm_device *dev, size_t size);
++
++void
++drm_gem_object_handle_free (struct kref *kref);
++    
++static inline void drm_gem_object_reference(struct drm_gem_object *obj)
++{
++      kref_get(&obj->refcount);
++}
++
++static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
++{
++      if (obj == NULL)
++              return;
++
++      kref_put (&obj->refcount, drm_gem_object_free);
++}
++
++int
++drm_gem_handle_create(struct drm_file *file_priv,
++                    struct drm_gem_object *obj,
++                    int *handlep);
++
++static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
++{
++      drm_gem_object_reference (obj);
++      kref_get(&obj->handlecount);
++}
++
++static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
++{
++      if (obj == NULL)
++              return;
++      
++      /*
++       * Must bump handle count first as this may be the last
++       * ref, in which case the object would disappear before we
++       * checked for a name
++       */
++      kref_put (&obj->handlecount, drm_gem_object_handle_free);
++      drm_gem_object_unreference (obj);
++}
++
++struct drm_gem_object *
++drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
++                    int handle);
++int drm_gem_close_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int drm_gem_open_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++
++void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
++void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
++
++extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
++extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
++extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
++
++static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
++                                                 unsigned int token)
++{
++      struct drm_map_list *_entry;
++      list_for_each_entry(_entry, &dev->maplist, head)
++              if (_entry->user_token == token)
++                      return _entry->map;
++      return NULL;
++}
++
++static __inline__ int drm_device_is_agp(struct drm_device *dev)
++{
++      if ( dev->driver->device_is_agp != NULL ) {
++              int err = (*dev->driver->device_is_agp)(dev);
++
++              if (err != 2) {
++                      return err;
++              }
++      }
++
++      return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
++}
++
++static __inline__ int drm_device_is_pcie(struct drm_device *dev)
++{
++      return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
++}
++
++static __inline__ void drm_core_dropmap(struct drm_map *map)
++{
++}
++
++#ifndef DEBUG_MEMORY
++/** Wrapper around kmalloc() */
++static __inline__ void *drm_alloc(size_t size, int area)
++{
++      return kmalloc(size, GFP_KERNEL);
++}
++
++/** Wrapper around kfree() */
++static __inline__ void drm_free(void *pt, size_t size, int area)
++{
++      kfree(pt);
++}
++#else
++extern void *drm_alloc(size_t size, int area);
++extern void drm_free(void *pt, size_t size, int area);
++#endif
++
++/*
++ * Accounting variants of standard calls.
++ */
++
++static inline void *drm_ctl_alloc(size_t size, int area)
++{
++      void *ret;
++      if (drm_alloc_memctl(size))
++              return NULL;
++      ret = drm_alloc(size, area);
++      if (!ret)
++              drm_free_memctl(size);
++      return ret;
++}
++
++static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
++{
++      void *ret;
++
++      if (drm_alloc_memctl(nmemb*size))
++              return NULL;
++      ret = drm_calloc(nmemb, size, area);
++      if (!ret)
++              drm_free_memctl(nmemb*size);
++      return ret;
++}
++
++static inline void drm_ctl_free(void *pt, size_t size, int area)
++{
++      drm_free(pt, size, area);
++      drm_free_memctl(size);
++}
++
++/*@}*/
++
++#endif                                /* __KERNEL__ */
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_proc.c git-nokia/drivers/gpu/drm-tungsten/drm_proc.c
+--- git/drivers/gpu/drm-tungsten/drm_proc.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_proc.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,743 @@
++/**
++ * \file drm_proc.c
++ * /proc support for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ *
++ * \par Acknowledgements:
++ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
++ *    the problem with the proc files not outputting all their information.
++ */
++
++/*
++ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++static int drm_name_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data);
++static int drm_vm_info(char *buf, char **start, off_t offset,
++                     int request, int *eof, void *data);
++static int drm_clients_info(char *buf, char **start, off_t offset,
++                          int request, int *eof, void *data);
++static int drm_queues_info(char *buf, char **start, off_t offset,
++                         int request, int *eof, void *data);
++static int drm_bufs_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data);
++static int drm_objects_info(char *buf, char **start, off_t offset,
++                       int request, int *eof, void *data);
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++                           int request, int *eof, void *data);
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data);
++#if DRM_DEBUG_CODE
++static int drm_vma_info(char *buf, char **start, off_t offset,
++                      int request, int *eof, void *data);
++#endif
++
++/**
++ * Proc file list.
++ */
++static struct drm_proc_list {
++      const char *name;       /**< file name */
++      int (*f) (char *, char **, off_t, int, int *, void *);          /**< proc callback*/
++} drm_proc_list[] = {
++      {"name", drm_name_info},
++      {"mem", drm_mem_info},
++      {"vm", drm_vm_info},
++      {"clients", drm_clients_info},
++      {"queues", drm_queues_info},
++      {"bufs", drm_bufs_info},
++      {"objects", drm_objects_info},
++      {"gem_names", drm_gem_name_info},
++      {"gem_objects", drm_gem_object_info},
++#if DRM_DEBUG_CODE
++      {"vma", drm_vma_info},
++#endif
++};
++
++#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
++
++/**
++ * Initialize the DRI proc filesystem for a device.
++ *
++ * \param dev DRM device.
++ * \param minor device minor number.
++ * \param root DRI proc dir entry.
++ * \param dev_root resulting DRI device proc dir entry.
++ * \return root entry pointer on success, or NULL on failure.
++ *
++ * Create the DRI proc root entry "/proc/dri", the device proc root entry
++ * "/proc/dri/%minor%/", and each entry in proc_list as
++ * "/proc/dri/%minor%/%name%".
++ */
++int drm_proc_init(struct drm_minor *minor, int minor_id,
++                struct proc_dir_entry *root)
++{
++      struct proc_dir_entry *ent;
++      int i, j;
++      char name[64];
++
++      sprintf(name, "%d", minor_id);
++      minor->dev_root = proc_mkdir(name, root);
++      if (!minor->dev_root) {
++              DRM_ERROR("Cannot create /proc/dri/%s\n", name);
++              return -1;
++      }
++
++      for (i = 0; i < DRM_PROC_ENTRIES; i++) {
++              ent = create_proc_entry(drm_proc_list[i].name,
++                                      S_IFREG | S_IRUGO, minor->dev_root);
++              if (!ent) {
++                      DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
++                                name, drm_proc_list[i].name);
++                      for (j = 0; j < i; j++)
++                              remove_proc_entry(drm_proc_list[i].name,
++                                                minor->dev_root);
++                      remove_proc_entry(name, root);
++                      minor->dev_root = NULL;
++                      return -1;
++              }
++              ent->read_proc = drm_proc_list[i].f;
++              ent->data = minor;
++      }
++      return 0;
++}
++
++/**
++ * Cleanup the proc filesystem resources.
++ *
++ * \param minor device minor number.
++ * \param root DRI proc dir entry.
++ * \param dev_root DRI device proc dir entry.
++ * \return always zero.
++ *
++ * Remove all proc entries created by proc_init().
++ */
++int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
++{
++      int i;
++      char name[64];
++
++      if (!root || !minor->dev_root)
++              return 0;
++
++      for (i = 0; i < DRM_PROC_ENTRIES; i++)
++              remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
++      sprintf(name, "%d", minor->index);
++      remove_proc_entry(name, root);
++
++      return 0;
++}
++
++/**
++ * Called when "/proc/dri/.../name" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * Prints the device name together with the bus id if available.
++ */
++static int drm_name_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      if (dev->unique) {
++              DRM_PROC_PRINT("%s %s %s\n",
++                             dev->driver->pci_driver.name,
++                             pci_name(dev->pdev), dev->unique);
++      } else {
++              DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
++                             pci_name(dev->pdev));
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Called when "/proc/dri/.../vm" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ *
++ * Prints information about all mappings in drm_device::maplist.
++ */
++static int drm__vm_info(char *buf, char **start, off_t offset, int request,
++                      int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_map *map;
++      struct drm_map_list *r_list;
++
++      /* Hardcoded from _DRM_FRAME_BUFFER,
++         _DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
++         _DRM_SCATTER_GATHER, and _DRM_CONSISTENT. */
++      const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
++      const char *type;
++      int i;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("slot     offset       size type flags    "
++                     "address mtrr\n\n");
++      i = 0;
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              map = r_list->map;
++              if (!map)
++                      continue;
++              if (map->type < 0 || map->type > 5)
++                      type = "??";
++              else
++                      type = types[map->type];
++              DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
++                             i,
++                             map->offset,
++                             map->size, type, map->flags,
++                             (unsigned long) r_list->user_token);
++
++              if (map->mtrr < 0) {
++                      DRM_PROC_PRINT("none\n");
++              } else {
++                      DRM_PROC_PRINT("%4d\n", map->mtrr);
++              }
++              i++;
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_vm_info(char *buf, char **start, off_t offset, int request,
++                     int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__vm_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../queues" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__queues_info(char *buf, char **start, off_t offset,
++                          int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      int i;
++      struct drm_queue *q;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("  ctx/flags   use   fin"
++                     "   blk/rw/rwf  wait    flushed     queued"
++                     "      locks\n\n");
++      for (i = 0; i < dev->queue_count; i++) {
++              q = dev->queuelist[i];
++              atomic_inc(&q->use_count);
++              DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
++                                 "%5d/0x%03x %5d %5d"
++                                 " %5d/%c%c/%c%c%c %5Zd\n",
++                                 i,
++                                 q->flags,
++                                 atomic_read(&q->use_count),
++                                 atomic_read(&q->finalization),
++                                 atomic_read(&q->block_count),
++                                 atomic_read(&q->block_read) ? 'r' : '-',
++                                 atomic_read(&q->block_write) ? 'w' : '-',
++                                 waitqueue_active(&q->read_queue) ? 'r' : '-',
++                                 waitqueue_active(&q->
++                                                  write_queue) ? 'w' : '-',
++                                 waitqueue_active(&q->
++                                                  flush_queue) ? 'f' : '-',
++                                 DRM_BUFCOUNT(&q->waitlist));
++              atomic_dec(&q->use_count);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_queues_info(char *buf, char **start, off_t offset, int request,
++                         int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__queues_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../bufs" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
++                        int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      if (!dma || offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT(" o     size count  free  segs pages    kB\n\n");
++      for (i = 0; i <= DRM_MAX_ORDER; i++) {
++              if (dma->bufs[i].buf_count)
++                      DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
++                                     i,
++                                     dma->bufs[i].buf_size,
++                                     dma->bufs[i].buf_count,
++                                     atomic_read(&dma->bufs[i]
++                                                 .freelist.count),
++                                     dma->bufs[i].seg_count,
++                                     dma->bufs[i].seg_count
++                                     * (1 << dma->bufs[i].page_order),
++                                     (dma->bufs[i].seg_count
++                                      * (1 << dma->bufs[i].page_order))
++                                     * PAGE_SIZE / 1024);
++      }
++      DRM_PROC_PRINT("\n");
++      for (i = 0; i < dma->buf_count; i++) {
++              if (i && !(i % 32))
++                      DRM_PROC_PRINT("\n");
++              DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
++      }
++      DRM_PROC_PRINT("\n");
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__bufs_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../objects" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__objects_info(char *buf, char **start, off_t offset, int request,
++                        int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_buffer_manager *bm = &dev->bm;
++      struct drm_fence_manager *fm = &dev->fm;
++      uint64_t used_mem;
++      uint64_t used_emer;
++      uint64_t low_mem;
++      uint64_t high_mem;
++      uint64_t emer_mem;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("Object accounting:\n\n");
++      if (fm->initialized) {
++              DRM_PROC_PRINT("Number of active fence objects: %d.\n",
++                             atomic_read(&fm->count));
++      } else {
++              DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
++      }
++
++      if (bm->initialized) {
++              DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
++                             atomic_read(&bm->count));
++      }
++      DRM_PROC_PRINT("Memory accounting:\n\n");
++      if (bm->initialized) {
++              DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
++      } else {
++              DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
++      }
++
++      drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
++
++      if (used_mem > 16*PAGE_SIZE) {
++              DRM_PROC_PRINT("Used object memory is %lu pages.\n",
++                             (unsigned long) (used_mem >> PAGE_SHIFT));
++      } else {
++              DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
++                             (unsigned long) used_mem);
++      }
++      if (used_emer > 16*PAGE_SIZE) {
++              DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
++                             (unsigned long) (used_emer >> PAGE_SHIFT));
++      } else {
++              DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
++                             (unsigned long) used_emer);
++      }
++      DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
++                     (unsigned long) (low_mem >> PAGE_SHIFT));
++      DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
++                     (unsigned long) (high_mem >> PAGE_SHIFT));
++      DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
++                     (unsigned long) (emer_mem >> PAGE_SHIFT));
++
++      DRM_PROC_PRINT("\n");
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_objects_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__objects_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when "/proc/dri/.../clients" is read.
++ *
++ * \param buf output buffer.
++ * \param start start of output data.
++ * \param offset requested start offset.
++ * \param request requested number of bytes.
++ * \param eof whether there is no more data to return.
++ * \param data private data.
++ * \return number of written bytes.
++ */
++static int drm__clients_info(char *buf, char **start, off_t offset,
++                           int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_file *priv;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("a dev   pid    uid      magic     ioctls\n\n");
++      list_for_each_entry(priv, &dev->filelist, lhead) {
++              DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
++                             priv->authenticated ? 'y' : 'n',
++                             priv->minor->index,
++                             priv->pid,
++                             priv->uid, priv->magic, priv->ioctl_count);
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++/**
++ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
++ */
++static int drm_clients_info(char *buf, char **start, off_t offset,
++                          int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__clients_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++struct drm_gem_name_info_data {
++      int                     len;
++      char                    *buf;
++      int                     eof;
++};
++
++static int drm_gem_one_name_info(int id, void *ptr, void *data)
++{
++      struct drm_gem_object *obj = ptr;
++      struct drm_gem_name_info_data   *nid = data;
++
++      DRM_INFO("name %d size %d\n", obj->name, obj->size);
++      if (nid->eof)
++              return 0;
++
++      nid->len += sprintf(&nid->buf[nid->len],
++                          "%6d%9d%8d%9d\n",
++                          obj->name, obj->size,
++                          atomic_read(&obj->handlecount.refcount),
++                          atomic_read(&obj->refcount.refcount));
++      if (nid->len > DRM_PROC_LIMIT) {
++              nid->eof = 1;
++              return 0;
++      }
++      return 0;
++}
++
++static int drm_gem_name_info(char *buf, char **start, off_t offset,
++                           int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      struct drm_gem_name_info_data nid;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      nid.len = sprintf(buf, "  name     size handles refcount\n");
++      nid.buf = buf;
++      nid.eof = 0;
++      idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
++
++      *start = &buf[offset];
++      *eof = 0;
++      if (nid.len > request + offset)
++              return request;
++      *eof = 1;
++      return nid.len - offset;
++}
++
++static int drm_gem_object_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
++      DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
++      DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
++      DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
++      DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
++      DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++#if DRM_DEBUG_CODE
++
++static int drm__vma_info(char *buf, char **start, off_t offset, int request,
++                       int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int len = 0;
++      struct drm_vma_entry *pt;
++      struct vm_area_struct *vma;
++#if defined(__i386__)
++      unsigned int pgprot;
++#endif
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++
++      DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
++                     atomic_read(&dev->vma_count),
++                     high_memory, virt_to_phys(high_memory));
++      list_for_each_entry(pt, &dev->vmalist, head) {
++              if (!(vma = pt->vma))
++                      continue;
++              DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
++                             pt->pid,
++                             vma->vm_start,
++                             vma->vm_end,
++                             vma->vm_flags & VM_READ ? 'r' : '-',
++                             vma->vm_flags & VM_WRITE ? 'w' : '-',
++                             vma->vm_flags & VM_EXEC ? 'x' : '-',
++                             vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
++                             vma->vm_flags & VM_LOCKED ? 'l' : '-',
++                             vma->vm_flags & VM_IO ? 'i' : '-',
++                             vma->vm_pgoff);
++
++#if defined(__i386__)
++              pgprot = pgprot_val(vma->vm_page_prot);
++              DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
++                             pgprot & _PAGE_PRESENT ? 'p' : '-',
++                             pgprot & _PAGE_RW ? 'w' : 'r',
++                             pgprot & _PAGE_USER ? 'u' : 's',
++                             pgprot & _PAGE_PWT ? 't' : 'b',
++                             pgprot & _PAGE_PCD ? 'u' : 'c',
++                             pgprot & _PAGE_ACCESSED ? 'a' : '-',
++                             pgprot & _PAGE_DIRTY ? 'd' : '-',
++                             pgprot & _PAGE_PSE ? 'm' : 'k',
++                             pgprot & _PAGE_GLOBAL ? 'g' : 'l');
++#endif
++              DRM_PROC_PRINT("\n");
++      }
++
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int drm_vma_info(char *buf, char **start, off_t offset, int request,
++                      int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data; 
++      struct drm_device *dev = minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm__vma_info(buf, start, offset, request, eof, data);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_regman.c git-nokia/drivers/gpu/drm-tungsten/drm_regman.c
+--- git/drivers/gpu/drm-tungsten/drm_regman.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_regman.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,200 @@
++/**************************************************************************
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * An allocate-fence manager implementation intended for sets of base-registers
++ * or tiling-registers.
++ */
++
++#include "drmP.h"
++
++/*
++ * Allocate a compatible register and put it on the unfenced list.
++ */
++
++int drm_regs_alloc(struct drm_reg_manager *manager,
++                 const void *data,
++                 uint32_t fence_class,
++                 uint32_t fence_type,
++                 int interruptible, int no_wait, struct drm_reg **reg)
++{
++      struct drm_reg *entry, *next_entry;
++      int ret;
++
++      *reg = NULL;
++
++      /*
++       * Search the unfenced list.
++       */
++
++      list_for_each_entry(entry, &manager->unfenced, head) {
++              if (manager->reg_reusable(entry, data)) {
++                      entry->new_fence_type |= fence_type;
++                      goto out;
++              }
++      }
++
++      /*
++       * Search the lru list.
++       */
++
++      list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++              struct drm_fence_object *fence = entry->fence;
++              if (fence->fence_class == fence_class &&
++                  (entry->fence_type & fence_type) == entry->fence_type &&
++                  manager->reg_reusable(entry, data)) {
++                      list_del(&entry->head);
++                      entry->new_fence_type = fence_type;
++                      list_add_tail(&entry->head, &manager->unfenced);
++                      goto out;
++              }
++      }
++
++      /*
++       * Search the free list.
++       */
++
++      list_for_each_entry(entry, &manager->free, head) {
++              list_del(&entry->head);
++              entry->new_fence_type = fence_type;
++              list_add_tail(&entry->head, &manager->unfenced);
++              goto out;
++      }
++
++      if (no_wait)
++              return -EBUSY;
++
++      /*
++       * Go back to the lru list and try to expire fences.
++       */
++
++      list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++              BUG_ON(!entry->fence);
++              ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
++                                          entry->fence_type);
++              if (ret)
++                      return ret;
++
++              drm_fence_usage_deref_unlocked(&entry->fence);
++              list_del(&entry->head);
++              entry->new_fence_type = fence_type;
++              list_add_tail(&entry->head, &manager->unfenced);
++              goto out;
++      }
++
++      /*
++       * Oops. All registers are used up :(.
++       */
++
++      return -EBUSY;
++out:
++      *reg = entry;
++      return 0;
++}
++EXPORT_SYMBOL(drm_regs_alloc);
++
++void drm_regs_fence(struct drm_reg_manager *manager,
++                  struct drm_fence_object *fence)
++{
++      struct drm_reg *entry;
++      struct drm_reg *next_entry;
++
++      if (!fence) {
++
++              /*
++               * Old fence (if any) is still valid.
++               * Put back on free and lru lists.
++               */
++
++              list_for_each_entry_safe_reverse(entry, next_entry,
++                                               &manager->unfenced, head) {
++                      list_del(&entry->head);
++                      list_add(&entry->head, (entry->fence) ?
++                               &manager->lru : &manager->free);
++              }
++      } else {
++
++              /*
++               * Fence with a new fence and put on lru list.
++               */
++
++              list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
++                                       head) {
++                      list_del(&entry->head);
++                      if (entry->fence)
++                              drm_fence_usage_deref_unlocked(&entry->fence);
++                      drm_fence_reference_unlocked(&entry->fence, fence);
++
++                      entry->fence_type = entry->new_fence_type;
++                      BUG_ON((entry->fence_type & fence->type) !=
++                             entry->fence_type);
++
++                      list_add_tail(&entry->head, &manager->lru);
++              }
++      }
++}
++EXPORT_SYMBOL(drm_regs_fence);
++
++void drm_regs_free(struct drm_reg_manager *manager)
++{
++      struct drm_reg *entry;
++      struct drm_reg *next_entry;
++
++      drm_regs_fence(manager, NULL);
++
++      list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
++              list_del(&entry->head);
++              manager->reg_destroy(entry);
++      }
++
++      list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
++
++              (void)drm_fence_object_wait(entry->fence, 1, 1,
++                                          entry->fence_type);
++              list_del(&entry->head);
++              drm_fence_usage_deref_unlocked(&entry->fence);
++              manager->reg_destroy(entry);
++      }
++}
++EXPORT_SYMBOL(drm_regs_free);
++
++void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
++{
++      reg->fence = NULL;
++      list_add_tail(&reg->head, &manager->free);
++}
++EXPORT_SYMBOL(drm_regs_add);
++
++void drm_regs_init(struct drm_reg_manager *manager,
++                 int (*reg_reusable) (const struct drm_reg *, const void *),
++                 void (*reg_destroy) (struct drm_reg *))
++{
++      INIT_LIST_HEAD(&manager->free);
++      INIT_LIST_HEAD(&manager->lru);
++      INIT_LIST_HEAD(&manager->unfenced);
++      manager->reg_reusable = reg_reusable;
++      manager->reg_destroy = reg_destroy;
++}
++EXPORT_SYMBOL(drm_regs_init);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sarea.h git-nokia/drivers/gpu/drm-tungsten/drm_sarea.h
+--- git/drivers/gpu/drm-tungsten/drm_sarea.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sarea.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,84 @@
++/**
++ * \file drm_sarea.h
++ * \brief SAREA definitions
++ *
++ * \author Michel D�zer <michel@daenzer.net>
++ */
++
++/*
++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _DRM_SAREA_H_
++#define _DRM_SAREA_H_
++
++#include "drm.h"
++
++/* SAREA area needs to be at least a page */
++#if defined(__alpha__)
++#define SAREA_MAX                       0x2000
++#elif defined(__ia64__)
++#define SAREA_MAX                       0x10000       /* 64kB */
++#else
++/* Intel 830M driver needs at least 8k SAREA */
++#define SAREA_MAX                       0x2000UL
++#endif
++
++/** Maximum number of drawables in the SAREA */
++#define SAREA_MAX_DRAWABLES           256
++
++#define SAREA_DRAWABLE_CLAIMED_ENTRY    0x80000000
++
++/** SAREA drawable */
++struct drm_sarea_drawable {
++      unsigned int stamp;
++      unsigned int flags;
++};
++
++/** SAREA frame */
++struct drm_sarea_frame {
++      unsigned int x;
++      unsigned int y;
++      unsigned int width;
++      unsigned int height;
++      unsigned int fullscreen;
++};
++
++/** SAREA */
++struct drm_sarea {
++    /** first thing is always the DRM locking structure */
++      struct drm_hw_lock lock;
++    /** \todo Use readers/writer lock for drm_sarea::drawable_lock */
++      struct drm_hw_lock drawable_lock;
++      struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES];   /**< drawables */
++      struct drm_sarea_frame frame;   /**< frame */
++      drm_context_t dummy_context;
++};
++
++#ifndef __KERNEL__
++typedef struct drm_sarea_drawable drm_sarea_drawable_t;
++typedef struct drm_sarea_frame drm_sarea_frame_t;
++typedef struct drm_sarea drm_sarea_t;
++#endif
++
++#endif                                /* _DRM_SAREA_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_scatter.c git-nokia/drivers/gpu/drm-tungsten/drm_scatter.c
+--- git/drivers/gpu/drm-tungsten/drm_scatter.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_scatter.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,228 @@
++/**
++ * \file drm_scatter.c
++ * IOCTLs to manage scatter/gather memory
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/vmalloc.h>
++#include "drmP.h"
++
++#define DEBUG_SCATTER 0
++
++static inline void *drm_vmalloc_dma(unsigned long size)
++{
++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
++      return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
++#else
++      return vmalloc_32(size);
++#endif
++}
++
++void drm_sg_cleanup(struct drm_sg_mem *entry)
++{
++      struct page *page;
++      int i;
++
++      for (i = 0; i < entry->pages; i++) {
++              page = entry->pagelist[i];
++              if (page)
++                      ClearPageReserved(page);
++      }
++
++      vfree(entry->virtual);
++
++      drm_free(entry->busaddr,
++               entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
++      drm_free(entry->pagelist,
++               entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
++      drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++}
++EXPORT_SYMBOL(drm_sg_cleanup);
++
++#ifdef _LP64
++# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
++#else
++# define ScatterHandle(x) (unsigned int)(x)
++#endif
++
++int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
++{
++      struct drm_sg_mem *entry;
++      unsigned long pages, i, j;
++
++      DRM_DEBUG("\n");
++
++      if (!drm_core_check_feature(dev, DRIVER_SG))
++              return -EINVAL;
++
++      if (dev->sg)
++              return -EINVAL;
++
++      entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
++      if (!entry)
++              return -ENOMEM;
++
++      memset(entry, 0, sizeof(*entry));
++      pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
++      DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
++
++      entry->pages = pages;
++      entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
++                                  DRM_MEM_PAGES);
++      if (!entry->pagelist) {
++              drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++              return -ENOMEM;
++      }
++
++      memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
++
++      entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
++                                 DRM_MEM_PAGES);
++      if (!entry->busaddr) {
++              drm_free(entry->pagelist,
++                       entry->pages * sizeof(*entry->pagelist),
++                       DRM_MEM_PAGES);
++              drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++              return -ENOMEM;
++      }
++      memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
++
++      entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
++      if (!entry->virtual) {
++              drm_free(entry->busaddr,
++                       entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
++              drm_free(entry->pagelist,
++                       entry->pages * sizeof(*entry->pagelist),
++                       DRM_MEM_PAGES);
++              drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
++              return -ENOMEM;
++      }
++
++      /* This also forces the mapping of COW pages, so our page list
++       * will be valid.  Please don't remove it...
++       */
++      memset(entry->virtual, 0, pages << PAGE_SHIFT);
++
++      entry->handle = ScatterHandle((unsigned long)entry->virtual);
++
++      DRM_DEBUG("handle  = %08lx\n", entry->handle);
++      DRM_DEBUG("virtual = %p\n", entry->virtual);
++
++      for (i = (unsigned long)entry->virtual, j = 0; j < pages;
++           i += PAGE_SIZE, j++) {
++              entry->pagelist[j] = vmalloc_to_page((void *)i);
++              if (!entry->pagelist[j])
++                      goto failed;
++              SetPageReserved(entry->pagelist[j]);
++      }
++
++      request->handle = entry->handle;
++
++      dev->sg = entry;
++
++#if DEBUG_SCATTER
++      /* Verify that each page points to its virtual address, and vice
++       * versa.
++       */
++      {
++              int error = 0;
++
++              for (i = 0; i < pages; i++) {
++                      unsigned long *tmp;
++
++                      tmp = page_address(entry->pagelist[i]);
++                      for (j = 0;
++                           j < PAGE_SIZE / sizeof(unsigned long);
++                           j++, tmp++) {
++                              *tmp = 0xcafebabe;
++                      }
++                      tmp = (unsigned long *)((u8 *) entry->virtual +
++                                              (PAGE_SIZE * i));
++                      for (j = 0;
++                           j < PAGE_SIZE / sizeof(unsigned long);
++                           j++, tmp++) {
++                              if (*tmp != 0xcafebabe && error == 0) {
++                                      error = 1;
++                                      DRM_ERROR("Scatter allocation error, "
++                                                "pagelist does not match "
++                                                "virtual mapping\n");
++                              }
++                      }
++                      tmp = page_address(entry->pagelist[i]);
++                      for (j = 0;
++                           j < PAGE_SIZE / sizeof(unsigned long);
++                           j++, tmp++) {
++                              *tmp = 0;
++                      }
++              }
++              if (error == 0)
++                      DRM_ERROR("Scatter allocation matches pagelist\n");
++      }
++#endif
++
++      return 0;
++
++      failed:
++      drm_sg_cleanup(entry);
++      return -ENOMEM;
++
++}
++EXPORT_SYMBOL(drm_sg_alloc);
++
++int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_scatter_gather *request = data;
++
++      return drm_sg_alloc(dev, request);
++
++}
++
++int drm_sg_free(struct drm_device *dev, void *data,
++              struct drm_file *file_priv)
++{
++      struct drm_scatter_gather *request = data;
++      struct drm_sg_mem *entry;
++
++      if (!drm_core_check_feature(dev, DRIVER_SG))
++              return -EINVAL;
++
++      entry = dev->sg;
++      dev->sg = NULL;
++
++      if (!entry || entry->handle != request->handle)
++              return -EINVAL;
++
++      DRM_DEBUG("virtual  = %p\n", entry->virtual);
++
++      drm_sg_cleanup(entry);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sman.c git-nokia/drivers/gpu/drm-tungsten/drm_sman.c
+--- git/drivers/gpu/drm-tungsten/drm_sman.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sman.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,353 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple memory manager interface that keeps track on allocate regions on a
++ * per "owner" basis. All regions associated with an "owner" can be released
++ * with a simple call. Typically if the "owner" exists. The owner is any
++ * "unsigned long" identifier. Can typically be a pointer to a file private
++ * struct or a context identifier.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drm_sman.h"
++
++struct drm_owner_item {
++      struct drm_hash_item owner_hash;
++      struct list_head sman_list;
++      struct list_head mem_blocks;
++};
++
++void drm_sman_takedown(struct drm_sman * sman)
++{
++      drm_ht_remove(&sman->user_hash_tab);
++      drm_ht_remove(&sman->owner_hash_tab);
++      if (sman->mm)
++              drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
++                       DRM_MEM_MM);
++}
++
++EXPORT_SYMBOL(drm_sman_takedown);
++
++int
++drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
++            unsigned int user_order, unsigned int owner_order)
++{
++      int ret = 0;
++
++      sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
++                                              DRM_MEM_MM);
++      if (!sman->mm) {
++              ret = -ENOMEM;
++              goto out;
++      }
++      sman->num_managers = num_managers;
++      INIT_LIST_HEAD(&sman->owner_items);
++      ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
++      if (ret)
++              goto out1;
++      ret = drm_ht_create(&sman->user_hash_tab, user_order);
++      if (!ret)
++              goto out;
++
++      drm_ht_remove(&sman->owner_hash_tab);
++out1:
++      drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
++out:
++      return ret;
++}
++
++EXPORT_SYMBOL(drm_sman_init);
++
++static void *drm_sman_mm_allocate(void *private, unsigned long size,
++                                unsigned alignment)
++{
++      struct drm_mm *mm = (struct drm_mm *) private;
++      struct drm_mm_node *tmp;
++
++      tmp = drm_mm_search_free(mm, size, alignment, 1);
++      if (!tmp) {
++              return NULL;
++      }
++      tmp = drm_mm_get_block(tmp, size, alignment);
++      return tmp;
++}
++
++static void drm_sman_mm_free(void *private, void *ref)
++{
++      struct drm_mm_node *node = (struct drm_mm_node *) ref;
++
++      drm_mm_put_block(node);
++}
++
++static void drm_sman_mm_destroy(void *private)
++{
++      struct drm_mm *mm = (struct drm_mm *) private;
++      drm_mm_takedown(mm);
++      drm_free(mm, sizeof(*mm), DRM_MEM_MM);
++}
++
++static unsigned long drm_sman_mm_offset(void *private, void *ref)
++{
++      struct drm_mm_node *node = (struct drm_mm_node *) ref;
++      return node->start;
++}
++
++int
++drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
++                 unsigned long start, unsigned long size)
++{
++      struct drm_sman_mm *sman_mm;
++      struct drm_mm *mm;
++      int ret;
++
++      BUG_ON(manager >= sman->num_managers);
++
++      sman_mm = &sman->mm[manager];
++      mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
++      if (!mm) {
++              return -ENOMEM;
++      }
++      sman_mm->private = mm;
++      ret = drm_mm_init(mm, start, size);
++
++      if (ret) {
++              drm_free(mm, sizeof(*mm), DRM_MEM_MM);
++              return ret;
++      }
++
++      sman_mm->allocate = drm_sman_mm_allocate;
++      sman_mm->free = drm_sman_mm_free;
++      sman_mm->destroy = drm_sman_mm_destroy;
++      sman_mm->offset = drm_sman_mm_offset;
++
++      return 0;
++}
++
++EXPORT_SYMBOL(drm_sman_set_range);
++
++int
++drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
++                   struct drm_sman_mm * allocator)
++{
++      BUG_ON(manager >= sman->num_managers);
++      sman->mm[manager] = *allocator;
++
++      return 0;
++}
++EXPORT_SYMBOL(drm_sman_set_manager);
++
++static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
++                                               unsigned long owner)
++{
++      int ret;
++      struct drm_hash_item *owner_hash_item;
++      struct drm_owner_item *owner_item;
++
++      ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
++      if (!ret) {
++              return drm_hash_entry(owner_hash_item, struct drm_owner_item,
++                                    owner_hash);
++      }
++
++      owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
++      if (!owner_item)
++              goto out;
++
++      INIT_LIST_HEAD(&owner_item->mem_blocks);
++      owner_item->owner_hash.key = owner;
++      if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
++              goto out1;
++
++      list_add_tail(&owner_item->sman_list, &sman->owner_items);
++      return owner_item;
++
++out1:
++      drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
++out:
++      return NULL;
++}
++
++struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
++                                  unsigned long size, unsigned alignment,
++                                  unsigned long owner)
++{
++      void *tmp;
++      struct drm_sman_mm *sman_mm;
++      struct drm_owner_item *owner_item;
++      struct drm_memblock_item *memblock;
++
++      BUG_ON(manager >= sman->num_managers);
++
++      sman_mm = &sman->mm[manager];
++      tmp = sman_mm->allocate(sman_mm->private, size, alignment);
++
++      if (!tmp) {
++              return NULL;
++      }
++
++      memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);
++
++      if (!memblock)
++              goto out;
++
++      memblock->mm_info = tmp;
++      memblock->mm = sman_mm;
++      memblock->sman = sman;
++
++      if (drm_ht_just_insert_please
++          (&sman->user_hash_tab, &memblock->user_hash,
++           (unsigned long)memblock, 32, 0, 0))
++              goto out1;
++
++      owner_item = drm_sman_get_owner_item(sman, owner);
++      if (!owner_item)
++              goto out2;
++
++      list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
++
++      return memblock;
++
++out2:
++      drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
++out1:
++      drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
++out:
++      sman_mm->free(sman_mm->private, tmp);
++
++      return NULL;
++}
++
++EXPORT_SYMBOL(drm_sman_alloc);
++
++static void drm_sman_free(struct drm_memblock_item *item)
++{
++      struct drm_sman *sman = item->sman;
++
++      list_del(&item->owner_list);
++      drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
++      item->mm->free(item->mm->private, item->mm_info);
++      drm_free(item, sizeof(*item), DRM_MEM_MM);
++}
++
++int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
++{
++      struct drm_hash_item *hash_item;
++      struct drm_memblock_item *memblock_item;
++
++      if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
++              return -EINVAL;
++
++      memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
++                                     user_hash);
++      drm_sman_free(memblock_item);
++      return 0;
++}
++
++EXPORT_SYMBOL(drm_sman_free_key);
++
++static void drm_sman_remove_owner(struct drm_sman *sman,
++                                struct drm_owner_item *owner_item)
++{
++      list_del(&owner_item->sman_list);
++      drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
++      drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
++}
++
++int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
++{
++
++      struct drm_hash_item *hash_item;
++      struct drm_owner_item *owner_item;
++
++      if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
++              return -1;
++      }
++
++      owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
++      if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
++              drm_sman_remove_owner(sman, owner_item);
++              return -1;
++      }
++
++      return 0;
++}
++
++EXPORT_SYMBOL(drm_sman_owner_clean);
++
++static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
++                                    struct drm_owner_item *owner_item)
++{
++      struct drm_memblock_item *entry, *next;
++
++      list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
++                               owner_list) {
++              drm_sman_free(entry);
++      }
++      drm_sman_remove_owner(sman, owner_item);
++}
++
++void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
++{
++
++      struct drm_hash_item *hash_item;
++      struct drm_owner_item *owner_item;
++
++      if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
++
++              return;
++      }
++
++      owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
++      drm_sman_do_owner_cleanup(sman, owner_item);
++}
++
++EXPORT_SYMBOL(drm_sman_owner_cleanup);
++
++void drm_sman_cleanup(struct drm_sman *sman)
++{
++      struct drm_owner_item *entry, *next;
++      unsigned int i;
++      struct drm_sman_mm *sman_mm;
++
++      list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
++              drm_sman_do_owner_cleanup(sman, entry);
++      }
++      if (sman->mm) {
++              for (i = 0; i < sman->num_managers; ++i) {
++                      sman_mm = &sman->mm[i];
++                      if (sman_mm->private) {
++                              sman_mm->destroy(sman_mm->private);
++                              sman_mm->private = NULL;
++                      }
++              }
++      }
++}
++
++EXPORT_SYMBOL(drm_sman_cleanup);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sman.h git-nokia/drivers/gpu/drm-tungsten/drm_sman.h
+--- git/drivers/gpu/drm-tungsten/drm_sman.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sman.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,176 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++/*
++ * Simple memory MANager interface that keeps track on allocate regions on a
++ * per "owner" basis. All regions associated with an "owner" can be released
++ * with a simple call. Typically if the "owner" exists. The owner is any
++ * "unsigned long" identifier. Can typically be a pointer to a file private
++ * struct or a context identifier.
++ *
++ * Authors:
++ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#ifndef DRM_SMAN_H
++#define DRM_SMAN_H
++
++#include "drmP.h"
++#include "drm_hashtab.h"
++
++/*
++ * A class that is an abstration of a simple memory allocator.
++ * The sman implementation provides a default such allocator
++ * using the drm_mm.c implementation. But the user can replace it.
++ * See the SiS implementation, which may use the SiS FB kernel module
++ * for memory management.
++ */
++
++struct drm_sman_mm {
++      /* private info. If allocated, needs to be destroyed by the destroy
++         function */
++      void *private;
++
++      /* Allocate a memory block with given size and alignment.
++         Return an opaque reference to the memory block */
++
++      void *(*allocate) (void *private, unsigned long size,
++                         unsigned alignment);
++
++      /* Free a memory block. "ref" is the opaque reference that we got from
++         the "alloc" function */
++
++      void (*free) (void *private, void *ref);
++
++      /* Free all resources associated with this allocator */
++
++      void (*destroy) (void *private);
++
++      /* Return a memory offset from the opaque reference returned from the
++         "alloc" function */
++
++      unsigned long (*offset) (void *private, void *ref);
++};
++
++struct drm_memblock_item {
++      struct list_head owner_list;
++      struct drm_hash_item user_hash;
++      void *mm_info;
++      struct drm_sman_mm *mm;
++      struct drm_sman *sman;
++};
++
++struct drm_sman {
++      struct drm_sman_mm *mm;
++      int num_managers;
++      struct drm_open_hash owner_hash_tab;
++      struct drm_open_hash user_hash_tab;
++      struct list_head owner_items;
++};
++
++/*
++ * Take down a memory manager. This function should only be called after a
++ * successful init and after a call to drm_sman_cleanup.
++ */
++
++extern void drm_sman_takedown(struct drm_sman * sman);
++
++/*
++ * Allocate structures for a manager.
++ * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
++ * user_order is the log2 of the number of buckets in the user hash table.
++ *        set this to approximately log2 of the max number of memory regions
++ *        that will be allocated for _all_ pools together.
++ * owner_order is the log2 of the number of buckets in the owner hash table.
++ *        set this to approximately log2 of
++ *        the number of client file connections that will
++ *        be using the manager.
++ *
++ */
++
++extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
++                       unsigned int user_order, unsigned int owner_order);
++
++/*
++ * Initialize a drm_mm.c allocator. Should be called only once for each
++ * manager unless a customized allogator is used.
++ */
++
++extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
++                            unsigned long start, unsigned long size);
++
++/*
++ * Initialize a customized allocator for one of the managers.
++ * (See the SiS module). The object pointed to by "allocator" is copied,
++ * so it can be destroyed after this call.
++ */
++
++extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
++                              struct drm_sman_mm * allocator);
++
++/*
++ * Allocate a memory block. Aligment is not implemented yet.
++ */
++
++extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
++                                              unsigned int manager,
++                                              unsigned long size,
++                                              unsigned alignment,
++                                              unsigned long owner);
++/*
++ * Free a memory block identified by its user hash key.
++ */
++
++extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
++
++/*
++ * returns 1 iff there are no stale memory blocks associated with this owner.
++ * Typically called to determine if we need to idle the hardware and call
++ * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
++ * resources associated with owner.
++ */
++
++extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
++
++/*
++ * Frees all stale memory blocks associated with this owner. Note that this
++ * requires that the hardware is finished with all blocks, so the graphics engine
++ * should be idled before this call is made. This function also frees
++ * any resources associated with "owner" and should be called when owner
++ * is not going to be referenced anymore.
++ */
++
++extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
++
++/*
++ * Frees all stale memory blocks associated with the memory manager.
++ * See idling above.
++ */
++
++extern void drm_sman_cleanup(struct drm_sman * sman);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_stub.c git-nokia/drivers/gpu/drm-tungsten/drm_stub.c
+--- git/drivers/gpu/drm-tungsten/drm_stub.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_stub.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,400 @@
++/**
++ * \file drm_stub.c
++ * Stub support
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ */
++
++/*
++ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
++ *
++ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++#include "drmP.h"
++#include "drm_core.h"
++
++unsigned int drm_debug = 0;           /* 1 to enable debug output */
++EXPORT_SYMBOL(drm_debug);
++
++MODULE_AUTHOR(CORE_AUTHOR);
++MODULE_DESCRIPTION(CORE_DESC);
++MODULE_LICENSE("GPL and additional rights");
++MODULE_PARM_DESC(debug, "Enable debug output");
++
++module_param_named(debug, drm_debug, int, 0600);
++
++struct idr drm_minors_idr;
++
++struct class *drm_class;
++struct proc_dir_entry *drm_proc_root;
++
++static int drm_minor_get_id(struct drm_device *dev, int type)
++{
++      int new_id;
++      int ret;
++      int base = 0, limit = 63;
++
++again:
++      if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
++              DRM_ERROR("Out of memory expanding drawable idr\n");
++              return -ENOMEM;
++      }
++      mutex_lock(&dev->struct_mutex);
++      ret = idr_get_new_above(&drm_minors_idr, NULL,
++                              base, &new_id);
++      mutex_unlock(&dev->struct_mutex);
++      if (ret == -EAGAIN) {
++              goto again;
++      } else if (ret) {
++              return ret;
++      }
++
++      if (new_id >= limit) {
++              idr_remove(&drm_minors_idr, new_id);
++              return -EINVAL;
++      }
++      return new_id;
++}
++
++static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
++                         const struct pci_device_id *ent,
++                         struct drm_driver *driver)
++{
++      int retcode;
++
++      INIT_LIST_HEAD(&dev->filelist);
++      INIT_LIST_HEAD(&dev->ctxlist);
++      INIT_LIST_HEAD(&dev->vmalist);
++      INIT_LIST_HEAD(&dev->maplist);
++
++      spin_lock_init(&dev->count_lock);
++      spin_lock_init(&dev->drw_lock);
++      spin_lock_init(&dev->tasklet_lock);
++      spin_lock_init(&dev->lock.spinlock);
++      init_timer(&dev->timer);
++      mutex_init(&dev->struct_mutex);
++      mutex_init(&dev->ctxlist_mutex);
++      mutex_init(&dev->bm.evict_mutex);
++
++      idr_init(&dev->drw_idr);
++
++      dev->pdev = pdev;
++
++      if (pdev) {
++              dev->pci_device = pdev->device;
++              dev->pci_vendor = pdev->vendor;
++
++#ifdef __alpha__
++              dev->hose = pdev->sysdata;
++#endif
++
++              dev->irq = pdev->irq;
++      }
++
++      dev->irq_enabled = 0;
++
++      if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
++              return -ENOMEM;
++      }
++      if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
++                      DRM_FILE_PAGE_OFFSET_SIZE)) {
++              drm_ht_remove(&dev->map_hash);
++              return -ENOMEM;
++      }
++
++      if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
++              drm_ht_remove(&dev->map_hash);
++              drm_mm_takedown(&dev->offset_manager);
++              return -ENOMEM;
++      }
++
++      /* the DRM has 6 counters */
++      dev->counters = 6;
++      dev->types[0] = _DRM_STAT_LOCK;
++      dev->types[1] = _DRM_STAT_OPENS;
++      dev->types[2] = _DRM_STAT_CLOSES;
++      dev->types[3] = _DRM_STAT_IOCTLS;
++      dev->types[4] = _DRM_STAT_LOCKS;
++      dev->types[5] = _DRM_STAT_UNLOCKS;
++
++      dev->driver = driver;
++
++#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
++      if (drm_core_has_AGP(dev)) {
++              if (drm_device_is_agp(dev))
++                      dev->agp = drm_agp_init(dev);
++              if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
++                  && (dev->agp == NULL)) {
++                      DRM_ERROR("Cannot initialize the agpgart module.\n");
++                      retcode = -EINVAL;
++                      goto error_out_unreg;
++              }
++
++              if (drm_core_has_MTRR(dev)) {
++                      if (dev->agp)
++                              dev->agp->agp_mtrr =
++                                  mtrr_add(dev->agp->agp_info.aper_base,
++                                           dev->agp->agp_info.aper_size *
++                                           1024 * 1024, MTRR_TYPE_WRCOMB, 1);
++              }
++      }
++#endif
++
++      retcode = drm_ctxbitmap_init(dev);
++      if (retcode) {
++              DRM_ERROR("Cannot allocate memory for context bitmap.\n");
++              goto error_out_unreg;
++      }
++
++      if (driver->driver_features & DRIVER_GEM) {
++              retcode = drm_gem_init (dev);
++              if (retcode) {
++                      DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
++                      goto error_out_unreg;
++              }
++      }
++
++      drm_fence_manager_init(dev);
++
++      return 0;
++
++error_out_unreg:
++      drm_lastclose(dev);
++      return retcode;
++}
++
++/**
++ * Get a secondary minor number.
++ *
++ * \param dev device data structure
++ * \param sec-minor structure to hold the assigned minor
++ * \return negative number on failure.
++ *
++ * Search an empty entry and initialize it to the given parameters, and
++ * create the proc init entry via proc_init(). This routines assigns
++ * minor numbers to secondary heads of multi-headed cards
++ */
++static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
++{
++      struct drm_minor *new_minor;
++      int ret;
++      int minor_id;
++
++      DRM_DEBUG("\n");
++
++      minor_id = drm_minor_get_id(dev, type);
++      if (minor_id < 0)
++              return minor_id;
++
++      new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
++      if (!new_minor) {
++              ret = -ENOMEM;
++              goto err_idr;
++      }
++
++      new_minor->type = type;
++      new_minor->device = MKDEV(DRM_MAJOR, minor_id);
++      new_minor->dev = dev;
++      new_minor->index = minor_id;
++
++      idr_replace(&drm_minors_idr, new_minor, minor_id);
++      
++      if (type == DRM_MINOR_LEGACY) {
++              ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
++              if (ret) {
++                      DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
++                      goto err_mem;
++              }
++              if (dev->driver->proc_init) {
++                      ret = dev->driver->proc_init(new_minor);
++                      if (ret) {
++                              DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
++                              goto err_mem;
++                      }
++              }
++      } else
++              new_minor->dev_root = NULL;
++
++      ret = drm_sysfs_device_add(new_minor);
++      if (ret) {
++              printk(KERN_ERR
++                     "DRM: Error sysfs_device_add.\n");
++              goto err_g2;
++      }
++      *minor = new_minor;
++      
++      DRM_DEBUG("new minor assigned %d\n", minor_id);
++      return 0;
++
++
++err_g2:
++      if (new_minor->type == DRM_MINOR_LEGACY) {
++              if (dev->driver->proc_cleanup)
++                      dev->driver->proc_cleanup(new_minor);
++              drm_proc_cleanup(new_minor, drm_proc_root);
++      }
++err_mem:
++      kfree(new_minor);
++err_idr:
++      idr_remove(&drm_minors_idr, minor_id);
++      *minor = NULL;
++      return ret;
++}
++
++/**
++ * Register.
++ *
++ * \param pdev - PCI device structure
++ * \param ent entry from the PCI ID table with device type flags
++ * \return zero on success or a negative number on failure.
++ *
++ * Attempt to gets inter module "drm" information. If we are first
++ * then register the character device and inter module information.
++ * Try and register, if we fail to register, backout previous work.
++ */
++int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
++              struct drm_driver *driver)
++{
++      struct drm_device *dev;
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
++      if (!dev)
++              return -ENOMEM;
++
++#ifdef CONFIG_PCI
++      if (!drm_fb_loaded) {
++              pci_set_drvdata(pdev, dev);
++              ret = pci_request_regions(pdev, driver->pci_driver.name);
++              if (ret)
++                      goto err_g1;
++      }
++
++      ret = pci_enable_device(pdev);
++      if (ret)
++              goto err_g2;
++      pci_set_master(pdev);
++#endif
++
++      if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
++              printk(KERN_ERR "DRM: fill_in_dev failed\n");
++              goto err_g3;
++      }
++
++      /* only add the control node on a modesetting platform */
++      if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
++              goto err_g3;
++
++      if (dev->driver->load)
++              if ((ret = dev->driver->load(dev, ent ? ent->driver_data : 0)))
++                      goto err_g4;
++
++      DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
++               driver->name, driver->major, driver->minor, driver->patchlevel,
++               driver->date, dev->primary->index);
++
++      return 0;
++err_g4:
++      drm_put_minor(dev);
++err_g3:
++#ifdef CONFIG_PCI
++      if (!drm_fb_loaded)
++              pci_disable_device(pdev);
++err_g2:
++      if (!drm_fb_loaded)
++              pci_release_regions(pdev);
++err_g1:
++      if (!drm_fb_loaded)
++              pci_set_drvdata(pdev, NULL);
++#endif
++
++      drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
++      printk(KERN_ERR "DRM: drm_get_dev failed.\n");
++      return ret;
++}
++EXPORT_SYMBOL(drm_get_dev);
++
++
++/**
++ * Put a device minor number.
++ *
++ * \param dev device data structure
++ * \return always zero
++ *
++ * Cleans up the proc resources. If it is the last minor then release the foreign
++ * "drm" data, otherwise unregisters the "drm" data, frees the dev list and
++ * unregisters the character device.
++ */
++int drm_put_dev(struct drm_device * dev)
++{
++      DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
++
++      if (dev->unique) {
++              drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
++              dev->unique = NULL;
++              dev->unique_len = 0;
++      }
++      if (dev->devname) {
++              drm_free(dev->devname, strlen(dev->devname) + 1,
++                       DRM_MEM_DRIVER);
++              dev->devname = NULL;
++      }
++      drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
++      return 0;
++}
++
++/**
++ * Put a secondary minor number.
++ *
++ * \param sec_minor - structure to be released
++ * \return always zero
++ *
++ * Cleans up the proc resources. Not legal for this to be the
++ * last minor released.
++ *
++ */
++int drm_put_minor(struct drm_device *dev)
++{
++      struct drm_minor **minor_p = &dev->primary;
++      struct drm_minor *minor = *minor_p;
++      DRM_DEBUG("release secondary minor %d\n", minor->index);
++
++      if (minor->type == DRM_MINOR_LEGACY) {
++              if (dev->driver->proc_cleanup)
++                      dev->driver->proc_cleanup(minor);
++              drm_proc_cleanup(minor, drm_proc_root);
++      }
++      drm_sysfs_device_remove(minor);
++
++      idr_remove(&drm_minors_idr, minor->index);
++
++      kfree(minor);
++      *minor_p = NULL;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_sysfs.c git-nokia/drivers/gpu/drm-tungsten/drm_sysfs.c
+--- git/drivers/gpu/drm-tungsten/drm_sysfs.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_sysfs.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,212 @@
++
++/*
++ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
++ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
++ *               does not allow adding attributes.
++ *
++ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
++ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
++ * Copyright (c) 2003-2004 IBM Corp.
++ *
++ * This file is released under the GPLv2
++ *
++ */
++
++#include <linux/device.h>
++#include <linux/kdev_t.h>
++#include <linux/err.h>
++
++#include "drm_core.h"
++#include "drmP.h"
++
++#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
++
++/**
++ * drm_sysfs_suspend - DRM class suspend hook
++ * @dev: Linux device to suspend
++ * @state: power state to enter
++ *
++ * Just figures out what the actual struct drm_device associated with
++ * @dev is and calls its suspend hook, if present.
++ */
++static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
++{
++      struct drm_minor *drm_minor = to_drm_minor(dev);
++      struct drm_device *drm_dev = drm_minor->dev;
++
++      printk(KERN_ERR "%s\n", __FUNCTION__);
++
++      if (drm_dev->driver->suspend)
++              return drm_dev->driver->suspend(drm_dev, state);
++
++      return 0;
++}
++
++/**
++ * drm_sysfs_resume - DRM class resume hook
++ * @dev: Linux device to resume
++ *
++ * Just figures out what the actual struct drm_device associated with
++ * @dev is and calls its resume hook, if present.
++ */
++static int drm_sysfs_resume(struct device *dev)
++{
++      struct drm_minor *drm_minor = to_drm_minor(dev);
++      struct drm_device *drm_dev = drm_minor->dev;
++
++      if (drm_dev->driver->resume)
++              return drm_dev->driver->resume(drm_dev);
++
++      return 0;
++}
++
++/* Display the version of drm_core. This doesn't work right in current design */
++static ssize_t version_show(struct class *dev, char *buf)
++{
++      return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
++                     CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
++}
++
++static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
++
++/**
++ * drm_sysfs_create - create a struct drm_sysfs_class structure
++ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
++ * @name: pointer to a string for the name of this class.
++ *
++ * This is used to create DRM class pointer that can then be used
++ * in calls to drm_sysfs_device_add().
++ *
++ * Note, the pointer created here is to be destroyed when finished by making a
++ * call to drm_sysfs_destroy().
++ */
++struct class *drm_sysfs_create(struct module *owner, char *name)
++{
++      struct class *class;
++      int err;
++
++      class = class_create(owner, name);
++      if (IS_ERR(class)) {
++              err = PTR_ERR(class);
++              goto err_out;
++      }
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++      class->suspend = drm_sysfs_suspend;
++      class->resume = drm_sysfs_resume;
++#endif
++
++      err = class_create_file(class, &class_attr_version);
++      if (err)
++              goto err_out_class;
++
++      return class;
++
++err_out_class:
++      class_destroy(class);
++err_out:
++      return ERR_PTR(err);
++}
++
++/**
++ * drm_sysfs_destroy - destroys DRM class
++ *
++ * Destroy the DRM device class.
++ */
++void drm_sysfs_destroy(void)
++{
++      if ((drm_class == NULL) || (IS_ERR(drm_class)))
++              return;
++      class_remove_file(drm_class, &class_attr_version);
++      class_destroy(drm_class);
++}
++
++static ssize_t show_dri(struct device *device, struct device_attribute *attr,
++                      char *buf)
++{
++      struct drm_minor *drm_minor = to_drm_minor(device);
++      struct drm_device *drm_dev = drm_minor->dev;
++      if (drm_dev->driver->dri_library_name)
++              return drm_dev->driver->dri_library_name(drm_dev, buf);
++      return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
++}
++
++static struct device_attribute device_attrs[] = {
++      __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
++};
++
++/**
++ * drm_sysfs_device_release - do nothing
++ * @dev: Linux device
++ *
++ * Normally, this would free the DRM device associated with @dev, along
++ * with cleaning up any other stuff.  But we do that in the DRM core, so
++ * this function can just return and hope that the core does its job.
++ */
++static void drm_sysfs_device_release(struct device *dev)
++{
++      return;
++}
++
++/**
++ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
++ * @dev: DRM device to be added
++ * @head: DRM head in question
++ *
++ * Add a DRM device to the DRM's device model class.  We use @dev's PCI device
++ * as the parent for the Linux device, and make sure it has a file containing
++ * the driver we're using (for userspace compatibility).
++ */
++int drm_sysfs_device_add(struct drm_minor *minor)
++{
++      int err;
++      int i, j;
++      char *minor_str;
++
++      minor->kdev.parent = minor->dev->pdev ? &minor->dev->pdev->dev : NULL;
++      minor->kdev.class = drm_class;
++      minor->kdev.release = drm_sysfs_device_release;
++      minor->kdev.devt = minor->device;
++      minor_str = "card%d";
++      
++      snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
++
++      err = device_register(&minor->kdev);
++      if (err) {
++              DRM_ERROR("device add failed: %d\n", err);
++              goto err_out;
++      }
++
++      for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
++              err = device_create_file(&minor->kdev, &device_attrs[i]);
++              if (err)
++                      goto err_out_files;
++      }
++
++      return 0;
++
++err_out_files:
++      if (i > 0)
++              for (j = 0; j < i; j++)
++                      device_remove_file(&minor->kdev, &device_attrs[j]);
++      device_unregister(&minor->kdev);
++err_out:
++
++      return err;
++}
++
++/**
++ * drm_sysfs_device_remove - remove DRM device
++ * @dev: DRM device to remove
++ *
++ * This call unregisters and cleans up a class device that was created with a
++ * call to drm_sysfs_device_add()
++ */
++void drm_sysfs_device_remove(struct drm_minor *minor)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
++              device_remove_file(&minor->kdev, &device_attrs[i]);
++      device_unregister(&minor->kdev);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_ttm.c git-nokia/drivers/gpu/drm-tungsten/drm_ttm.c
+--- git/drivers/gpu/drm-tungsten/drm_ttm.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_ttm.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,524 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++
++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++static void drm_clflush_page(struct page *page)
++{
++      uint8_t *page_virtual;
++      unsigned int i;
++
++      if (unlikely(page == NULL))
++              return;
++
++      page_virtual = kmap_atomic(page, KM_USER0);
++
++      for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++              clflush(page_virtual + i);
++
++      kunmap_atomic(page_virtual, KM_USER0);
++}
++
++static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
++{
++      unsigned long i;
++
++      mb();
++      for (i=0; i < num_pages; ++i)
++              drm_clflush_page(*pages++);
++      mb();
++}
++#endif
++
++static void drm_ttm_ipi_handler(void *null)
++{
++#ifdef CONFIG_AGP
++      flush_agp_cache();
++#endif
++}
++
++void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
++{
++
++#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
++      if (cpu_has_clflush) {
++              drm_ttm_cache_flush_clflush(pages, num_pages);
++              return;
++      }
++#endif
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++      if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1))
++#else
++      if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
++#endif
++              DRM_ERROR("Timed out waiting for drm cache flush.\n");
++}
++EXPORT_SYMBOL(drm_ttm_cache_flush);
++
++/**
++ * Allocates storage for pointers to the pages that back the ttm.
++ *
++ * Uses kmalloc if possible. Otherwise falls back to vmalloc.
++ */
++static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm)
++{
++      unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++      ttm->pages = NULL;
++
++      if (drm_alloc_memctl(size))
++              return;
++
++      if (size <= PAGE_SIZE)
++              ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
++
++      if (!ttm->pages) {
++              ttm->pages = vmalloc_user(size);
++              if (ttm->pages)
++                      ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC;
++      }
++      if (!ttm->pages)
++              drm_free_memctl(size);
++}
++
++static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
++{
++      unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
++
++      if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) {
++              vfree(ttm->pages);
++              ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC;
++      } else {
++              drm_free(ttm->pages, size, DRM_MEM_TTM);
++      }
++      drm_free_memctl(size);
++      ttm->pages = NULL;
++}
++
++static struct page *drm_ttm_alloc_page(void)
++{
++      struct page *page;
++
++      if (drm_alloc_memctl(PAGE_SIZE))
++              return NULL;
++
++      page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
++      if (!page) {
++              drm_free_memctl(PAGE_SIZE);
++              return NULL;
++      }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      SetPageReserved(page);
++#endif
++      return page;
++}
++
++/*
++ * Change caching policy for the linear kernel map
++ * for range of pages in a ttm.
++ */
++
++static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
++{
++      int i;
++      struct page **cur_page;
++      int do_tlbflush = 0;
++
++      if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
++              return 0;
++
++      if (noncached)
++              drm_ttm_cache_flush(ttm->pages, ttm->num_pages);
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              cur_page = ttm->pages + i;
++              if (*cur_page) {
++                      if (!PageHighMem(*cur_page)) {
++#ifdef CONFIG_AGP
++                              if (noncached) {
++                                      map_page_into_agp(*cur_page);
++                              } else {
++                                      unmap_page_from_agp(*cur_page);
++                              }
++#endif
++                              do_tlbflush = 1;
++                      }
++              }
++      }
++#ifdef CONFIG_AGP
++      if (do_tlbflush)
++              flush_agp_mappings();
++#endif
++
++      DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
++
++      return 0;
++}
++
++
++static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
++{
++      int write;
++      int dirty;
++      struct page *page;
++      int i;
++
++      BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
++      write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
++      dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              page = ttm->pages[i];
++              if (page == NULL)
++                      continue;
++
++              if (page == ttm->dummy_read_page) {
++                      BUG_ON(write);
++                      continue;
++              }
++
++              if (write && dirty && !PageReserved(page))
++                      set_page_dirty_lock(page);
++
++              ttm->pages[i] = NULL;
++              put_page(page);
++      }
++}
++
++static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
++{
++      int i;
++      struct drm_buffer_manager *bm = &ttm->dev->bm;
++      struct page **cur_page;
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              cur_page = ttm->pages + i;
++              if (*cur_page) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++                      ClearPageReserved(*cur_page);
++#endif
++                      if (page_count(*cur_page) != 1)
++                              DRM_ERROR("Erroneous page count. Leaking pages.\n");
++                      if (page_mapped(*cur_page))
++                              DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
++                      __free_page(*cur_page);
++                      drm_free_memctl(PAGE_SIZE);
++                      --bm->cur_pages;
++              }
++      }
++}
++
++/*
++ * Free all resources associated with a ttm.
++ */
++
++int drm_ttm_destroy(struct drm_ttm *ttm)
++{
++      struct drm_ttm_backend *be;
++
++      if (!ttm)
++              return 0;
++
++      be = ttm->be;
++      if (be) {
++              be->func->destroy(be);
++              ttm->be = NULL;
++      }
++
++      if (ttm->pages) {
++              if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
++                      drm_ttm_set_caching(ttm, 0);
++
++              if (ttm->page_flags & DRM_TTM_PAGE_USER)
++                      drm_ttm_free_user_pages(ttm);
++              else
++                      drm_ttm_free_alloced_pages(ttm);
++
++              drm_ttm_free_page_directory(ttm);
++      }
++
++      drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
++      return 0;
++}
++
++struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
++{
++      struct page *p;
++      struct drm_buffer_manager *bm = &ttm->dev->bm;
++
++      while(NULL == (p = ttm->pages[index])) {
++              p = drm_ttm_alloc_page();
++              if (!p)
++                      return NULL;
++
++              if (PageHighMem(p))
++                      ttm->pages[--ttm->first_himem_page] = p;
++              else
++                      ttm->pages[++ttm->last_lomem_page] = p;
++
++              ++bm->cur_pages;
++      }
++      return p;
++}
++EXPORT_SYMBOL(drm_ttm_get_page);
++
++/**
++ * drm_ttm_set_user:
++ *
++ * @ttm: the ttm to map pages to. This must always be
++ * a freshly created ttm.
++ *
++ * @tsk: a pointer to the address space from which to map
++ * pages.
++ * 
++ * @write: a boolean indicating that write access is desired
++ *
++ * start: the starting address
++ *
++ * Map a range of user addresses to a new ttm object. This
++ * provides access to user memory from the graphics device.
++ */
++int drm_ttm_set_user(struct drm_ttm *ttm,
++                   struct task_struct *tsk,
++                   unsigned long start,
++                   unsigned long num_pages)
++{
++      struct mm_struct *mm = tsk->mm;
++      int ret;
++      int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
++
++      BUG_ON(num_pages != ttm->num_pages);
++      BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);
++
++      down_read(&mm->mmap_sem);
++      ret = get_user_pages(tsk, mm, start, num_pages,
++                           write, 0, ttm->pages, NULL);
++      up_read(&mm->mmap_sem);
++
++      if (ret != num_pages && write) {
++              drm_ttm_free_user_pages(ttm);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++/**
++ * drm_ttm_populate:
++ *
++ * @ttm: the object to allocate pages for
++ *
++ * Allocate pages for all unset page entries, then
++ * call the backend to create the hardware mappings
++ */
++int drm_ttm_populate(struct drm_ttm *ttm)
++{
++      struct page *page;
++      unsigned long i;
++      struct drm_ttm_backend *be;
++
++      if (ttm->state != ttm_unpopulated)
++              return 0;
++
++      be = ttm->be;
++
++      for (i = 0; i < ttm->num_pages; ++i) {
++              page = drm_ttm_get_page(ttm, i);
++              if (!page)
++                      return -ENOMEM;
++      }
++
++      be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
++      ttm->state = ttm_unbound;
++      return 0;
++}
++
++/**
++ * drm_ttm_create:
++ *
++ * @dev: the drm_device
++ *
++ * @size: The size (in bytes) of the desired object
++ *
++ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h.
++ *
++ * Allocate and initialize a ttm, leaving it unpopulated at this time
++ */
++
++struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
++                             uint32_t page_flags, struct page *dummy_read_page)
++{
++      struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
++      struct drm_ttm *ttm;
++
++      if (!bo_driver)
++              return NULL;
++
++      ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
++      if (!ttm)
++              return NULL;
++
++      ttm->dev = dev;
++      atomic_set(&ttm->vma_count, 0);
++
++      ttm->destroy = 0;
++      ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++      ttm->first_himem_page = ttm->num_pages;
++      ttm->last_lomem_page = -1;
++
++      ttm->page_flags = page_flags;
++
++      ttm->dummy_read_page = dummy_read_page;
++
++      /*
++       * Account also for AGP module memory usage.
++       */
++
++      drm_ttm_alloc_page_directory(ttm);
++      if (!ttm->pages) {
++              drm_ttm_destroy(ttm);
++              DRM_ERROR("Failed allocating page table\n");
++              return NULL;
++      }
++      ttm->be = bo_driver->create_ttm_backend_entry(dev);
++      if (!ttm->be) {
++              drm_ttm_destroy(ttm);
++              DRM_ERROR("Failed creating ttm backend entry\n");
++              return NULL;
++      }
++      ttm->state = ttm_unpopulated;
++      return ttm;
++}
++
++/**
++ * drm_ttm_evict:
++ *
++ * @ttm: the object to be unbound from the aperture.
++ *
++ * Transition a ttm from bound to evicted, where it
++ * isn't present in the aperture, but various caches may
++ * not be consistent.
++ */
++void drm_ttm_evict(struct drm_ttm *ttm)
++{
++      struct drm_ttm_backend *be = ttm->be;
++      int ret;
++
++      if (ttm->state == ttm_bound) {
++              ret = be->func->unbind(be);
++              BUG_ON(ret);
++      }
++
++      ttm->state = ttm_evicted;
++}
++
++/**
++ * drm_ttm_fixup_caching:
++ *
++ * @ttm: the object to set unbound
++ *
++ * XXX this function is misnamed. Transition a ttm from evicted to
++ * unbound, flushing caches as appropriate.
++ */
++void drm_ttm_fixup_caching(struct drm_ttm *ttm)
++{
++
++      if (ttm->state == ttm_evicted) {
++              struct drm_ttm_backend *be = ttm->be;
++              if (be->func->needs_ub_cache_adjust(be))
++                      drm_ttm_set_caching(ttm, 0);
++              ttm->state = ttm_unbound;
++      }
++}
++
++/**
++ * drm_ttm_unbind:
++ *
++ * @ttm: the object to unbind from the graphics device
++ *
++ * Unbind an object from the aperture. This removes the mappings
++ * from the graphics device and flushes caches if necessary.
++ */
++void drm_ttm_unbind(struct drm_ttm *ttm)
++{
++      if (ttm->state == ttm_bound)
++              drm_ttm_evict(ttm);
++
++      drm_ttm_fixup_caching(ttm);
++}
++
++/**
++ * drm_ttm_bind:
++ *
++ * @ttm: the ttm object to bind to the graphics device
++ *
++ * @bo_mem: the aperture memory region which will hold the object
++ *
++ * Bind a ttm object to the aperture. This ensures that the necessary
++ * pages are allocated, flushes CPU caches as needed and marks the
++ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
++ * modified by the GPU
++ */
++int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
++{
++      struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
++      int ret = 0;
++      struct drm_ttm_backend *be;
++
++      if (!ttm)
++              return -EINVAL;
++      if (ttm->state == ttm_bound)
++              return 0;
++
++      be = ttm->be;
++
++      ret = drm_ttm_populate(ttm);
++      if (ret)
++              return ret;
++
++      if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
++              drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
++      else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
++                 bo_driver->ttm_cache_flush)
++              bo_driver->ttm_cache_flush(ttm);
++
++      ret = be->func->bind(be, bo_mem);
++      if (ret) {
++              ttm->state = ttm_evicted;
++              DRM_ERROR("Couldn't bind backend.\n");
++              return ret;
++      }
++
++      ttm->state = ttm_bound;
++      if (ttm->page_flags & DRM_TTM_PAGE_USER)
++              ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
++      return 0;
++}
++EXPORT_SYMBOL(drm_ttm_bind);
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_vm.c git-nokia/drivers/gpu/drm-tungsten/drm_vm.c
+--- git/drivers/gpu/drm-tungsten/drm_vm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_vm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,890 @@
++/**
++ * \file drm_vm.c
++ * Memory mapping for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#if defined(__ia64__)
++#include <linux/efi.h>
++#endif
++
++static void drm_vm_open(struct vm_area_struct *vma);
++static void drm_vm_close(struct vm_area_struct *vma);
++static int drm_bo_mmap_locked(struct vm_area_struct *vma,
++                            struct file *filp,
++                            drm_local_map_t *map);
++
++
++pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
++{
++      pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
++
++#if defined(__i386__) || defined(__x86_64__)
++      if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
++              pgprot_val(tmp) |= _PAGE_PCD;
++              pgprot_val(tmp) &= ~_PAGE_PWT;
++      }
++#elif defined(__powerpc__)
++      pgprot_val(tmp) |= _PAGE_NO_CACHE;
++      if (map_type == _DRM_REGISTERS)
++              pgprot_val(tmp) |= _PAGE_GUARDED;
++#elif defined(__ia64__)
++      if (efi_range_is_wc(vma->vm_start, vma->vm_end -
++                                  vma->vm_start))
++              tmp = pgprot_writecombine(tmp);
++      else
++              tmp = pgprot_noncached(tmp);
++#elif defined(__sparc__)
++      tmp = pgprot_noncached(tmp);
++#endif
++      return tmp;
++}
++
++static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
++{
++      pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
++
++#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
++      tmp |= _PAGE_NO_CACHE;
++#endif
++      return tmp;
++}
++
++#ifndef DRM_VM_NOPAGE
++/**
++ * \c fault method for AGP virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Find the right map and if it's AGP memory find the real physical page to
++ * map, get the page, increment the use count and return it.
++ */
++#if __OS_HAS_AGP
++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_map *map = NULL;
++      struct drm_map_list *r_list;
++      struct drm_hash_item *hash;
++
++      /*
++       * Find the right map
++       */
++      if (!drm_core_has_AGP(dev))
++              goto vm_fault_error;
++
++      if (!dev->agp || !dev->agp->cant_use_aperture)
++              goto vm_fault_error;
++
++      if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
++              goto vm_fault_error;
++
++      r_list = drm_hash_entry(hash, struct drm_map_list, hash);
++      map = r_list->map;
++
++      if (map && map->type == _DRM_AGP) {
++              /*
++               * Using vm_pgoff as a selector forces us to use this unusual
++               * addressing scheme.
++               */
++              unsigned long offset = (unsigned long)vmf->virtual_address -
++                                                              vma->vm_start;
++              unsigned long baddr = map->offset + offset;
++              struct drm_agp_mem *agpmem;
++              struct page *page;
++
++#ifdef __alpha__
++              /*
++               * Adjust to a bus-relative address
++               */
++              baddr -= dev->hose->mem_space->start;
++#endif
++
++              /*
++               * It's AGP memory - find the real physical page to map
++               */
++              list_for_each_entry(agpmem, &dev->agp->memory, head) {
++                      if (agpmem->bound <= baddr &&
++                          agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
++                              break;
++              }
++
++              if (!agpmem)
++                      goto vm_fault_error;
++
++              /*
++               * Get the page, inc the use count, and return it
++               */
++              offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
++              page = virt_to_page(__va(agpmem->memory->memory[offset]));
++              get_page(page);
++              vmf->page = page;
++
++              DRM_DEBUG
++                  ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
++                   baddr, __va(agpmem->memory->memory[offset]), offset,
++                   page_count(page));
++              return 0;
++      }
++vm_fault_error:
++      return VM_FAULT_SIGBUS; /* Disallow mremap */
++}
++#else                         /* __OS_HAS_AGP */
++static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      return VM_FAULT_SIGBUS;
++}
++#endif                                /* __OS_HAS_AGP */
++
++/**
++ * \c nopage method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Get the mapping, find the real physical page to map, get the page, and
++ * return it.
++ */
++static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      unsigned long offset;
++      unsigned long i;
++      struct page *page;
++
++      if (!map)
++              return VM_FAULT_SIGBUS; /* Nothing allocated */
++
++      offset = (unsigned long)vmf->virtual_address - vma->vm_start;
++      i = (unsigned long)map->handle + offset;
++      page = vmalloc_to_page((void *)i);
++      if (!page)
++              return VM_FAULT_SIGBUS;
++      get_page(page);
++      vmf->page = page;
++
++      DRM_DEBUG("shm_fault 0x%lx\n", offset);
++      return 0;
++}
++#endif
++
++/**
++ * \c close method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ *
++ * Deletes map information if we are the last
++ * person to close a mapping and it's not in the global maplist.
++ */
++static void drm_vm_shm_close(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_vma_entry *pt, *temp;
++      struct drm_map *map;
++      struct drm_map_list *r_list;
++      int found_maps = 0;
++
++      DRM_DEBUG("0x%08lx,0x%08lx\n",
++                vma->vm_start, vma->vm_end - vma->vm_start);
++      atomic_dec(&dev->vma_count);
++
++      map = vma->vm_private_data;
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
++              if (pt->vma->vm_private_data == map)
++                      found_maps++;
++              if (pt->vma == vma) {
++                      list_del(&pt->head);
++                      drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
++              }
++      }
++      /* We were the only map that was found */
++      if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
++              /* Check to see if we are in the maplist, if we are not, then
++               * we delete this mappings information.
++               */
++              found_maps = 0;
++              list_for_each_entry(r_list, &dev->maplist, head) {
++                      if (r_list->map == map)
++                              found_maps++;
++              }
++
++              if (!found_maps) {
++                      drm_dma_handle_t dmah;
++
++                      switch (map->type) {
++                      case _DRM_REGISTERS:
++                      case _DRM_FRAME_BUFFER:
++                              if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
++                                      int retcode;
++                                      retcode = mtrr_del(map->mtrr,
++                                                         map->offset,
++                                                         map->size);
++                                      DRM_DEBUG("mtrr_del = %d\n", retcode);
++                              }
++                              iounmap(map->handle);
++                              break;
++                      case _DRM_SHM:
++                              vfree(map->handle);
++                              break;
++                      case _DRM_AGP:
++                      case _DRM_SCATTER_GATHER:
++                              break;
++                      case _DRM_CONSISTENT:
++                              dmah.vaddr = map->handle;
++                              dmah.busaddr = map->offset;
++                              dmah.size = map->size;
++                              __drm_pci_free(dev, &dmah);
++                              break;
++                      case _DRM_TTM:
++                              BUG_ON(1);
++                              break;
++                      }
++                      drm_free(map, sizeof(*map), DRM_MEM_MAPS);
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++
++#ifndef DRM_VM_NOPAGE
++/**
++ * \c fault method for DMA virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
++ */
++static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_device_dma *dma = dev->dma;
++      unsigned long offset;
++      unsigned long page_nr;
++      struct page *page;
++
++      if (!dma)
++              return VM_FAULT_SIGBUS; /* Error */
++      if (!dma->pagelist)
++              return VM_FAULT_SIGBUS; /* Nothing allocated */
++
++      offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
++      page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
++      page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
++
++      get_page(page);
++      vmf->page = page;
++
++      DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
++      return 0;
++}
++
++/**
++ * \c fault method for scatter-gather virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
++ */
++static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_sg_mem *entry = dev->sg;
++      unsigned long offset;
++      unsigned long map_offset;
++      unsigned long page_offset;
++      struct page *page;
++
++      if (!entry)
++              return VM_FAULT_SIGBUS; /* Error */
++      if (!entry->pagelist)
++              return VM_FAULT_SIGBUS; /* Nothing allocated */
++
++      offset = (unsigned long)vmf->virtual_address - vma->vm_start;
++      map_offset = map->offset - (unsigned long)dev->sg->virtual;
++      page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
++      page = entry->pagelist[page_offset];
++      get_page(page);
++      vmf->page = page;
++
++      return 0;
++}
++#endif
++
++/** AGP virtual memory operations */
++static struct vm_operations_struct drm_vm_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_nopage,
++#else
++      .fault = drm_do_vm_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_close,
++};
++
++/** Shared virtual memory operations */
++static struct vm_operations_struct drm_vm_shm_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_shm_nopage,
++#else
++      .fault = drm_do_vm_shm_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_shm_close,
++};
++
++/** DMA virtual memory operations */
++static struct vm_operations_struct drm_vm_dma_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_dma_nopage,
++#else
++      .fault = drm_do_vm_dma_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_close,
++};
++
++/** Scatter-gather virtual memory operations */
++static struct vm_operations_struct drm_vm_sg_ops = {
++#ifdef DRM_VM_NOPAGE
++      .nopage = drm_vm_sg_nopage,
++#else
++      .fault = drm_do_vm_sg_fault,
++#endif
++      .open = drm_vm_open,
++      .close = drm_vm_close,
++};
++
++/**
++ * \c open method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ *
++ * Create a new drm_vma_entry structure as the \p vma private data entry and
++ * add it to drm_device::vmalist.
++ */
++static void drm_vm_open_locked(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_vma_entry *vma_entry;
++
++      DRM_DEBUG("0x%08lx,0x%08lx\n",
++                vma->vm_start, vma->vm_end - vma->vm_start);
++      atomic_inc(&dev->vma_count);
++
++      vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
++      if (vma_entry) {
++              vma_entry->vma = vma;
++              vma_entry->pid = current->pid;
++              list_add(&vma_entry->head, &dev->vmalist);
++      }
++}
++
++static void drm_vm_open(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_vm_open_locked(vma);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * \c close method for all virtual memory types.
++ *
++ * \param vma virtual memory area.
++ *
++ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
++ * free it.
++ */
++static void drm_vm_close(struct vm_area_struct *vma)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_vma_entry *pt, *temp;
++
++      DRM_DEBUG("0x%08lx,0x%08lx\n",
++                vma->vm_start, vma->vm_end - vma->vm_start);
++      atomic_dec(&dev->vma_count);
++
++      mutex_lock(&dev->struct_mutex);
++      list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
++              if (pt->vma == vma) {
++                      list_del(&pt->head);
++                      drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
++                      break;
++              }
++      }
++      mutex_unlock(&dev->struct_mutex);
++}
++
++
++/**
++ * mmap DMA memory.
++ *
++ * \param file_priv DRM file private.
++ * \param vma virtual memory area.
++ * \return zero on success or a negative number on failure.
++ *
++ * Sets the virtual memory area operations structure to vm_dma_ops, the file
++ * pointer, and calls vm_open().
++ */
++static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev;
++      struct drm_device_dma *dma;
++      unsigned long length = vma->vm_end - vma->vm_start;
++
++      dev = priv->minor->dev;
++      dma = dev->dma;
++      DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
++                vma->vm_start, vma->vm_end, vma->vm_pgoff);
++
++      /* Length must match exact page count */
++      if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
++              return -EINVAL;
++      }
++
++      if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
++              vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++#if defined(__i386__) || defined(__x86_64__)
++              pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
++#else
++              /* Ye gads this is ugly.  With more thought
++                 we could move this up higher and use
++                 `protection_map' instead.  */
++              vma->vm_page_prot =
++                  __pgprot(pte_val
++                           (pte_wrprotect
++                            (__pte(pgprot_val(vma->vm_page_prot)))));
++#endif
++      }
++
++      vma->vm_ops = &drm_vm_dma_ops;
++      vma->vm_flags |= VM_RESERVED;   /* Don't swap */
++
++      vma->vm_file = filp;    /* Needed for drm_vm_open() */
++      drm_vm_open_locked(vma);
++      return 0;
++}
++
++unsigned long drm_core_get_map_ofs(struct drm_map * map)
++{
++      return map->offset;
++}
++EXPORT_SYMBOL(drm_core_get_map_ofs);
++
++unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
++{
++#ifdef __alpha__
++      return dev->hose->dense_mem_base - dev->hose->mem_space->start;
++#else
++      return 0;
++#endif
++}
++EXPORT_SYMBOL(drm_core_get_reg_ofs);
++
++/**
++ * mmap DMA memory.
++ *
++ * \param file_priv DRM file private.
++ * \param vma virtual memory area.
++ * \return zero on success or a negative number on failure.
++ *
++ * If the virtual memory area has no offset associated with it then it's a DMA
++ * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
++ * checks that the restricted flag is not set, sets the virtual memory operations
++ * according to the mapping type and remaps the pages. Finally sets the file
++ * pointer and calls vm_open().
++ */
++static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_map *map = NULL;
++      unsigned long offset = 0;
++      struct drm_hash_item *hash;
++
++      DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
++                vma->vm_start, vma->vm_end, vma->vm_pgoff);
++
++      if (!priv->authenticated)
++              return -EACCES;
++
++      /* We check for "dma". On Apple's UniNorth, it's valid to have
++       * the AGP mapped at physical address 0
++       * --BenH.
++       */
++
++      if (!vma->vm_pgoff
++#if __OS_HAS_AGP
++          && (!dev->agp
++              || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
++#endif
++          )
++              return drm_mmap_dma(filp, vma);
++
++      if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
++              DRM_ERROR("Could not find map\n");
++              return -EINVAL;
++      }
++
++      map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
++      if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
++              return -EPERM;
++
++      /* Check for valid size. */
++      if (map->size < vma->vm_end - vma->vm_start)
++              return -EINVAL;
++
++      if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
++              vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++#if defined(__i386__) || defined(__x86_64__)
++              pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
++#else
++              /* Ye gads this is ugly.  With more thought
++                 we could move this up higher and use
++                 `protection_map' instead.  */
++              vma->vm_page_prot =
++                  __pgprot(pte_val
++                           (pte_wrprotect
++                            (__pte(pgprot_val(vma->vm_page_prot)))));
++#endif
++      }
++
++      switch (map->type) {
++      case _DRM_AGP:
++              if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
++                      /*
++                       * On some platforms we can't talk to bus dma address from the CPU, so for
++                       * memory of type DRM_AGP, we'll deal with sorting out the real physical
++                       * pages and mappings in nopage()
++                       */
++#if defined(__powerpc__)
++                      pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
++#endif
++                      vma->vm_ops = &drm_vm_ops;
++                      break;
++              }
++              /* fall through to _DRM_FRAME_BUFFER... */
++      case _DRM_FRAME_BUFFER:
++      case _DRM_REGISTERS:
++              offset = dev->driver->get_reg_ofs(dev);
++              vma->vm_flags |= VM_IO; /* not in core dump */
++              vma->vm_page_prot = drm_io_prot(map->type, vma);
++              if (io_remap_pfn_range(vma, vma->vm_start,
++                                     (map->offset + offset) >> PAGE_SHIFT,
++                                     vma->vm_end - vma->vm_start,
++                                     vma->vm_page_prot))
++                      return -EAGAIN;
++              DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
++                        " offset = 0x%lx\n",
++                        map->type,
++                        vma->vm_start, vma->vm_end, map->offset + offset);
++              vma->vm_ops = &drm_vm_ops;
++              break;
++      case _DRM_CONSISTENT:
++              /* Consistent memory is really like shared memory. But
++               * it's allocated in a different way, so avoid nopage */
++              if (remap_pfn_range(vma, vma->vm_start,
++                  page_to_pfn(virt_to_page(map->handle)),
++                  vma->vm_end - vma->vm_start, vma->vm_page_prot))
++                      return -EAGAIN;
++              vma->vm_page_prot = drm_dma_prot(map->type, vma);
++      /* fall through to _DRM_SHM */
++      case _DRM_SHM:
++              vma->vm_ops = &drm_vm_shm_ops;
++              vma->vm_private_data = (void *)map;
++              /* Don't let this area swap.  Change when
++                 DRM_KERNEL advisory is supported. */
++              vma->vm_flags |= VM_RESERVED;
++              break;
++      case _DRM_SCATTER_GATHER:
++              vma->vm_ops = &drm_vm_sg_ops;
++              vma->vm_private_data = (void *)map;
++              vma->vm_flags |= VM_RESERVED;
++              vma->vm_page_prot = drm_dma_prot(map->type, vma);
++              break;
++      case _DRM_TTM:
++              return drm_bo_mmap_locked(vma, filp, map);
++      default:
++              return -EINVAL; /* This should never happen. */
++      }
++      vma->vm_flags |= VM_RESERVED;   /* Don't swap */
++
++      vma->vm_file = filp;    /* Needed for drm_vm_open() */
++      drm_vm_open_locked(vma);
++      return 0;
++}
++
++int drm_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_mmap_locked(filp, vma);
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++EXPORT_SYMBOL(drm_mmap);
++
++/**
++ * buffer object vm functions.
++ */
++
++/**
++ * \c Pagefault method for buffer objects.
++ *
++ * \param vma Virtual memory area.
++ * \param vmf vm fault data
++ * \return Error or VM_FAULT_NOPAGE:. The pfn is manually inserted.
++ *
++ * It's important that pfns are inserted while holding the bo->mutex lock.
++ * otherwise we might race with unmap_mapping_range() which is always
++ * called with the bo->mutex lock held.
++ *
++ * We're modifying the page attribute bits of the vma->vm_page_prot field,
++ * without holding the mmap_sem in write mode. Only in read mode.
++ * These bits are not used by the mm subsystem code, and we consider them
++ * protected by the bo->mutex lock.
++ */
++
++#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT)
++static int drm_bo_vm_fault(struct vm_area_struct *vma,
++                                   struct vm_fault *vmf)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      unsigned long page_offset;
++      struct page *page = NULL;
++      struct drm_ttm *ttm;
++      struct drm_device *dev;
++      unsigned long pfn;
++      int err;
++      unsigned long bus_base;
++      unsigned long bus_offset;
++      unsigned long bus_size;
++      unsigned long ret = VM_FAULT_NOPAGE;
++
++      dev = bo->dev;
++      err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (err)
++              return VM_FAULT_NOPAGE;
++
++      err = mutex_lock_interruptible(&bo->mutex);
++      if (err) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              return VM_FAULT_NOPAGE;
++      }
++
++      err = drm_bo_wait(bo, 0, 1, 0, 1);
++      if (err) {
++              ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++              bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++              goto out_unlock;
++      }
++
++      bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
++
++      /*
++       * If buffer happens to be in a non-mappable location,
++       * move it to a mappable.
++       */
++
++      if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
++              uint32_t new_flags = bo->mem.proposed_flags |
++                      DRM_BO_FLAG_MAPPABLE |
++                      DRM_BO_FLAG_FORCE_MAPPABLE;
++              err = drm_bo_move_buffer(bo, new_flags, 0, 0);
++              if (err) {
++                      ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
++                      goto out_unlock;
++              }
++      }
++
++      err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
++                              &bus_size);
++
++      if (err) {
++              ret = VM_FAULT_SIGBUS;
++              goto out_unlock;
++      }
++
++      page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
++
++      if (bus_size) {
++              struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
++
++              pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
++              vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
++      } else {
++              ttm = bo->ttm;
++
++              drm_ttm_fixup_caching(ttm);
++              page = drm_ttm_get_page(ttm, page_offset);
++              if (!page) {
++                      ret = VM_FAULT_OOM;
++                      goto out_unlock;
++              }
++              pfn = page_to_pfn(page);
++              vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
++                      vm_get_page_prot(vma->vm_flags) :
++                      drm_io_prot(_DRM_TTM, vma);
++      }
++
++      err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
++      if (err) {
++              ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
++              goto out_unlock;
++      }
++out_unlock:
++      BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
++      mutex_unlock(&bo->mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return ret;
++}
++#endif
++
++static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++
++      drm_vm_open_locked(vma);
++      atomic_inc(&bo->usage);
++#ifdef DRM_ODD_MM_COMPAT
++      drm_bo_add_vma(bo, vma);
++#endif
++}
++
++/**
++ * \c vma open method for buffer objects.
++ *
++ * \param vma virtual memory area.
++ */
++
++static void drm_bo_vm_open(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      struct drm_device *dev = bo->dev;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_bo_vm_open_locked(vma);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * \c vma close method for buffer objects.
++ *
++ * \param vma virtual memory area.
++ */
++
++static void drm_bo_vm_close(struct vm_area_struct *vma)
++{
++      struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
++      struct drm_device *dev = bo->dev;
++
++      drm_vm_close(vma);
++      if (bo) {
++              mutex_lock(&dev->struct_mutex);
++#ifdef DRM_ODD_MM_COMPAT
++              drm_bo_delete_vma(bo, vma);
++#endif
++              drm_bo_usage_deref_locked((struct drm_buffer_object **)
++                                        &vma->vm_private_data);
++              mutex_unlock(&dev->struct_mutex);
++      }
++      return;
++}
++
++static struct vm_operations_struct drm_bo_vm_ops = {
++#ifdef DRM_FULL_MM_COMPAT
++#ifdef DRM_NO_FAULT
++      .nopfn = drm_bo_vm_nopfn,
++#else
++      .fault = drm_bo_vm_fault,
++#endif
++#else
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
++      .nopfn = drm_bo_vm_nopfn,
++#else
++      .nopage = drm_bo_vm_nopage,
++#endif
++#endif
++      .open = drm_bo_vm_open,
++      .close = drm_bo_vm_close,
++};
++
++/**
++ * mmap buffer object memory.
++ *
++ * \param vma virtual memory area.
++ * \param file_priv DRM file private.
++ * \param map The buffer object drm map.
++ * \return zero on success or a negative number on failure.
++ */
++
++int drm_bo_mmap_locked(struct vm_area_struct *vma,
++                     struct file *filp,
++                     drm_local_map_t *map)
++{
++      vma->vm_ops = &drm_bo_vm_ops;
++      vma->vm_private_data = map->handle;
++      vma->vm_file = filp;
++      vma->vm_flags |= VM_RESERVED | VM_IO;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
++      vma->vm_flags |= VM_PFNMAP;
++#endif
++      drm_bo_vm_open_locked(vma);
++#ifdef DRM_ODD_MM_COMPAT
++      drm_bo_map_bound(vma);
++#endif
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c git-nokia/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c
+--- git/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/drm_vm_nopage_compat.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,267 @@
++/**
++ * \file drm_vm.c
++ * Memory mapping for DRM
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#ifdef DRM_VM_NOPAGE
++/**
++ * \c nopage method for AGP virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Find the right map and if it's AGP memory find the real physical page to
++ * map, get the page, increment the use count and return it.
++ */
++#if __OS_HAS_AGP
++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
++                                              unsigned long address)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_map *map = NULL;
++      struct drm_map_list *r_list;
++      struct drm_hash_item *hash;
++
++      /*
++       * Find the right map
++       */
++      if (!drm_core_has_AGP(dev))
++              goto vm_nopage_error;
++
++      if (!dev->agp || !dev->agp->cant_use_aperture)
++              goto vm_nopage_error;
++
++      if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
++              goto vm_nopage_error;
++
++      r_list = drm_hash_entry(hash, struct drm_map_list, hash);
++      map = r_list->map;
++
++      if (map && map->type == _DRM_AGP) {
++              unsigned long offset = address - vma->vm_start;
++              unsigned long baddr = map->offset + offset;
++              struct drm_agp_mem *agpmem;
++              struct page *page;
++
++#ifdef __alpha__
++              /*
++               * Adjust to a bus-relative address
++               */
++              baddr -= dev->hose->mem_space->start;
++#endif
++
++              /*
++               * It's AGP memory - find the real physical page to map
++               */
++              list_for_each_entry(agpmem, &dev->agp->memory, head) {
++                      if (agpmem->bound <= baddr &&
++                          agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
++                              break;
++              }
++
++              if (!agpmem)
++                      goto vm_nopage_error;
++
++              /*
++               * Get the page, inc the use count, and return it
++               */
++              offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
++              page = virt_to_page(__va(agpmem->memory->memory[offset]));
++              get_page(page);
++
++#if 0
++              /* page_count() not defined everywhere */
++              DRM_DEBUG
++                  ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
++                   baddr, __va(agpmem->memory->memory[offset]), offset,
++                   page_count(page));
++#endif
++
++              return page;
++      }
++      vm_nopage_error:
++      return NOPAGE_SIGBUS;   /* Disallow mremap */
++}
++#else                         /* __OS_HAS_AGP */
++static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
++                                              unsigned long address)
++{
++      return NOPAGE_SIGBUS;
++}
++#endif                                /* __OS_HAS_AGP */
++
++/**
++ * \c nopage method for shared virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Get the mapping, find the real physical page to map, get the page, and
++ * return it.
++ */
++static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
++                                                  unsigned long address)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      unsigned long offset;
++      unsigned long i;
++      struct page *page;
++
++      if (address > vma->vm_end)
++              return NOPAGE_SIGBUS;   /* Disallow mremap */
++      if (!map)
++              return NOPAGE_SIGBUS;   /* Nothing allocated */
++
++      offset = address - vma->vm_start;
++      i = (unsigned long)map->handle + offset;
++      page = vmalloc_to_page((void *)i);
++      if (!page)
++              return NOPAGE_SIGBUS;
++      get_page(page);
++
++      DRM_DEBUG("0x%lx\n", address);
++      return page;
++}
++
++/**
++ * \c nopage method for DMA virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
++ */
++static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
++                                                  unsigned long address)
++{
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_device_dma *dma = dev->dma;
++      unsigned long offset;
++      unsigned long page_nr;
++      struct page *page;
++
++      if (!dma)
++              return NOPAGE_SIGBUS;   /* Error */
++      if (address > vma->vm_end)
++              return NOPAGE_SIGBUS;   /* Disallow mremap */
++      if (!dma->pagelist)
++              return NOPAGE_SIGBUS;   /* Nothing allocated */
++
++      offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
++      page_nr = offset >> PAGE_SHIFT;
++      page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
++
++      get_page(page);
++
++      DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
++      return page;
++}
++
++/**
++ * \c nopage method for scatter-gather virtual memory.
++ *
++ * \param vma virtual memory area.
++ * \param address access address.
++ * \return pointer to the page structure.
++ *
++ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
++ */
++static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
++                                                 unsigned long address)
++{
++      struct drm_map *map = (struct drm_map *) vma->vm_private_data;
++      struct drm_file *priv = vma->vm_file->private_data;
++      struct drm_device *dev = priv->minor->dev;
++      struct drm_sg_mem *entry = dev->sg;
++      unsigned long offset;
++      unsigned long map_offset;
++      unsigned long page_offset;
++      struct page *page;
++
++      DRM_DEBUG("\n");
++      if (!entry)
++              return NOPAGE_SIGBUS;   /* Error */
++      if (address > vma->vm_end)
++              return NOPAGE_SIGBUS;   /* Disallow mremap */
++      if (!entry->pagelist)
++              return NOPAGE_SIGBUS;   /* Nothing allocated */
++
++      offset = address - vma->vm_start;
++      map_offset = map->offset - (unsigned long)dev->sg->virtual;
++      page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
++      page = entry->pagelist[page_offset];
++      get_page(page);
++
++      return page;
++}
++
++
++struct page *drm_vm_nopage(struct vm_area_struct *vma,
++                         unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_nopage(vma, address);
++}
++
++struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
++                             unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_shm_nopage(vma, address);
++}
++
++struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
++                             unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_dma_nopage(vma, address);
++}
++
++struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
++                            unsigned long address, int *type)
++{
++      if (type)
++              *type = VM_FAULT_MINOR;
++      return drm_do_vm_sg_nopage(vma, address);
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_context.c git-nokia/drivers/gpu/drm-tungsten/ffb_context.c
+--- git/drivers/gpu/drm-tungsten/ffb_context.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_context.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,582 @@
++/* $Id$
++ * ffb_context.c: Creator/Creator3D DRI/DRM context switching.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ *
++ * Almost entirely stolen from tdfx_context.c, see there
++ * for authors.
++ */
++
++#include <linux/sched.h>
++#include <asm/upa.h>
++
++#include "drmP.h"
++#include "ffb_drv.h"
++
++static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) {
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      int i;
++
++      for (i = 0; i < FFB_MAX_CTXS; i++) {
++              if (fpriv->hw_state[i] == NULL)
++                      break;
++      }
++      if (i == FFB_MAX_CTXS)
++              return -1;
++
++      fpriv->hw_state[i] = kmalloc(sizeof(struct ffb_hw_context), GFP_KERNEL);
++      if (fpriv->hw_state[i] == NULL)
++              return -1;
++
++      fpriv->hw_state[i]->is_2d_only = is_2d_only;
++
++      /* Plus one because 0 is the special DRM_KERNEL_CONTEXT. */
++      return i + 1;
++}
++
++static void ffb_save_context(ffb_dev_priv_t * fpriv, int idx)
++{
++      ffb_fbcPtr ffb = fpriv->regs;
++      struct ffb_hw_context *ctx;
++      int i;
++
++      ctx = fpriv->hw_state[idx - 1];
++      if (idx == 0 || ctx == NULL)
++              return;
++
++      if (ctx->is_2d_only) {
++              /* 2D applications only care about certain pieces
++               * of state.
++               */
++              ctx->drawop = upa_readl(&ffb->drawop);
++              ctx->ppc = upa_readl(&ffb->ppc);
++              ctx->wid = upa_readl(&ffb->wid);
++              ctx->fg = upa_readl(&ffb->fg);
++              ctx->bg = upa_readl(&ffb->bg);
++              ctx->xclip = upa_readl(&ffb->xclip);
++              ctx->fbc = upa_readl(&ffb->fbc);
++              ctx->rop = upa_readl(&ffb->rop);
++              ctx->cmp = upa_readl(&ffb->cmp);
++              ctx->matchab = upa_readl(&ffb->matchab);
++              ctx->magnab = upa_readl(&ffb->magnab);
++              ctx->pmask = upa_readl(&ffb->pmask);
++              ctx->xpmask = upa_readl(&ffb->xpmask);
++              ctx->lpat = upa_readl(&ffb->lpat);
++              ctx->fontxy = upa_readl(&ffb->fontxy);
++              ctx->fontw = upa_readl(&ffb->fontw);
++              ctx->fontinc = upa_readl(&ffb->fontinc);
++
++              /* stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      ctx->stencil = upa_readl(&ffb->stencil);
++                      ctx->stencilctl = upa_readl(&ffb->stencilctl);
++              }
++
++              for (i = 0; i < 32; i++)
++                      ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
++              ctx->ucsr = upa_readl(&ffb->ucsr);
++              return;
++      }
++
++      /* Fetch drawop. */
++      ctx->drawop = upa_readl(&ffb->drawop);
++
++      /* If we were saving the vertex registers, this is where
++       * we would do it.  We would save 32 32-bit words starting
++       * at ffb->suvtx.
++       */
++
++      /* Capture rendering attributes. */
++
++      ctx->ppc = upa_readl(&ffb->ppc);        /* Pixel Processor Control */
++      ctx->wid = upa_readl(&ffb->wid);        /* Current WID */
++      ctx->fg = upa_readl(&ffb->fg);  /* Constant FG color */
++      ctx->bg = upa_readl(&ffb->bg);  /* Constant BG color */
++      ctx->consty = upa_readl(&ffb->consty);  /* Constant Y */
++      ctx->constz = upa_readl(&ffb->constz);  /* Constant Z */
++      ctx->xclip = upa_readl(&ffb->xclip);    /* X plane clip */
++      ctx->dcss = upa_readl(&ffb->dcss);      /* Depth Cue Scale Slope */
++      ctx->vclipmin = upa_readl(&ffb->vclipmin);      /* Primary XY clip, minimum */
++      ctx->vclipmax = upa_readl(&ffb->vclipmax);      /* Primary XY clip, maximum */
++      ctx->vclipzmin = upa_readl(&ffb->vclipzmin);    /* Primary Z clip, minimum */
++      ctx->vclipzmax = upa_readl(&ffb->vclipzmax);    /* Primary Z clip, maximum */
++      ctx->dcsf = upa_readl(&ffb->dcsf);      /* Depth Cue Scale Front Bound */
++      ctx->dcsb = upa_readl(&ffb->dcsb);      /* Depth Cue Scale Back Bound */
++      ctx->dczf = upa_readl(&ffb->dczf);      /* Depth Cue Scale Z Front */
++      ctx->dczb = upa_readl(&ffb->dczb);      /* Depth Cue Scale Z Back */
++      ctx->blendc = upa_readl(&ffb->blendc);  /* Alpha Blend Control */
++      ctx->blendc1 = upa_readl(&ffb->blendc1);        /* Alpha Blend Color 1 */
++      ctx->blendc2 = upa_readl(&ffb->blendc2);        /* Alpha Blend Color 2 */
++      ctx->fbc = upa_readl(&ffb->fbc);        /* Frame Buffer Control */
++      ctx->rop = upa_readl(&ffb->rop);        /* Raster Operation */
++      ctx->cmp = upa_readl(&ffb->cmp);        /* Compare Controls */
++      ctx->matchab = upa_readl(&ffb->matchab);        /* Buffer A/B Match Ops */
++      ctx->matchc = upa_readl(&ffb->matchc);  /* Buffer C Match Ops */
++      ctx->magnab = upa_readl(&ffb->magnab);  /* Buffer A/B Magnitude Ops */
++      ctx->magnc = upa_readl(&ffb->magnc);    /* Buffer C Magnitude Ops */
++      ctx->pmask = upa_readl(&ffb->pmask);    /* RGB Plane Mask */
++      ctx->xpmask = upa_readl(&ffb->xpmask);  /* X Plane Mask */
++      ctx->ypmask = upa_readl(&ffb->ypmask);  /* Y Plane Mask */
++      ctx->zpmask = upa_readl(&ffb->zpmask);  /* Z Plane Mask */
++
++      /* Auxiliary Clips. */
++      ctx->auxclip0min = upa_readl(&ffb->auxclip[0].min);
++      ctx->auxclip0max = upa_readl(&ffb->auxclip[0].max);
++      ctx->auxclip1min = upa_readl(&ffb->auxclip[1].min);
++      ctx->auxclip1max = upa_readl(&ffb->auxclip[1].max);
++      ctx->auxclip2min = upa_readl(&ffb->auxclip[2].min);
++      ctx->auxclip2max = upa_readl(&ffb->auxclip[2].max);
++      ctx->auxclip3min = upa_readl(&ffb->auxclip[3].min);
++      ctx->auxclip3max = upa_readl(&ffb->auxclip[3].max);
++
++      ctx->lpat = upa_readl(&ffb->lpat);      /* Line Pattern */
++      ctx->fontxy = upa_readl(&ffb->fontxy);  /* XY Font Coordinate */
++      ctx->fontw = upa_readl(&ffb->fontw);    /* Font Width */
++      ctx->fontinc = upa_readl(&ffb->fontinc);        /* Font X/Y Increment */
++
++      /* These registers/features only exist on FFB2 and later chips. */
++      if (fpriv->ffb_type >= ffb2_prototype) {
++              ctx->dcss1 = upa_readl(&ffb->dcss1);    /* Depth Cue Scale Slope 1 */
++              ctx->dcss2 = upa_readl(&ffb->dcss2);    /* Depth Cue Scale Slope 2 */
++              ctx->dcss2 = upa_readl(&ffb->dcss3);    /* Depth Cue Scale Slope 3 */
++              ctx->dcs2 = upa_readl(&ffb->dcs2);      /* Depth Cue Scale 2 */
++              ctx->dcs3 = upa_readl(&ffb->dcs3);      /* Depth Cue Scale 3 */
++              ctx->dcs4 = upa_readl(&ffb->dcs4);      /* Depth Cue Scale 4 */
++              ctx->dcd2 = upa_readl(&ffb->dcd2);      /* Depth Cue Depth 2 */
++              ctx->dcd3 = upa_readl(&ffb->dcd3);      /* Depth Cue Depth 3 */
++              ctx->dcd4 = upa_readl(&ffb->dcd4);      /* Depth Cue Depth 4 */
++
++              /* And stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      ctx->stencil = upa_readl(&ffb->stencil);
++                      ctx->stencilctl = upa_readl(&ffb->stencilctl);
++              }
++      }
++
++      /* Save the 32x32 area pattern. */
++      for (i = 0; i < 32; i++)
++              ctx->area_pattern[i] = upa_readl(&ffb->pattern[i]);
++
++      /* Finally, stash away the User Constol/Status Register. */
++      ctx->ucsr = upa_readl(&ffb->ucsr);
++}
++
++static void ffb_restore_context(ffb_dev_priv_t * fpriv, int old, int idx)
++{
++      ffb_fbcPtr ffb = fpriv->regs;
++      struct ffb_hw_context *ctx;
++      int i;
++
++      ctx = fpriv->hw_state[idx - 1];
++      if (idx == 0 || ctx == NULL)
++              return;
++
++      if (ctx->is_2d_only) {
++              /* 2D applications only care about certain pieces
++               * of state.
++               */
++              upa_writel(ctx->drawop, &ffb->drawop);
++
++              /* If we were restoring the vertex registers, this is where
++               * we would do it.  We would restore 32 32-bit words starting
++               * at ffb->suvtx.
++               */
++
++              upa_writel(ctx->ppc, &ffb->ppc);
++              upa_writel(ctx->wid, &ffb->wid);
++              upa_writel(ctx->fg, &ffb->fg);
++              upa_writel(ctx->bg, &ffb->bg);
++              upa_writel(ctx->xclip, &ffb->xclip);
++              upa_writel(ctx->fbc, &ffb->fbc);
++              upa_writel(ctx->rop, &ffb->rop);
++              upa_writel(ctx->cmp, &ffb->cmp);
++              upa_writel(ctx->matchab, &ffb->matchab);
++              upa_writel(ctx->magnab, &ffb->magnab);
++              upa_writel(ctx->pmask, &ffb->pmask);
++              upa_writel(ctx->xpmask, &ffb->xpmask);
++              upa_writel(ctx->lpat, &ffb->lpat);
++              upa_writel(ctx->fontxy, &ffb->fontxy);
++              upa_writel(ctx->fontw, &ffb->fontw);
++              upa_writel(ctx->fontinc, &ffb->fontinc);
++
++              /* stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      upa_writel(ctx->stencil, &ffb->stencil);
++                      upa_writel(ctx->stencilctl, &ffb->stencilctl);
++                      upa_writel(0x80000000, &ffb->fbc);
++                      upa_writel((ctx->stencilctl | 0x80000),
++                                 &ffb->rawstencilctl);
++                      upa_writel(ctx->fbc, &ffb->fbc);
++              }
++
++              for (i = 0; i < 32; i++)
++                      upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
++              upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr);
++              return;
++      }
++
++      /* Restore drawop. */
++      upa_writel(ctx->drawop, &ffb->drawop);
++
++      /* If we were restoring the vertex registers, this is where
++       * we would do it.  We would restore 32 32-bit words starting
++       * at ffb->suvtx.
++       */
++
++      /* Restore rendering attributes. */
++
++      upa_writel(ctx->ppc, &ffb->ppc);        /* Pixel Processor Control */
++      upa_writel(ctx->wid, &ffb->wid);        /* Current WID */
++      upa_writel(ctx->fg, &ffb->fg);  /* Constant FG color */
++      upa_writel(ctx->bg, &ffb->bg);  /* Constant BG color */
++      upa_writel(ctx->consty, &ffb->consty);  /* Constant Y */
++      upa_writel(ctx->constz, &ffb->constz);  /* Constant Z */
++      upa_writel(ctx->xclip, &ffb->xclip);    /* X plane clip */
++      upa_writel(ctx->dcss, &ffb->dcss);      /* Depth Cue Scale Slope */
++      upa_writel(ctx->vclipmin, &ffb->vclipmin);      /* Primary XY clip, minimum */
++      upa_writel(ctx->vclipmax, &ffb->vclipmax);      /* Primary XY clip, maximum */
++      upa_writel(ctx->vclipzmin, &ffb->vclipzmin);    /* Primary Z clip, minimum */
++      upa_writel(ctx->vclipzmax, &ffb->vclipzmax);    /* Primary Z clip, maximum */
++      upa_writel(ctx->dcsf, &ffb->dcsf);      /* Depth Cue Scale Front Bound */
++      upa_writel(ctx->dcsb, &ffb->dcsb);      /* Depth Cue Scale Back Bound */
++      upa_writel(ctx->dczf, &ffb->dczf);      /* Depth Cue Scale Z Front */
++      upa_writel(ctx->dczb, &ffb->dczb);      /* Depth Cue Scale Z Back */
++      upa_writel(ctx->blendc, &ffb->blendc);  /* Alpha Blend Control */
++      upa_writel(ctx->blendc1, &ffb->blendc1);        /* Alpha Blend Color 1 */
++      upa_writel(ctx->blendc2, &ffb->blendc2);        /* Alpha Blend Color 2 */
++      upa_writel(ctx->fbc, &ffb->fbc);        /* Frame Buffer Control */
++      upa_writel(ctx->rop, &ffb->rop);        /* Raster Operation */
++      upa_writel(ctx->cmp, &ffb->cmp);        /* Compare Controls */
++      upa_writel(ctx->matchab, &ffb->matchab);        /* Buffer A/B Match Ops */
++      upa_writel(ctx->matchc, &ffb->matchc);  /* Buffer C Match Ops */
++      upa_writel(ctx->magnab, &ffb->magnab);  /* Buffer A/B Magnitude Ops */
++      upa_writel(ctx->magnc, &ffb->magnc);    /* Buffer C Magnitude Ops */
++      upa_writel(ctx->pmask, &ffb->pmask);    /* RGB Plane Mask */
++      upa_writel(ctx->xpmask, &ffb->xpmask);  /* X Plane Mask */
++      upa_writel(ctx->ypmask, &ffb->ypmask);  /* Y Plane Mask */
++      upa_writel(ctx->zpmask, &ffb->zpmask);  /* Z Plane Mask */
++
++      /* Auxiliary Clips. */
++      upa_writel(ctx->auxclip0min, &ffb->auxclip[0].min);
++      upa_writel(ctx->auxclip0max, &ffb->auxclip[0].max);
++      upa_writel(ctx->auxclip1min, &ffb->auxclip[1].min);
++      upa_writel(ctx->auxclip1max, &ffb->auxclip[1].max);
++      upa_writel(ctx->auxclip2min, &ffb->auxclip[2].min);
++      upa_writel(ctx->auxclip2max, &ffb->auxclip[2].max);
++      upa_writel(ctx->auxclip3min, &ffb->auxclip[3].min);
++      upa_writel(ctx->auxclip3max, &ffb->auxclip[3].max);
++
++      upa_writel(ctx->lpat, &ffb->lpat);      /* Line Pattern */
++      upa_writel(ctx->fontxy, &ffb->fontxy);  /* XY Font Coordinate */
++      upa_writel(ctx->fontw, &ffb->fontw);    /* Font Width */
++      upa_writel(ctx->fontinc, &ffb->fontinc);        /* Font X/Y Increment */
++
++      /* These registers/features only exist on FFB2 and later chips. */
++      if (fpriv->ffb_type >= ffb2_prototype) {
++              upa_writel(ctx->dcss1, &ffb->dcss1);    /* Depth Cue Scale Slope 1 */
++              upa_writel(ctx->dcss2, &ffb->dcss2);    /* Depth Cue Scale Slope 2 */
++              upa_writel(ctx->dcss3, &ffb->dcss2);    /* Depth Cue Scale Slope 3 */
++              upa_writel(ctx->dcs2, &ffb->dcs2);      /* Depth Cue Scale 2 */
++              upa_writel(ctx->dcs3, &ffb->dcs3);      /* Depth Cue Scale 3 */
++              upa_writel(ctx->dcs4, &ffb->dcs4);      /* Depth Cue Scale 4 */
++              upa_writel(ctx->dcd2, &ffb->dcd2);      /* Depth Cue Depth 2 */
++              upa_writel(ctx->dcd3, &ffb->dcd3);      /* Depth Cue Depth 3 */
++              upa_writel(ctx->dcd4, &ffb->dcd4);      /* Depth Cue Depth 4 */
++
++              /* And stencil/stencilctl only exists on FFB2+ and later
++               * due to the introduction of 3DRAM-III.
++               */
++              if (fpriv->ffb_type == ffb2_vertical_plus ||
++                  fpriv->ffb_type == ffb2_horizontal_plus) {
++                      /* Unfortunately, there is a hardware bug on
++                       * the FFB2+ chips which prevents a normal write
++                       * to the stencil control register from working
++                       * as it should.
++                       *
++                       * The state controlled by the FFB stencilctl register
++                       * really gets transferred to the per-buffer instances
++                       * of the stencilctl register in the 3DRAM chips.
++                       *
++                       * The bug is that FFB does not update buffer C correctly,
++                       * so we have to do it by hand for them.
++                       */
++
++                      /* This will update buffers A and B. */
++                      upa_writel(ctx->stencil, &ffb->stencil);
++                      upa_writel(ctx->stencilctl, &ffb->stencilctl);
++
++                      /* Force FFB to use buffer C 3dram regs. */
++                      upa_writel(0x80000000, &ffb->fbc);
++                      upa_writel((ctx->stencilctl | 0x80000),
++                                 &ffb->rawstencilctl);
++
++                      /* Now restore the correct FBC controls. */
++                      upa_writel(ctx->fbc, &ffb->fbc);
++              }
++      }
++
++      /* Restore the 32x32 area pattern. */
++      for (i = 0; i < 32; i++)
++              upa_writel(ctx->area_pattern[i], &ffb->pattern[i]);
++
++      /* Finally, stash away the User Constol/Status Register.
++       * The only state we really preserve here is the picking
++       * control.
++       */
++      upa_writel((ctx->ucsr & 0xf0000), &ffb->ucsr);
++}
++
++#define FFB_UCSR_FB_BUSY       0x01000000
++#define FFB_UCSR_RP_BUSY       0x02000000
++#define FFB_UCSR_ALL_BUSY      (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY)
++
++static void FFBWait(ffb_fbcPtr ffb)
++{
++      int limit = 100000;
++
++      do {
++              u32 regval = upa_readl(&ffb->ucsr);
++
++              if ((regval & FFB_UCSR_ALL_BUSY) == 0)
++                      break;
++      } while (--limit);
++}
++
++int ffb_context_switch(struct drm_device * dev, int old, int new) {
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++
++#if DRM_DMA_HISTOGRAM
++      dev->ctx_start = get_cycles();
++#endif
++
++      DRM_DEBUG("Context switch from %d to %d\n", old, new);
++
++      if (new == dev->last_context || dev->last_context == 0) {
++              dev->last_context = new;
++              return 0;
++      }
++
++      FFBWait(fpriv->regs);
++      ffb_save_context(fpriv, old);
++      ffb_restore_context(fpriv, old, new);
++      FFBWait(fpriv->regs);
++
++      dev->last_context = new;
++
++      return 0;
++}
++
++int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_ctx_res_t res;
++      drm_ctx_t ctx;
++      int i;
++
++      DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
++      if (copy_from_user(&res, (drm_ctx_res_t __user *) arg, sizeof(res)))
++              return -EFAULT;
++      if (res.count >= DRM_RESERVED_CONTEXTS) {
++              memset(&ctx, 0, sizeof(ctx));
++              for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
++                      ctx.handle = i;
++                      if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
++                              return -EFAULT;
++              }
++      }
++      res.count = DRM_RESERVED_CONTEXTS;
++      if (copy_to_user((drm_ctx_res_t __user *) arg, &res, sizeof(res)))
++              return -EFAULT;
++      return 0;
++}
++
++int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      drm_ctx_t ctx;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      idx = ffb_alloc_queue(dev, (ctx.flags & _DRM_CONTEXT_2DONLY));
++      if (idx < 0)
++              return -ENFILE;
++
++      DRM_DEBUG("%d\n", ctx.handle);
++      ctx.handle = idx;
++      if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
++              return -EFAULT;
++      return 0;
++}
++
++int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      struct ffb_hw_context *hwctx;
++      drm_ctx_t ctx;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++
++      idx = ctx.handle;
++      if (idx <= 0 || idx >= FFB_MAX_CTXS)
++              return -EINVAL;
++
++      hwctx = fpriv->hw_state[idx - 1];
++      if (hwctx == NULL)
++              return -EINVAL;
++
++      if ((ctx.flags & _DRM_CONTEXT_2DONLY) == 0)
++              hwctx->is_2d_only = 0;
++      else
++              hwctx->is_2d_only = 1;
++
++      return 0;
++}
++
++int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      struct ffb_hw_context *hwctx;
++      drm_ctx_t ctx;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++
++      idx = ctx.handle;
++      if (idx <= 0 || idx >= FFB_MAX_CTXS)
++              return -EINVAL;
++
++      hwctx = fpriv->hw_state[idx - 1];
++      if (hwctx == NULL)
++              return -EINVAL;
++
++      if (hwctx->is_2d_only != 0)
++              ctx.flags = _DRM_CONTEXT_2DONLY;
++      else
++              ctx.flags = 0;
++
++      if (copy_to_user((drm_ctx_t __user *) arg, &ctx, sizeof(ctx)))
++              return -EFAULT;
++
++      return 0;
++}
++
++int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd,
++                  unsigned long arg) {
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      drm_ctx_t ctx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      DRM_DEBUG("%d\n", ctx.handle);
++      return ffb_context_switch(dev, dev->last_context, ctx.handle);
++}
++
++int ffb_newctx(struct inode * inode, struct file * filp, unsigned int cmd,
++               unsigned long arg) {
++      drm_ctx_t ctx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      DRM_DEBUG("%d\n", ctx.handle);
++
++      return 0;
++}
++
++int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd,
++              unsigned long arg) {
++      drm_ctx_t ctx;
++      drm_file_t *priv = filp->private_data;
++      struct drm_device *dev = priv->dev;
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      int idx;
++
++      if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
++              return -EFAULT;
++      DRM_DEBUG("%d\n", ctx.handle);
++
++      idx = ctx.handle - 1;
++      if (idx < 0 || idx >= FFB_MAX_CTXS)
++              return -EINVAL;
++
++      if (fpriv->hw_state[idx] != NULL) {
++              kfree(fpriv->hw_state[idx]);
++              fpriv->hw_state[idx] = NULL;
++      }
++      return 0;
++}
++
++static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev)
++{
++      ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
++      int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
++      int idx;
++
++      idx = context - 1;
++      if (fpriv &&
++          context != DRM_KERNEL_CONTEXT && fpriv->hw_state[idx] != NULL) {
++              kfree(fpriv->hw_state[idx]);
++              fpriv->hw_state[idx] = NULL;
++      }
++}
++
++static void ffb_driver_lastclose(struct drm_device * dev)
++{
++      if (dev->dev_private)
++              kfree(dev->dev_private);
++}
++
++static void ffb_driver_unload(struct drm_device * dev)
++{
++      if (ffb_position != NULL)
++              kfree(ffb_position);
++}
++
++static int ffb_driver_kernel_context_switch_unlock(struct drm_device *dev)
++{
++      dev->lock.filp = 0;
++      {
++              __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
++              unsigned int old, new, prev, ctx;
++
++              ctx = lock.context;
++              do {
++                      old = *plock;
++                      new = ctx;
++                      prev = cmpxchg(plock, old, new);
++              } while (prev != old);
++      }
++      wake_up_interruptible(&dev->lock.lock_queue);
++}
++
++unsigned long ffb_driver_get_map_ofs(drm_map_t * map)
++{
++      return (map->offset & 0xffffffff);
++}
++
++unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev)
++{
++      ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private;
++
++      if (ffb_priv)
++              return ffb_priv->card_phys_base;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_drv.c git-nokia/drivers/gpu/drm-tungsten/ffb_drv.c
+--- git/drivers/gpu/drm-tungsten/ffb_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,329 @@
++/* $Id$
++ * ffb_drv.c: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <asm/shmparam.h>
++#include <asm/oplib.h>
++#include <asm/upa.h>
++
++#include "drmP.h"
++#include "ffb_drv.h"
++
++#define DRIVER_AUTHOR         "David S. Miller"
++
++#define DRIVER_NAME           "ffb"
++#define DRIVER_DESC           "Creator/Creator3D"
++#define DRIVER_DATE           "20000517"
++
++#define DRIVER_MAJOR          0
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     1
++
++typedef struct _ffb_position_t {
++      int node;
++      int root;
++} ffb_position_t;
++
++static ffb_position_t *ffb_position;
++
++static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance)
++{
++      volatile unsigned char *strap_bits;
++      unsigned char val;
++
++      strap_bits = (volatile unsigned char *)
++              (ffb_priv->card_phys_base + 0x00200000UL);
++
++      /* Don't ask, you have to read the value twice for whatever
++       * reason to get correct contents.
++       */
++      val = upa_readb(strap_bits);
++      val = upa_readb(strap_bits);
++      switch (val & 0x78) {
++      case (0x0 << 5) | (0x0 << 3):
++              ffb_priv->ffb_type = ffb1_prototype;
++              printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
++              break;
++      case (0x0 << 5) | (0x1 << 3):
++              ffb_priv->ffb_type = ffb1_standard;
++              printk("ffb%d: Detected FFB1\n", instance);
++              break;
++      case (0x0 << 5) | (0x3 << 3):
++              ffb_priv->ffb_type = ffb1_speedsort;
++              printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
++              break;
++      case (0x1 << 5) | (0x0 << 3):
++              ffb_priv->ffb_type = ffb2_prototype;
++              printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance);
++              break;
++      case (0x1 << 5) | (0x1 << 3):
++              ffb_priv->ffb_type = ffb2_vertical;
++              printk("ffb%d: Detected FFB2/vertical\n", instance);
++              break;
++      case (0x1 << 5) | (0x2 << 3):
++              ffb_priv->ffb_type = ffb2_vertical_plus;
++              printk("ffb%d: Detected FFB2+/vertical\n", instance);
++              break;
++      case (0x2 << 5) | (0x0 << 3):
++              ffb_priv->ffb_type = ffb2_horizontal;
++              printk("ffb%d: Detected FFB2/horizontal\n", instance);
++              break;
++      case (0x2 << 5) | (0x2 << 3):
++              ffb_priv->ffb_type = ffb2_horizontal;
++              printk("ffb%d: Detected FFB2+/horizontal\n", instance);
++              break;
++      default:
++              ffb_priv->ffb_type = ffb2_vertical;
++              printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val);
++              break;
++      };
++}
++
++static void ffb_apply_upa_parent_ranges(int parent,
++                                      struct linux_prom64_registers *regs)
++{
++      struct linux_prom64_ranges ranges[PROMREG_MAX];
++      char name[128];
++      int len, i;
++
++      prom_getproperty(parent, "name", name, sizeof(name));
++      if (strcmp(name, "upa") != 0)
++              return;
++
++      len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges));
++      if (len <= 0)
++              return;
++
++      len /= sizeof(struct linux_prom64_ranges);
++      for (i = 0; i < len; i++) {
++              struct linux_prom64_ranges *rng = &ranges[i];
++              u64 phys_addr = regs->phys_addr;
++
++              if (phys_addr >= rng->ot_child_base &&
++                  phys_addr < (rng->ot_child_base + rng->or_size)) {
++                      regs->phys_addr -= rng->ot_child_base;
++                      regs->phys_addr += rng->ot_parent_base;
++                      return;
++              }
++      }
++
++      return;
++}
++
++static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node,
++                      int instance)
++{
++      struct linux_prom64_registers regs[2*PROMREG_MAX];
++      ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *)dev->dev_private;
++      int i;
++
++      ffb_priv->prom_node = prom_node;
++      if (prom_getproperty(ffb_priv->prom_node, "reg",
++                           (void *)regs, sizeof(regs)) <= 0) {
++              return -EINVAL;
++      }
++      ffb_apply_upa_parent_ranges(parent_node, &regs[0]);
++      ffb_priv->card_phys_base = regs[0].phys_addr;
++      ffb_priv->regs = (ffb_fbcPtr)
++              (regs[0].phys_addr + 0x00600000UL);
++      get_ffb_type(ffb_priv, instance);
++      for (i = 0; i < FFB_MAX_CTXS; i++)
++              ffb_priv->hw_state[i] = NULL;
++
++      return 0;
++}
++
++static int __init ffb_count_siblings(int root)
++{
++      int node, child, count = 0;
++
++      child = prom_getchild(root);
++      for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
++           node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
++              count++;
++
++      return count;
++}
++
++static int __init ffb_scan_siblings(int root, int instance)
++{
++      int node, child;
++
++      child = prom_getchild(root);
++      for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
++           node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
++              ffb_position[instance].node = node;
++              ffb_position[instance].root = root;
++              instance++;
++      }
++
++      return instance;
++}
++
++static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
++{
++      drm_file_t      *priv   = filp->private_data;
++      struct drm_device       *dev;
++      drm_map_list_t  *r_list;
++      struct list_head *list;
++      drm_map_t       *map;
++
++      if (!priv || (dev = priv->dev) == NULL)
++              return NULL;
++
++      list_for_each(list, &dev->maplist->head) {
++              unsigned long uoff;
++
++              r_list = (drm_map_list_t *)list;
++              map = r_list->map;
++              if (!map)
++                      continue;
++              uoff = (map->offset & 0xffffffff);
++              if (uoff == off)
++                      return map;
++      }
++
++      return NULL;
++}
++
++unsigned long ffb_get_unmapped_area(struct file *filp,
++                                  unsigned long hint,
++                                  unsigned long len,
++                                  unsigned long pgoff,
++                                  unsigned long flags)
++{
++      drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
++      unsigned long addr = -ENOMEM;
++
++      if (!map)
++              return get_unmapped_area(NULL, hint, len, pgoff, flags);
++
++      if (map->type == _DRM_FRAME_BUFFER ||
++          map->type == _DRM_REGISTERS) {
++#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
++              addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
++#else
++              addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
++#endif
++      } else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
++              unsigned long slack = SHMLBA - PAGE_SIZE;
++
++              addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
++              if (!(addr & ~PAGE_MASK)) {
++                      unsigned long kvirt = (unsigned long) map->handle;
++
++                      if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
++                              unsigned long koff, aoff;
++
++                              koff = kvirt & (SHMLBA - 1);
++                              aoff = addr & (SHMLBA - 1);
++                              if (koff < aoff)
++                                      koff += SHMLBA;
++
++                              addr += (koff - aoff);
++                      }
++              }
++      } else {
++              addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
++      }
++
++      return addr;
++}
++
++/* This function must be here since it references drm_numdevs,
++ * which drm_drv.h declares.
++ */
++static int ffb_driver_firstopen(struct drm_device *dev)
++{
++      ffb_dev_priv_t  *ffb_priv;
++      struct drm_device *temp_dev;
++      int ret = 0;
++      int i;
++
++      /* Check for the case where no device was found. */
++      if (ffb_position == NULL)
++              return -ENODEV;
++
++      /* Find our instance number by finding our device in dev structure */
++      for (i = 0; i < drm_numdevs; i++) {
++              temp_dev = &(drm_device[i]);
++              if(temp_dev == dev)
++                      break;
++      }
++
++      if (i == drm_numdevs)
++              return -ENODEV;
++
++      ffb_priv = kmalloc(sizeof(ffb_dev_priv_t), GFP_KERNEL);
++      if (!ffb_priv)
++              return -ENOMEM;
++      memset(ffb_priv, 0, sizeof(*ffb_priv));
++      dev->dev_private = ffb_priv;
++
++      ret = ffb_init_one(dev,
++                         ffb_position[i].node,
++                         ffb_position[i].root,
++                         i);
++      return ret;
++}
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      ffb_PCI_IDS
++};
++
++static struct drm_driver ffb_driver = {
++      .release = ffb_driver_reclaim_buffers_locked,
++      .firstopen = ffb_driver_firstopen,
++      .lastclose = ffb_driver_lastclose,
++      .unload = ffb_driver_unload,
++      .kernel_context_switch = ffb_context_switch,
++      .kernel_context_switch_unlock = ffb_driver_kernel_context_switch_unlock,
++      .get_map_ofs = ffb_driver_get_map_ofs,
++      .get_reg_ofs = ffb_driver_get_reg_ofs,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .fops = {
++              .owner   = THIS_MODULE,
++              .open    = drm_open,
++              .release = drm_release,
++              .ioctl   = drm_ioctl,
++              .mmap    = drm_mmap,
++              .fasync  = drm_fasync,
++              .poll    = drm_poll,
++              .get_unmapped_area = ffb_get_unmapped_area,
++      },
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_probe(pdev, ent, &ffb_driver);
++}
++
++static struct pci_driver pci_driver = {
++      .name          = DRIVER_NAME,
++      .id_table      = pciidlist,
++      .probe         = probe,
++      .remove        = __devexit_p(drm_cleanup_pci),
++};
++
++static int __init ffb_init(void)
++{
++      return drm_init(&pci_driver, pciidlist, &ffb_driver);
++}
++
++static void __exit ffb_exit(void)
++{
++      drm_exit(&pci_driver);
++}
++
++module_init(ffb_init);
++module_exit(ffb_exit);
++
++MODULE_AUTHOR( DRIVER_AUTHOR );
++MODULE_DESCRIPTION( DRIVER_DESC );
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/ffb_drv.h git-nokia/drivers/gpu/drm-tungsten/ffb_drv.h
+--- git/drivers/gpu/drm-tungsten/ffb_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/ffb_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,284 @@
++/* $Id$
++ * ffb_drv.h: Creator/Creator3D direct rendering driver.
++ *
++ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
++ */
++
++/* Auxilliary clips. */
++typedef struct  {
++      volatile unsigned int min;
++      volatile unsigned int max;
++} ffb_auxclip, *ffb_auxclipPtr;
++
++/* FFB register set. */
++typedef struct _ffb_fbc {
++      /* Next vertex registers, on the right we list which drawops
++       * use said register and the logical name the register has in
++       * that context.
++       */                                     /* DESCRIPTION          DRAWOP(NAME)    */
++/*0x00*/unsigned int          pad1[3];        /* Reserved                             */
++/*0x0c*/volatile unsigned int alpha;          /* ALPHA Transparency                   */
++/*0x10*/volatile unsigned int red;            /* RED                                  */
++/*0x14*/volatile unsigned int green;          /* GREEN                                */
++/*0x18*/volatile unsigned int blue;           /* BLUE                                 */
++/*0x1c*/volatile unsigned int z;              /* DEPTH                                */
++/*0x20*/volatile unsigned int y;              /* Y                    triangle(DOYF)  */
++                                              /*                      aadot(DYF)      */
++                                              /*                      ddline(DYF)     */
++                                              /*                      aaline(DYF)     */
++/*0x24*/volatile unsigned int x;              /* X                    triangle(DOXF)  */
++                                              /*                      aadot(DXF)      */
++                                              /*                      ddline(DXF)     */
++                                              /*                      aaline(DXF)     */
++/*0x28*/unsigned int          pad2[2];        /* Reserved                             */
++/*0x30*/volatile unsigned int ryf;            /* Y (alias to DOYF)    ddline(RYF)     */
++                                              /*                      aaline(RYF)     */
++                                              /*                      triangle(RYF)   */
++/*0x34*/volatile unsigned int rxf;            /* X                    ddline(RXF)     */
++                                              /*                      aaline(RXF)     */
++                                              /*                      triangle(RXF)   */
++/*0x38*/unsigned int          pad3[2];        /* Reserved                             */
++/*0x40*/volatile unsigned int dmyf;           /* Y (alias to DOYF)    triangle(DMYF)  */
++/*0x44*/volatile unsigned int dmxf;           /* X                    triangle(DMXF)  */
++/*0x48*/unsigned int          pad4[2];        /* Reserved                             */
++/*0x50*/volatile unsigned int ebyi;           /* Y (alias to RYI)     polygon(EBYI)   */
++/*0x54*/volatile unsigned int ebxi;           /* X                    polygon(EBXI)   */
++/*0x58*/unsigned int          pad5[2];        /* Reserved                             */
++/*0x60*/volatile unsigned int by;             /* Y                    brline(RYI)     */
++                                              /*                      fastfill(OP)    */
++                                              /*                      polygon(YI)     */
++                                              /*                      rectangle(YI)   */
++                                              /*                      bcopy(SRCY)     */
++                                              /*                      vscroll(SRCY)   */
++/*0x64*/volatile unsigned int bx;             /* X                    brline(RXI)     */
++                                              /*                      polygon(XI)     */
++                                              /*                      rectangle(XI)   */
++                                              /*                      bcopy(SRCX)     */
++                                              /*                      vscroll(SRCX)   */
++                                              /*                      fastfill(GO)    */
++/*0x68*/volatile unsigned int dy;             /* destination Y        fastfill(DSTY)  */
++                                              /*                      bcopy(DSRY)     */
++                                              /*                      vscroll(DSRY)   */
++/*0x6c*/volatile unsigned int dx;             /* destination X        fastfill(DSTX)  */
++                                              /*                      bcopy(DSTX)     */
++                                              /*                      vscroll(DSTX)   */
++/*0x70*/volatile unsigned int bh;             /* Y (alias to RYI)     brline(DYI)     */
++                                              /*                      dot(DYI)        */
++                                              /*                      polygon(ETYI)   */
++                                              /* Height               fastfill(H)     */
++                                              /*                      bcopy(H)        */
++                                              /*                      vscroll(H)      */
++                                              /* Y count              fastfill(NY)    */
++/*0x74*/volatile unsigned int bw;             /* X                    dot(DXI)        */
++                                              /*                      brline(DXI)     */
++                                              /*                      polygon(ETXI)   */
++                                              /*                      fastfill(W)     */
++                                              /*                      bcopy(W)        */
++                                              /*                      vscroll(W)      */
++                                              /*                      fastfill(NX)    */
++/*0x78*/unsigned int          pad6[2];        /* Reserved                             */
++/*0x80*/unsigned int          pad7[32];       /* Reserved                             */
++
++      /* Setup Unit's vertex state register */
++/*100*/       volatile unsigned int   suvtx;
++/*104*/       unsigned int            pad8[63];       /* Reserved                             */
++
++      /* Frame Buffer Control Registers */
++/*200*/       volatile unsigned int   ppc;            /* Pixel Processor Control              */
++/*204*/       volatile unsigned int   wid;            /* Current WID                          */
++/*208*/       volatile unsigned int   fg;             /* FG data                              */
++/*20c*/       volatile unsigned int   bg;             /* BG data                              */
++/*210*/       volatile unsigned int   consty;         /* Constant Y                           */
++/*214*/       volatile unsigned int   constz;         /* Constant Z                           */
++/*218*/       volatile unsigned int   xclip;          /* X Clip                               */
++/*21c*/       volatile unsigned int   dcss;           /* Depth Cue Scale Slope                */
++/*220*/       volatile unsigned int   vclipmin;       /* Viewclip XY Min Bounds               */
++/*224*/       volatile unsigned int   vclipmax;       /* Viewclip XY Max Bounds               */
++/*228*/       volatile unsigned int   vclipzmin;      /* Viewclip Z Min Bounds                */
++/*22c*/       volatile unsigned int   vclipzmax;      /* Viewclip Z Max Bounds                */
++/*230*/       volatile unsigned int   dcsf;           /* Depth Cue Scale Front Bound          */
++/*234*/       volatile unsigned int   dcsb;           /* Depth Cue Scale Back Bound           */
++/*238*/       volatile unsigned int   dczf;           /* Depth Cue Z Front                    */
++/*23c*/       volatile unsigned int   dczb;           /* Depth Cue Z Back                     */
++/*240*/       unsigned int            pad9;           /* Reserved                             */
++/*244*/       volatile unsigned int   blendc;         /* Alpha Blend Control                  */
++/*248*/       volatile unsigned int   blendc1;        /* Alpha Blend Color 1                  */
++/*24c*/       volatile unsigned int   blendc2;        /* Alpha Blend Color 2                  */
++/*250*/       volatile unsigned int   fbramitc;       /* FB RAM Interleave Test Control       */
++/*254*/       volatile unsigned int   fbc;            /* Frame Buffer Control                 */
++/*258*/       volatile unsigned int   rop;            /* Raster OPeration                     */
++/*25c*/       volatile unsigned int   cmp;            /* Frame Buffer Compare                 */
++/*260*/       volatile unsigned int   matchab;        /* Buffer AB Match Mask                 */
++/*264*/       volatile unsigned int   matchc;         /* Buffer C(YZ) Match Mask              */
++/*268*/       volatile unsigned int   magnab;         /* Buffer AB Magnitude Mask             */
++/*26c*/       volatile unsigned int   magnc;          /* Buffer C(YZ) Magnitude Mask          */
++/*270*/       volatile unsigned int   fbcfg0;         /* Frame Buffer Config 0                */
++/*274*/       volatile unsigned int   fbcfg1;         /* Frame Buffer Config 1                */
++/*278*/       volatile unsigned int   fbcfg2;         /* Frame Buffer Config 2                */
++/*27c*/       volatile unsigned int   fbcfg3;         /* Frame Buffer Config 3                */
++/*280*/       volatile unsigned int   ppcfg;          /* Pixel Processor Config               */
++/*284*/       volatile unsigned int   pick;           /* Picking Control                      */
++/*288*/       volatile unsigned int   fillmode;       /* FillMode                             */
++/*28c*/       volatile unsigned int   fbramwac;       /* FB RAM Write Address Control         */
++/*290*/       volatile unsigned int   pmask;          /* RGB PlaneMask                        */
++/*294*/       volatile unsigned int   xpmask;         /* X PlaneMask                          */
++/*298*/       volatile unsigned int   ypmask;         /* Y PlaneMask                          */
++/*29c*/       volatile unsigned int   zpmask;         /* Z PlaneMask                          */
++/*2a0*/       ffb_auxclip             auxclip[4];     /* Auxilliary Viewport Clip             */
++
++      /* New 3dRAM III support regs */
++/*2c0*/       volatile unsigned int   rawblend2;
++/*2c4*/       volatile unsigned int   rawpreblend;
++/*2c8*/       volatile unsigned int   rawstencil;
++/*2cc*/       volatile unsigned int   rawstencilctl;
++/*2d0*/       volatile unsigned int   threedram1;
++/*2d4*/       volatile unsigned int   threedram2;
++/*2d8*/       volatile unsigned int   passin;
++/*2dc*/       volatile unsigned int   rawclrdepth;
++/*2e0*/       volatile unsigned int   rawpmask;
++/*2e4*/       volatile unsigned int   rawcsrc;
++/*2e8*/       volatile unsigned int   rawmatch;
++/*2ec*/       volatile unsigned int   rawmagn;
++/*2f0*/       volatile unsigned int   rawropblend;
++/*2f4*/       volatile unsigned int   rawcmp;
++/*2f8*/       volatile unsigned int   rawwac;
++/*2fc*/       volatile unsigned int   fbramid;
++
++/*300*/       volatile unsigned int   drawop;         /* Draw OPeration                       */
++/*304*/       unsigned int            pad10[2];       /* Reserved                             */
++/*30c*/       volatile unsigned int   lpat;           /* Line Pattern control                 */
++/*310*/       unsigned int            pad11;          /* Reserved                             */
++/*314*/       volatile unsigned int   fontxy;         /* XY Font coordinate                   */
++/*318*/       volatile unsigned int   fontw;          /* Font Width                           */
++/*31c*/       volatile unsigned int   fontinc;        /* Font Increment                       */
++/*320*/       volatile unsigned int   font;           /* Font bits                            */
++/*324*/       unsigned int            pad12[3];       /* Reserved                             */
++/*330*/       volatile unsigned int   blend2;
++/*334*/       volatile unsigned int   preblend;
++/*338*/       volatile unsigned int   stencil;
++/*33c*/       volatile unsigned int   stencilctl;
++
++/*340*/       unsigned int            pad13[4];       /* Reserved                             */
++/*350*/       volatile unsigned int   dcss1;          /* Depth Cue Scale Slope 1              */
++/*354*/       volatile unsigned int   dcss2;          /* Depth Cue Scale Slope 2              */
++/*358*/       volatile unsigned int   dcss3;          /* Depth Cue Scale Slope 3              */
++/*35c*/       volatile unsigned int   widpmask;
++/*360*/       volatile unsigned int   dcs2;
++/*364*/       volatile unsigned int   dcs3;
++/*368*/       volatile unsigned int   dcs4;
++/*36c*/       unsigned int            pad14;          /* Reserved                             */
++/*370*/       volatile unsigned int   dcd2;
++/*374*/       volatile unsigned int   dcd3;
++/*378*/       volatile unsigned int   dcd4;
++/*37c*/       unsigned int            pad15;          /* Reserved                             */
++/*380*/       volatile unsigned int   pattern[32];    /* area Pattern                         */
++/*400*/       unsigned int            pad16[8];       /* Reserved                             */
++/*420*/       volatile unsigned int   reset;          /* chip RESET                           */
++/*424*/       unsigned int            pad17[247];     /* Reserved                             */
++/*800*/       volatile unsigned int   devid;          /* Device ID                            */
++/*804*/       unsigned int            pad18[63];      /* Reserved                             */
++/*900*/       volatile unsigned int   ucsr;           /* User Control & Status Register       */
++/*904*/       unsigned int            pad19[31];      /* Reserved                             */
++/*980*/       volatile unsigned int   mer;            /* Mode Enable Register                 */
++/*984*/       unsigned int            pad20[1439];    /* Reserved                             */
++} ffb_fbc, *ffb_fbcPtr;
++
++struct ffb_hw_context {
++      int is_2d_only;
++
++      unsigned int ppc;
++      unsigned int wid;
++      unsigned int fg;
++      unsigned int bg;
++      unsigned int consty;
++      unsigned int constz;
++      unsigned int xclip;
++      unsigned int dcss;
++      unsigned int vclipmin;
++      unsigned int vclipmax;
++      unsigned int vclipzmin;
++      unsigned int vclipzmax;
++      unsigned int dcsf;
++      unsigned int dcsb;
++      unsigned int dczf;
++      unsigned int dczb;
++      unsigned int blendc;
++      unsigned int blendc1;
++      unsigned int blendc2;
++      unsigned int fbc;
++      unsigned int rop;
++      unsigned int cmp;
++      unsigned int matchab;
++      unsigned int matchc;
++      unsigned int magnab;
++      unsigned int magnc;
++      unsigned int pmask;
++      unsigned int xpmask;
++      unsigned int ypmask;
++      unsigned int zpmask;
++      unsigned int auxclip0min;
++      unsigned int auxclip0max;
++      unsigned int auxclip1min;
++      unsigned int auxclip1max;
++      unsigned int auxclip2min;
++      unsigned int auxclip2max;
++      unsigned int auxclip3min;
++      unsigned int auxclip3max;
++      unsigned int drawop;
++      unsigned int lpat;
++      unsigned int fontxy;
++      unsigned int fontw;
++      unsigned int fontinc;
++      unsigned int area_pattern[32];
++      unsigned int ucsr;
++      unsigned int stencil;
++      unsigned int stencilctl;
++      unsigned int dcss1;
++      unsigned int dcss2;
++      unsigned int dcss3;
++      unsigned int dcs2;
++      unsigned int dcs3;
++      unsigned int dcs4;
++      unsigned int dcd2;
++      unsigned int dcd3;
++      unsigned int dcd4;
++      unsigned int mer;
++};
++
++#define FFB_MAX_CTXS  32
++
++enum ffb_chip_type {
++      ffb1_prototype = 0,     /* Early pre-FCS FFB */
++      ffb1_standard,          /* First FCS FFB, 100Mhz UPA, 66MHz gclk */
++      ffb1_speedsort,         /* Second FCS FFB, 100Mhz UPA, 75MHz gclk */
++      ffb2_prototype,         /* Early pre-FCS vertical FFB2 */
++      ffb2_vertical,          /* First FCS FFB2/vertical, 100Mhz UPA, 100MHZ gclk,
++                                 75(SingleBuffer)/83(DoubleBuffer) MHz fclk */
++      ffb2_vertical_plus,     /* Second FCS FFB2/vertical, same timings */
++      ffb2_horizontal,        /* First FCS FFB2/horizontal, same timings as FFB2/vert */
++      ffb2_horizontal_plus,   /* Second FCS FFB2/horizontal, same timings */
++      afb_m3,                 /* FCS Elite3D, 3 float chips */
++      afb_m6                  /* FCS Elite3D, 6 float chips */
++};
++
++typedef struct ffb_dev_priv {
++      /* Misc software state. */
++      int                     prom_node;
++      enum ffb_chip_type      ffb_type;
++      u64                     card_phys_base;
++      struct miscdevice       miscdev;
++
++      /* Controller registers. */
++      ffb_fbcPtr              regs;
++
++      /* Context table. */
++      struct ffb_hw_context   *hw_state[FFB_MAX_CTXS];
++} ffb_dev_priv_t;
++
++extern unsigned long ffb_get_unmapped_area(struct file *filp,
++                                         unsigned long hint,
++                                         unsigned long len,
++                                         unsigned long pgoff,
++                                         unsigned long flags);
++extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map);
++extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev);
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_dma.c git-nokia/drivers/gpu/drm-tungsten/i810_dma.c
+--- git/drivers/gpu/drm-tungsten/i810_dma.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_dma.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1301 @@
++/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
++ *        Jeff Hartmann <jhartmann@valinux.com>
++ *          Keith Whitwell <keith@tungstengraphics.com>
++ *
++ */
++
++#include <linux/interrupt.h>  /* For task queue support */
++#include <linux/delay.h>
++#include <linux/pagemap.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "i810_drm.h"
++#include "i810_drv.h"
++
++#define I810_BUF_FREE         2
++#define I810_BUF_CLIENT               1
++#define I810_BUF_HARDWARE     0
++
++#define I810_BUF_UNMAPPED 0
++#define I810_BUF_MAPPED   1
++
++static inline void i810_print_status_page(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      u32 *temp = dev_priv->hw_status_page;
++      int i;
++
++      DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]);
++      DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]);
++      DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]);
++      DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]);
++      DRM_DEBUG("hw_status: Last Render: %x\n", temp[4]);
++      DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]);
++      for (i = 6; i < dma->buf_count + 6; i++) {
++              DRM_DEBUG("buffer status idx : %d used: %d\n", i - 6, temp[i]);
++      }
++}
++
++static struct drm_buf *i810_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++      int used;
++
++      /* Linear search might not be the best solution */
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++              /* In use is already a pointer */
++              used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
++                             I810_BUF_CLIENT);
++              if (used == I810_BUF_FREE) {
++                      return buf;
++              }
++      }
++      return NULL;
++}
++
++/* This should only be called if the buffer is not sent to the hardware
++ * yet, the hardware updates in use for us once its on the ring buffer.
++ */
++
++static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      int used;
++
++      /* In use is already a pointer */
++      used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
++      if (used != I810_BUF_CLIENT) {
++              DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
++{
++      struct drm_file *priv = filp->private_data;
++      struct drm_device *dev;
++      drm_i810_private_t *dev_priv;
++      struct drm_buf *buf;
++      drm_i810_buf_priv_t *buf_priv;
++
++      lock_kernel();
++      dev = priv->minor->dev;
++      dev_priv = dev->dev_private;
++      buf = dev_priv->mmap_buffer;
++      buf_priv = buf->dev_private;
++
++      vma->vm_flags |= (VM_IO | VM_DONTCOPY);
++      vma->vm_file = filp;
++
++      buf_priv->currently_mapped = I810_BUF_MAPPED;
++      unlock_kernel();
++
++      if (io_remap_pfn_range(vma, vma->vm_start,
++                             vma->vm_pgoff,
++                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
++              return -EAGAIN;
++      return 0;
++}
++
++static const struct file_operations i810_buffer_fops = {
++      .open = drm_open,
++      .release = drm_release,
++      .ioctl = drm_ioctl,
++      .mmap = i810_mmap_buffers,
++      .fasync = drm_fasync,
++};
++
++static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      const struct file_operations *old_fops;
++      int retcode = 0;
++
++      if (buf_priv->currently_mapped == I810_BUF_MAPPED)
++              return -EINVAL;
++
++      down_write(&current->mm->mmap_sem);
++      old_fops = file_priv->filp->f_op;
++      file_priv->filp->f_op = &i810_buffer_fops;
++      dev_priv->mmap_buffer = buf;
++      buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
++                                          PROT_READ | PROT_WRITE,
++                                          MAP_SHARED, buf->bus_address);
++      dev_priv->mmap_buffer = NULL;
++      file_priv->filp->f_op = old_fops;
++      if (IS_ERR(buf_priv->virtual)) {
++              /* Real error */
++              DRM_ERROR("mmap error\n");
++              retcode = PTR_ERR(buf_priv->virtual);
++              buf_priv->virtual = NULL;
++      }
++      up_write(&current->mm->mmap_sem);
++
++      return retcode;
++}
++
++static int i810_unmap_buffer(struct drm_buf * buf)
++{
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      int retcode = 0;
++
++      if (buf_priv->currently_mapped != I810_BUF_MAPPED)
++              return -EINVAL;
++
++      down_write(&current->mm->mmap_sem);
++      retcode = do_munmap(current->mm,
++                          (unsigned long)buf_priv->virtual,
++                          (size_t) buf->total);
++      up_write(&current->mm->mmap_sem);
++
++      buf_priv->currently_mapped = I810_BUF_UNMAPPED;
++      buf_priv->virtual = NULL;
++
++      return retcode;
++}
++
++static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
++                             struct drm_file *file_priv)
++{
++      struct drm_buf *buf;
++      drm_i810_buf_priv_t *buf_priv;
++      int retcode = 0;
++
++      buf = i810_freelist_get(dev);
++      if (!buf) {
++              retcode = -ENOMEM;
++              DRM_DEBUG("retcode=%d\n", retcode);
++              return retcode;
++      }
++
++      retcode = i810_map_buffer(buf, file_priv);
++      if (retcode) {
++              i810_freelist_put(dev, buf);
++              DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
++              return retcode;
++      }
++      buf->file_priv = file_priv;
++      buf_priv = buf->dev_private;
++      d->granted = 1;
++      d->request_idx = buf->idx;
++      d->request_size = buf->total;
++      d->virtual = buf_priv->virtual;
++
++      return retcode;
++}
++
++static int i810_dma_cleanup(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              int i;
++              drm_i810_private_t *dev_priv =
++                  (drm_i810_private_t *) dev->dev_private;
++
++              if (dev_priv->ring.virtual_start) {
++                      drm_core_ioremapfree(&dev_priv->ring.map, dev);
++              }
++              if (dev_priv->hw_status_page) {
++                      pci_free_consistent(dev->pdev, PAGE_SIZE,
++                                          dev_priv->hw_status_page,
++                                          dev_priv->dma_status_page);
++                      /* Need to rewrite hardware status page */
++                      I810_WRITE(0x02080, 0x1ffff000);
++              }
++              drm_free(dev->dev_private, sizeof(drm_i810_private_t),
++                       DRM_MEM_DRIVER);
++              dev->dev_private = NULL;
++
++              for (i = 0; i < dma->buf_count; i++) {
++                      struct drm_buf *buf = dma->buflist[i];
++                      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++                      if (buf_priv->kernel_virtual && buf->total)
++                              drm_core_ioremapfree(&buf_priv->map, dev);
++              }
++      }
++      return 0;
++}
++
++static int i810_wait_ring(struct drm_device * dev, int n)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
++      int iters = 0;
++      unsigned long end;
++      unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++
++      end = jiffies + (HZ * 3);
++      while (ring->space < n) {
++              ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++              ring->space = ring->head - (ring->tail + 8);
++              if (ring->space < 0)
++                      ring->space += ring->Size;
++
++              if (ring->head != last_head) {
++                      end = jiffies + (HZ * 3);
++                      last_head = ring->head;
++              }
++
++              iters++;
++              if (time_before(end, jiffies)) {
++                      DRM_ERROR("space: %d wanted %d\n", ring->space, n);
++                      DRM_ERROR("lockup\n");
++                      goto out_wait_ring;
++              }
++              udelay(1);
++      }
++
++      out_wait_ring:
++      return iters;
++}
++
++static void i810_kernel_lost_context(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
++
++      ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
++      ring->tail = I810_READ(LP_RING + RING_TAIL);
++      ring->space = ring->head - (ring->tail + 8);
++      if (ring->space < 0)
++              ring->space += ring->Size;
++}
++
++static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int my_idx = 24;
++      u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
++      int i;
++
++      if (dma->buf_count > 1019) {
++              /* Not enough space in the status page for the freelist */
++              return -EINVAL;
++      }
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++              buf_priv->in_use = hw_status++;
++              buf_priv->my_use_idx = my_idx;
++              my_idx += 4;
++
++              *buf_priv->in_use = I810_BUF_FREE;
++
++              buf_priv->map.offset = buf->bus_address;
++              buf_priv->map.size = buf->total;
++              buf_priv->map.type = _DRM_AGP;
++              buf_priv->map.flags = 0;
++              buf_priv->map.mtrr = 0;
++
++              drm_core_ioremap(&buf_priv->map, dev);
++              buf_priv->kernel_virtual = buf_priv->map.handle;
++
++      }
++      return 0;
++}
++
++static int i810_dma_initialize(struct drm_device * dev,
++                             drm_i810_private_t * dev_priv,
++                             drm_i810_init_t * init)
++{
++      struct drm_map_list *r_list;
++      memset(dev_priv, 0, sizeof(drm_i810_private_t));
++
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              if (r_list->map &&
++                  r_list->map->type == _DRM_SHM &&
++                  r_list->map->flags & _DRM_CONTAINS_LOCK) {
++                      dev_priv->sarea_map = r_list->map;
++                      break;
++              }
++      }
++      if (!dev_priv->sarea_map) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not find sarea!\n");
++              return -EINVAL;
++      }
++      dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio_map) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not find mmio map!\n");
++              return -EINVAL;
++      }
++      dev->agp_buffer_token = init->buffers_offset;
++      dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
++      if (!dev->agp_buffer_map) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not find dma buffer map!\n");
++              return -EINVAL;
++      }
++
++      dev_priv->sarea_priv = (drm_i810_sarea_t *)
++          ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
++
++      dev_priv->ring.Start = init->ring_start;
++      dev_priv->ring.End = init->ring_end;
++      dev_priv->ring.Size = init->ring_size;
++
++      dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
++      dev_priv->ring.map.size = init->ring_size;
++      dev_priv->ring.map.type = _DRM_AGP;
++      dev_priv->ring.map.flags = 0;
++      dev_priv->ring.map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->ring.map, dev);
++
++      if (dev_priv->ring.map.handle == NULL) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("can not ioremap virtual address for"
++                        " ring buffer\n");
++              return -ENOMEM;
++      }
++
++      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++      dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
++
++      dev_priv->w = init->w;
++      dev_priv->h = init->h;
++      dev_priv->pitch = init->pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->front_offset = init->front_offset;
++
++      dev_priv->overlay_offset = init->overlay_offset;
++      dev_priv->overlay_physical = init->overlay_physical;
++
++      dev_priv->front_di1 = init->front_offset | init->pitch_bits;
++      dev_priv->back_di1 = init->back_offset | init->pitch_bits;
++      dev_priv->zi1 = init->depth_offset | init->pitch_bits;
++
++      /* Program Hardware Status Page */
++      dev_priv->hw_status_page =
++          pci_alloc_consistent(dev->pdev, PAGE_SIZE,
++                               &dev_priv->dma_status_page);
++      if (!dev_priv->hw_status_page) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("Can not allocate hardware status page\n");
++              return -ENOMEM;
++      }
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++      DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
++
++      I810_WRITE(0x02080, dev_priv->dma_status_page);
++      DRM_DEBUG("Enabled hardware status page\n");
++
++      /* Now we need to init our freelist */
++      if (i810_freelist_init(dev, dev_priv) != 0) {
++              dev->dev_private = (void *)dev_priv;
++              i810_dma_cleanup(dev);
++              DRM_ERROR("Not enough space in the status page for"
++                        " the freelist\n");
++              return -ENOMEM;
++      }
++      dev->dev_private = (void *)dev_priv;
++
++      return 0;
++}
++
++static int i810_dma_init(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv;
++      drm_i810_init_t *init = data;
++      int retcode = 0;
++
++      switch (init->func) {
++      case I810_INIT_DMA_1_4:
++              DRM_INFO("Using v1.4 init.\n");
++              dev_priv = drm_alloc(sizeof(drm_i810_private_t),
++                                   DRM_MEM_DRIVER);
++              if (dev_priv == NULL)
++                      return -ENOMEM;
++              retcode = i810_dma_initialize(dev, dev_priv, init);
++              break;
++
++      case I810_CLEANUP_DMA:
++              DRM_INFO("DMA Cleanup\n");
++              retcode = i810_dma_cleanup(dev);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return retcode;
++}
++
++/* Most efficient way to verify state for the i810 is as it is
++ * emitted.  Non-conformant state is silently dropped.
++ *
++ * Use 'volatile' & local var tmp to force the emitted values to be
++ * identical to the verified ones.
++ */
++static void i810EmitContextVerified(struct drm_device * dev,
++                                  volatile unsigned int *code)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      int i, j = 0;
++      unsigned int tmp;
++      RING_LOCALS;
++
++      BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
++
++      OUT_RING(GFX_OP_COLOR_FACTOR);
++      OUT_RING(code[I810_CTXREG_CF1]);
++
++      OUT_RING(GFX_OP_STIPPLE);
++      OUT_RING(code[I810_CTXREG_ST1]);
++
++      for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
++              tmp = code[i];
++
++              if ((tmp & (7 << 29)) == (3 << 29) &&
++                  (tmp & (0x1f << 24)) < (0x1d << 24)) {
++                      OUT_RING(tmp);
++                      j++;
++              } else
++                      printk("constext state dropped!!!\n");
++      }
++
++      if (j & 1)
++              OUT_RING(0);
++
++      ADVANCE_LP_RING();
++}
++
++static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      int i, j = 0;
++      unsigned int tmp;
++      RING_LOCALS;
++
++      BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
++
++      OUT_RING(GFX_OP_MAP_INFO);
++      OUT_RING(code[I810_TEXREG_MI1]);
++      OUT_RING(code[I810_TEXREG_MI2]);
++      OUT_RING(code[I810_TEXREG_MI3]);
++
++      for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
++              tmp = code[i];
++
++              if ((tmp & (7 << 29)) == (3 << 29) &&
++                  (tmp & (0x1f << 24)) < (0x1d << 24)) {
++                      OUT_RING(tmp);
++                      j++;
++              } else
++                      printk("texture state dropped!!!\n");
++      }
++
++      if (j & 1)
++              OUT_RING(0);
++
++      ADVANCE_LP_RING();
++}
++
++/* Need to do some additional checking when setting the dest buffer.
++ */
++static void i810EmitDestVerified(struct drm_device * dev,
++                               volatile unsigned int *code)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      unsigned int tmp;
++      RING_LOCALS;
++
++      BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
++
++      tmp = code[I810_DESTREG_DI1];
++      if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
++              OUT_RING(CMD_OP_DESTBUFFER_INFO);
++              OUT_RING(tmp);
++      } else
++              DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
++                        tmp, dev_priv->front_di1, dev_priv->back_di1);
++
++      /* invarient:
++       */
++      OUT_RING(CMD_OP_Z_BUFFER_INFO);
++      OUT_RING(dev_priv->zi1);
++
++      OUT_RING(GFX_OP_DESTBUFFER_VARS);
++      OUT_RING(code[I810_DESTREG_DV1]);
++
++      OUT_RING(GFX_OP_DRAWRECT_INFO);
++      OUT_RING(code[I810_DESTREG_DR1]);
++      OUT_RING(code[I810_DESTREG_DR2]);
++      OUT_RING(code[I810_DESTREG_DR3]);
++      OUT_RING(code[I810_DESTREG_DR4]);
++      OUT_RING(0);
++
++      ADVANCE_LP_RING();
++}
++
++static void i810EmitState(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++
++      DRM_DEBUG("%x\n", dirty);
++
++      if (dirty & I810_UPLOAD_BUFFERS) {
++              i810EmitDestVerified(dev, sarea_priv->BufferState);
++              sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
++      }
++
++      if (dirty & I810_UPLOAD_CTX) {
++              i810EmitContextVerified(dev, sarea_priv->ContextState);
++              sarea_priv->dirty &= ~I810_UPLOAD_CTX;
++      }
++
++      if (dirty & I810_UPLOAD_TEX0) {
++              i810EmitTexVerified(dev, sarea_priv->TexState[0]);
++              sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
++      }
++
++      if (dirty & I810_UPLOAD_TEX1) {
++              i810EmitTexVerified(dev, sarea_priv->TexState[1]);
++              sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
++      }
++}
++
++/* need to verify
++ */
++static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
++                                  unsigned int clear_color,
++                                  unsigned int clear_zval)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int pitch = dev_priv->pitch;
++      int cpp = 2;
++      int i;
++      RING_LOCALS;
++
++      if (dev_priv->current_page == 1) {
++              unsigned int tmp = flags;
++
++              flags &= ~(I810_FRONT | I810_BACK);
++              if (tmp & I810_FRONT)
++                      flags |= I810_BACK;
++              if (tmp & I810_BACK)
++                      flags |= I810_FRONT;
++      }
++
++      i810_kernel_lost_context(dev);
++
++      if (nbox > I810_NR_SAREA_CLIPRECTS)
++              nbox = I810_NR_SAREA_CLIPRECTS;
++
++      for (i = 0; i < nbox; i++, pbox++) {
++              unsigned int x = pbox->x1;
++              unsigned int y = pbox->y1;
++              unsigned int width = (pbox->x2 - x) * cpp;
++              unsigned int height = pbox->y2 - y;
++              unsigned int start = y * pitch + x * cpp;
++
++              if (pbox->x1 > pbox->x2 ||
++                  pbox->y1 > pbox->y2 ||
++                  pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
++                      continue;
++
++              if (flags & I810_FRONT) {
++                      BEGIN_LP_RING(6);
++                      OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
++                      OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
++                      OUT_RING((height << 16) | width);
++                      OUT_RING(start);
++                      OUT_RING(clear_color);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              }
++
++              if (flags & I810_BACK) {
++                      BEGIN_LP_RING(6);
++                      OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
++                      OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
++                      OUT_RING((height << 16) | width);
++                      OUT_RING(dev_priv->back_offset + start);
++                      OUT_RING(clear_color);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              }
++
++              if (flags & I810_DEPTH) {
++                      BEGIN_LP_RING(6);
++                      OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
++                      OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
++                      OUT_RING((height << 16) | width);
++                      OUT_RING(dev_priv->depth_offset + start);
++                      OUT_RING(clear_zval);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              }
++      }
++}
++
++static void i810_dma_dispatch_swap(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int pitch = dev_priv->pitch;
++      int cpp = 2;
++      int i;
++      RING_LOCALS;
++
++      DRM_DEBUG("swapbuffers\n");
++
++      i810_kernel_lost_context(dev);
++
++      if (nbox > I810_NR_SAREA_CLIPRECTS)
++              nbox = I810_NR_SAREA_CLIPRECTS;
++
++      for (i = 0; i < nbox; i++, pbox++) {
++              unsigned int w = pbox->x2 - pbox->x1;
++              unsigned int h = pbox->y2 - pbox->y1;
++              unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
++              unsigned int start = dst;
++
++              if (pbox->x1 > pbox->x2 ||
++                  pbox->y1 > pbox->y2 ||
++                  pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
++                      continue;
++
++              BEGIN_LP_RING(6);
++              OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
++              OUT_RING(pitch | (0xCC << 16));
++              OUT_RING((h << 16) | (w * cpp));
++              if (dev_priv->current_page == 0)
++                      OUT_RING(dev_priv->front_offset + start);
++              else
++                      OUT_RING(dev_priv->back_offset + start);
++              OUT_RING(pitch);
++              if (dev_priv->current_page == 0)
++                      OUT_RING(dev_priv->back_offset + start);
++              else
++                      OUT_RING(dev_priv->front_offset + start);
++              ADVANCE_LP_RING();
++      }
++}
++
++static void i810_dma_dispatch_vertex(struct drm_device * dev,
++                                   struct drm_buf * buf, int discard, int used)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      struct drm_clip_rect *box = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      unsigned long address = (unsigned long)buf->bus_address;
++      unsigned long start = address - dev->agp->base;
++      int i = 0;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      if (nbox > I810_NR_SAREA_CLIPRECTS)
++              nbox = I810_NR_SAREA_CLIPRECTS;
++
++      if (used > 4 * 1024)
++              used = 0;
++
++      if (sarea_priv->dirty)
++              i810EmitState(dev);
++
++      if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
++              unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
++
++              *(u32 *) buf_priv->kernel_virtual =
++                  ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
++
++              if (used & 4) {
++                      *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
++                      used += 4;
++              }
++
++              i810_unmap_buffer(buf);
++      }
++
++      if (used) {
++              do {
++                      if (i < nbox) {
++                              BEGIN_LP_RING(4);
++                              OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
++                                       SC_ENABLE);
++                              OUT_RING(GFX_OP_SCISSOR_INFO);
++                              OUT_RING(box[i].x1 | (box[i].y1 << 16));
++                              OUT_RING((box[i].x2 -
++                                        1) | ((box[i].y2 - 1) << 16));
++                              ADVANCE_LP_RING();
++                      }
++
++                      BEGIN_LP_RING(4);
++                      OUT_RING(CMD_OP_BATCH_BUFFER);
++                      OUT_RING(start | BB1_PROTECTED);
++                      OUT_RING(start + used - 4);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++
++              } while (++i < nbox);
++      }
++
++      if (discard) {
++              dev_priv->counter++;
++
++              (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
++                            I810_BUF_HARDWARE);
++
++              BEGIN_LP_RING(8);
++              OUT_RING(CMD_STORE_DWORD_IDX);
++              OUT_RING(20);
++              OUT_RING(dev_priv->counter);
++              OUT_RING(CMD_STORE_DWORD_IDX);
++              OUT_RING(buf_priv->my_use_idx);
++              OUT_RING(I810_BUF_FREE);
++              OUT_RING(CMD_REPORT_HEAD);
++              OUT_RING(0);
++              ADVANCE_LP_RING();
++      }
++}
++
++static void i810_dma_dispatch_flip(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      int pitch = dev_priv->pitch;
++      RING_LOCALS;
++
++      DRM_DEBUG("page=%d pfCurrentPage=%d\n",
++                dev_priv->current_page,
++                dev_priv->sarea_priv->pf_current_page);
++
++      i810_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(2);
++      OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
++      /* On i815 at least ASYNC is buggy */
++      /* pitch<<5 is from 11.2.8 p158,
++         its the pitch / 8 then left shifted 8,
++         so (pitch >> 3) << 8 */
++      OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
++      if (dev_priv->current_page == 0) {
++              OUT_RING(dev_priv->back_offset);
++              dev_priv->current_page = 1;
++      } else {
++              OUT_RING(dev_priv->front_offset);
++              dev_priv->current_page = 0;
++      }
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      BEGIN_LP_RING(2);
++      OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
++
++}
++
++static void i810_dma_quiescent(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(4);
++      OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
++      OUT_RING(CMD_REPORT_HEAD);
++      OUT_RING(0);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      i810_wait_ring(dev, dev_priv->ring.Size - 8);
++}
++
++static int i810_flush_queue(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      int i, ret = 0;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(2);
++      OUT_RING(CMD_REPORT_HEAD);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      i810_wait_ring(dev, dev_priv->ring.Size - 8);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++              int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
++                                 I810_BUF_FREE);
++
++              if (used == I810_BUF_HARDWARE)
++                      DRM_DEBUG("reclaimed from HARDWARE\n");
++              if (used == I810_BUF_CLIENT)
++                      DRM_DEBUG("still on client\n");
++      }
++
++      return ret;
++}
++
++/* Must be called with the lock held */
++static void i810_reclaim_buffers(struct drm_device *dev,
++                               struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      if (!dma)
++              return;
++      if (!dev->dev_private)
++              return;
++      if (!dma->buflist)
++              return;
++
++      i810_flush_queue(dev);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++
++              if (buf->file_priv == file_priv && buf_priv) {
++                      int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
++                                         I810_BUF_FREE);
++
++                      if (used == I810_BUF_CLIENT)
++                              DRM_DEBUG("reclaimed from client\n");
++                      if (buf_priv->currently_mapped == I810_BUF_MAPPED)
++                              buf_priv->currently_mapped = I810_BUF_UNMAPPED;
++              }
++      }
++}
++
++static int i810_flush_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv)
++{
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      i810_flush_queue(dev);
++      return 0;
++}
++
++static int i810_dma_vertex(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i810_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("idx %d used %d discard %d\n",
++                vertex->idx, vertex->used, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx > dma->buf_count)
++              return -EINVAL;
++
++      i810_dma_dispatch_vertex(dev,
++                               dma->buflist[vertex->idx],
++                               vertex->discard, vertex->used);
++
++      atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
++      atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++      sarea_priv->last_enqueue = dev_priv->counter - 1;
++      sarea_priv->last_dispatch = (int)hw_status[5];
++
++      return 0;
++}
++
++static int i810_clear_bufs(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv)
++{
++      drm_i810_clear_t *clear = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* GH: Someone's doing nasty things... */
++      if (!dev->dev_private) {
++              return -EINVAL;
++      }
++
++      i810_dma_dispatch_clear(dev, clear->flags,
++                              clear->clear_color, clear->clear_depth);
++      return 0;
++}
++
++static int i810_swap_bufs(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      i810_dma_dispatch_swap(dev);
++      return 0;
++}
++
++static int i810_getage(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++
++      sarea_priv->last_dispatch = (int)hw_status[5];
++      return 0;
++}
++
++static int i810_getbuf(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      int retcode = 0;
++      drm_i810_dma_t *d = data;
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      d->granted = 0;
++
++      retcode = i810_dma_get_buffer(dev, d, file_priv);
++
++      DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
++                current->pid, retcode, d->granted);
++
++      sarea_priv->last_dispatch = (int)hw_status[5];
++
++      return retcode;
++}
++
++static int i810_copybuf(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      /* Never copy - 2.4.x doesn't need it */
++      return 0;
++}
++
++static int i810_docopy(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      /* Never copy - 2.4.x doesn't need it */
++      return 0;
++}
++
++static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
++                               unsigned int last_render)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++      drm_i810_buf_priv_t *buf_priv = buf->dev_private;
++      drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned long address = (unsigned long)buf->bus_address;
++      unsigned long start = address - dev->agp->base;
++      int u;
++      RING_LOCALS;
++
++      i810_kernel_lost_context(dev);
++
++      u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
++      if (u != I810_BUF_CLIENT) {
++              DRM_DEBUG("MC found buffer that isn't mine!\n");
++      }
++
++      if (used > 4 * 1024)
++              used = 0;
++
++      sarea_priv->dirty = 0x7f;
++
++      DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
++
++      dev_priv->counter++;
++      DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
++      DRM_DEBUG("start : %lx\n", start);
++      DRM_DEBUG("used : %d\n", used);
++      DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
++
++      if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
++              if (used & 4) {
++                      *(u32 *) ((char *) buf_priv->virtual + used) = 0;
++                      used += 4;
++              }
++
++              i810_unmap_buffer(buf);
++      }
++      BEGIN_LP_RING(4);
++      OUT_RING(CMD_OP_BATCH_BUFFER);
++      OUT_RING(start | BB1_PROTECTED);
++      OUT_RING(start + used - 4);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      BEGIN_LP_RING(8);
++      OUT_RING(CMD_STORE_DWORD_IDX);
++      OUT_RING(buf_priv->my_use_idx);
++      OUT_RING(I810_BUF_FREE);
++      OUT_RING(0);
++
++      OUT_RING(CMD_STORE_DWORD_IDX);
++      OUT_RING(16);
++      OUT_RING(last_render);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++}
++
++static int i810_dma_mc(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      u32 *hw_status = dev_priv->hw_status_page;
++      drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i810_mc_t *mc = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (mc->idx >= dma->buf_count || mc->idx < 0)
++              return -EINVAL;
++
++      i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
++                           mc->last_render);
++
++      atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
++      atomic_inc(&dev->counts[_DRM_STAT_DMA]);
++      sarea_priv->last_enqueue = dev_priv->counter - 1;
++      sarea_priv->last_dispatch = (int)hw_status[5];
++
++      return 0;
++}
++
++static int i810_rstatus(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++
++      return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
++}
++
++static int i810_ov0_info(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++      drm_i810_overlay_t *ov = data;
++
++      ov->offset = dev_priv->overlay_offset;
++      ov->physical = dev_priv->overlay_physical;
++
++      return 0;
++}
++
++static int i810_fstatus(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      return I810_READ(0x30008);
++}
++
++static int i810_ov0_flip(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++      //Tell the overlay to update
++      I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
++
++      return 0;
++}
++
++/* Not sure why this isn't set all the time:
++ */
++static void i810_do_init_pageflip(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      dev_priv->page_flipping = 1;
++      dev_priv->current_page = 0;
++      dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
++}
++
++static int i810_do_cleanup_pageflip(struct drm_device * dev)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      if (dev_priv->current_page != 0)
++              i810_dma_dispatch_flip(dev);
++
++      dev_priv->page_flipping = 0;
++      return 0;
++}
++
++static int i810_flip_bufs(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      drm_i810_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv->page_flipping)
++              i810_do_init_pageflip(dev);
++
++      i810_dma_dispatch_flip(dev);
++      return 0;
++}
++
++int i810_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      /* i810 has 4 more counters */
++      dev->counters += 4;
++      dev->types[6] = _DRM_STAT_IRQ;
++      dev->types[7] = _DRM_STAT_PRIMARY;
++      dev->types[8] = _DRM_STAT_SECONDARY;
++      dev->types[9] = _DRM_STAT_DMA;
++
++      return 0;
++}
++
++void i810_driver_lastclose(struct drm_device * dev)
++{
++      i810_dma_cleanup(dev);
++}
++
++void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
++{
++      if (dev->dev_private) {
++              drm_i810_private_t *dev_priv = dev->dev_private;
++              if (dev_priv->page_flipping) {
++                      i810_do_cleanup_pageflip(dev);
++              }
++      }
++}
++
++void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
++                                      struct drm_file *file_priv)
++{
++      i810_reclaim_buffers(dev, file_priv);
++}
++
++int i810_driver_dma_quiescent(struct drm_device * dev)
++{
++      i810_dma_quiescent(dev);
++      return 0;
++}
++
++struct drm_ioctl_desc i810_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
++};
++
++int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * All Intel graphics chipsets are treated as AGP, even if they are really
++ * PCI-e.
++ *
++ * \param dev   The device to be tested.
++ *
++ * \returns
++ * A value of 1 is always returned to indicate every i810 is AGP.
++ */
++int i810_driver_device_is_agp(struct drm_device * dev)
++{
++      return 1;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drm.h git-nokia/drivers/gpu/drm-tungsten/i810_drm.h
+--- git/drivers/gpu/drm-tungsten/i810_drm.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drm.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,263 @@
++#ifndef _I810_DRM_H_
++#define _I810_DRM_H_
++
++/* WARNING: These defines must be the same as what the Xserver uses.
++ * if you change them, you must change the defines in the Xserver.
++ */
++
++#ifndef _I810_DEFINES_
++#define _I810_DEFINES_
++
++#define I810_DMA_BUF_ORDER            12
++#define I810_DMA_BUF_SZ               (1<<I810_DMA_BUF_ORDER)
++#define I810_DMA_BUF_NR               256
++#define I810_NR_SAREA_CLIPRECTS       8
++
++/* Each region is a minimum of 64k, and there are at most 64 of them.
++ */
++#define I810_NR_TEX_REGIONS 64
++#define I810_LOG_MIN_TEX_REGION_SIZE 16
++#endif
++
++#define I810_UPLOAD_TEX0IMAGE  0x1    /* handled clientside */
++#define I810_UPLOAD_TEX1IMAGE  0x2    /* handled clientside */
++#define I810_UPLOAD_CTX        0x4
++#define I810_UPLOAD_BUFFERS    0x8
++#define I810_UPLOAD_TEX0       0x10
++#define I810_UPLOAD_TEX1       0x20
++#define I810_UPLOAD_CLIPRECTS  0x40
++
++/* Indices into buf.Setup where various bits of state are mirrored per
++ * context and per buffer.  These can be fired at the card as a unit,
++ * or in a piecewise fashion as required.
++ */
++
++/* Destbuffer state
++ *    - backbuffer linear offset and pitch -- invariant in the current dri
++ *    - zbuffer linear offset and pitch -- also invariant
++ *    - drawing origin in back and depth buffers.
++ *
++ * Keep the depth/back buffer state here to accommodate private buffers
++ * in the future.
++ */
++#define I810_DESTREG_DI0  0   /* CMD_OP_DESTBUFFER_INFO (2 dwords) */
++#define I810_DESTREG_DI1  1
++#define I810_DESTREG_DV0  2   /* GFX_OP_DESTBUFFER_VARS (2 dwords) */
++#define I810_DESTREG_DV1  3
++#define I810_DESTREG_DR0  4   /* GFX_OP_DRAWRECT_INFO (4 dwords) */
++#define I810_DESTREG_DR1  5
++#define I810_DESTREG_DR2  6
++#define I810_DESTREG_DR3  7
++#define I810_DESTREG_DR4  8
++#define I810_DEST_SETUP_SIZE 10
++
++/* Context state
++ */
++#define I810_CTXREG_CF0   0   /* GFX_OP_COLOR_FACTOR */
++#define I810_CTXREG_CF1   1
++#define I810_CTXREG_ST0   2   /* GFX_OP_STIPPLE */
++#define I810_CTXREG_ST1   3
++#define I810_CTXREG_VF    4   /* GFX_OP_VERTEX_FMT */
++#define I810_CTXREG_MT    5   /* GFX_OP_MAP_TEXELS */
++#define I810_CTXREG_MC0   6   /* GFX_OP_MAP_COLOR_STAGES - stage 0 */
++#define I810_CTXREG_MC1   7   /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
++#define I810_CTXREG_MC2   8   /* GFX_OP_MAP_COLOR_STAGES - stage 2 */
++#define I810_CTXREG_MA0   9   /* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
++#define I810_CTXREG_MA1   10  /* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
++#define I810_CTXREG_MA2   11  /* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
++#define I810_CTXREG_SDM   12  /* GFX_OP_SRC_DEST_MONO */
++#define I810_CTXREG_FOG   13  /* GFX_OP_FOG_COLOR */
++#define I810_CTXREG_B1    14  /* GFX_OP_BOOL_1 */
++#define I810_CTXREG_B2    15  /* GFX_OP_BOOL_2 */
++#define I810_CTXREG_LCS   16  /* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
++#define I810_CTXREG_PV    17  /* GFX_OP_PV_RULE -- Invariant! */
++#define I810_CTXREG_ZA    18  /* GFX_OP_ZBIAS_ALPHAFUNC */
++#define I810_CTXREG_AA    19  /* GFX_OP_ANTIALIAS */
++#define I810_CTX_SETUP_SIZE 20
++
++/* Texture state (per tex unit)
++ */
++#define I810_TEXREG_MI0  0    /* GFX_OP_MAP_INFO (4 dwords) */
++#define I810_TEXREG_MI1  1
++#define I810_TEXREG_MI2  2
++#define I810_TEXREG_MI3  3
++#define I810_TEXREG_MF   4    /* GFX_OP_MAP_FILTER */
++#define I810_TEXREG_MLC  5    /* GFX_OP_MAP_LOD_CTL */
++#define I810_TEXREG_MLL  6    /* GFX_OP_MAP_LOD_LIMITS */
++#define I810_TEXREG_MCS  7    /* GFX_OP_MAP_COORD_SETS ??? */
++#define I810_TEX_SETUP_SIZE 8
++
++/* Flags for clear ioctl
++ */
++#define I810_FRONT   0x1
++#define I810_BACK    0x2
++#define I810_DEPTH   0x4
++
++typedef enum _drm_i810_init_func {
++      I810_INIT_DMA = 0x01,
++      I810_CLEANUP_DMA = 0x02,
++      I810_INIT_DMA_1_4 = 0x03
++} drm_i810_init_func_t;
++
++/* This is the init structure after v1.2 */
++typedef struct _drm_i810_init {
++      drm_i810_init_func_t func;
++      unsigned int mmio_offset;
++      unsigned int buffers_offset;
++      int sarea_priv_offset;
++      unsigned int ring_start;
++      unsigned int ring_end;
++      unsigned int ring_size;
++      unsigned int front_offset;
++      unsigned int back_offset;
++      unsigned int depth_offset;
++      unsigned int overlay_offset;
++      unsigned int overlay_physical;
++      unsigned int w;
++      unsigned int h;
++      unsigned int pitch;
++      unsigned int pitch_bits;
++} drm_i810_init_t;
++
++/* Warning: If you change the SAREA structure you must change the Xserver
++ * structure as well */
++
++typedef struct _drm_i810_tex_region {
++      unsigned char next, prev;       /* indices to form a circular LRU  */
++      unsigned char in_use;   /* owned by a client, or free? */
++      int age;                /* tracked by clients to update local LRU's */
++} drm_i810_tex_region_t;
++
++typedef struct _drm_i810_sarea {
++      unsigned int ContextState[I810_CTX_SETUP_SIZE];
++      unsigned int BufferState[I810_DEST_SETUP_SIZE];
++      unsigned int TexState[2][I810_TEX_SETUP_SIZE];
++      unsigned int dirty;
++
++      unsigned int nbox;
++      struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
++
++      /* Maintain an LRU of contiguous regions of texture space.  If
++       * you think you own a region of texture memory, and it has an
++       * age different to the one you set, then you are mistaken and
++       * it has been stolen by another client.  If global texAge
++       * hasn't changed, there is no need to walk the list.
++       *
++       * These regions can be used as a proxy for the fine-grained
++       * texture information of other clients - by maintaining them
++       * in the same lru which is used to age their own textures,
++       * clients have an approximate lru for the whole of global
++       * texture space, and can make informed decisions as to which
++       * areas to kick out.  There is no need to choose whether to
++       * kick out your own texture or someone else's - simply eject
++       * them all in LRU order.
++       */
++
++      drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS + 1];
++      /* Last elt is sentinel */
++      int texAge;             /* last time texture was uploaded */
++      int last_enqueue;       /* last time a buffer was enqueued */
++      int last_dispatch;      /* age of the most recently dispatched buffer */
++      int last_quiescent;     /*  */
++      int ctxOwner;           /* last context to upload state */
++
++      int vertex_prim;
++
++      int pf_enabled;         /* is pageflipping allowed? */
++      int pf_active;
++      int pf_current_page;    /* which buffer is being displayed? */
++} drm_i810_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (xf86drmMga.h)
++ */
++
++/* i810 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_I810_INIT         0x00
++#define DRM_I810_VERTEX               0x01
++#define DRM_I810_CLEAR                0x02
++#define DRM_I810_FLUSH                0x03
++#define DRM_I810_GETAGE               0x04
++#define DRM_I810_GETBUF               0x05
++#define DRM_I810_SWAP         0x06
++#define DRM_I810_COPY         0x07
++#define DRM_I810_DOCOPY               0x08
++#define DRM_I810_OV0INFO      0x09
++#define DRM_I810_FSTATUS      0x0a
++#define DRM_I810_OV0FLIP      0x0b
++#define DRM_I810_MC           0x0c
++#define DRM_I810_RSTATUS      0x0d
++#define DRM_I810_FLIP         0x0e
++
++#define DRM_IOCTL_I810_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_I810_INIT, drm_i810_init_t)
++#define DRM_IOCTL_I810_VERTEX         DRM_IOW( DRM_COMMAND_BASE + DRM_I810_VERTEX, drm_i810_vertex_t)
++#define DRM_IOCTL_I810_CLEAR          DRM_IOW( DRM_COMMAND_BASE + DRM_I810_CLEAR, drm_i810_clear_t)
++#define DRM_IOCTL_I810_FLUSH          DRM_IO(  DRM_COMMAND_BASE + DRM_I810_FLUSH)
++#define DRM_IOCTL_I810_GETAGE         DRM_IO(  DRM_COMMAND_BASE + DRM_I810_GETAGE)
++#define DRM_IOCTL_I810_GETBUF         DRM_IOWR(DRM_COMMAND_BASE + DRM_I810_GETBUF, drm_i810_dma_t)
++#define DRM_IOCTL_I810_SWAP           DRM_IO(  DRM_COMMAND_BASE + DRM_I810_SWAP)
++#define DRM_IOCTL_I810_COPY           DRM_IOW( DRM_COMMAND_BASE + DRM_I810_COPY, drm_i810_copy_t)
++#define DRM_IOCTL_I810_DOCOPY         DRM_IO(  DRM_COMMAND_BASE + DRM_I810_DOCOPY)
++#define DRM_IOCTL_I810_OV0INFO                DRM_IOR( DRM_COMMAND_BASE + DRM_I810_OV0INFO, drm_i810_overlay_t)
++#define DRM_IOCTL_I810_FSTATUS                DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FSTATUS)
++#define DRM_IOCTL_I810_OV0FLIP                DRM_IO ( DRM_COMMAND_BASE + DRM_I810_OV0FLIP)
++#define DRM_IOCTL_I810_MC             DRM_IOW( DRM_COMMAND_BASE + DRM_I810_MC, drm_i810_mc_t)
++#define DRM_IOCTL_I810_RSTATUS                DRM_IO ( DRM_COMMAND_BASE + DRM_I810_RSTATUS)
++#define DRM_IOCTL_I810_FLIP             DRM_IO ( DRM_COMMAND_BASE + DRM_I810_FLIP)
++
++typedef struct _drm_i810_clear {
++      int clear_color;
++      int clear_depth;
++      int flags;
++} drm_i810_clear_t;
++
++/* These may be placeholders if we have more cliprects than
++ * I810_NR_SAREA_CLIPRECTS.  In that case, the client sets discard to
++ * false, indicating that the buffer will be dispatched again with a
++ * new set of cliprects.
++ */
++typedef struct _drm_i810_vertex {
++      int idx;                /* buffer index */
++      int used;               /* nr bytes in use */
++      int discard;            /* client is finished with the buffer? */
++} drm_i810_vertex_t;
++
++typedef struct _drm_i810_copy_t {
++      int idx;                /* buffer index */
++      int used;               /* nr bytes in use */
++      void *address;          /* Address to copy from */
++} drm_i810_copy_t;
++
++#define PR_TRIANGLES         (0x0<<18)
++#define PR_TRISTRIP_0        (0x1<<18)
++#define PR_TRISTRIP_1        (0x2<<18)
++#define PR_TRIFAN            (0x3<<18)
++#define PR_POLYGON           (0x4<<18)
++#define PR_LINES             (0x5<<18)
++#define PR_LINESTRIP         (0x6<<18)
++#define PR_RECTS             (0x7<<18)
++#define PR_MASK              (0x7<<18)
++
++typedef struct drm_i810_dma {
++      void *virtual;
++      int request_idx;
++      int request_size;
++      int granted;
++} drm_i810_dma_t;
++
++typedef struct _drm_i810_overlay_t {
++      unsigned int offset;    /* Address of the Overlay Regs */
++      unsigned int physical;
++} drm_i810_overlay_t;
++
++typedef struct _drm_i810_mc {
++      int idx;                /* buffer index */
++      int used;               /* nr bytes in use */
++      int num_blocks;         /* number of GFXBlocks */
++      int *length;            /* List of lengths for GFXBlocks (FUTURE) */
++      unsigned int last_render;       /* Last Render Request */
++} drm_i810_mc_t;
++
++#endif                                /* _I810_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drv.c git-nokia/drivers/gpu/drm-tungsten/i810_drv.c
+--- git/drivers/gpu/drm-tungsten/i810_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,104 @@
++/* i810_drv.c -- I810 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Jeff Hartmann <jhartmann@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i810_drm.h"
++#include "i810_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      i810_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
++          DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
++      .dev_priv_size = sizeof(drm_i810_buf_priv_t),
++      .load = i810_driver_load,
++      .lastclose = i810_driver_lastclose,
++      .preclose = i810_driver_preclose,
++      .device_is_agp = i810_driver_device_is_agp,
++      .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
++      .dma_quiescent = i810_driver_dma_quiescent,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = i810_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++              },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init i810_init(void)
++{
++      driver.num_ioctls = i810_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit i810_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(i810_init);
++module_exit(i810_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/i810_drv.h git-nokia/drivers/gpu/drm-tungsten/i810_drv.h
+--- git/drivers/gpu/drm-tungsten/i810_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i810_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,242 @@
++/* i810_drv.h -- Private header for the i810 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
++ *        Jeff Hartmann <jhartmann@valinux.com>
++ *
++ */
++
++#ifndef _I810_DRV_H_
++#define _I810_DRV_H_
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "VA Linux Systems Inc."
++
++#define DRIVER_NAME           "i810"
++#define DRIVER_DESC           "Intel i810"
++#define DRIVER_DATE           "20030605"
++
++/* Interface history
++ *
++ * 1.1   - XFree86 4.1
++ * 1.2   - XvMC interfaces
++ *       - XFree86 4.2
++ * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
++ *       - Remove requirement for interrupt (leave stubs again)
++ * 1.3   - Add page flipping.
++ * 1.4   - fix DRM interface
++ */
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          4
++#define DRIVER_PATCHLEVEL     0
++
++typedef struct drm_i810_buf_priv {
++      u32 *in_use;
++      int my_use_idx;
++      int currently_mapped;
++      void *virtual;
++      void *kernel_virtual;
++      drm_local_map_t map;
++} drm_i810_buf_priv_t;
++
++typedef struct _drm_i810_ring_buffer {
++      int tail_mask;
++      unsigned long Start;
++      unsigned long End;
++      unsigned long Size;
++      u8 *virtual_start;
++      int head;
++      int tail;
++      int space;
++      drm_local_map_t map;
++} drm_i810_ring_buffer_t;
++
++typedef struct drm_i810_private {
++      struct drm_map *sarea_map;
++      struct drm_map *mmio_map;
++
++      drm_i810_sarea_t *sarea_priv;
++      drm_i810_ring_buffer_t ring;
++
++      void *hw_status_page;
++      unsigned long counter;
++
++      dma_addr_t dma_status_page;
++
++      struct drm_buf *mmap_buffer;
++
++      u32 front_di1, back_di1, zi1;
++
++      int back_offset;
++      int depth_offset;
++      int overlay_offset;
++      int overlay_physical;
++      int w, h;
++      int pitch;
++      int back_pitch;
++      int depth_pitch;
++
++      int do_boxes;
++      int dma_used;
++
++      int current_page;
++      int page_flipping;
++
++      wait_queue_head_t irq_queue;
++      atomic_t irq_received;
++      atomic_t irq_emitted;
++
++      int front_offset;
++} drm_i810_private_t;
++
++                              /* i810_dma.c */
++extern int i810_driver_dma_quiescent(struct drm_device * dev);
++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
++                                             struct drm_file *file_priv);
++extern int i810_driver_load(struct drm_device *, unsigned long flags);
++extern void i810_driver_lastclose(struct drm_device * dev);
++extern void i810_driver_preclose(struct drm_device * dev,
++                               struct drm_file *file_priv);
++extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
++                                             struct drm_file *file_priv);
++extern int i810_driver_device_is_agp(struct drm_device * dev);
++
++extern struct drm_ioctl_desc i810_ioctls[];
++extern int i810_max_ioctl;
++
++#define I810_BASE(reg)                ((unsigned long) \
++                              dev_priv->mmio_map->handle)
++#define I810_ADDR(reg)                (I810_BASE(reg) + reg)
++#define I810_DEREF(reg)               *(__volatile__ int *)I810_ADDR(reg)
++#define I810_READ(reg)                I810_DEREF(reg)
++#define I810_WRITE(reg,val)   do { I810_DEREF(reg) = val; } while (0)
++#define I810_DEREF16(reg)     *(__volatile__ u16 *)I810_ADDR(reg)
++#define I810_READ16(reg)      I810_DEREF16(reg)
++#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
++
++#define I810_VERBOSE 0
++#define RING_LOCALS   unsigned int outring, ringmask; \
++                        volatile char *virt;
++
++#define BEGIN_LP_RING(n) do {                                         \
++      if (I810_VERBOSE)                                               \
++              DRM_DEBUG("BEGIN_LP_RING(%d)\n", n);                    \
++      if (dev_priv->ring.space < n*4)                                 \
++              i810_wait_ring(dev, n*4);                               \
++      dev_priv->ring.space -= n*4;                                    \
++      outring = dev_priv->ring.tail;                                  \
++      ringmask = dev_priv->ring.tail_mask;                            \
++      virt = dev_priv->ring.virtual_start;                            \
++} while (0)
++
++#define ADVANCE_LP_RING() do {                                        \
++      if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");       \
++      dev_priv->ring.tail = outring;                          \
++      I810_WRITE(LP_RING + RING_TAIL, outring);               \
++} while(0)
++
++#define OUT_RING(n) do {                                              \
++      if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
++      *(volatile unsigned int *)(virt + outring) = n;                 \
++      outring += 4;                                                   \
++      outring &= ringmask;                                            \
++} while (0)
++
++#define GFX_OP_USER_INTERRUPT         ((0<<29)|(2<<23))
++#define GFX_OP_BREAKPOINT_INTERRUPT   ((0<<29)|(1<<23))
++#define CMD_REPORT_HEAD                       (7<<23)
++#define CMD_STORE_DWORD_IDX           ((0x21<<23) | 0x1)
++#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
++
++#define INST_PARSER_CLIENT   0x00000000
++#define INST_OP_FLUSH        0x02000000
++#define INST_FLUSH_MAP_CACHE 0x00000001
++
++#define BB1_START_ADDR_MASK   (~0x7)
++#define BB1_PROTECTED         (1<<0)
++#define BB1_UNPROTECTED       (0<<0)
++#define BB2_END_ADDR_MASK     (~0x7)
++
++#define I810REG_HWSTAM                0x02098
++#define I810REG_INT_IDENTITY_R        0x020a4
++#define I810REG_INT_MASK_R    0x020a8
++#define I810REG_INT_ENABLE_R  0x020a0
++
++#define LP_RING                       0x2030
++#define HP_RING                       0x2040
++#define RING_TAIL             0x00
++#define TAIL_ADDR             0x000FFFF8
++#define RING_HEAD             0x04
++#define HEAD_WRAP_COUNT               0xFFE00000
++#define HEAD_WRAP_ONE         0x00200000
++#define HEAD_ADDR             0x001FFFFC
++#define RING_START            0x08
++#define START_ADDR            0x00FFFFF8
++#define RING_LEN              0x0C
++#define RING_NR_PAGES         0x000FF000
++#define RING_REPORT_MASK      0x00000006
++#define RING_REPORT_64K               0x00000002
++#define RING_REPORT_128K      0x00000004
++#define RING_NO_REPORT                0x00000000
++#define RING_VALID_MASK               0x00000001
++#define RING_VALID            0x00000001
++#define RING_INVALID          0x00000000
++
++#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define SC_UPDATE_SCISSOR       (0x1<<1)
++#define SC_ENABLE_MASK          (0x1<<0)
++#define SC_ENABLE               (0x1<<0)
++
++#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
++#define SCI_YMIN_MASK      (0xffff<<16)
++#define SCI_XMIN_MASK      (0xffff<<0)
++#define SCI_YMAX_MASK      (0xffff<<16)
++#define SCI_XMAX_MASK      (0xffff<<0)
++
++#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
++#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
++#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x2)
++#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
++#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
++#define GFX_OP_PRIMITIVE         ((0x3<<29)|(0x1f<<24))
++
++#define CMD_OP_Z_BUFFER_INFO     ((0x0<<29)|(0x16<<23))
++#define CMD_OP_DESTBUFFER_INFO   ((0x0<<29)|(0x15<<23))
++#define CMD_OP_FRONTBUFFER_INFO  ((0x0<<29)|(0x14<<23))
++#define CMD_OP_WAIT_FOR_EVENT    ((0x0<<29)|(0x03<<23))
++
++#define BR00_BITBLT_CLIENT   0x40000000
++#define BR00_OP_COLOR_BLT    0x10000000
++#define BR00_OP_SRC_COPY_BLT 0x10C00000
++#define BR13_SOLID_PATTERN   0x80000000
++
++#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
++#define WAIT_FOR_PLANE_A_FLIP      (1<<2)
++#define WAIT_FOR_VBLANK (1<<3)
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_buffer.c git-nokia/drivers/gpu/drm-tungsten/i915_buffer.c
+--- git/drivers/gpu/drm-tungsten/i915_buffer.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_buffer.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,303 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
++{
++      return drm_agp_init_ttm(dev);
++}
++
++int i915_fence_type(struct drm_buffer_object *bo,
++                   uint32_t *fclass,
++                   uint32_t *type)
++{
++      if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
++              *type = 3;
++      else
++              *type = 1;
++      return 0;
++}
++
++int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
++{
++      /*
++       * FIXME: Only emit once per batchbuffer submission.
++       */
++
++      uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
++
++      if (flags & DRM_BO_FLAG_READ)
++              flush_cmd |= MI_READ_FLUSH;
++      if (flags & DRM_BO_FLAG_EXE)
++              flush_cmd |= MI_EXE_FLUSH;
++
++      return i915_emit_mi_flush(dev, flush_cmd);
++}
++
++int i915_init_mem_type(struct drm_device *dev, uint32_t type,
++                     struct drm_mem_type_manager *man)
++{
++      switch (type) {
++      case DRM_BO_MEM_LOCAL:
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_CACHED;
++              man->drm_bus_maptype = 0;
++              man->gpu_offset = 0;
++              break;
++      case DRM_BO_MEM_TT:
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_AGP;
++              man->gpu_offset = 0;
++              break;
++      case DRM_BO_MEM_PRIV0:
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_AGP;
++              man->gpu_offset = 0;
++              break;
++      default:
++              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++/*
++ * i915_evict_flags:
++ *
++ * @bo: the buffer object to be evicted
++ *
++ * Return the bo flags for a buffer which is not mapped to the hardware.
++ * These will be placed in proposed_flags so that when the move is
++ * finished, they'll end up in bo->mem.flags
++ */
++uint64_t i915_evict_flags(struct drm_buffer_object *bo)
++{
++      switch (bo->mem.mem_type) {
++      case DRM_BO_MEM_LOCAL:
++      case DRM_BO_MEM_TT:
++              return DRM_BO_FLAG_MEM_LOCAL;
++      default:
++              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
++      }
++}
++
++#if 0 /* See comment below */
++
++static void i915_emit_copy_blit(struct drm_device * dev,
++                              uint32_t src_offset,
++                              uint32_t dst_offset,
++                              uint32_t pages, int direction)
++{
++      uint32_t cur_pages;
++      uint32_t stride = PAGE_SIZE;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      if (!dev_priv)
++              return;
++
++      i915_kernel_lost_context(dev);
++      while (pages > 0) {
++              cur_pages = pages;
++              if (cur_pages > 2048)
++                      cur_pages = 2048;
++              pages -= cur_pages;
++
++              BEGIN_LP_RING(6);
++              OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
++                       XY_SRC_COPY_BLT_WRITE_RGB);
++              OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
++                       (1 << 25) | (direction ? (1 << 30) : 0));
++              OUT_RING((cur_pages << 16) | PAGE_SIZE);
++              OUT_RING(dst_offset);
++              OUT_RING(stride & 0xffff);
++              OUT_RING(src_offset);
++              ADVANCE_LP_RING();
++      }
++      return;
++}
++
++static int i915_move_blit(struct drm_buffer_object * bo,
++                        int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      int dir = 0;
++
++      if ((old_mem->mem_type == new_mem->mem_type) &&
++          (new_mem->mm_node->start <
++           old_mem->mm_node->start + old_mem->mm_node->size)) {
++              dir = 1;
++      }
++
++      i915_emit_copy_blit(bo->dev,
++                          old_mem->mm_node->start << PAGE_SHIFT,
++                          new_mem->mm_node->start << PAGE_SHIFT,
++                          new_mem->num_pages, dir);
++
++      i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
++
++      return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
++                                       DRM_FENCE_TYPE_EXE |
++                                       DRM_I915_FENCE_TYPE_RW,
++                                       DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
++}
++
++/*
++ * Flip destination ttm into cached-coherent AGP,
++ * then blit and subsequently move out again.
++ */
++
++static int i915_move_flip(struct drm_buffer_object * bo,
++                        int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_bo_mem_reg tmp_mem;
++      int ret;
++
++      tmp_mem = *new_mem;
++      tmp_mem.mm_node = NULL;
++      tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
++          DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
++
++      ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
++      if (ret)
++              return ret;
++
++      ret = drm_bind_ttm(bo->ttm, &tmp_mem);
++      if (ret)
++              goto out_cleanup;
++
++      ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
++      if (ret)
++              goto out_cleanup;
++
++      ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out_cleanup:
++      if (tmp_mem.mm_node) {
++              mutex_lock(&dev->struct_mutex);
++              if (tmp_mem.mm_node != bo->pinned_node)
++                      drm_mm_put_block(tmp_mem.mm_node);
++              tmp_mem.mm_node = NULL;
++              mutex_unlock(&dev->struct_mutex);
++      }
++      return ret;
++}
++
++#endif
++
++/*
++ * Disable i915_move_flip for now, since we can't guarantee that the hardware
++ * lock is held here. To re-enable we need to make sure either
++ * a) The X server is using DRM to submit commands to the ring, or
++ * b) DRM can use the HP ring for these blits. This means i915 needs to
++ *    implement a new ring submission mechanism and fence class.
++ */
++int i915_move(struct drm_buffer_object *bo,
++            int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      } else {
++              if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++      return 0;
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
++static inline void clflush(volatile void *__p)
++{
++      asm volatile("clflush %0" : "+m" (*(char __force *)__p));
++}
++#endif
++
++static inline void drm_cache_flush_addr(void *virt)
++{
++      int i;
++
++      for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++              clflush(virt+i);
++}
++
++static inline void drm_cache_flush_page(struct page *p)
++{
++      drm_cache_flush_addr(page_address(p));
++}
++
++void i915_flush_ttm(struct drm_ttm *ttm)
++{
++      int i;
++
++      if (!ttm)
++              return;
++
++      DRM_MEMORYBARRIER();
++
++#ifdef CONFIG_X86_32
++      /* Hopefully nobody has built an x86-64 processor without clflush */
++      if (!cpu_has_clflush) {
++              wbinvd();
++              DRM_MEMORYBARRIER();
++              return;
++      }
++#endif
++
++      for (i = ttm->num_pages - 1; i >= 0; i--)
++              drm_cache_flush_page(drm_ttm_get_page(ttm, i));
++
++      DRM_MEMORYBARRIER();
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_compat.c git-nokia/drivers/gpu/drm-tungsten/i915_compat.c
+--- git/drivers/gpu/drm-tungsten/i915_compat.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_compat.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,215 @@
++#include "drmP.h"
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
++#define PCI_DEVICE_ID_INTEL_82965G_1_HB     0x2980
++#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
++#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
++#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00
++#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10
++#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
++#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
++#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
++#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
++
++#define I915_IFPADDR    0x60
++#define I965_IFPADDR    0x70
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
++#define upper_32_bits(_val) (((u64)(_val)) >> 32)
++#endif
++
++static struct _i9xx_private_compat {
++      void __iomem *flush_page;
++      int resource_valid;
++      struct resource ifp_resource;
++} i9xx_private;
++
++static struct _i8xx_private_compat {
++      void *flush_page;
++      struct page *page;
++} i8xx_private;
++
++static void
++intel_compat_align_resource(void *data, struct resource *res,
++                        resource_size_t size, resource_size_t align)
++{
++      return;
++}
++
++
++static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev)
++{
++      int ret;
++      ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE,
++                                   PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
++                                   intel_compat_align_resource, pdev);
++      if (ret != 0)
++              return ret;
++
++      return 0;
++}
++
++static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
++{
++      int ret;
++      u32 temp;
++
++      pci_read_config_dword(pdev, I915_IFPADDR, &temp);
++      if (!(temp & 0x1)) {
++              intel_alloc_chipset_flush_resource(pdev);
++              i9xx_private.resource_valid = 1;
++              pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
++      } else {
++              temp &= ~1;
++
++              i9xx_private.resource_valid = 1;
++              i9xx_private.ifp_resource.start = temp;
++              i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
++              ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
++              if (ret) {
++                      i9xx_private.resource_valid = 0;
++                      printk("Failed inserting resource into tree\n");
++              }
++      }
++}
++
++static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
++{
++      u32 temp_hi, temp_lo;
++      int ret;
++
++      pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
++      pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
++
++      if (!(temp_lo & 0x1)) {
++
++              intel_alloc_chipset_flush_resource(pdev);
++
++              i9xx_private.resource_valid = 1;
++              pci_write_config_dword(pdev, I965_IFPADDR + 4,
++                      upper_32_bits(i9xx_private.ifp_resource.start));
++              pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
++      } else {
++              u64 l64;
++
++              temp_lo &= ~0x1;
++              l64 = ((u64)temp_hi << 32) | temp_lo;
++
++              i9xx_private.resource_valid = 1;
++              i9xx_private.ifp_resource.start = l64;
++              i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
++              ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
++              if (ret) {
++                      i9xx_private.resource_valid = 0;
++                      printk("Failed inserting resource into tree\n");
++              }
++      }
++}
++
++static void intel_i8xx_fini_flush(struct drm_device *dev)
++{
++      kunmap(i8xx_private.page);
++      i8xx_private.flush_page = NULL;
++      unmap_page_from_agp(i8xx_private.page);
++      flush_agp_mappings();
++
++      __free_page(i8xx_private.page);
++}
++
++static void intel_i8xx_setup_flush(struct drm_device *dev)
++{
++
++      i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
++      if (!i8xx_private.page) {
++              return;
++      }
++
++      /* make page uncached */
++      map_page_into_agp(i8xx_private.page);
++      flush_agp_mappings();
++
++      i8xx_private.flush_page = kmap(i8xx_private.page);
++      if (!i8xx_private.flush_page)
++              intel_i8xx_fini_flush(dev);
++}
++
++
++static void intel_i8xx_flush_page(struct drm_device *dev)
++{
++      unsigned int *pg = i8xx_private.flush_page;
++      int i;
++
++      /* HAI NUT CAN I HAZ HAMMER?? */
++      for (i = 0; i < 256; i++)
++              *(pg + i) = i;
++      
++      DRM_MEMORYBARRIER();
++}
++
++static void intel_i9xx_setup_flush(struct drm_device *dev)
++{
++      struct pci_dev *agp_dev = dev->agp->agp_info.device;
++
++      i9xx_private.ifp_resource.name = "GMCH IFPBAR";
++      i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
++
++      /* Setup chipset flush for 915 */
++      if (IS_I965G(dev) || IS_G33(dev)) {
++              intel_i965_g33_setup_chipset_flush(agp_dev);
++      } else {
++              intel_i915_setup_chipset_flush(agp_dev);
++      }
++
++      if (i9xx_private.ifp_resource.start) {
++              i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
++              if (!i9xx_private.flush_page)
++                      printk("unable to ioremap flush  page - no chipset flushing");
++      }
++}
++
++static void intel_i9xx_fini_flush(struct drm_device *dev)
++{
++      iounmap(i9xx_private.flush_page);
++      if (i9xx_private.resource_valid)
++              release_resource(&i9xx_private.ifp_resource);
++      i9xx_private.resource_valid = 0;
++}
++
++static void intel_i9xx_flush_page(struct drm_device *dev)
++{
++      if (i9xx_private.flush_page)
++              writel(1, i9xx_private.flush_page);
++}
++
++void intel_init_chipset_flush_compat(struct drm_device *dev)
++{
++      /* not flush on i8xx */
++      if (IS_I9XX(dev))       
++              intel_i9xx_setup_flush(dev);
++      else
++              intel_i8xx_setup_flush(dev);
++      
++}
++
++void intel_fini_chipset_flush_compat(struct drm_device *dev)
++{
++      /* not flush on i8xx */
++      if (IS_I9XX(dev))
++              intel_i9xx_fini_flush(dev);
++      else
++              intel_i8xx_fini_flush(dev);
++}
++
++void drm_agp_chipset_flush(struct drm_device *dev)
++{
++      if (IS_I9XX(dev))
++              intel_i9xx_flush_page(dev);
++      else
++              intel_i8xx_flush_page(dev);
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_dma.c git-nokia/drivers/gpu/drm-tungsten/i915_dma.c
+--- git/drivers/gpu/drm-tungsten/i915_dma.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_dma.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1276 @@
++/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
++ */
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/* Really want an OS-independent resettable timer.  Would like to have
++ * this loop run for (eg) 3 sec, but have the timer reset every time
++ * the head pointer changes, so that EBUSY only happens if the ring
++ * actually stalls for (eg) 3 seconds.
++ */
++int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++      u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++      u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
++      u32 last_acthd = I915_READ(acthd_reg);
++      u32 acthd;
++      int i;
++
++      for (i = 0; i < 100000; i++) {
++              ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++              acthd = I915_READ(acthd_reg);
++              ring->space = ring->head - (ring->tail + 8);
++              if (ring->space < 0)
++                      ring->space += ring->Size;
++              if (ring->space >= n)
++                      return 0;
++
++              if (ring->head != last_head)
++                      i = 0;
++
++              if (acthd != last_acthd)
++                      i = 0;
++
++              last_head = ring->head;
++              last_acthd = acthd;
++              DRM_UDELAY(10 * 1000);
++      }
++
++      return -EBUSY;
++}
++
++int i915_init_hardware_status(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_dma_handle_t *dmah;
++
++      /* Program Hardware Status Page */
++#ifdef __FreeBSD__
++      DRM_UNLOCK();
++#endif
++      dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
++#ifdef __FreeBSD__
++      DRM_LOCK();
++#endif
++      if (!dmah) {
++              DRM_ERROR("Can not allocate hardware status page\n");
++              return -ENOMEM;
++      }
++
++      dev_priv->status_page_dmah = dmah;
++      dev_priv->hw_status_page = dmah->vaddr;
++      dev_priv->dma_status_page = dmah->busaddr;
++
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++
++      I915_WRITE(0x02080, dev_priv->dma_status_page);
++      DRM_DEBUG("Enabled hardware status page\n");
++      return 0;
++}
++
++void i915_free_hardware_status(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      if (dev_priv->status_page_dmah) {
++              drm_pci_free(dev, dev_priv->status_page_dmah);
++              dev_priv->status_page_dmah = NULL;
++              /* Need to rewrite hardware status page */
++              I915_WRITE(0x02080, 0x1ffff000);
++      }
++
++      if (dev_priv->status_gfx_addr) {
++              dev_priv->status_gfx_addr = 0;
++              drm_core_ioremapfree(&dev_priv->hws_map, dev);
++              I915_WRITE(0x02080, 0x1ffff000);
++      }
++}
++
++#if I915_RING_VALIDATE
++/**
++ * Validate the cached ring tail value
++ *
++ * If the X server writes to the ring and DRM doesn't
++ * reload the head and tail pointers, it will end up writing
++ * data to the wrong place in the ring, causing havoc.
++ */
++void i915_ring_validate(struct drm_device *dev, const char *func, int line)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++      u32     tail = I915_READ(PRB0_TAIL) & HEAD_ADDR;
++      u32     head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++
++      if (tail != ring->tail) {
++              DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
++                        func, line,
++                        ring->head, head, ring->tail, tail);
++#ifdef __linux__
++              BUG_ON(1);
++#endif
++      }
++}
++#endif
++
++void i915_kernel_lost_context(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
++
++      ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
++      ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
++      ring->space = ring->head - (ring->tail + 8);
++      if (ring->space < 0)
++              ring->space += ring->Size;
++}
++
++static int i915_dma_cleanup(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev_priv->ring.virtual_start) {
++              drm_core_ioremapfree(&dev_priv->ring.map, dev);
++              dev_priv->ring.virtual_start = 0;
++              dev_priv->ring.map.handle = 0;
++              dev_priv->ring.map.size = 0;
++      }
++
++      if (I915_NEED_GFX_HWS(dev))
++              i915_free_hardware_status(dev);
++
++      return 0;
++}
++
++#if defined(I915_HAVE_BUFFER)
++#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
++#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
++#define DRI2_SAREA_BLOCK_NEXT(p)                              \
++      ((void *) ((unsigned char *) (p) +                      \
++                 DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p)))
++
++#define DRI2_SAREA_BLOCK_END          0x0000
++#define DRI2_SAREA_BLOCK_LOCK         0x0001
++#define DRI2_SAREA_BLOCK_EVENT_BUFFER 0x0002
++
++static int
++setup_dri2_sarea(struct drm_device * dev,
++               struct drm_file *file_priv,
++               drm_i915_init_t * init)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret;
++      unsigned int *p, *end, *next;
++
++      mutex_lock(&dev->struct_mutex);
++      dev_priv->sarea_bo =
++              drm_lookup_buffer_object(file_priv,
++                                       init->sarea_handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (!dev_priv->sarea_bo) {
++              DRM_ERROR("did not find sarea bo\n");
++              return -EINVAL;
++      }
++
++      ret = drm_bo_kmap(dev_priv->sarea_bo, 0,
++                        dev_priv->sarea_bo->num_pages,
++                        &dev_priv->sarea_kmap);
++      if (ret) {
++              DRM_ERROR("could not map sarea bo\n");
++              return ret;
++      }
++
++      p = dev_priv->sarea_kmap.virtual;
++      end = (void *) p + (dev_priv->sarea_bo->num_pages << PAGE_SHIFT);
++      while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) {
++              switch (DRI2_SAREA_BLOCK_TYPE(*p)) {
++              case DRI2_SAREA_BLOCK_LOCK:
++                      dev->lock.hw_lock = (void *) (p + 1);
++                      dev->sigdata.lock = dev->lock.hw_lock;
++                      break;
++              }
++              next = DRI2_SAREA_BLOCK_NEXT(p);
++              if (next <= p || end < next) {
++                      DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n",
++                                next, p, end);
++                      return -EINVAL;
++              }
++              p = next;
++      }
++
++      return 0;
++}
++#endif
++
++static int i915_initialize(struct drm_device * dev,
++                         struct drm_file *file_priv,
++                         drm_i915_init_t * init)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++#if defined(I915_HAVE_BUFFER)
++      int ret;
++#endif
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("can not find sarea!\n");
++              i915_dma_cleanup(dev);
++              return -EINVAL;
++      }
++
++#ifdef I915_HAVE_BUFFER
++      dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
++#endif
++
++      if (init->sarea_priv_offset)
++              dev_priv->sarea_priv = (drm_i915_sarea_t *)
++                      ((u8 *) dev_priv->sarea->handle +
++                       init->sarea_priv_offset);
++      else {
++              /* No sarea_priv for you! */
++              dev_priv->sarea_priv = NULL;
++      }
++
++      if (init->ring_size != 0) {
++              dev_priv->ring.Size = init->ring_size;
++              dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
++
++              dev_priv->ring.map.offset = init->ring_start;
++              dev_priv->ring.map.size = init->ring_size;
++              dev_priv->ring.map.type = 0;
++              dev_priv->ring.map.flags = 0;
++              dev_priv->ring.map.mtrr = 0;
++
++              drm_core_ioremap(&dev_priv->ring.map, dev);
++
++              if (dev_priv->ring.map.handle == NULL) {
++                      i915_dma_cleanup(dev);
++                      DRM_ERROR("can not ioremap virtual address for"
++                                " ring buffer\n");
++                      return -ENOMEM;
++              }
++
++              dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++      }
++
++      dev_priv->cpp = init->cpp;
++
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv->pf_current_page = 0;
++
++      /* We are using separate values as placeholders for mechanisms for
++       * private backbuffer/depthbuffer usage.
++       */
++
++      /* Allow hardware batchbuffers unless told otherwise.
++       */
++      dev_priv->allow_batchbuffer = 1;
++
++      /* Enable vblank on pipe A for older X servers
++       */
++      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
++
++#ifdef I915_HAVE_BUFFER
++      mutex_init(&dev_priv->cmdbuf_mutex);
++#endif
++#if defined(I915_HAVE_BUFFER)
++      if (init->func == I915_INIT_DMA2) {
++              ret = setup_dri2_sarea(dev, file_priv, init);
++              if (ret) {
++                      i915_dma_cleanup(dev);
++                      DRM_ERROR("could not set up dri2 sarea\n");
++                      return ret;
++              }
++      }
++#endif
++
++      return 0;
++}
++
++static int i915_dma_resume(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      if (!dev_priv->sarea) {
++              DRM_ERROR("can not find sarea!\n");
++              return -EINVAL;
++      }
++
++      if (dev_priv->ring.map.handle == NULL) {
++              DRM_ERROR("can not ioremap virtual address for"
++                        " ring buffer\n");
++              return -ENOMEM;
++      }
++
++      /* Program Hardware Status Page */
++      if (!dev_priv->hw_status_page) {
++              DRM_ERROR("Can not find hardware status page\n");
++              return -EINVAL;
++      }
++      DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
++
++      if (dev_priv->status_gfx_addr != 0)
++              I915_WRITE(0x02080, dev_priv->status_gfx_addr);
++      else
++              I915_WRITE(0x02080, dev_priv->dma_status_page);
++      DRM_DEBUG("Enabled hardware status page\n");
++
++      return 0;
++}
++
++static int i915_dma_init(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_init_t *init = data;
++      int retcode = 0;
++
++      switch (init->func) {
++      case I915_INIT_DMA:
++      case I915_INIT_DMA2:
++              retcode = i915_initialize(dev, file_priv, init);
++              break;
++      case I915_CLEANUP_DMA:
++              retcode = i915_dma_cleanup(dev);
++              break;
++      case I915_RESUME_DMA:
++              retcode = i915_dma_resume(dev);
++              break;
++      default:
++              retcode = -EINVAL;
++              break;
++      }
++
++      return retcode;
++}
++
++/* Implement basically the same security restrictions as hardware does
++ * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
++ *
++ * Most of the calculations below involve calculating the size of a
++ * particular instruction.  It's important to get the size right as
++ * that tells us where the next instruction to check is.  Any illegal
++ * instruction detected will be given a size of zero, which is a
++ * signal to abort the rest of the buffer.
++ */
++static int do_validate_cmd(int cmd)
++{
++      switch (((cmd >> 29) & 0x7)) {
++      case 0x0:
++              switch ((cmd >> 23) & 0x3f) {
++              case 0x0:
++                      return 1;       /* MI_NOOP */
++              case 0x4:
++                      return 1;       /* MI_FLUSH */
++              default:
++                      return 0;       /* disallow everything else */
++              }
++              break;
++      case 0x1:
++              return 0;       /* reserved */
++      case 0x2:
++              return (cmd & 0xff) + 2;        /* 2d commands */
++      case 0x3:
++              if (((cmd >> 24) & 0x1f) <= 0x18)
++                      return 1;
++
++              switch ((cmd >> 24) & 0x1f) {
++              case 0x1c:
++                      return 1;
++              case 0x1d:
++                      switch ((cmd >> 16) & 0xff) {
++                      case 0x3:
++                              return (cmd & 0x1f) + 2;
++                      case 0x4:
++                              return (cmd & 0xf) + 2;
++                      default:
++                              return (cmd & 0xffff) + 2;
++                      }
++              case 0x1e:
++                      if (cmd & (1 << 23))
++                              return (cmd & 0xffff) + 1;
++                      else
++                              return 1;
++              case 0x1f:
++                      if ((cmd & (1 << 23)) == 0)     /* inline vertices */
++                              return (cmd & 0x1ffff) + 2;
++                      else if (cmd & (1 << 17))       /* indirect random */
++                              if ((cmd & 0xffff) == 0)
++                                      return 0;       /* unknown length, too hard */
++                              else
++                                      return (((cmd & 0xffff) + 1) / 2) + 1;
++                      else
++                              return 2;       /* indirect sequential */
++              default:
++                      return 0;
++              }
++      default:
++              return 0;
++      }
++
++      return 0;
++}
++
++static int validate_cmd(int cmd)
++{
++      int ret = do_validate_cmd(cmd);
++
++/*    printk("validate_cmd( %x ): %d\n", cmd, ret); */
++
++      return ret;
++}
++
++static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
++                        int dwords)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int i;
++      RING_LOCALS;
++
++      if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
++              return -EINVAL;
++
++      BEGIN_LP_RING((dwords+1)&~1);
++
++      for (i = 0; i < dwords;) {
++              int cmd, sz;
++
++              if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
++                      return -EINVAL;
++
++              if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
++                      return -EINVAL;
++
++              OUT_RING(cmd);
++
++              while (++i, --sz) {
++                      if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
++                                                       sizeof(cmd))) {
++                              return -EINVAL;
++                      }
++                      OUT_RING(cmd);
++              }
++      }
++
++      if (dwords & 1)
++              OUT_RING(0);
++
++      ADVANCE_LP_RING();
++
++      return 0;
++}
++
++int i915_emit_box(struct drm_device * dev,
++                struct drm_clip_rect __user * boxes,
++                int i, int DR1, int DR4)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect box;
++      RING_LOCALS;
++
++      if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
++              return -EFAULT;
++      }
++
++      if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
++              DRM_ERROR("Bad box %d,%d..%d,%d\n",
++                        box.x1, box.y1, box.x2, box.y2);
++              return -EINVAL;
++      }
++
++      if (IS_I965G(dev)) {
++              BEGIN_LP_RING(4);
++              OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
++              OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
++              OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++              OUT_RING(DR4);
++              ADVANCE_LP_RING();
++      } else {
++              BEGIN_LP_RING(6);
++              OUT_RING(GFX_OP_DRAWRECT_INFO);
++              OUT_RING(DR1);
++              OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
++              OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
++              OUT_RING(DR4);
++              OUT_RING(0);
++              ADVANCE_LP_RING();
++      }
++
++      return 0;
++}
++
++/* XXX: Emitting the counter should really be moved to part of the IRQ
++ * emit. For now, do it in both places:
++ */
++
++void i915_emit_breadcrumb(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      if (++dev_priv->counter > BREADCRUMB_MASK) {
++               dev_priv->counter = 1;
++               DRM_DEBUG("Breadcrumb counter wrapped around\n");
++      }
++
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
++
++      BEGIN_LP_RING(4);
++      OUT_RING(MI_STORE_DWORD_INDEX);
++      OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
++      OUT_RING(dev_priv->counter);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++}
++
++
++int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t flush_cmd = MI_FLUSH;
++      RING_LOCALS;
++
++      flush_cmd |= flush;
++
++      i915_kernel_lost_context(dev);
++
++      BEGIN_LP_RING(4);
++      OUT_RING(flush_cmd);
++      OUT_RING(0);
++      OUT_RING(0);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++
++      return 0;
++}
++
++
++static int i915_dispatch_cmdbuffer(struct drm_device * dev,
++                                 drm_i915_cmdbuffer_t * cmd)
++{
++#ifdef I915_HAVE_FENCE
++      drm_i915_private_t *dev_priv = dev->dev_private;
++#endif
++      int nbox = cmd->num_cliprects;
++      int i = 0, count, ret;
++
++      if (cmd->sz & 0x3) {
++              DRM_ERROR("alignment\n");
++              return -EINVAL;
++      }
++
++      i915_kernel_lost_context(dev);
++
++      count = nbox ? nbox : 1;
++
++      for (i = 0; i < count; i++) {
++              if (i < nbox) {
++                      ret = i915_emit_box(dev, cmd->cliprects, i,
++                                          cmd->DR1, cmd->DR4);
++                      if (ret)
++                              return ret;
++              }
++
++              ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
++              if (ret)
++                      return ret;
++      }
++
++      i915_emit_breadcrumb(dev);
++#ifdef I915_HAVE_FENCE
++      if (unlikely((dev_priv->counter & 0xFF) == 0))
++              drm_fence_flush_old(dev, 0, dev_priv->counter);
++#endif
++      return 0;
++}
++
++int i915_dispatch_batchbuffer(struct drm_device * dev,
++                            drm_i915_batchbuffer_t * batch)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect __user *boxes = batch->cliprects;
++      int nbox = batch->num_cliprects;
++      int i = 0, count;
++      RING_LOCALS;
++
++      if ((batch->start | batch->used) & 0x7) {
++              DRM_ERROR("alignment\n");
++              return -EINVAL;
++      }
++
++      i915_kernel_lost_context(dev);
++
++      count = nbox ? nbox : 1;
++
++      for (i = 0; i < count; i++) {
++              if (i < nbox) {
++                      int ret = i915_emit_box(dev, boxes, i,
++                                              batch->DR1, batch->DR4);
++                      if (ret)
++                              return ret;
++              }
++
++              if (IS_I830(dev) || IS_845G(dev)) {
++                      BEGIN_LP_RING(4);
++                      OUT_RING(MI_BATCH_BUFFER);
++                      OUT_RING(batch->start | MI_BATCH_NON_SECURE);
++                      OUT_RING(batch->start + batch->used - 4);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              } else {
++                      BEGIN_LP_RING(2);
++                      if (IS_I965G(dev)) {
++                              OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
++                              OUT_RING(batch->start);
++                      } else {
++                              OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
++                              OUT_RING(batch->start | MI_BATCH_NON_SECURE);
++                      }
++                      ADVANCE_LP_RING();
++              }
++      }
++
++      i915_emit_breadcrumb(dev);
++#ifdef I915_HAVE_FENCE
++      if (unlikely((dev_priv->counter & 0xFF) == 0))
++              drm_fence_flush_old(dev, 0, dev_priv->counter);
++#endif
++      return 0;
++}
++
++static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      u32 num_pages, current_page, next_page, dspbase;
++      int shift = 2 * plane, x, y;
++      RING_LOCALS;
++
++      /* Calculate display base offset */
++      num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
++      current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3;
++      next_page = (current_page + 1) % num_pages;
++
++      switch (next_page) {
++      default:
++      case 0:
++              dspbase = dev_priv->sarea_priv->front_offset;
++              break;
++      case 1:
++              dspbase = dev_priv->sarea_priv->back_offset;
++              break;
++      case 2:
++              dspbase = dev_priv->sarea_priv->third_offset;
++              break;
++      }
++
++      if (plane == 0) {
++              x = dev_priv->sarea_priv->planeA_x;
++              y = dev_priv->sarea_priv->planeA_y;
++      } else {
++              x = dev_priv->sarea_priv->planeB_x;
++              y = dev_priv->sarea_priv->planeB_y;
++      }
++
++      dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
++
++      DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
++                dspbase);
++
++      BEGIN_LP_RING(4);
++      OUT_RING(sync ? 0 :
++               (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
++                                     MI_WAIT_FOR_PLANE_A_FLIP)));
++      OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
++               (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
++      OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
++      OUT_RING(dspbase);
++      ADVANCE_LP_RING();
++
++      dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift);
++      dev_priv->sarea_priv->pf_current_page |= next_page << shift;
++}
++
++void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
++                planes, dev_priv->sarea_priv->pf_current_page);
++
++      i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
++
++      for (i = 0; i < 2; i++)
++              if (planes & (1 << i))
++                      i915_do_dispatch_flip(dev, i, sync);
++
++      i915_emit_breadcrumb(dev);
++#ifdef I915_HAVE_FENCE
++      if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
++              drm_fence_flush_old(dev, 0, dev_priv->counter);
++#endif
++}
++
++int i915_quiescent(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret;
++
++      i915_kernel_lost_context(dev);
++      ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
++      if (ret)
++      {
++              i915_kernel_lost_context (dev);
++              DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
++                         dev_priv->ring.head,
++                         dev_priv->ring.tail,
++                         dev_priv->ring.space);
++      }
++      return ret;
++}
++
++static int i915_flush_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv)
++{
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return i915_quiescent(dev);
++}
++
++static int i915_batchbuffer(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i915_batchbuffer_t *batch = data;
++      int ret;
++
++      if (!dev_priv->allow_batchbuffer) {
++              DRM_ERROR("Batchbuffer ioctl disabled\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
++                batch->start, batch->used, batch->num_cliprects);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
++                                                      batch->num_cliprects *
++                                                      sizeof(struct drm_clip_rect)))
++              return -EFAULT;
++
++      ret = i915_dispatch_batchbuffer(dev, batch);
++
++      sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++      return ret;
++}
++
++static int i915_cmdbuffer(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
++          dev_priv->sarea_priv;
++      drm_i915_cmdbuffer_t *cmdbuf = data;
++      int ret;
++
++      DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
++                cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (cmdbuf->num_cliprects &&
++          DRM_VERIFYAREA_READ(cmdbuf->cliprects,
++                              cmdbuf->num_cliprects *
++                              sizeof(struct drm_clip_rect))) {
++              DRM_ERROR("Fault accessing cliprects\n");
++              return -EFAULT;
++      }
++
++      ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
++      if (ret) {
++              DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
++              return ret;
++      }
++
++      sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++      return 0;
++}
++
++#if defined(DRM_DEBUG_CODE)
++#define DRM_DEBUG_RELOCATION  (drm_debug != 0)
++#else
++#define DRM_DEBUG_RELOCATION  0
++#endif
++
++static int i915_do_cleanup_pageflip(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0, planes = 0; i < 2; i++)
++              if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
++                      dev_priv->sarea_priv->pf_current_page =
++                              (dev_priv->sarea_priv->pf_current_page &
++                               ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
++
++                      planes |= 1 << i;
++              }
++
++      if (planes)
++              i915_dispatch_flip(dev, planes, 0);
++
++      return 0;
++}
++
++static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_i915_flip_t *param = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* This is really planes */
++      if (param->pipes & ~0x3) {
++              DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
++                        param->pipes);
++              return -EINVAL;
++      }
++
++      i915_dispatch_flip(dev, param->pipes, 0);
++
++      return 0;
++}
++
++
++static int i915_getparam(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (param->param) {
++      case I915_PARAM_IRQ_ACTIVE:
++              value = dev->irq_enabled ? 1 : 0;
++              break;
++      case I915_PARAM_ALLOW_BATCHBUFFER:
++              value = dev_priv->allow_batchbuffer ? 1 : 0;
++              break;
++      case I915_PARAM_LAST_DISPATCH:
++              value = READ_BREADCRUMB(dev_priv);
++              break;
++      case I915_PARAM_CHIPSET_ID:
++              value = dev->pci_device;
++              break;
++      case I915_PARAM_HAS_GEM:
++              value = 1;
++              break;
++      default:
++              DRM_ERROR("Unknown parameter %d\n", param->param);
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("DRM_COPY_TO_USER failed\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int i915_setparam(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_setparam_t *param = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (param->param) {
++      case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
++              break;
++      case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
++              dev_priv->tex_lru_log_granularity = param->value;
++              break;
++      case I915_SETPARAM_ALLOW_BATCHBUFFER:
++              dev_priv->allow_batchbuffer = param->value;
++              break;
++      default:
++              DRM_ERROR("unknown parameter %d\n", param->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++drm_i915_mmio_entry_t mmio_table[] = {
++      [MMIO_REGS_PS_DEPTH_COUNT] = {
++              I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
++              0x2350,
++              8
++      }
++};
++
++static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
++
++static int i915_mmio(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      uint32_t buf[8];
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mmio_entry_t *e;
++      drm_i915_mmio_t *mmio = data;
++      void __iomem *base;
++      int i;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (mmio->reg >= mmio_table_size)
++              return -EINVAL;
++
++      e = &mmio_table[mmio->reg];
++      base = (u8 *) dev_priv->mmio_map->handle + e->offset;
++
++      switch (mmio->read_write) {
++      case I915_MMIO_READ:
++              if (!(e->flag & I915_MMIO_MAY_READ))
++                      return -EINVAL;
++              for (i = 0; i < e->size / 4; i++)
++                      buf[i] = I915_READ(e->offset + i * 4);
++              if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
++                      DRM_ERROR("DRM_COPY_TO_USER failed\n");
++                      return -EFAULT;
++              }
++              break;
++              
++      case I915_MMIO_WRITE:
++              if (!(e->flag & I915_MMIO_MAY_WRITE))
++                      return -EINVAL;
++              if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
++                      DRM_ERROR("DRM_COPY_TO_USER failed\n");
++                      return -EFAULT;
++              }
++              for (i = 0; i < e->size / 4; i++)
++                      I915_WRITE(e->offset + i * 4, buf[i]);
++              break;
++      }
++      return 0;
++}
++
++static int i915_set_status_page(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_hws_addr_t *hws = data;
++
++      if (!I915_NEED_GFX_HWS(dev))
++              return -EINVAL;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++      DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
++
++      dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
++
++      dev_priv->hws_map.offset = dev->agp->base + hws->addr;
++      dev_priv->hws_map.size = 4*1024;
++      dev_priv->hws_map.type = 0;
++      dev_priv->hws_map.flags = 0;
++      dev_priv->hws_map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->hws_map, dev);
++      if (dev_priv->hws_map.handle == NULL) {
++              i915_dma_cleanup(dev);
++              dev_priv->status_gfx_addr = 0;
++              DRM_ERROR("can not ioremap virtual address for"
++                              " G33 hw status page\n");
++              return -ENOMEM;
++      }
++      dev_priv->hw_status_page = dev_priv->hws_map.handle;
++
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++      DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
++                      dev_priv->status_gfx_addr);
++      DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
++      return 0;
++}
++
++int i915_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      struct drm_i915_private *dev_priv;
++      unsigned long base, size;
++      int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
++
++      /* i915 has 4 more counters */
++      dev->counters += 4;
++      dev->types[6] = _DRM_STAT_IRQ;
++      dev->types[7] = _DRM_STAT_PRIMARY;
++      dev->types[8] = _DRM_STAT_SECONDARY;
++      dev->types[9] = _DRM_STAT_DMA;
++
++      dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_i915_private_t));
++
++      dev->dev_private = (void *)dev_priv;
++      dev_priv->dev = dev;
++
++      /* Add register map (needed for suspend/resume) */
++      base = drm_get_resource_start(dev, mmio_bar);
++      size = drm_get_resource_len(dev, mmio_bar);
++
++      ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
++              _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
++#ifdef I915_HAVE_GEM
++      i915_gem_load(dev);
++#endif
++      DRM_SPININIT(&dev_priv->swaps_lock, "swap");
++      DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++      intel_init_chipset_flush_compat(dev);
++#endif
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_init(dev);
++#endif
++#endif
++
++      /* Init HWS */
++      if (!I915_NEED_GFX_HWS(dev)) {
++              ret = i915_init_hardware_status(dev);
++              if(ret)
++                      return ret;
++      }
++
++      return ret;
++}
++
++int i915_driver_unload(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      i915_free_hardware_status(dev);
++
++      drm_rmmap(dev, dev_priv->mmio_map);
++
++      DRM_SPINUNINIT(&dev_priv->swaps_lock);
++      DRM_SPINUNINIT(&dev_priv->user_irq_lock);
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_free(dev);
++#endif
++#endif
++
++      drm_free(dev->dev_private, sizeof(drm_i915_private_t),
++               DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++      intel_fini_chipset_flush_compat(dev);
++#endif
++#endif
++      return 0;
++}
++
++void i915_driver_lastclose(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      /* agp off can use this to get called before dev_priv */
++      if (!dev_priv)
++              return;
++
++#ifdef I915_HAVE_BUFFER
++      if (dev_priv->val_bufs) {
++              vfree(dev_priv->val_bufs);
++              dev_priv->val_bufs = NULL;
++      }
++#endif
++#ifdef I915_HAVE_GEM
++      i915_gem_lastclose(dev);
++#endif
++      if (drm_getsarea(dev) && dev_priv->sarea_priv)
++              i915_do_cleanup_pageflip(dev);
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv = NULL;
++      if (dev_priv->agp_heap)
++              i915_mem_takedown(&(dev_priv->agp_heap));
++#if defined(I915_HAVE_BUFFER)
++      if (dev_priv->sarea_kmap.virtual) {
++              drm_bo_kunmap(&dev_priv->sarea_kmap);
++              dev_priv->sarea_kmap.virtual = NULL;
++              dev->lock.hw_lock = NULL;
++              dev->sigdata.lock = NULL;
++      }
++
++      if (dev_priv->sarea_bo) {
++              mutex_lock(&dev->struct_mutex);
++              drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
++              mutex_unlock(&dev->struct_mutex);
++              dev_priv->sarea_bo = NULL;
++      }
++#endif
++      i915_dma_cleanup(dev);
++}
++
++int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_i915_file_private *i915_file_priv;
++
++      DRM_DEBUG("\n");
++      i915_file_priv = (struct drm_i915_file_private *)
++          drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
++
++      if (!i915_file_priv)
++              return -ENOMEM;
++
++      file_priv->driver_priv = i915_file_priv;
++
++      i915_file_priv->mm.last_gem_seqno = 0;
++      i915_file_priv->mm.last_gem_throttle_seqno = 0;
++
++      return 0;
++}
++
++void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      i915_mem_release(dev, file_priv, dev_priv->agp_heap);
++}
++
++void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++
++      drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
++}
++
++struct drm_ioctl_desc i915_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
++      DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
++      DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
++      DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
++#ifdef I915_HAVE_BUFFER
++      DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
++#endif
++#ifdef I915_HAVE_GEM
++      DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
++      DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
++#endif
++};
++
++int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * All Intel graphics chipsets are treated as AGP, even if they are really
++ * PCI-e.
++ *
++ * \param dev   The device to be tested.
++ *
++ * \returns
++ * A value of 1 is always retured to indictate every i9x5 is AGP.
++ */
++int i915_driver_device_is_agp(struct drm_device * dev)
++{
++      return 1;
++}
++
++int i915_driver_firstopen(struct drm_device *dev)
++{
++#ifdef I915_HAVE_BUFFER
++      drm_bo_driver_init(dev);
++#endif
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_drm.h git-nokia/drivers/gpu/drm-tungsten/i915_drm.h
+--- git/drivers/gpu/drm-tungsten/i915_drm.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_drm.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,719 @@
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _I915_DRM_H_
++#define _I915_DRM_H_
++
++/* Please note that modifications to all structs defined here are
++ * subject to backwards-compatibility constraints.
++ */
++
++#include "drm.h"
++
++/* Each region is a minimum of 16k, and there are at most 255 of them.
++ */
++#define I915_NR_TEX_REGIONS 255       /* table size 2k - maximum due to use
++                               * of chars for next/prev indices */
++#define I915_LOG_MIN_TEX_REGION_SIZE 14
++
++typedef struct _drm_i915_init {
++      enum {
++              I915_INIT_DMA = 0x01,
++              I915_CLEANUP_DMA = 0x02,
++              I915_RESUME_DMA = 0x03,
++
++              /* Since this struct isn't versioned, just used a new
++               * 'func' code to indicate the presence of dri2 sarea
++               * info. */
++              I915_INIT_DMA2 = 0x04
++      } func;
++      unsigned int mmio_offset;
++      int sarea_priv_offset;
++      unsigned int ring_start;
++      unsigned int ring_end;
++      unsigned int ring_size;
++      unsigned int front_offset;
++      unsigned int back_offset;
++      unsigned int depth_offset;
++      unsigned int w;
++      unsigned int h;
++      unsigned int pitch;
++      unsigned int pitch_bits;
++      unsigned int back_pitch;
++      unsigned int depth_pitch;
++      unsigned int cpp;
++      unsigned int chipset;
++      unsigned int sarea_handle;
++} drm_i915_init_t;
++
++typedef struct drm_i915_sarea {
++      struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
++      int last_upload;        /* last time texture was uploaded */
++      int last_enqueue;       /* last time a buffer was enqueued */
++      int last_dispatch;      /* age of the most recently dispatched buffer */
++      int ctxOwner;           /* last context to upload state */
++      int texAge;
++      int pf_enabled;         /* is pageflipping allowed? */
++      int pf_active;
++      int pf_current_page;    /* which buffer is being displayed? */
++      int perf_boxes;         /* performance boxes to be displayed */
++      int width, height;      /* screen size in pixels */
++
++      drm_handle_t front_handle;
++      int front_offset;
++      int front_size;
++
++      drm_handle_t back_handle;
++      int back_offset;
++      int back_size;
++
++      drm_handle_t depth_handle;
++      int depth_offset;
++      int depth_size;
++
++      drm_handle_t tex_handle;
++      int tex_offset;
++      int tex_size;
++      int log_tex_granularity;
++      int pitch;
++      int rotation;           /* 0, 90, 180 or 270 */
++      int rotated_offset;
++      int rotated_size;
++      int rotated_pitch;
++      int virtualX, virtualY;
++
++      unsigned int front_tiled;
++      unsigned int back_tiled;
++      unsigned int depth_tiled;
++      unsigned int rotated_tiled;
++      unsigned int rotated2_tiled;
++
++      int planeA_x;
++      int planeA_y;
++      int planeA_w;
++      int planeA_h;
++      int planeB_x;
++      int planeB_y;
++      int planeB_w;
++      int planeB_h;
++
++      /* Triple buffering */
++      drm_handle_t third_handle;
++      int third_offset;
++      int third_size;
++      unsigned int third_tiled;
++
++      /* buffer object handles for the static buffers.  May change
++       * over the lifetime of the client, though it doesn't in our current
++       * implementation.
++       */
++      unsigned int front_bo_handle;
++      unsigned int back_bo_handle;
++      unsigned int third_bo_handle;
++      unsigned int depth_bo_handle;
++} drm_i915_sarea_t;
++
++/* Driver specific fence types and classes.
++ */
++
++/* The only fence class we support */
++#define DRM_I915_FENCE_CLASS_ACCEL 0
++/* Fence type that guarantees read-write flush */
++#define DRM_I915_FENCE_TYPE_RW 2
++/* MI_FLUSH programmed just before the fence */
++#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
++
++/* Flags for perf_boxes
++ */
++#define I915_BOX_RING_EMPTY    0x1
++#define I915_BOX_FLIP          0x2
++#define I915_BOX_WAIT          0x4
++#define I915_BOX_TEXTURE_LOAD  0x8
++#define I915_BOX_LOST_CONTEXT  0x10
++
++/* I915 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_I915_INIT         0x00
++#define DRM_I915_FLUSH                0x01
++#define DRM_I915_FLIP         0x02
++#define DRM_I915_BATCHBUFFER  0x03
++#define DRM_I915_IRQ_EMIT     0x04
++#define DRM_I915_IRQ_WAIT     0x05
++#define DRM_I915_GETPARAM     0x06
++#define DRM_I915_SETPARAM     0x07
++#define DRM_I915_ALLOC                0x08
++#define DRM_I915_FREE         0x09
++#define DRM_I915_INIT_HEAP    0x0a
++#define DRM_I915_CMDBUFFER    0x0b
++#define DRM_I915_DESTROY_HEAP 0x0c
++#define DRM_I915_SET_VBLANK_PIPE      0x0d
++#define DRM_I915_GET_VBLANK_PIPE      0x0e
++#define DRM_I915_VBLANK_SWAP  0x0f
++#define DRM_I915_MMIO         0x10
++#define DRM_I915_HWS_ADDR     0x11
++#define DRM_I915_EXECBUFFER   0x12
++#define DRM_I915_GEM_INIT     0x13
++#define DRM_I915_GEM_EXECBUFFER       0x14
++#define DRM_I915_GEM_PIN      0x15
++#define DRM_I915_GEM_UNPIN    0x16
++#define DRM_I915_GEM_BUSY     0x17
++#define DRM_I915_GEM_THROTTLE 0x18
++#define DRM_I915_GEM_ENTERVT  0x19
++#define DRM_I915_GEM_LEAVEVT  0x1a
++#define DRM_I915_GEM_CREATE   0x1b
++#define DRM_I915_GEM_PREAD    0x1c
++#define DRM_I915_GEM_PWRITE   0x1d
++#define DRM_I915_GEM_MMAP     0x1e
++#define DRM_I915_GEM_SET_DOMAIN       0x1f
++#define DRM_I915_GEM_SW_FINISH        0x20
++#define DRM_I915_GEM_SET_TILING       0x21
++#define DRM_I915_GEM_GET_TILING       0x22
++
++#define DRM_IOCTL_I915_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
++#define DRM_IOCTL_I915_FLUSH          DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
++#define DRM_IOCTL_I915_FLIP           DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
++#define DRM_IOCTL_I915_BATCHBUFFER    DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
++#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
++#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
++#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
++#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
++#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
++#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
++#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
++#define DRM_IOCTL_I915_CMDBUFFER      DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
++#define DRM_IOCTL_I915_DESTROY_HEAP   DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
++#define DRM_IOCTL_I915_SET_VBLANK_PIPE        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
++#define DRM_IOCTL_I915_GET_VBLANK_PIPE        DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
++#define DRM_IOCTL_I915_VBLANK_SWAP    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
++#define DRM_IOCTL_I915_MMIO             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
++#define DRM_IOCTL_I915_EXECBUFFER     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
++#define DRM_IOCTL_I915_GEM_INIT               DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
++#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
++#define DRM_IOCTL_I915_GEM_PIN                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
++#define DRM_IOCTL_I915_GEM_UNPIN      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
++#define DRM_IOCTL_I915_GEM_BUSY               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
++#define DRM_IOCTL_I915_GEM_THROTTLE   DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
++#define DRM_IOCTL_I915_GEM_ENTERVT    DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
++#define DRM_IOCTL_I915_GEM_LEAVEVT    DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
++#define DRM_IOCTL_I915_GEM_CREATE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
++#define DRM_IOCTL_I915_GEM_PREAD      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
++#define DRM_IOCTL_I915_GEM_PWRITE     DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
++#define DRM_IOCTL_I915_GEM_MMAP               DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
++#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
++#define DRM_IOCTL_I915_GEM_SW_FINISH  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
++#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
++#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
++
++/* Asynchronous page flipping:
++ */
++typedef struct drm_i915_flip {
++      /*
++       * This is really talking about planes, and we could rename it
++       * except for the fact that some of the duplicated i915_drm.h files
++       * out there check for HAVE_I915_FLIP and so might pick up this
++       * version.
++       */
++      int pipes;
++} drm_i915_flip_t;
++
++/* Allow drivers to submit batchbuffers directly to hardware, relying
++ * on the security mechanisms provided by hardware.
++ */
++typedef struct drm_i915_batchbuffer {
++      int start;              /* agp offset */
++      int used;               /* nr bytes in use */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* mulitpass with multiple cliprects? */
++      struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
++} drm_i915_batchbuffer_t;
++
++/* As above, but pass a pointer to userspace buffer which can be
++ * validated by the kernel prior to sending to hardware.
++ */
++typedef struct _drm_i915_cmdbuffer {
++      char __user *buf;       /* pointer to userspace command buffer */
++      int sz;                 /* nr bytes in buf */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* mulitpass with multiple cliprects? */
++      struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
++} drm_i915_cmdbuffer_t;
++
++/* Userspace can request & wait on irq's:
++ */
++typedef struct drm_i915_irq_emit {
++      int __user *irq_seq;
++} drm_i915_irq_emit_t;
++
++typedef struct drm_i915_irq_wait {
++      int irq_seq;
++} drm_i915_irq_wait_t;
++
++/* Ioctl to query kernel params:
++ */
++#define I915_PARAM_IRQ_ACTIVE            1
++#define I915_PARAM_ALLOW_BATCHBUFFER     2
++#define I915_PARAM_LAST_DISPATCH         3
++#define I915_PARAM_CHIPSET_ID            4
++#define I915_PARAM_HAS_GEM               5
++
++typedef struct drm_i915_getparam {
++      int param;
++      int __user *value;
++} drm_i915_getparam_t;
++
++/* Ioctl to set kernel params:
++ */
++#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
++#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
++#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
++
++typedef struct drm_i915_setparam {
++      int param;
++      int value;
++} drm_i915_setparam_t;
++
++/* A memory manager for regions of shared memory:
++ */
++#define I915_MEM_REGION_AGP 1
++
++typedef struct drm_i915_mem_alloc {
++      int region;
++      int alignment;
++      int size;
++      int __user *region_offset;      /* offset from start of fb or agp */
++} drm_i915_mem_alloc_t;
++
++typedef struct drm_i915_mem_free {
++      int region;
++      int region_offset;
++} drm_i915_mem_free_t;
++
++typedef struct drm_i915_mem_init_heap {
++      int region;
++      int size;
++      int start;
++} drm_i915_mem_init_heap_t;
++
++/* Allow memory manager to be torn down and re-initialized (eg on
++ * rotate):
++ */
++typedef struct drm_i915_mem_destroy_heap {
++      int region;
++} drm_i915_mem_destroy_heap_t;
++
++/* Allow X server to configure which pipes to monitor for vblank signals
++ */
++#define       DRM_I915_VBLANK_PIPE_A  1
++#define       DRM_I915_VBLANK_PIPE_B  2
++
++typedef struct drm_i915_vblank_pipe {
++      int pipe;
++} drm_i915_vblank_pipe_t;
++
++/* Schedule buffer swap at given vertical blank:
++ */
++typedef struct drm_i915_vblank_swap {
++      drm_drawable_t drawable;
++      enum drm_vblank_seq_type seqtype;
++      unsigned int sequence;
++} drm_i915_vblank_swap_t;
++
++#define I915_MMIO_READ        0
++#define I915_MMIO_WRITE 1
++
++#define I915_MMIO_MAY_READ    0x1
++#define I915_MMIO_MAY_WRITE   0x2
++
++#define MMIO_REGS_IA_PRIMATIVES_COUNT         0
++#define MMIO_REGS_IA_VERTICES_COUNT           1
++#define MMIO_REGS_VS_INVOCATION_COUNT         2
++#define MMIO_REGS_GS_PRIMITIVES_COUNT         3
++#define MMIO_REGS_GS_INVOCATION_COUNT         4
++#define MMIO_REGS_CL_PRIMITIVES_COUNT         5
++#define MMIO_REGS_CL_INVOCATION_COUNT         6
++#define MMIO_REGS_PS_INVOCATION_COUNT         7
++#define MMIO_REGS_PS_DEPTH_COUNT              8
++
++typedef struct drm_i915_mmio_entry {
++      unsigned int flag;
++      unsigned int offset;
++      unsigned int size;
++} drm_i915_mmio_entry_t;
++
++typedef struct drm_i915_mmio {
++      unsigned int read_write:1;
++      unsigned int reg:31;
++      void __user *data;
++} drm_i915_mmio_t;
++
++typedef struct drm_i915_hws_addr {
++      uint64_t addr;
++} drm_i915_hws_addr_t;
++
++/*
++ * Relocation header is 4 uint32_ts
++ * 0 - 32 bit reloc count
++ * 1 - 32-bit relocation type
++ * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
++ */
++#define I915_RELOC_HEADER 4
++
++/*
++ * type 0 relocation has 4-uint32_t stride
++ * 0 - offset into buffer
++ * 1 - delta to add in
++ * 2 - buffer handle
++ * 3 - reserved (for optimisations later).
++ */
++/*
++ * type 1 relocation has 4-uint32_t stride.
++ * Hangs off the first item in the op list.
++ * Performed after all valiations are done.
++ * Try to group relocs into the same relocatee together for
++ * performance reasons.
++ * 0 - offset into buffer
++ * 1 - delta to add in
++ * 2 - buffer index in op list.
++ * 3 - relocatee index in op list.
++ */
++#define I915_RELOC_TYPE_0 0
++#define I915_RELOC0_STRIDE 4
++#define I915_RELOC_TYPE_1 1
++#define I915_RELOC1_STRIDE 4
++
++
++struct drm_i915_op_arg {
++      uint64_t next;
++      uint64_t reloc_ptr;
++      int handled;
++      unsigned int pad64;
++      union {
++              struct drm_bo_op_req req;
++              struct drm_bo_arg_rep rep;
++      } d;
++
++};
++
++struct drm_i915_execbuffer {
++      uint64_t ops_list;
++      uint32_t num_buffers;
++      struct drm_i915_batchbuffer batch;
++      drm_context_t context; /* for lockless use in the future */
++      struct drm_fence_arg fence_arg;
++};
++
++struct drm_i915_gem_init {
++      /**
++       * Beginning offset in the GTT to be managed by the DRM memory
++       * manager.
++       */
++      uint64_t gtt_start;
++      /**
++       * Ending offset in the GTT to be managed by the DRM memory
++       * manager.
++       */
++      uint64_t gtt_end;
++};
++
++struct drm_i915_gem_create {
++      /**
++       * Requested size for the object.
++       *
++       * The (page-aligned) allocated size for the object will be returned.
++       */
++      uint64_t size;
++      /**
++       * Returned handle for the object.
++       *
++       * Object handles are nonzero.
++       */
++      uint32_t handle;
++      uint32_t pad;
++};
++
++struct drm_i915_gem_pread {
++      /** Handle for the object being read. */
++      uint32_t handle;
++      uint32_t pad;
++      /** Offset into the object to read from */
++      uint64_t offset;
++      /** Length of data to read */
++      uint64_t size;
++      /** Pointer to write the data into. */
++      uint64_t data_ptr;      /* void *, but pointers are not 32/64 compatible */
++};
++
++struct drm_i915_gem_pwrite {
++      /** Handle for the object being written to. */
++      uint32_t handle;
++      uint32_t pad;
++      /** Offset into the object to write to */
++      uint64_t offset;
++      /** Length of data to write */
++      uint64_t size;
++      /** Pointer to read the data from. */
++      uint64_t data_ptr;      /* void *, but pointers are not 32/64 compatible */
++};
++
++struct drm_i915_gem_mmap {
++      /** Handle for the object being mapped. */
++      uint32_t handle;
++      uint32_t pad;
++      /** Offset in the object to map. */
++      uint64_t offset;
++      /**
++       * Length of data to map.
++       *
++       * The value will be page-aligned.
++       */
++      uint64_t size;
++      /** Returned pointer the data was mapped at */
++      uint64_t addr_ptr;      /* void *, but pointers are not 32/64 compatible */
++};
++
++struct drm_i915_gem_set_domain {
++      /** Handle for the object */
++      uint32_t handle;
++
++      /** New read domains */
++      uint32_t read_domains;
++
++      /** New write domain */
++      uint32_t write_domain;
++};
++
++struct drm_i915_gem_sw_finish {
++      /** Handle for the object */
++      uint32_t handle;
++};
++
++struct drm_i915_gem_relocation_entry {
++      /**
++       * Handle of the buffer being pointed to by this relocation entry.
++       *
++       * It's appealing to make this be an index into the mm_validate_entry
++       * list to refer to the buffer, but this allows the driver to create
++       * a relocation list for state buffers and not re-write it per
++       * exec using the buffer.
++       */
++      uint32_t target_handle;
++
++      /**
++       * Value to be added to the offset of the target buffer to make up
++       * the relocation entry.
++       */
++      uint32_t delta;
++
++      /** Offset in the buffer the relocation entry will be written into */
++      uint64_t offset;
++
++      /**
++       * Offset value of the target buffer that the relocation entry was last
++       * written as.
++       *
++       * If the buffer has the same offset as last time, we can skip syncing
++       * and writing the relocation.  This value is written back out by
++       * the execbuffer ioctl when the relocation is written.
++       */
++      uint64_t presumed_offset;
++
++      /**
++       * Target memory domains read by this operation.
++       */
++      uint32_t read_domains;
++
++      /**
++       * Target memory domains written by this operation.
++       *
++       * Note that only one domain may be written by the whole
++       * execbuffer operation, so that where there are conflicts,
++       * the application will get -EINVAL back.
++       */
++      uint32_t write_domain;
++};
++
++/** @{
++ * Intel memory domains
++ *
++ * Most of these just align with the various caches in
++ * the system and are used to flush and invalidate as
++ * objects end up cached in different domains.
++ */
++/** CPU cache */
++#define I915_GEM_DOMAIN_CPU           0x00000001
++/** Render cache, used by 2D and 3D drawing */
++#define I915_GEM_DOMAIN_RENDER                0x00000002
++/** Sampler cache, used by texture engine */
++#define I915_GEM_DOMAIN_SAMPLER               0x00000004
++/** Command queue, used to load batch buffers */
++#define I915_GEM_DOMAIN_COMMAND               0x00000008
++/** Instruction cache, used by shader programs */
++#define I915_GEM_DOMAIN_INSTRUCTION   0x00000010
++/** Vertex address cache */
++#define I915_GEM_DOMAIN_VERTEX                0x00000020
++/** GTT domain - aperture and scanout */
++#define I915_GEM_DOMAIN_GTT           0x00000040
++/** @} */
++
++struct drm_i915_gem_exec_object {
++      /**
++       * User's handle for a buffer to be bound into the GTT for this
++       * operation.
++       */
++      uint32_t handle;
++
++      /** Number of relocations to be performed on this buffer */
++      uint32_t relocation_count;
++      /**
++       * Pointer to array of struct drm_i915_gem_relocation_entry containing
++       * the relocations to be performed in this buffer.
++       */
++      uint64_t relocs_ptr;
++
++      /** Required alignment in graphics aperture */
++      uint64_t alignment;
++
++      /**
++       * Returned value of the updated offset of the object, for future
++       * presumed_offset writes.
++       */
++      uint64_t offset;
++};
++
++struct drm_i915_gem_execbuffer {
++      /**
++       * List of buffers to be validated with their relocations to be
++       * performend on them.
++       *
++       * This is a pointer to an array of struct drm_i915_gem_validate_entry.
++       *
++       * These buffers must be listed in an order such that all relocations
++       * a buffer is performing refer to buffers that have already appeared
++       * in the validate list.
++       */
++      uint64_t buffers_ptr;
++      uint32_t buffer_count;
++
++      /** Offset in the batchbuffer to start execution from. */
++      uint32_t batch_start_offset;
++      /** Bytes used in batchbuffer from batch_start_offset */
++      uint32_t batch_len;
++      uint32_t DR1;
++      uint32_t DR4;
++      uint32_t num_cliprects;
++      uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
++};
++
++struct drm_i915_gem_pin {
++      /** Handle of the buffer to be pinned. */
++      uint32_t handle;
++      uint32_t pad;
++
++      /** alignment required within the aperture */
++      uint64_t alignment;
++
++      /** Returned GTT offset of the buffer. */
++      uint64_t offset;
++};
++
++struct drm_i915_gem_unpin {
++      /** Handle of the buffer to be unpinned. */
++      uint32_t handle;
++      uint32_t pad;
++};
++
++struct drm_i915_gem_busy {
++      /** Handle of the buffer to check for busy */
++      uint32_t handle;
++
++      /** Return busy status (1 if busy, 0 if idle) */
++      uint32_t busy;
++};
++
++#define I915_TILING_NONE      0
++#define I915_TILING_X         1
++#define I915_TILING_Y         2
++
++#define I915_BIT_6_SWIZZLE_NONE               0
++#define I915_BIT_6_SWIZZLE_9          1
++#define I915_BIT_6_SWIZZLE_9_10               2
++#define I915_BIT_6_SWIZZLE_9_11               3
++#define I915_BIT_6_SWIZZLE_9_10_11    4
++/* Not seen by userland */
++#define I915_BIT_6_SWIZZLE_UNKNOWN    5
++
++struct drm_i915_gem_set_tiling {
++      /** Handle of the buffer to have its tiling state updated */
++      uint32_t handle;
++
++      /**
++       * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
++       * I915_TILING_Y).
++       *
++       * This value is to be set on request, and will be updated by the
++       * kernel on successful return with the actual chosen tiling layout.
++       *
++       * The tiling mode may be demoted to I915_TILING_NONE when the system
++       * has bit 6 swizzling that can't be managed correctly by GEM.
++       *
++       * Buffer contents become undefined when changing tiling_mode.
++       */
++      uint32_t tiling_mode;
++
++      /**
++       * Stride in bytes for the object when in I915_TILING_X or
++       * I915_TILING_Y.
++       */
++      uint32_t stride;
++
++      /**
++       * Returned address bit 6 swizzling required for CPU access through
++       * mmap mapping.
++       */
++      uint32_t swizzle_mode;
++};
++
++struct drm_i915_gem_get_tiling {
++      /** Handle of the buffer to get tiling state for. */
++      uint32_t handle;
++
++      /**
++       * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
++       * I915_TILING_Y).
++       */
++      uint32_t tiling_mode;
++
++      /**
++       * Returned address bit 6 swizzling required for CPU access through
++       * mmap mapping.
++       */
++      uint32_t swizzle_mode;
++};
++
++#endif                                /* _I915_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_drv.c git-nokia/drivers/gpu/drm-tungsten/i915_drv.c
+--- git/drivers/gpu/drm-tungsten/i915_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,222 @@
++/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
++ */
++/*
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      i915_PCI_IDS
++};
++
++#ifdef I915_HAVE_FENCE
++extern struct drm_fence_driver i915_fence_driver;
++#endif
++
++#ifdef I915_HAVE_BUFFER
++
++static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
++static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
++
++static struct drm_bo_driver i915_bo_driver = {
++      .mem_type_prio = i915_mem_prios,
++      .mem_busy_prio = i915_busy_prios,
++      .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
++      .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
++      .create_ttm_backend_entry = i915_create_ttm_backend_entry,
++      .fence_type = i915_fence_type,
++      .invalidate_caches = i915_invalidate_caches,
++      .init_mem_type = i915_init_mem_type,
++      .evict_flags = i915_evict_flags,
++      .move = i915_move,
++      .ttm_cache_flush = i915_flush_ttm,
++      .command_stream_barrier = NULL,
++};
++#endif
++
++static int i915_suspend(struct drm_device *dev, pm_message_t state)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      if (!dev || !dev_priv) {
++              printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
++              printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
++              return -ENODEV;
++      }
++
++      if (state.event == PM_EVENT_PRETHAW)
++              return 0;
++
++      pci_save_state(dev->pdev);
++
++      i915_save_state(dev);
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_free(dev);
++#endif
++
++      if (state.event == PM_EVENT_SUSPEND) {
++              /* Shut down the device */
++              pci_disable_device(dev->pdev);
++              pci_set_power_state(dev->pdev, PCI_D3hot);
++      }
++
++      return 0;
++}
++
++static int i915_resume(struct drm_device *dev)
++{
++      pci_set_power_state(dev->pdev, PCI_D0);
++      pci_restore_state(dev->pdev);
++      if (pci_enable_device(dev->pdev))
++              return -1;
++      pci_set_master(dev->pdev);
++
++      i915_restore_state(dev);
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      intel_opregion_init(dev);
++#endif
++
++      return 0;
++}
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static void remove(struct pci_dev *pdev);
++
++static struct drm_driver driver = {
++      /* don't use mtrr's here, the Xserver or user space app should
++       * deal with them for intel hardware.
++       */
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
++          DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
++      .load = i915_driver_load,
++      .unload = i915_driver_unload,
++      .firstopen = i915_driver_firstopen,
++      .open = i915_driver_open,
++      .lastclose = i915_driver_lastclose,
++      .preclose = i915_driver_preclose,
++      .postclose = i915_driver_postclose,
++      .suspend = i915_suspend,
++      .resume = i915_resume,
++      .device_is_agp = i915_driver_device_is_agp,
++      .get_vblank_counter = i915_get_vblank_counter,
++      .enable_vblank = i915_enable_vblank,
++      .disable_vblank = i915_disable_vblank,
++      .irq_preinstall = i915_driver_irq_preinstall,
++      .irq_postinstall = i915_driver_irq_postinstall,
++      .irq_uninstall = i915_driver_irq_uninstall,
++      .irq_handler = i915_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .proc_init = i915_gem_proc_init,
++      .proc_cleanup = i915_gem_proc_cleanup,
++      .ioctls = i915_ioctls,
++      .gem_init_object = i915_gem_init_object,
++      .gem_free_object = i915_gem_free_object,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = i915_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = remove,
++              },
++#ifdef I915_HAVE_FENCE
++      .fence_driver = &i915_fence_driver,
++#endif
++#ifdef I915_HAVE_BUFFER
++      .bo_driver = &i915_bo_driver,
++#endif
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      int ret;
++
++      /* On the 945G/GM, the chipset reports the MSI capability on the
++       * integrated graphics even though the support isn't actually there
++       * according to the published specs.  It doesn't appear to function
++       * correctly in testing on 945G.
++       * This may be a side effect of MSI having been made available for PEG
++       * and the registers being closely associated.
++       */
++      if (pdev->device != 0x2772 && pdev->device != 0x27A2)
++              (void )pci_enable_msi(pdev);
++
++      ret = drm_get_dev(pdev, ent, &driver);
++      if (ret && pdev->msi_enabled)
++              pci_disable_msi(pdev);
++      return ret;
++}
++static void remove(struct pci_dev *pdev)
++{
++      if (pdev->msi_enabled)
++              pci_disable_msi(pdev);
++      drm_cleanup_pci(pdev);
++}
++
++static int __init i915_init(void)
++{
++      driver.num_ioctls = i915_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit i915_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(i915_init);
++module_exit(i915_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_drv.h git-nokia/drivers/gpu/drm-tungsten/i915_drv.h
+--- git/drivers/gpu/drm-tungsten/i915_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2123 @@
++/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
++ */
++/*
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _I915_DRV_H_
++#define _I915_DRV_H_
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Tungsten Graphics, Inc."
++
++#define DRIVER_NAME           "i915"
++#define DRIVER_DESC           "Intel Graphics"
++#define DRIVER_DATE           "20080730"
++
++#if defined(__linux__)
++#define I915_HAVE_FENCE
++#define I915_HAVE_BUFFER
++#define I915_HAVE_GEM
++#endif
++
++/* Interface history:
++ *
++ * 1.1: Original.
++ * 1.2: Add Power Management
++ * 1.3: Add vblank support
++ * 1.4: Fix cmdbuffer path, add heap destroy
++ * 1.5: Add vblank pipe configuration
++ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
++ *      - Support vertical blank on secondary display pipe
++ * 1.8: New ioctl for ARB_Occlusion_Query
++ * 1.9: Usable page flipping and triple buffering
++ * 1.10: Plane/pipe disentangling
++ * 1.11: TTM superioctl
++ * 1.12: TTM relocation optimization
++ */
++#define DRIVER_MAJOR          1
++#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
++#define DRIVER_MINOR          13
++#else
++#define DRIVER_MINOR          6
++#endif
++#define DRIVER_PATCHLEVEL     0
++
++enum pipe {
++    PIPE_A = 0,
++    PIPE_B,
++};
++
++#ifdef I915_HAVE_BUFFER
++#define I915_MAX_VALIDATE_BUFFERS 4096
++struct drm_i915_validate_buffer;
++#endif
++
++#define WATCH_COHERENCY       0
++#define WATCH_BUF     0
++#define WATCH_EXEC    0
++#define WATCH_LRU     0
++#define WATCH_RELOC   0
++#define WATCH_INACTIVE        0
++#define WATCH_PWRITE  0
++
++typedef struct _drm_i915_ring_buffer {
++      int tail_mask;
++      unsigned long Size;
++      u8 *virtual_start;
++      int head;
++      int tail;
++      int space;
++      drm_local_map_t map;
++      struct drm_gem_object *ring_obj;
++} drm_i915_ring_buffer_t;
++
++struct mem_block {
++      struct mem_block *next;
++      struct mem_block *prev;
++      int start;
++      int size;
++      struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++};
++
++typedef struct _drm_i915_vbl_swap {
++      struct list_head head;
++      drm_drawable_t drw_id;
++      unsigned int plane;
++      unsigned int sequence;
++      int flip;
++} drm_i915_vbl_swap_t;
++
++#ifdef __linux__
++struct opregion_header;
++struct opregion_acpi;
++struct opregion_swsci;
++struct opregion_asle;
++
++struct intel_opregion {
++      struct opregion_header *header;
++      struct opregion_acpi *acpi;
++      struct opregion_swsci *swsci;
++      struct opregion_asle *asle;
++
++      int enabled;
++};
++#endif
++
++typedef struct drm_i915_private {
++      struct drm_device *dev;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio_map;
++
++      drm_i915_sarea_t *sarea_priv;
++      drm_i915_ring_buffer_t ring;
++
++      drm_dma_handle_t *status_page_dmah;
++      void *hw_status_page;
++      dma_addr_t dma_status_page;
++      uint32_t counter;
++      unsigned int status_gfx_addr;
++      drm_local_map_t hws_map;
++      struct drm_gem_object *hws_obj;
++
++      unsigned int cpp;
++
++      wait_queue_head_t irq_queue;
++      atomic_t irq_received;
++
++      int tex_lru_log_granularity;
++      int allow_batchbuffer;
++      struct mem_block *agp_heap;
++      unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
++      int vblank_pipe;
++      DRM_SPINTYPE user_irq_lock;
++      int user_irq_refcount;
++      int fence_irq_on;
++      uint32_t irq_mask_reg;
++      int irq_enabled;
++
++#ifdef I915_HAVE_FENCE
++      uint32_t flush_sequence;
++      uint32_t flush_flags;
++      uint32_t flush_pending;
++      uint32_t saved_flush_status;
++#endif
++#ifdef I915_HAVE_BUFFER
++      void *agp_iomap;
++      unsigned int max_validate_buffers;
++      struct mutex cmdbuf_mutex;
++      struct drm_i915_validate_buffer *val_bufs;
++#endif
++
++      DRM_SPINTYPE swaps_lock;
++      drm_i915_vbl_swap_t vbl_swaps;
++      unsigned int swaps_pending;
++#if defined(I915_HAVE_BUFFER)
++      /* DRI2 sarea */
++      struct drm_buffer_object *sarea_bo;
++      struct drm_bo_kmap_obj sarea_kmap;
++#endif
++
++#ifdef __linux__
++      struct intel_opregion opregion;
++#endif
++
++      /* Register state */
++      u8 saveLBB;
++      u32 saveDSPACNTR;
++      u32 saveDSPBCNTR;
++      u32 saveDSPARB;
++      u32 savePIPEACONF;
++      u32 savePIPEBCONF;
++      u32 savePIPEASRC;
++      u32 savePIPEBSRC;
++      u32 saveFPA0;
++      u32 saveFPA1;
++      u32 saveDPLL_A;
++      u32 saveDPLL_A_MD;
++      u32 saveHTOTAL_A;
++      u32 saveHBLANK_A;
++      u32 saveHSYNC_A;
++      u32 saveVTOTAL_A;
++      u32 saveVBLANK_A;
++      u32 saveVSYNC_A;
++      u32 saveBCLRPAT_A;
++      u32 savePIPEASTAT;
++      u32 saveDSPASTRIDE;
++      u32 saveDSPASIZE;
++      u32 saveDSPAPOS;
++      u32 saveDSPAADDR;
++      u32 saveDSPASURF;
++      u32 saveDSPATILEOFF;
++      u32 savePFIT_PGM_RATIOS;
++      u32 saveBLC_PWM_CTL;
++      u32 saveBLC_PWM_CTL2;
++      u32 saveFPB0;
++      u32 saveFPB1;
++      u32 saveDPLL_B;
++      u32 saveDPLL_B_MD;
++      u32 saveHTOTAL_B;
++      u32 saveHBLANK_B;
++      u32 saveHSYNC_B;
++      u32 saveVTOTAL_B;
++      u32 saveVBLANK_B;
++      u32 saveVSYNC_B;
++      u32 saveBCLRPAT_B;
++      u32 savePIPEBSTAT;
++      u32 saveDSPBSTRIDE;
++      u32 saveDSPBSIZE;
++      u32 saveDSPBPOS;
++      u32 saveDSPBADDR;
++      u32 saveDSPBSURF;
++      u32 saveDSPBTILEOFF;
++      u32 saveVGA0;
++      u32 saveVGA1;
++      u32 saveVGA_PD;
++      u32 saveVGACNTRL;
++      u32 saveADPA;
++      u32 saveLVDS;
++      u32 savePP_ON_DELAYS;
++      u32 savePP_OFF_DELAYS;
++      u32 saveDVOA;
++      u32 saveDVOB;
++      u32 saveDVOC;
++      u32 savePP_ON;
++      u32 savePP_OFF;
++      u32 savePP_CONTROL;
++      u32 savePP_DIVISOR;
++      u32 savePFIT_CONTROL;
++      u32 save_palette_a[256];
++      u32 save_palette_b[256];
++      u32 saveFBC_CFB_BASE;
++      u32 saveFBC_LL_BASE;
++      u32 saveFBC_CONTROL;
++      u32 saveFBC_CONTROL2;
++      u32 saveIER;
++      u32 saveIIR;
++      u32 saveIMR;
++      u32 saveCACHE_MODE_0;
++      u32 saveD_STATE;
++      u32 saveCG_2D_DIS;
++      u32 saveMI_ARB_STATE;
++      u32 saveSWF0[16];
++      u32 saveSWF1[16];
++      u32 saveSWF2[3];
++      u8 saveMSR;
++      u8 saveSR[8];
++      u8 saveGR[25];
++      u8 saveAR_INDEX;
++      u8 saveAR[21];
++      u8 saveDACMASK;
++      u8 saveDACDATA[256*3]; /* 256 3-byte colors */
++      u8 saveCR[37];
++
++      struct {
++#ifdef __linux__
++              struct drm_mm gtt_space;
++#endif
++              /**
++               * List of objects currently involved in rendering from the
++               * ringbuffer.
++               *
++               * A reference is held on the buffer while on this list.
++               */
++              struct list_head active_list;
++
++              /**
++               * List of objects which are not in the ringbuffer but which
++               * still have a write_domain which needs to be flushed before
++               * unbinding.
++               *
++               * A reference is held on the buffer while on this list.
++               */
++              struct list_head flushing_list;
++
++              /**
++               * LRU list of objects which are not in the ringbuffer and
++               * are ready to unbind, but are still in the GTT.
++               *
++               * A reference is not held on the buffer while on this list,
++               * as merely being GTT-bound shouldn't prevent its being
++               * freed, and we'll pull it off the list in the free path.
++               */
++              struct list_head inactive_list;
++
++              /**
++               * List of breadcrumbs associated with GPU requests currently
++               * outstanding.
++               */
++              struct list_head request_list;
++#ifdef __linux__
++              /**
++               * We leave the user IRQ off as much as possible,
++               * but this means that requests will finish and never
++               * be retired once the system goes idle. Set a timer to
++               * fire periodically while the ring is running. When it
++               * fires, go retire requests.
++               */
++              struct delayed_work retire_work;
++#endif
++              uint32_t next_gem_seqno;
++
++              /**
++               * Waiting sequence number, if any
++               */
++              uint32_t waiting_gem_seqno;
++
++              /**
++               * Last seq seen at irq time
++               */
++              uint32_t irq_gem_seqno;
++
++              /**
++               * Flag if the X Server, and thus DRM, is not currently in
++               * control of the device.
++               *
++               * This is set between LeaveVT and EnterVT.  It needs to be
++               * replaced with a semaphore.  It also needs to be
++               * transitioned away from for kernel modesetting.
++               */
++              int suspended;
++
++              /**
++               * Flag if the hardware appears to be wedged.
++               *
++               * This is set when attempts to idle the device timeout.
++               * It prevents command submission from occuring and makes
++               * every pending request fail
++               */
++              int wedged;
++
++              /** Bit 6 swizzling required for X tiling */
++              uint32_t bit_6_swizzle_x;
++              /** Bit 6 swizzling required for Y tiling */
++              uint32_t bit_6_swizzle_y;
++      } mm;
++} drm_i915_private_t;
++
++struct drm_i915_file_private {
++      struct {
++              uint32_t last_gem_seqno;
++              uint32_t last_gem_throttle_seqno;
++      } mm;
++};
++
++enum intel_chip_family {
++      CHIP_I8XX = 0x01,
++      CHIP_I9XX = 0x02,
++      CHIP_I915 = 0x04,
++      CHIP_I965 = 0x08,
++};
++
++/** driver private structure attached to each drm_gem_object */
++struct drm_i915_gem_object {
++      struct drm_gem_object *obj;
++
++      /** Current space allocated to this object in the GTT, if any. */
++      struct drm_mm_node *gtt_space;
++
++      /** This object's place on the active/flushing/inactive lists */
++      struct list_head list;
++
++      /**
++       * This is set if the object is on the active or flushing lists
++       * (has pending rendering), and is not set if it's on inactive (ready
++       * to be unbound).
++       */
++      int active;
++
++      /**
++       * This is set if the object has been written to since last bound
++       * to the GTT
++       */
++      int dirty;
++
++      /** AGP memory structure for our GTT binding. */
++      DRM_AGP_MEM *agp_mem;
++
++      struct page **page_list;
++
++      /**
++       * Current offset of the object in GTT space.
++       *
++       * This is the same as gtt_space->start
++       */
++      uint32_t gtt_offset;
++
++      /** Boolean whether this object has a valid gtt offset. */
++      int gtt_bound;
++
++      /** How many users have pinned this object in GTT space */
++      int pin_count;
++
++      /** Breadcrumb of last rendering to the buffer. */
++      uint32_t last_rendering_seqno;
++
++      /** Current tiling mode for the object. */
++      uint32_t tiling_mode;
++
++      /**
++       * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
++       * GEM_DOMAIN_CPU is not in the object's read domain.
++       */
++      uint8_t *page_cpu_valid;
++};
++
++/**
++ * Request queue structure.
++ *
++ * The request queue allows us to note sequence numbers that have been emitted
++ * and may be associated with active buffers to be retired.
++ *
++ * By keeping this list, we can avoid having to do questionable
++ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
++ * an emission time with seqnos for tracking how far ahead of the GPU we are.
++ */
++struct drm_i915_gem_request {
++      /** GEM sequence number associated with this request. */
++      uint32_t seqno;
++
++      /** Time at which this request was emitted, in jiffies. */
++      unsigned long emitted_jiffies;
++
++      /** Cache domains that were flushed at the start of the request. */
++      uint32_t flush_domains;
++
++      struct list_head list;
++};
++
++extern struct drm_ioctl_desc i915_ioctls[];
++extern int i915_max_ioctl;
++
++                              /* i915_dma.c */
++extern void i915_kernel_lost_context(struct drm_device * dev);
++extern int i915_driver_load(struct drm_device *, unsigned long flags);
++extern int i915_driver_unload(struct drm_device *);
++extern void i915_driver_lastclose(struct drm_device * dev);
++extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
++extern void i915_driver_preclose(struct drm_device *dev,
++                               struct drm_file *file_priv);
++extern void i915_driver_postclose(struct drm_device *dev,
++                                struct drm_file *file_priv);
++extern int i915_driver_device_is_agp(struct drm_device * dev);
++extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
++                            unsigned long arg);
++extern void i915_emit_breadcrumb(struct drm_device *dev);
++extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
++extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
++extern int i915_driver_firstopen(struct drm_device *dev);
++extern int i915_dispatch_batchbuffer(struct drm_device * dev,
++                                   drm_i915_batchbuffer_t * batch);
++extern int i915_quiescent(struct drm_device *dev);
++extern int i915_init_hardware_status(struct drm_device *dev);
++extern void i915_free_hardware_status(struct drm_device *dev);
++
++int i915_emit_box(struct drm_device * dev,
++                struct drm_clip_rect __user * boxes,
++                int i, int DR1, int DR4);
++
++/* i915_irq.c */
++extern int i915_irq_emit(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int i915_irq_wait(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++
++extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
++extern void i915_driver_irq_preinstall(struct drm_device * dev);
++extern int i915_driver_irq_postinstall(struct drm_device * dev);
++extern void i915_driver_irq_uninstall(struct drm_device * dev);
++extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv);
++extern int i915_emit_irq(struct drm_device * dev);
++extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
++extern int i915_enable_vblank(struct drm_device *dev, int crtc);
++extern void i915_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int i915_vblank_swap(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
++extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
++
++/* i915_mem.c */
++extern int i915_mem_alloc(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++extern int i915_mem_free(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int i915_mem_init_heap(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv);
++extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv);
++extern void i915_mem_takedown(struct mem_block **heap);
++extern void i915_mem_release(struct drm_device * dev,
++                           struct drm_file *file_priv,
++                           struct mem_block *heap);
++
++/* i915_suspend.c */
++extern int i915_save_state(struct drm_device *dev);
++extern int i915_restore_state(struct drm_device *dev);
++
++#ifdef I915_HAVE_FENCE
++/* i915_fence.c */
++extern void i915_fence_handler(struct drm_device *dev);
++extern void i915_invalidate_reported_sequence(struct drm_device *dev);
++
++#endif
++
++#ifdef I915_HAVE_BUFFER
++/* i915_buffer.c */
++extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
++extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
++                         uint32_t *type);
++extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
++extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
++                             struct drm_mem_type_manager *man);
++extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
++extern int i915_move(struct drm_buffer_object *bo, int evict,
++              int no_wait, struct drm_bo_mem_reg *new_mem);
++void i915_flush_ttm(struct drm_ttm *ttm);
++/* i915_execbuf.c */
++int i915_execbuffer(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv);
++/* i915_gem.c */
++int i915_gem_init_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_create_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv);
++int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv);
++int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv);
++int i915_gem_execbuffer(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv);
++int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++int i915_gem_set_tiling(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++int i915_gem_get_tiling(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++void i915_gem_load(struct drm_device *dev);
++int i915_gem_proc_init(struct drm_minor *minor);
++void i915_gem_proc_cleanup(struct drm_minor *minor);
++int i915_gem_init_object(struct drm_gem_object *obj);
++void i915_gem_free_object(struct drm_gem_object *obj);
++int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
++void i915_gem_object_unpin(struct drm_gem_object *obj);
++void i915_gem_lastclose(struct drm_device *dev);
++uint32_t i915_get_gem_seqno(struct drm_device *dev);
++void i915_gem_retire_requests(struct drm_device *dev);
++void i915_gem_retire_work_handler(struct work_struct *work);
++void i915_gem_clflush_object(struct drm_gem_object *obj);
++#endif
++
++/* i915_gem_tiling.c */
++void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
++
++/* i915_gem_debug.c */
++#if WATCH_INACTIVE
++void i915_verify_inactive(struct drm_device *dev, char *file, int line);
++#else
++#define i915_verify_inactive(dev,file,line)
++#endif
++void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
++void i915_gem_dump_object(struct drm_gem_object *obj, int len,
++                        const char *where, uint32_t mark);
++void i915_dump_lru(struct drm_device *dev, const char *where);
++
++#ifdef __linux__
++/* i915_opregion.c */
++extern int intel_opregion_init(struct drm_device *dev);
++extern void intel_opregion_free(struct drm_device *dev);
++extern void opregion_asle_intr(struct drm_device *dev);
++extern void opregion_enable_asle(struct drm_device *dev);
++#endif
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++extern void intel_init_chipset_flush_compat(struct drm_device *dev);
++extern void intel_fini_chipset_flush_compat(struct drm_device *dev);
++#endif
++#endif
++
++#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
++#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
++#define I915_READ16(reg)      DRM_READ16(dev_priv->mmio_map, (reg))
++#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
++#define I915_READ8(reg)               DRM_READ8(dev_priv->mmio_map, (reg))
++#define I915_WRITE8(reg,val)  DRM_WRITE8(dev_priv->mmio_map, (reg), (val))
++
++#if defined(__FreeBSD__)
++typedef boolean_t bool;
++#endif
++
++#define I915_VERBOSE 0
++#define I915_RING_VALIDATE 0
++
++#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
++
++#define RING_LOCALS   unsigned int outring, ringmask, outcount; \
++                      volatile char *virt;
++
++#if I915_RING_VALIDATE
++void i915_ring_validate(struct drm_device *dev, const char *func, int line);
++#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
++#else
++#define I915_RING_DO_VALIDATE(dev)
++#endif
++
++#define BEGIN_LP_RING(n) do {                         \
++      if (I915_VERBOSE)                               \
++              DRM_DEBUG("BEGIN_LP_RING(%d)\n",        \
++                               (n));                  \
++      I915_RING_DO_VALIDATE(dev);                     \
++      if (dev_priv->ring.space < (n)*4)                      \
++              i915_wait_ring(dev, (n)*4, __FUNCTION__);      \
++      outcount = 0;                                   \
++      outring = dev_priv->ring.tail;                  \
++      ringmask = dev_priv->ring.tail_mask;            \
++      virt = dev_priv->ring.virtual_start;            \
++} while (0)
++
++#define OUT_RING(n) do {                                      \
++      if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));      \
++      *(volatile unsigned int *)(virt + outring) = (n);               \
++      outcount++;                                             \
++      outring += 4;                                           \
++      outring &= ringmask;                                    \
++} while (0)
++
++#define ADVANCE_LP_RING() do {                                                \
++      if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);   \
++      I915_RING_DO_VALIDATE(dev);                                     \
++      dev_priv->ring.tail = outring;                                  \
++      dev_priv->ring.space -= outcount * 4;                           \
++      I915_WRITE(PRB0_TAIL, outring);                 \
++} while(0)
++
++extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
++
++#define BREADCRUMB_BITS 31
++#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
++
++#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
++/**
++ * Reads a dword out of the status page, which is written to from the command
++ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
++ * MI_STORE_DATA_IMM.
++ *
++ * The following dwords have a reserved meaning:
++ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
++ * 4: ring 0 head pointer
++ * 5: ring 1 head pointer (915-class)
++ * 6: ring 2 head pointer (915-class)
++ *
++ * The area from dword 0x10 to 0x3ff is available for driver usage.
++ */
++#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
++#define I915_GEM_HWS_INDEX            0x10
++
++/* MCH MMIO space */
++/** 915-945 and GM965 MCH register controlling DRAM channel access */
++#define DCC           0x200
++#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL            (0 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC   (1 << 0)
++#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED  (2 << 0)
++#define DCC_ADDRESSING_MODE_MASK                      (3 << 0)
++#define DCC_CHANNEL_XOR_DISABLE                               (1 << 10)
++
++/** 965 MCH register controlling DRAM channel configuration */
++#define CHDECMISC             0x111
++#define CHDECMISC_FLEXMEMORY          (1 << 1)
++
++/*
++ * The Bridge device's PCI config space has information about the
++ * fb aperture size and the amount of pre-reserved memory.
++ */
++#define INTEL_GMCH_CTRL               0x52
++#define INTEL_GMCH_ENABLED    0x4
++#define INTEL_GMCH_MEM_MASK   0x1
++#define INTEL_GMCH_MEM_64M    0x1
++#define INTEL_GMCH_MEM_128M   0
++
++#define INTEL_855_GMCH_GMS_MASK               (0x7 << 4)
++#define INTEL_855_GMCH_GMS_DISABLED   (0x0 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_1M  (0x1 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_4M  (0x2 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_8M  (0x3 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
++#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
++
++#define INTEL_915G_GMCH_GMS_STOLEN_48M        (0x6 << 4)
++#define INTEL_915G_GMCH_GMS_STOLEN_64M        (0x7 << 4)
++
++/* PCI config space */
++
++#define HPLLCC        0xc0 /* 855 only */
++#define   GC_CLOCK_CONTROL_MASK               (3 << 0)
++#define   GC_CLOCK_133_200            (0 << 0)
++#define   GC_CLOCK_100_200            (1 << 0)
++#define   GC_CLOCK_100_133            (2 << 0)
++#define   GC_CLOCK_166_250            (3 << 0)
++#define GCFGC 0xf0 /* 915+ only */
++#define   GC_LOW_FREQUENCY_ENABLE     (1 << 7)
++#define   GC_DISPLAY_CLOCK_190_200_MHZ        (0 << 4)
++#define   GC_DISPLAY_CLOCK_333_MHZ    (4 << 4)
++#define   GC_DISPLAY_CLOCK_MASK               (7 << 4)
++#define LBB   0xf4
++
++/* VGA stuff */
++
++#define VGA_ST01_MDA 0x3ba
++#define VGA_ST01_CGA 0x3da
++
++#define VGA_MSR_WRITE 0x3c2
++#define VGA_MSR_READ 0x3cc
++#define   VGA_MSR_MEM_EN (1<<1)
++#define   VGA_MSR_CGA_MODE (1<<0)
++
++#define VGA_SR_INDEX 0x3c4
++#define VGA_SR_DATA 0x3c5
++
++#define VGA_AR_INDEX 0x3c0
++#define   VGA_AR_VID_EN (1<<5)
++#define VGA_AR_DATA_WRITE 0x3c0
++#define VGA_AR_DATA_READ 0x3c1
++
++#define VGA_GR_INDEX 0x3ce
++#define VGA_GR_DATA 0x3cf
++/* GR05 */
++#define   VGA_GR_MEM_READ_MODE_SHIFT 3
++#define     VGA_GR_MEM_READ_MODE_PLANE 1
++/* GR06 */
++#define   VGA_GR_MEM_MODE_MASK 0xc
++#define   VGA_GR_MEM_MODE_SHIFT 2
++#define   VGA_GR_MEM_A0000_AFFFF 0
++#define   VGA_GR_MEM_A0000_BFFFF 1
++#define   VGA_GR_MEM_B0000_B7FFF 2
++#define   VGA_GR_MEM_B0000_BFFFF 3
++
++#define VGA_DACMASK 0x3c6
++#define VGA_DACRX 0x3c7
++#define VGA_DACWX 0x3c8
++#define VGA_DACDATA 0x3c9
++
++#define VGA_CR_INDEX_MDA 0x3b4
++#define VGA_CR_DATA_MDA 0x3b5
++#define VGA_CR_INDEX_CGA 0x3d4
++#define VGA_CR_DATA_CGA 0x3d5
++
++/*
++ * Memory interface instructions used by the kernel
++ */
++#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
++
++#define MI_NOOP                       MI_INSTR(0, 0)
++#define MI_USER_INTERRUPT     MI_INSTR(0x02, 0)
++#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
++#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
++#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
++#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
++#define MI_FLUSH              MI_INSTR(0x04, 0)
++#define   MI_READ_FLUSH               (1 << 0)
++#define   MI_EXE_FLUSH                (1 << 1)
++#define   MI_NO_WRITE_FLUSH   (1 << 2)
++#define   MI_SCENE_COUNT      (1 << 3) /* just increment scene count */
++#define   MI_END_SCENE                (1 << 4) /* flush binner and incr scene count */
++#define MI_BATCH_BUFFER_END   MI_INSTR(0x0a, 0)
++#define MI_REPORT_HEAD                MI_INSTR(0x07, 0)
++#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
++#define MI_STORE_DWORD_IMM    MI_INSTR(0x20, 1)
++#define   MI_MEM_VIRTUAL      (1 << 22) /* 965+ only */
++#define MI_STORE_DWORD_INDEX  MI_INSTR(0x21, 1)
++#define   MI_STORE_DWORD_INDEX_SHIFT 2
++#define MI_LOAD_REGISTER_IMM  MI_INSTR(0x22, 1)
++#define MI_BATCH_BUFFER               MI_INSTR(0x30, 1)
++#define   MI_BATCH_NON_SECURE (1)
++#define   MI_BATCH_NON_SECURE_I965 (1<<8)
++#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
++
++/*
++ * 3D instructions used by the kernel
++ */
++#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
++
++#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
++#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define   SC_UPDATE_SCISSOR       (0x1<<1)
++#define   SC_ENABLE_MASK          (0x1<<0)
++#define   SC_ENABLE               (0x1<<0)
++#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
++#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
++#define   SCI_YMIN_MASK      (0xffff<<16)
++#define   SCI_XMIN_MASK      (0xffff<<0)
++#define   SCI_YMAX_MASK      (0xffff<<16)
++#define   SCI_XMAX_MASK      (0xffff<<0)
++#define GFX_OP_SCISSOR_ENABLE  ((0x3<<29)|(0x1c<<24)|(0x10<<19))
++#define GFX_OP_SCISSOR_RECT    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
++#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
++#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
++#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
++#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
++#define GFX_OP_DESTBUFFER_INFO         ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
++#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
++#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
++#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
++#define XY_SRC_COPY_BLT_CMD           ((2<<29)|(0x53<<22)|6)
++#define XY_MONO_SRC_COPY_IMM_BLT      ((2<<29)|(0x71<<22)|5)
++#define XY_SRC_COPY_BLT_WRITE_ALPHA   (1<<21)
++#define XY_SRC_COPY_BLT_WRITE_RGB     (1<<20)
++#define   BLT_DEPTH_8                 (0<<24)
++#define   BLT_DEPTH_16_565            (1<<24)
++#define   BLT_DEPTH_16_1555           (2<<24)
++#define   BLT_DEPTH_32                        (3<<24)
++#define   BLT_ROP_GXCOPY              (0xcc<<16)
++#define XY_SRC_COPY_BLT_SRC_TILED     (1<<15) /* 965+ only */
++#define XY_SRC_COPY_BLT_DST_TILED     (1<<11) /* 965+ only */
++#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
++#define   ASYNC_FLIP                (1<<22)
++#define   DISPLAY_PLANE_A           (0<<20)
++#define   DISPLAY_PLANE_B           (1<<20)
++
++/*
++ * Instruction and interrupt control regs
++ */
++
++#define PRB0_TAIL     0x02030
++#define PRB0_HEAD     0x02034
++#define PRB0_START    0x02038
++#define PRB0_CTL      0x0203c
++#define   TAIL_ADDR           0x001FFFF8
++#define   HEAD_WRAP_COUNT     0xFFE00000
++#define   HEAD_WRAP_ONE               0x00200000
++#define   HEAD_ADDR           0x001FFFFC
++#define   RING_NR_PAGES               0x001FF000
++#define   RING_REPORT_MASK    0x00000006
++#define   RING_REPORT_64K     0x00000002
++#define   RING_REPORT_128K    0x00000004
++#define   RING_NO_REPORT      0x00000000
++#define   RING_VALID_MASK     0x00000001
++#define   RING_VALID          0x00000001
++#define   RING_INVALID                0x00000000
++#define PRB1_TAIL     0x02040 /* 915+ only */
++#define PRB1_HEAD     0x02044 /* 915+ only */
++#define PRB1_START    0x02048 /* 915+ only */
++#define PRB1_CTL      0x0204c /* 915+ only */
++#define ACTHD_I965    0x02074
++#define HWS_PGA               0x02080
++#define HWS_ADDRESS_MASK      0xfffff000
++#define HWS_START_ADDRESS_SHIFT       4
++#define IPEIR         0x02088
++#define NOPID         0x02094
++#define HWSTAM                0x02098
++#define SCPD0         0x0209c /* 915+ only */
++#define IER           0x020a0
++#define IIR           0x020a4
++#define IMR           0x020a8
++#define ISR           0x020ac
++#define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT          (1<<18)
++#define   I915_DISPLAY_PORT_INTERRUPT                 (1<<17)
++#define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT  (1<<15)
++#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT    (1<<14)
++#define   I915_HWB_OOM_INTERRUPT                      (1<<13)
++#define   I915_SYNC_STATUS_INTERRUPT                  (1<<12)
++#define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
++#define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
++#define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT   (1<<9)
++#define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
++#define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT                (1<<7)
++#define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT         (1<<6)
++#define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT                (1<<5)
++#define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT         (1<<4)
++#define   I915_DEBUG_INTERRUPT                                (1<<2)
++#define   I915_USER_INTERRUPT                         (1<<1)
++#define   I915_ASLE_INTERRUPT                         (1<<0)
++#define EIR           0x020b0
++#define EMR           0x020b4
++#define ESR           0x020b8
++#define INSTPM                0x020c0
++#define ACTHD         0x020c8
++#define FW_BLC                0x020d8
++#define FW_BLC_SELF   0x020e0 /* 915+ only */
++#define MI_ARB_STATE  0x020e4 /* 915+ only */
++#define CACHE_MODE_0  0x02120 /* 915+ only */
++#define   CM0_MASK_SHIFT          16
++#define   CM0_IZ_OPT_DISABLE      (1<<6)
++#define   CM0_ZR_OPT_DISABLE      (1<<5)
++#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
++#define   CM0_COLOR_EVICT_DISABLE (1<<3)
++#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
++#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
++#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
++
++/*
++ * Framebuffer compression (915+ only)
++ */
++
++#define FBC_CFB_BASE          0x03200 /* 4k page aligned */
++#define FBC_LL_BASE           0x03204 /* 4k page aligned */
++#define FBC_CONTROL           0x03208
++#define   FBC_CTL_EN          (1<<31)
++#define   FBC_CTL_PERIODIC    (1<<30)
++#define   FBC_CTL_INTERVAL_SHIFT (16)
++#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
++#define   FBC_CTL_STRIDE_SHIFT        (5)
++#define   FBC_CTL_FENCENO     (1<<0)
++#define FBC_COMMAND           0x0320c
++#define   FBC_CMD_COMPRESS    (1<<0)
++#define FBC_STATUS            0x03210
++#define   FBC_STAT_COMPRESSING        (1<<31)
++#define   FBC_STAT_COMPRESSED (1<<30)
++#define   FBC_STAT_MODIFIED   (1<<29)
++#define   FBC_STAT_CURRENT_LINE       (1<<0)
++#define FBC_CONTROL2          0x03214
++#define   FBC_CTL_FENCE_DBL   (0<<4)
++#define   FBC_CTL_IDLE_IMM    (0<<2)
++#define   FBC_CTL_IDLE_FULL   (1<<2)
++#define   FBC_CTL_IDLE_LINE   (2<<2)
++#define   FBC_CTL_IDLE_DEBUG  (3<<2)
++#define   FBC_CTL_CPU_FENCE   (1<<1)
++#define   FBC_CTL_PLANEA      (0<<0)
++#define   FBC_CTL_PLANEB      (1<<0)
++#define FBC_FENCE_OFF         0x0321b
++
++#define FBC_LL_SIZE           (1536)
++
++/*
++ * GPIO regs
++ */
++#define GPIOA                 0x5010
++#define GPIOB                 0x5014
++#define GPIOC                 0x5018
++#define GPIOD                 0x501c
++#define GPIOE                 0x5020
++#define GPIOF                 0x5024
++#define GPIOG                 0x5028
++#define GPIOH                 0x502c
++# define GPIO_CLOCK_DIR_MASK          (1 << 0)
++# define GPIO_CLOCK_DIR_IN            (0 << 1)
++# define GPIO_CLOCK_DIR_OUT           (1 << 1)
++# define GPIO_CLOCK_VAL_MASK          (1 << 2)
++# define GPIO_CLOCK_VAL_OUT           (1 << 3)
++# define GPIO_CLOCK_VAL_IN            (1 << 4)
++# define GPIO_CLOCK_PULLUP_DISABLE    (1 << 5)
++# define GPIO_DATA_DIR_MASK           (1 << 8)
++# define GPIO_DATA_DIR_IN             (0 << 9)
++# define GPIO_DATA_DIR_OUT            (1 << 9)
++# define GPIO_DATA_VAL_MASK           (1 << 10)
++# define GPIO_DATA_VAL_OUT            (1 << 11)
++# define GPIO_DATA_VAL_IN             (1 << 12)
++# define GPIO_DATA_PULLUP_DISABLE     (1 << 13)
++
++/*
++ * Clock control & power management
++ */
++
++#define VGA0  0x6000
++#define VGA1  0x6004
++#define VGA_PD        0x6010
++#define   VGA0_PD_P2_DIV_4    (1 << 7)
++#define   VGA0_PD_P1_DIV_2    (1 << 5)
++#define   VGA0_PD_P1_SHIFT    0
++#define   VGA0_PD_P1_MASK     (0x1f << 0)
++#define   VGA1_PD_P2_DIV_4    (1 << 15)
++#define   VGA1_PD_P1_DIV_2    (1 << 13)
++#define   VGA1_PD_P1_SHIFT    8
++#define   VGA1_PD_P1_MASK     (0x1f << 8)
++#define DPLL_A        0x06014
++#define DPLL_B        0x06018
++#define   DPLL_VCO_ENABLE             (1 << 31)
++#define   DPLL_DVO_HIGH_SPEED         (1 << 30)
++#define   DPLL_SYNCLOCK_ENABLE                (1 << 29)
++#define   DPLL_VGA_MODE_DIS           (1 << 28)
++#define   DPLLB_MODE_DAC_SERIAL               (1 << 26) /* i915 */
++#define   DPLLB_MODE_LVDS             (2 << 26) /* i915 */
++#define   DPLL_MODE_MASK              (3 << 26)
++#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
++#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
++#define   DPLLB_LVDS_P2_CLOCK_DIV_14  (0 << 24) /* i915 */
++#define   DPLLB_LVDS_P2_CLOCK_DIV_7   (1 << 24) /* i915 */
++#define   DPLL_P2_CLOCK_DIV_MASK      0x03000000 /* i915 */
++#define   DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
++
++#define I915_FIFO_UNDERRUN_STATUS             (1UL<<31)
++#define I915_CRC_ERROR_ENABLE                 (1UL<<29)
++#define I915_CRC_DONE_ENABLE                  (1UL<<28)
++#define I915_GMBUS_EVENT_ENABLE                       (1UL<<27)
++#define I915_VSYNC_INTERRUPT_ENABLE           (1UL<<25)
++#define I915_DISPLAY_LINE_COMPARE_ENABLE      (1UL<<24)
++#define I915_DPST_EVENT_ENABLE                        (1UL<<23)
++#define I915_LEGACY_BLC_EVENT_ENABLE          (1UL<<22)
++#define I915_ODD_FIELD_INTERRUPT_ENABLE               (1UL<<21)
++#define I915_EVEN_FIELD_INTERRUPT_ENABLE      (1UL<<20)
++#define I915_START_VBLANK_INTERRUPT_ENABLE    (1UL<<18)       /* 965 or later */
++#define I915_VBLANK_INTERRUPT_ENABLE          (1UL<<17)
++#define I915_OVERLAY_UPDATED_ENABLE           (1UL<<16)
++#define I915_CRC_ERROR_INTERRUPT_STATUS               (1UL<<13)
++#define I915_CRC_DONE_INTERRUPT_STATUS                (1UL<<12)
++#define I915_GMBUS_INTERRUPT_STATUS           (1UL<<11)
++#define I915_VSYNC_INTERRUPT_STATUS           (1UL<<9)
++#define I915_DISPLAY_LINE_COMPARE_STATUS      (1UL<<8)
++#define I915_DPST_EVENT_STATUS                        (1UL<<7)
++#define I915_LEGACY_BLC_EVENT_STATUS          (1UL<<6)
++#define I915_ODD_FIELD_INTERRUPT_STATUS               (1UL<<5)
++#define I915_EVEN_FIELD_INTERRUPT_STATUS      (1UL<<4)
++#define I915_START_VBLANK_INTERRUPT_STATUS    (1UL<<2)        /* 965 or later */
++#define I915_VBLANK_INTERRUPT_STATUS          (1UL<<1)
++#define I915_OVERLAY_UPDATED_STATUS           (1UL<<0)
++
++#define SRX_INDEX             0x3c4
++#define SRX_DATA              0x3c5
++#define SR01                  1
++#define SR01_SCREEN_OFF               (1<<5)
++
++#define PPCR                  0x61204
++#define PPCR_ON                       (1<<0)
++
++#define DVOB                  0x61140
++#define DVOB_ON                       (1<<31)
++#define DVOC                  0x61160
++#define DVOC_ON                       (1<<31)
++#define LVDS                  0x61180
++#define LVDS_ON                       (1<<31)
++
++#define ADPA                  0x61100
++#define ADPA_DPMS_MASK                (~(3<<10))
++#define ADPA_DPMS_ON          (0<<10)
++#define ADPA_DPMS_SUSPEND     (1<<10)
++#define ADPA_DPMS_STANDBY     (2<<10)
++#define ADPA_DPMS_OFF         (3<<10)
++
++#define RING_TAIL             0x00
++#define TAIL_ADDR             0x001FFFF8
++#define RING_HEAD             0x04
++#define HEAD_WRAP_COUNT               0xFFE00000
++#define HEAD_WRAP_ONE         0x00200000
++#define HEAD_ADDR             0x001FFFFC
++#define RING_START            0x08
++#define START_ADDR            0xFFFFF000
++#define RING_LEN              0x0C
++#define RING_NR_PAGES         0x001FF000
++#define RING_REPORT_MASK      0x00000006
++#define RING_REPORT_64K               0x00000002
++#define RING_REPORT_128K      0x00000004
++#define RING_NO_REPORT                0x00000000
++#define RING_VALID_MASK               0x00000001
++#define RING_VALID            0x00000001
++#define RING_INVALID          0x00000000
++
++/* Scratch pad debug 0 reg:
++ */
++#define   DPLL_FPA01_P1_POST_DIV_MASK_I830    0x001f0000
++/*
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++#define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS       0x003f0000
++#define   DPLL_FPA01_P1_POST_DIV_SHIFT        16
++/* i830, required in DVO non-gang */
++#define   PLL_P2_DIVIDE_BY_4          (1 << 23)
++#define   PLL_P1_DIVIDE_BY_TWO                (1 << 21) /* i830 */
++#define   PLL_REF_INPUT_DREFCLK               (0 << 13)
++#define   PLL_REF_INPUT_TVCLKINA      (1 << 13) /* i830 */
++#define   PLL_REF_INPUT_TVCLKINBC     (2 << 13) /* SDVO TVCLKIN */
++#define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
++#define   PLL_REF_INPUT_MASK          (3 << 13)
++#define   PLL_LOAD_PULSE_PHASE_SHIFT          9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++#define   PLL_LOAD_PULSE_PHASE_MASK           (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++#define   DISPLAY_RATE_SELECT_FPA1            (1 << 8)
++/*
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ */
++#define   SDVO_MULTIPLIER_MASK                        0x000000ff
++#define   SDVO_MULTIPLIER_SHIFT_HIRES         4
++#define   SDVO_MULTIPLIER_SHIFT_VGA           0
++#define DPLL_A_MD 0x0601c /* 965+ only */
++/*
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
++ */
++#define   DPLL_MD_UDI_DIVIDER_MASK            0x3f000000
++#define   DPLL_MD_UDI_DIVIDER_SHIFT           24
++/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++#define   DPLL_MD_VGA_UDI_DIVIDER_MASK                0x003f0000
++#define   DPLL_MD_VGA_UDI_DIVIDER_SHIFT               16
++/*
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++#define   DPLL_MD_UDI_MULTIPLIER_MASK         0x00003f00
++#define   DPLL_MD_UDI_MULTIPLIER_SHIFT                8
++/*
++ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This best be set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK     0x0000003f
++#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT    0
++#define DPLL_B_MD 0x06020 /* 965+ only */
++#define FPA0  0x06040
++#define FPA1  0x06044
++#define FPB0  0x06048
++#define FPB1  0x0604c
++#define   FP_N_DIV_MASK               0x003f0000
++#define   FP_N_DIV_SHIFT              16
++#define   FP_M1_DIV_MASK      0x00003f00
++#define   FP_M1_DIV_SHIFT              8
++#define   FP_M2_DIV_MASK      0x0000003f
++#define   FP_M2_DIV_SHIFT              0
++#define DPLL_TEST     0x606c
++#define   DPLLB_TEST_SDVO_DIV_1               (0 << 22)
++#define   DPLLB_TEST_SDVO_DIV_2               (1 << 22)
++#define   DPLLB_TEST_SDVO_DIV_4               (2 << 22)
++#define   DPLLB_TEST_SDVO_DIV_MASK    (3 << 22)
++#define   DPLLB_TEST_N_BYPASS         (1 << 19)
++#define   DPLLB_TEST_M_BYPASS         (1 << 18)
++#define   DPLLB_INPUT_BUFFER_ENABLE   (1 << 16)
++#define   DPLLA_TEST_N_BYPASS         (1 << 3)
++#define   DPLLA_TEST_M_BYPASS         (1 << 2)
++#define   DPLLA_INPUT_BUFFER_ENABLE   (1 << 0)
++#define D_STATE               0x6104
++#define CG_2D_DIS     0x6200
++#define CG_3D_DIS     0x6204
++
++/*
++ * Palette regs
++ */
++
++#define PALETTE_A             0x0a000
++#define PALETTE_B             0x0a800
++
++/*
++ * Overlay regs
++ */
++
++#define OVADD                 0x30000
++#define DOVSTA                        0x30008
++#define OC_BUF                        (0x3<<20)
++#define OGAMC5                        0x30010
++#define OGAMC4                        0x30014
++#define OGAMC3                        0x30018
++#define OGAMC2                        0x3001c
++#define OGAMC1                        0x30020
++#define OGAMC0                        0x30024
++
++/*
++ * Display engine regs
++ */
++
++/* Pipe A timing regs */
++#define HTOTAL_A      0x60000
++#define HBLANK_A      0x60004
++#define HSYNC_A               0x60008
++#define VTOTAL_A      0x6000c
++#define VBLANK_A      0x60010
++#define VSYNC_A               0x60014
++#define PIPEASRC      0x6001c
++#define BCLRPAT_A     0x60020
++
++/* Pipe B timing regs */
++#define HTOTAL_B      0x61000
++#define HBLANK_B      0x61004
++#define HSYNC_B               0x61008
++#define VTOTAL_B      0x6100c
++#define VBLANK_B      0x61010
++#define VSYNC_B               0x61014
++#define PIPEBSRC      0x6101c
++#define BCLRPAT_B     0x61020
++
++/* VGA port control */
++#define ADPA                  0x61100
++#define   ADPA_DAC_ENABLE     (1<<31)
++#define   ADPA_DAC_DISABLE    0
++#define   ADPA_PIPE_SELECT_MASK       (1<<30)
++#define   ADPA_PIPE_A_SELECT  0
++#define   ADPA_PIPE_B_SELECT  (1<<30)
++#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
++#define   ADPA_SETS_HVPOLARITY        0
++#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
++#define   ADPA_VSYNC_CNTL_ENABLE 0
++#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
++#define   ADPA_HSYNC_CNTL_ENABLE 0
++#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
++#define   ADPA_VSYNC_ACTIVE_LOW       0
++#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
++#define   ADPA_HSYNC_ACTIVE_LOW       0
++#define   ADPA_DPMS_MASK      (~(3<<10))
++#define   ADPA_DPMS_ON                (0<<10)
++#define   ADPA_DPMS_SUSPEND   (1<<10)
++#define   ADPA_DPMS_STANDBY   (2<<10)
++#define   ADPA_DPMS_OFF               (3<<10)
++
++/* Hotplug control (945+ only) */
++#define PORT_HOTPLUG_EN               0x61110
++#define   SDVOB_HOTPLUG_INT_EN                        (1 << 26)
++#define   SDVOC_HOTPLUG_INT_EN                        (1 << 25)
++#define   TV_HOTPLUG_INT_EN                   (1 << 18)
++#define   CRT_HOTPLUG_INT_EN                  (1 << 9)
++#define   CRT_HOTPLUG_FORCE_DETECT            (1 << 3)
++
++#define PORT_HOTPLUG_STAT     0x61114
++#define   CRT_HOTPLUG_INT_STATUS              (1 << 11)
++#define   TV_HOTPLUG_INT_STATUS                       (1 << 10)
++#define   CRT_HOTPLUG_MONITOR_MASK            (3 << 8)
++#define   CRT_HOTPLUG_MONITOR_COLOR           (3 << 8)
++#define   CRT_HOTPLUG_MONITOR_MONO            (2 << 8)
++#define   CRT_HOTPLUG_MONITOR_NONE            (0 << 8)
++#define   SDVOC_HOTPLUG_INT_STATUS            (1 << 7)
++#define   SDVOB_HOTPLUG_INT_STATUS            (1 << 6)
++
++/* SDVO port control */
++#define SDVOB                 0x61140
++#define SDVOC                 0x61160
++#define   SDVO_ENABLE         (1 << 31)
++#define   SDVO_PIPE_B_SELECT  (1 << 30)
++#define   SDVO_STALL_SELECT   (1 << 29)
++#define   SDVO_INTERRUPT_ENABLE       (1 << 26)
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define   SDVO_PORT_MULTIPLY_MASK     (7 << 23)
++#define   SDVO_PORT_MULTIPLY_SHIFT            23
++#define   SDVO_PHASE_SELECT_MASK      (15 << 19)
++#define   SDVO_PHASE_SELECT_DEFAULT   (6 << 19)
++#define   SDVO_CLOCK_OUTPUT_INVERT    (1 << 18)
++#define   SDVOC_GANG_MODE             (1 << 16)
++#define   SDVO_BORDER_ENABLE          (1 << 7)
++#define   SDVOB_PCIE_CONCURRENCY      (1 << 3)
++#define   SDVO_DETECTED                       (1 << 2)
++/* Bits to be preserved when writing */
++#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
++#define   SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
++
++/* DVO port control */
++#define DVOA                  0x61120
++#define DVOB                  0x61140
++#define DVOC                  0x61160
++#define   DVO_ENABLE                  (1 << 31)
++#define   DVO_PIPE_B_SELECT           (1 << 30)
++#define   DVO_PIPE_STALL_UNUSED               (0 << 28)
++#define   DVO_PIPE_STALL              (1 << 28)
++#define   DVO_PIPE_STALL_TV           (2 << 28)
++#define   DVO_PIPE_STALL_MASK         (3 << 28)
++#define   DVO_USE_VGA_SYNC            (1 << 15)
++#define   DVO_DATA_ORDER_I740         (0 << 14)
++#define   DVO_DATA_ORDER_FP           (1 << 14)
++#define   DVO_VSYNC_DISABLE           (1 << 11)
++#define   DVO_HSYNC_DISABLE           (1 << 10)
++#define   DVO_VSYNC_TRISTATE          (1 << 9)
++#define   DVO_HSYNC_TRISTATE          (1 << 8)
++#define   DVO_BORDER_ENABLE           (1 << 7)
++#define   DVO_DATA_ORDER_GBRG         (1 << 6)
++#define   DVO_DATA_ORDER_RGGB         (0 << 6)
++#define   DVO_DATA_ORDER_GBRG_ERRATA  (0 << 6)
++#define   DVO_DATA_ORDER_RGGB_ERRATA  (1 << 6)
++#define   DVO_VSYNC_ACTIVE_HIGH               (1 << 4)
++#define   DVO_HSYNC_ACTIVE_HIGH               (1 << 3)
++#define   DVO_BLANK_ACTIVE_HIGH               (1 << 2)
++#define   DVO_OUTPUT_CSTATE_PIXELS    (1 << 1)        /* SDG only */
++#define   DVO_OUTPUT_SOURCE_SIZE_PIXELS       (1 << 0)        /* SDG only */
++#define   DVO_PRESERVE_MASK           (0x7<<24)
++#define DVOA_SRCDIM           0x61124
++#define DVOB_SRCDIM           0x61144
++#define DVOC_SRCDIM           0x61164
++#define   DVO_SRCDIM_HORIZONTAL_SHIFT 12
++#define   DVO_SRCDIM_VERTICAL_SHIFT   0
++
++/* LVDS port control */
++#define LVDS                  0x61180
++/*
++ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++#define   LVDS_PORT_EN                        (1 << 31)
++/* Selects pipe B for LVDS data.  Must be set on pre-965. */
++#define   LVDS_PIPEB_SELECT           (1 << 30)
++/*
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++#define   LVDS_A0A2_CLKA_POWER_MASK   (3 << 8)
++#define   LVDS_A0A2_CLKA_POWER_DOWN   (0 << 8)
++#define   LVDS_A0A2_CLKA_POWER_UP     (3 << 8)
++/*
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++#define   LVDS_A3_POWER_MASK          (3 << 6)
++#define   LVDS_A3_POWER_DOWN          (0 << 6)
++#define   LVDS_A3_POWER_UP            (3 << 6)
++/*
++ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++#define   LVDS_CLKB_POWER_MASK                (3 << 4)
++#define   LVDS_CLKB_POWER_DOWN                (0 << 4)
++#define   LVDS_CLKB_POWER_UP          (3 << 4)
++/*
++ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode.  The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++#define   LVDS_B0B3_POWER_MASK                (3 << 2)
++#define   LVDS_B0B3_POWER_DOWN                (0 << 2)
++#define   LVDS_B0B3_POWER_UP          (3 << 2)
++
++/* Panel power sequencing */
++#define PP_STATUS     0x61200
++#define   PP_ON               (1 << 31)
++/*
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++#define   PP_READY            (1 << 30)
++#define   PP_SEQUENCE_NONE    (0 << 28)
++#define   PP_SEQUENCE_ON      (1 << 28)
++#define   PP_SEQUENCE_OFF     (2 << 28)
++#define   PP_SEQUENCE_MASK    0x30000000
++#define PP_CONTROL    0x61204
++#define   POWER_TARGET_ON     (1 << 0)
++#define PP_ON_DELAYS  0x61208
++#define PP_OFF_DELAYS 0x6120c
++#define PP_DIVISOR    0x61210
++
++/* Panel fitting */
++#define PFIT_CONTROL  0x61230
++#define   PFIT_ENABLE         (1 << 31)
++#define   PFIT_PIPE_MASK      (3 << 29)
++#define   PFIT_PIPE_SHIFT     29
++#define   VERT_INTERP_DISABLE (0 << 10)
++#define   VERT_INTERP_BILINEAR        (1 << 10)
++#define   VERT_INTERP_MASK    (3 << 10)
++#define   VERT_AUTO_SCALE     (1 << 9)
++#define   HORIZ_INTERP_DISABLE        (0 << 6)
++#define   HORIZ_INTERP_BILINEAR       (1 << 6)
++#define   HORIZ_INTERP_MASK   (3 << 6)
++#define   HORIZ_AUTO_SCALE    (1 << 5)
++#define   PANEL_8TO6_DITHER_ENABLE (1 << 3)
++#define PFIT_PGM_RATIOS       0x61234
++#define   PFIT_VERT_SCALE_MASK                        0xfff00000
++#define   PFIT_HORIZ_SCALE_MASK                       0x0000fff0
++#define PFIT_AUTO_RATIOS 0x61238
++
++/* Backlight control */
++#define BLC_PWM_CTL           0x61254
++#define   BACKLIGHT_MODULATION_FREQ_SHIFT             (17)
++#define BLC_PWM_CTL2          0x61250 /* 965+ only */
++/*
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define   BACKLIGHT_MODULATION_FREQ_MASK              (0x7fff << 17)
++#define   BLM_LEGACY_MODE                             (1 << 16)
++/*
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define   BACKLIGHT_DUTY_CYCLE_SHIFT          (0)
++#define   BACKLIGHT_DUTY_CYCLE_MASK           (0xffff)
++
++/* TV port control */
++#define TV_CTL                        0x68000
++/** Enables the TV encoder */
++# define TV_ENC_ENABLE                        (1 << 31)
++/** Sources the TV encoder input from pipe B instead of A. */
++# define TV_ENC_PIPEB_SELECT          (1 << 30)
++/** Outputs composite video (DAC A only) */
++# define TV_ENC_OUTPUT_COMPOSITE      (0 << 28)
++/** Outputs SVideo video (DAC B/C) */
++# define TV_ENC_OUTPUT_SVIDEO         (1 << 28)
++/** Outputs Component video (DAC A/B/C) */
++# define TV_ENC_OUTPUT_COMPONENT      (2 << 28)
++/** Outputs Composite and SVideo (DAC A/B/C) */
++# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE       (3 << 28)
++# define TV_TRILEVEL_SYNC             (1 << 21)
++/** Enables slow sync generation (945GM only) */
++# define TV_SLOW_SYNC                 (1 << 20)
++/** Selects 4x oversampling for 480i and 576p */
++# define TV_OVERSAMPLE_4X             (0 << 18)
++/** Selects 2x oversampling for 720p and 1080i */
++# define TV_OVERSAMPLE_2X             (1 << 18)
++/** Selects no oversampling for 1080p */
++# define TV_OVERSAMPLE_NONE           (2 << 18)
++/** Selects 8x oversampling */
++# define TV_OVERSAMPLE_8X             (3 << 18)
++/** Selects progressive mode rather than interlaced */
++# define TV_PROGRESSIVE                       (1 << 17)
++/** Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
++# define TV_PAL_BURST                 (1 << 16)
++/** Field for setting delay of Y compared to C */
++# define TV_YC_SKEW_MASK              (7 << 12)
++/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
++# define TV_ENC_SDP_FIX                       (1 << 11)
++/**
++ * Enables a fix for the 915GM only.
++ *
++ * Not sure what it does.
++ */
++# define TV_ENC_C0_FIX                        (1 << 10)
++/** Bits that must be preserved by software */
++# define TV_CTL_SAVE                  ((3 << 8) | (3 << 6))
++# define TV_FUSE_STATE_MASK           (3 << 4)
++/** Read-only state that reports all features enabled */
++# define TV_FUSE_STATE_ENABLED                (0 << 4)
++/** Read-only state that reports that Macrovision is disabled in hardware*/
++# define TV_FUSE_STATE_NO_MACROVISION (1 << 4)
++/** Read-only state that reports that TV-out is disabled in hardware. */
++# define TV_FUSE_STATE_DISABLED               (2 << 4)
++/** Normal operation */
++# define TV_TEST_MODE_NORMAL          (0 << 0)
++/** Encoder test pattern 1 - combo pattern */
++# define TV_TEST_MODE_PATTERN_1               (1 << 0)
++/** Encoder test pattern 2 - full screen vertical 75% color bars */
++# define TV_TEST_MODE_PATTERN_2               (2 << 0)
++/** Encoder test pattern 3 - full screen horizontal 75% color bars */
++# define TV_TEST_MODE_PATTERN_3               (3 << 0)
++/** Encoder test pattern 4 - random noise */
++# define TV_TEST_MODE_PATTERN_4               (4 << 0)
++/** Encoder test pattern 5 - linear color ramps */
++# define TV_TEST_MODE_PATTERN_5               (5 << 0)
++/**
++ * This test mode forces the DACs to 50% of full output.
++ *
++ * This is used for load detection in combination with TVDAC_SENSE_MASK
++ */
++# define TV_TEST_MODE_MONITOR_DETECT  (7 << 0)
++# define TV_TEST_MODE_MASK            (7 << 0)
++
++#define TV_DAC                        0x68004
++/**
++ * Reports that DAC state change logic has reported change (RO).
++ *
++ * This gets cleared when TV_DAC_STATE_EN is cleared
++*/
++# define TVDAC_STATE_CHG              (1 << 31)
++# define TVDAC_SENSE_MASK             (7 << 28)
++/** Reports that DAC A voltage is above the detect threshold */
++# define TVDAC_A_SENSE                        (1 << 30)
++/** Reports that DAC B voltage is above the detect threshold */
++# define TVDAC_B_SENSE                        (1 << 29)
++/** Reports that DAC C voltage is above the detect threshold */
++# define TVDAC_C_SENSE                        (1 << 28)
++/**
++ * Enables DAC state detection logic, for load-based TV detection.
++ *
++ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
++ * to off, for load detection to work.
++ */
++# define TVDAC_STATE_CHG_EN           (1 << 27)
++/** Sets the DAC A sense value to high */
++# define TVDAC_A_SENSE_CTL            (1 << 26)
++/** Sets the DAC B sense value to high */
++# define TVDAC_B_SENSE_CTL            (1 << 25)
++/** Sets the DAC C sense value to high */
++# define TVDAC_C_SENSE_CTL            (1 << 24)
++/** Overrides the ENC_ENABLE and DAC voltage levels */
++# define DAC_CTL_OVERRIDE             (1 << 7)
++/** Sets the slew rate.  Must be preserved in software */
++# define ENC_TVDAC_SLEW_FAST          (1 << 6)
++# define DAC_A_1_3_V                  (0 << 4)
++# define DAC_A_1_1_V                  (1 << 4)
++# define DAC_A_0_7_V                  (2 << 4)
++# define DAC_A_OFF                    (3 << 4)
++# define DAC_B_1_3_V                  (0 << 2)
++# define DAC_B_1_1_V                  (1 << 2)
++# define DAC_B_0_7_V                  (2 << 2)
++# define DAC_B_OFF                    (3 << 2)
++# define DAC_C_1_3_V                  (0 << 0)
++# define DAC_C_1_1_V                  (1 << 0)
++# define DAC_C_0_7_V                  (2 << 0)
++# define DAC_C_OFF                    (3 << 0)
++
++/**
++ * CSC coefficients are stored in a floating point format with 9 bits of
++ * mantissa and 2 or 3 bits of exponent.  The exponent is represented as 2**-n,
++ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
++ * -1 (0x3) being the only legal negative value.
++ */
++#define TV_CSC_Y              0x68010
++# define TV_RY_MASK                   0x07ff0000
++# define TV_RY_SHIFT                  16
++# define TV_GY_MASK                   0x00000fff
++# define TV_GY_SHIFT                  0
++
++#define TV_CSC_Y2             0x68014
++# define TV_BY_MASK                   0x07ff0000
++# define TV_BY_SHIFT                  16
++/**
++ * Y attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AY_MASK                   0x000003ff
++# define TV_AY_SHIFT                  0
++
++#define TV_CSC_U              0x68018
++# define TV_RU_MASK                   0x07ff0000
++# define TV_RU_SHIFT                  16
++# define TV_GU_MASK                   0x000007ff
++# define TV_GU_SHIFT                  0
++
++#define TV_CSC_U2             0x6801c
++# define TV_BU_MASK                   0x07ff0000
++# define TV_BU_SHIFT                  16
++/**
++ * U attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AU_MASK                   0x000003ff
++# define TV_AU_SHIFT                  0
++
++#define TV_CSC_V              0x68020
++# define TV_RV_MASK                   0x0fff0000
++# define TV_RV_SHIFT                  16
++# define TV_GV_MASK                   0x000007ff
++# define TV_GV_SHIFT                  0
++
++#define TV_CSC_V2             0x68024
++# define TV_BV_MASK                   0x07ff0000
++# define TV_BV_SHIFT                  16
++/**
++ * V attenuation for component video.
++ *
++ * Stored in 1.9 fixed point.
++ */
++# define TV_AV_MASK                   0x000007ff
++# define TV_AV_SHIFT                  0
++
++#define TV_CLR_KNOBS          0x68028
++/** 2s-complement brightness adjustment */
++# define TV_BRIGHTNESS_MASK           0xff000000
++# define TV_BRIGHTNESS_SHIFT          24
++/** Contrast adjustment, as a 2.6 unsigned floating point number */
++# define TV_CONTRAST_MASK             0x00ff0000
++# define TV_CONTRAST_SHIFT            16
++/** Saturation adjustment, as a 2.6 unsigned floating point number */
++# define TV_SATURATION_MASK           0x0000ff00
++# define TV_SATURATION_SHIFT          8
++/** Hue adjustment, as an integer phase angle in degrees */
++# define TV_HUE_MASK                  0x000000ff
++# define TV_HUE_SHIFT                 0
++
++#define TV_CLR_LEVEL          0x6802c
++/** Controls the DAC level for black */
++# define TV_BLACK_LEVEL_MASK          0x01ff0000
++# define TV_BLACK_LEVEL_SHIFT         16
++/** Controls the DAC level for blanking */
++# define TV_BLANK_LEVEL_MASK          0x000001ff
++# define TV_BLANK_LEVEL_SHIFT         0
++
++#define TV_H_CTL_1            0x68030
++/** Number of pixels in the hsync. */
++# define TV_HSYNC_END_MASK            0x1fff0000
++# define TV_HSYNC_END_SHIFT           16
++/** Total number of pixels minus one in the line (display and blanking). */
++# define TV_HTOTAL_MASK                       0x00001fff
++# define TV_HTOTAL_SHIFT              0
++
++#define TV_H_CTL_2            0x68034
++/** Enables the colorburst (needed for non-component color) */
++# define TV_BURST_ENA                 (1 << 31)
++/** Offset of the colorburst from the start of hsync, in pixels minus one. */
++# define TV_HBURST_START_SHIFT                16
++# define TV_HBURST_START_MASK         0x1fff0000
++/** Length of the colorburst */
++# define TV_HBURST_LEN_SHIFT          0
++# define TV_HBURST_LEN_MASK           0x0001fff
++
++#define TV_H_CTL_3            0x68038
++/** End of hblank, measured in pixels minus one from start of hsync */
++# define TV_HBLANK_END_SHIFT          16
++# define TV_HBLANK_END_MASK           0x1fff0000
++/** Start of hblank, measured in pixels minus one from start of hsync */
++# define TV_HBLANK_START_SHIFT                0
++# define TV_HBLANK_START_MASK         0x0001fff
++
++#define TV_V_CTL_1            0x6803c
++/** XXX */
++# define TV_NBR_END_SHIFT             16
++# define TV_NBR_END_MASK              0x07ff0000
++/** XXX */
++# define TV_VI_END_F1_SHIFT           8
++# define TV_VI_END_F1_MASK            0x00003f00
++/** XXX */
++# define TV_VI_END_F2_SHIFT           0
++# define TV_VI_END_F2_MASK            0x0000003f
++
++#define TV_V_CTL_2            0x68040
++/** Length of vsync, in half lines */
++# define TV_VSYNC_LEN_MASK            0x07ff0000
++# define TV_VSYNC_LEN_SHIFT           16
++/** Offset of the start of vsync in field 1, measured in one less than the
++ * number of half lines.
++ */
++# define TV_VSYNC_START_F1_MASK               0x00007f00
++# define TV_VSYNC_START_F1_SHIFT      8
++/**
++ * Offset of the start of vsync in field 2, measured in one less than the
++ * number of half lines.
++ */
++# define TV_VSYNC_START_F2_MASK               0x0000007f
++# define TV_VSYNC_START_F2_SHIFT      0
++
++#define TV_V_CTL_3            0x68044
++/** Enables generation of the equalization signal */
++# define TV_EQUAL_ENA                 (1 << 31)
++/** Length of vsync, in half lines */
++# define TV_VEQ_LEN_MASK              0x007f0000
++# define TV_VEQ_LEN_SHIFT             16
++/** Offset of the start of equalization in field 1, measured in one less than
++ * the number of half lines.
++ */
++# define TV_VEQ_START_F1_MASK         0x0007f00
++# define TV_VEQ_START_F1_SHIFT                8
++/**
++ * Offset of the start of equalization in field 2, measured in one less than
++ * the number of half lines.
++ */
++# define TV_VEQ_START_F2_MASK         0x000007f
++# define TV_VEQ_START_F2_SHIFT                0
++
++#define TV_V_CTL_4            0x68048
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F1_MASK      0x003f0000
++# define TV_VBURST_START_F1_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F1_MASK                0x000000ff
++# define TV_VBURST_END_F1_SHIFT               0
++
++#define TV_V_CTL_5            0x6804c
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F2_MASK      0x003f0000
++# define TV_VBURST_START_F2_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F2_MASK                0x000000ff
++# define TV_VBURST_END_F2_SHIFT               0
++
++#define TV_V_CTL_6            0x68050
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F3_MASK      0x003f0000
++# define TV_VBURST_START_F3_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F3_MASK                0x000000ff
++# define TV_VBURST_END_F3_SHIFT               0
++
++#define TV_V_CTL_7            0x68054
++/**
++ * Offset to start of vertical colorburst, measured in one less than the
++ * number of lines from vertical start.
++ */
++# define TV_VBURST_START_F4_MASK      0x003f0000
++# define TV_VBURST_START_F4_SHIFT     16
++/**
++ * Offset to the end of vertical colorburst, measured in one less than the
++ * number of lines from the start of NBR.
++ */
++# define TV_VBURST_END_F4_MASK                0x000000ff
++# define TV_VBURST_END_F4_SHIFT               0
++
++#define TV_SC_CTL_1           0x68060
++/** Turns on the first subcarrier phase generation DDA */
++# define TV_SC_DDA1_EN                        (1 << 31)
++/** Turns on the first subcarrier phase generation DDA */
++# define TV_SC_DDA2_EN                        (1 << 30)
++/** Turns on the first subcarrier phase generation DDA */
++# define TV_SC_DDA3_EN                        (1 << 29)
++/** Sets the subcarrier DDA to reset frequency every other field */
++# define TV_SC_RESET_EVERY_2          (0 << 24)
++/** Sets the subcarrier DDA to reset frequency every fourth field */
++# define TV_SC_RESET_EVERY_4          (1 << 24)
++/** Sets the subcarrier DDA to reset frequency every eighth field */
++# define TV_SC_RESET_EVERY_8          (2 << 24)
++/** Sets the subcarrier DDA to never reset the frequency */
++# define TV_SC_RESET_NEVER            (3 << 24)
++/** Sets the peak amplitude of the colorburst.*/
++# define TV_BURST_LEVEL_MASK          0x00ff0000
++# define TV_BURST_LEVEL_SHIFT         16
++/** Sets the increment of the first subcarrier phase generation DDA */
++# define TV_SCDDA1_INC_MASK           0x00000fff
++# define TV_SCDDA1_INC_SHIFT          0
++
++#define TV_SC_CTL_2           0x68064
++/** Sets the rollover for the second subcarrier phase generation DDA */
++# define TV_SCDDA2_SIZE_MASK          0x7fff0000
++# define TV_SCDDA2_SIZE_SHIFT         16
++/** Sets the increent of the second subcarrier phase generation DDA */
++# define TV_SCDDA2_INC_MASK           0x00007fff
++# define TV_SCDDA2_INC_SHIFT          0
++
++#define TV_SC_CTL_3           0x68068
++/** Sets the rollover for the third subcarrier phase generation DDA */
++# define TV_SCDDA3_SIZE_MASK          0x7fff0000
++# define TV_SCDDA3_SIZE_SHIFT         16
++/** Sets the increent of the third subcarrier phase generation DDA */
++# define TV_SCDDA3_INC_MASK           0x00007fff
++# define TV_SCDDA3_INC_SHIFT          0
++
++#define TV_WIN_POS            0x68070
++/** X coordinate of the display from the start of horizontal active */
++# define TV_XPOS_MASK                 0x1fff0000
++# define TV_XPOS_SHIFT                        16
++/** Y coordinate of the display from the start of vertical active (NBR) */
++# define TV_YPOS_MASK                 0x00000fff
++# define TV_YPOS_SHIFT                        0
++
++#define TV_WIN_SIZE           0x68074
++/** Horizontal size of the display window, measured in pixels*/
++# define TV_XSIZE_MASK                        0x1fff0000
++# define TV_XSIZE_SHIFT                       16
++/**
++ * Vertical size of the display window, measured in pixels.
++ *
++ * Must be even for interlaced modes.
++ */
++# define TV_YSIZE_MASK                        0x00000fff
++# define TV_YSIZE_SHIFT                       0
++
++#define TV_FILTER_CTL_1               0x68080
++/**
++ * Enables automatic scaling calculation.
++ *
++ * If set, the rest of the registers are ignored, and the calculated values can
++ * be read back from the register.
++ */
++# define TV_AUTO_SCALE                        (1 << 31)
++/**
++ * Disables the vertical filter.
++ *
++ * This is required on modes more than 1024 pixels wide */
++# define TV_V_FILTER_BYPASS           (1 << 29)
++/** Enables adaptive vertical filtering */
++# define TV_VADAPT                    (1 << 28)
++# define TV_VADAPT_MODE_MASK          (3 << 26)
++/** Selects the least adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_LEAST         (0 << 26)
++/** Selects the moderately adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MODERATE      (1 << 26)
++/** Selects the most adaptive vertical filtering mode */
++# define TV_VADAPT_MODE_MOST          (3 << 26)
++/**
++ * Sets the horizontal scaling factor.
++ *
++ * This should be the fractional part of the horizontal scaling factor divided
++ * by the oversampling rate.  TV_HSCALE should be less than 1, and set to:
++ *
++ * (src width - 1) / ((oversample * dest width) - 1)
++ */
++# define TV_HSCALE_FRAC_MASK          0x00003fff
++# define TV_HSCALE_FRAC_SHIFT         0
++
++#define TV_FILTER_CTL_2               0x68084
++/**
++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
++ */
++# define TV_VSCALE_INT_MASK           0x00038000
++# define TV_VSCALE_INT_SHIFT          15
++/**
++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * \sa TV_VSCALE_INT_MASK
++ */
++# define TV_VSCALE_FRAC_MASK          0x00007fff
++# define TV_VSCALE_FRAC_SHIFT         0
++
++#define TV_FILTER_CTL_3               0x68088
++/**
++ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
++ *
++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
++ */
++# define TV_VSCALE_IP_INT_MASK                0x00038000
++# define TV_VSCALE_IP_INT_SHIFT               15
++/**
++ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
++ *
++ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
++ *
++ * \sa TV_VSCALE_IP_INT_MASK
++ */
++# define TV_VSCALE_IP_FRAC_MASK               0x00007fff
++# define TV_VSCALE_IP_FRAC_SHIFT              0
++
++#define TV_CC_CONTROL         0x68090
++# define TV_CC_ENABLE                 (1 << 31)
++/**
++ * Specifies which field to send the CC data in.
++ *
++ * CC data is usually sent in field 0.
++ */
++# define TV_CC_FID_MASK                       (1 << 27)
++# define TV_CC_FID_SHIFT              27
++/** Sets the horizontal position of the CC data.  Usually 135. */
++# define TV_CC_HOFF_MASK              0x03ff0000
++# define TV_CC_HOFF_SHIFT             16
++/** Sets the vertical position of the CC data.  Usually 21 */
++# define TV_CC_LINE_MASK              0x0000003f
++# define TV_CC_LINE_SHIFT             0
++
++#define TV_CC_DATA            0x68094
++# define TV_CC_RDY                    (1 << 31)
++/** Second word of CC data to be transmitted. */
++# define TV_CC_DATA_2_MASK            0x007f0000
++# define TV_CC_DATA_2_SHIFT           16
++/** First word of CC data to be transmitted. */
++# define TV_CC_DATA_1_MASK            0x0000007f
++# define TV_CC_DATA_1_SHIFT           0
++
++#define TV_H_LUMA_0           0x68100
++#define TV_H_LUMA_59          0x681ec
++#define TV_H_CHROMA_0         0x68200
++#define TV_H_CHROMA_59                0x682ec
++#define TV_V_LUMA_0           0x68300
++#define TV_V_LUMA_42          0x683a8
++#define TV_V_CHROMA_0         0x68400
++#define TV_V_CHROMA_42                0x684a8
++
++/* Display & cursor control */
++
++/* Pipe A */
++#define PIPEADSL              0x70000
++#define PIPEACONF              0x70008
++#define   PIPEACONF_ENABLE    (1<<31)
++#define   PIPEACONF_DISABLE   0
++#define   PIPEACONF_DOUBLE_WIDE       (1<<30)
++#define   I965_PIPECONF_ACTIVE        (1<<30)
++#define   PIPEACONF_SINGLE_WIDE       0
++#define   PIPEACONF_PIPE_UNLOCKED 0
++#define   PIPEACONF_PIPE_LOCKED       (1<<25)
++#define   PIPEACONF_PALETTE   0
++#define   PIPEACONF_GAMMA             (1<<24)
++#define   PIPECONF_FORCE_BORDER       (1<<25)
++#define   PIPECONF_PROGRESSIVE        (0 << 21)
++#define   PIPECONF_INTERLACE_W_FIELD_INDICATION       (6 << 21)
++#define   PIPECONF_INTERLACE_FIELD_0_ONLY             (7 << 21)
++#define PIPEASTAT             0x70024
++#define   PIPE_FIFO_UNDERRUN_STATUS           (1UL<<31)
++#define   PIPE_CRC_ERROR_ENABLE                       (1UL<<29)
++#define   PIPE_CRC_DONE_ENABLE                        (1UL<<28)
++#define   PIPE_GMBUS_EVENT_ENABLE             (1UL<<27)
++#define   PIPE_HOTPLUG_INTERRUPT_ENABLE               (1UL<<26)
++#define   PIPE_VSYNC_INTERRUPT_ENABLE         (1UL<<25)
++#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE    (1UL<<24)
++#define   PIPE_DPST_EVENT_ENABLE              (1UL<<23)
++#define   PIPE_LEGACY_BLC_EVENT_ENABLE                (1UL<<22)
++#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE     (1UL<<21)
++#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE    (1UL<<20)
++#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE    (1UL<<18) /* pre-965 */
++#define   PIPE_START_VBLANK_INTERRUPT_ENABLE  (1UL<<18) /* 965 or later */
++#define   PIPE_VBLANK_INTERRUPT_ENABLE                (1UL<<17)
++#define   PIPE_OVERLAY_UPDATED_ENABLE         (1UL<<16)
++#define   PIPE_CRC_ERROR_INTERRUPT_STATUS     (1UL<<13)
++#define   PIPE_CRC_DONE_INTERRUPT_STATUS      (1UL<<12)
++#define   PIPE_GMBUS_INTERRUPT_STATUS         (1UL<<11)
++#define   PIPE_HOTPLUG_INTERRUPT_STATUS               (1UL<<10)
++#define   PIPE_VSYNC_INTERRUPT_STATUS         (1UL<<9)
++#define   PIPE_DISPLAY_LINE_COMPARE_STATUS    (1UL<<8)
++#define   PIPE_DPST_EVENT_STATUS              (1UL<<7)
++#define   PIPE_LEGACY_BLC_EVENT_STATUS                (1UL<<6)
++#define   PIPE_ODD_FIELD_INTERRUPT_STATUS     (1UL<<5)
++#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS    (1UL<<4)
++#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS    (1UL<<2) /* pre-965 */
++#define   PIPE_START_VBLANK_INTERRUPT_STATUS  (1UL<<2) /* 965 or later */
++#define   PIPE_VBLANK_INTERRUPT_STATUS                (1UL<<1)
++#define   PIPE_OVERLAY_UPDATED_STATUS         (1UL<<0)
++
++#define DSPARB                        0x70030
++#define   DSPARB_CSTART_MASK  (0x7f << 7)
++#define   DSPARB_CSTART_SHIFT 7
++#define   DSPARB_BSTART_MASK  (0x7f)           
++#define   DSPARB_BSTART_SHIFT 0
++/*
++ * The two pipe frame counter registers are not synchronized, so
++ * reading a stable value is somewhat tricky. The following code 
++ * should work:
++ *
++ *  do {
++ *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *             PIPE_FRAME_HIGH_SHIFT;
++ *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
++ *             PIPE_FRAME_LOW_SHIFT);
++ *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
++ *             PIPE_FRAME_HIGH_SHIFT);
++ *  } while (high1 != high2);
++ *  frame = (high1 << 8) | low1;
++ */
++#define PIPEAFRAMEHIGH          0x70040
++#define   PIPE_FRAME_HIGH_MASK    0x0000ffff
++#define   PIPE_FRAME_HIGH_SHIFT   0
++#define PIPEAFRAMEPIXEL         0x70044
++#define   PIPE_FRAME_LOW_MASK     0xff000000
++#define   PIPE_FRAME_LOW_SHIFT    24
++#define   PIPE_PIXEL_MASK         0x00ffffff
++#define   PIPE_PIXEL_SHIFT        0
++
++/* Cursor A & B regs */
++#define CURACNTR              0x70080
++#define   CURSOR_MODE_DISABLE   0x00
++#define   CURSOR_MODE_64_32B_AX 0x07
++#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
++#define CURABASE              0x70084
++#define CURAPOS                       0x70088
++#define   CURSOR_POS_MASK       0x007FF
++#define   CURSOR_POS_SIGN       0x8000
++#define   CURSOR_X_SHIFT        0
++#define   CURSOR_Y_SHIFT        16
++#define CURBCNTR              0x700c0
++#define CURBBASE              0x700c4
++#define CURBPOS                       0x700c8
++
++/* Display A control */
++#define DSPACNTR                0x70180
++#define   DISPLAY_PLANE_ENABLE                        (1<<31)
++#define   DISPLAY_PLANE_DISABLE                       0
++#define   DISPPLANE_GAMMA_ENABLE              (1<<30)
++#define   DISPPLANE_GAMMA_DISABLE             0
++#define   DISPPLANE_PIXFORMAT_MASK            (0xf<<26)
++#define   DISPPLANE_8BPP                      (0x2<<26)
++#define   DISPPLANE_15_16BPP                  (0x4<<26)
++#define   DISPPLANE_16BPP                     (0x5<<26)
++#define   DISPPLANE_32BPP_NO_ALPHA            (0x6<<26)
++#define   DISPPLANE_32BPP                     (0x7<<26)
++#define   DISPPLANE_STEREO_ENABLE             (1<<25)
++#define   DISPPLANE_STEREO_DISABLE            0
++#define   DISPPLANE_SEL_PIPE_MASK             (1<<24)
++#define   DISPPLANE_SEL_PIPE_A                        0
++#define   DISPPLANE_SEL_PIPE_B                        (1<<24)
++#define   DISPPLANE_SRC_KEY_ENABLE            (1<<22)
++#define   DISPPLANE_SRC_KEY_DISABLE           0
++#define   DISPPLANE_LINE_DOUBLE                       (1<<20)
++#define   DISPPLANE_NO_LINE_DOUBLE            0
++#define   DISPPLANE_STEREO_POLARITY_FIRST     0
++#define   DISPPLANE_STEREO_POLARITY_SECOND    (1<<18)
++#define DSPAADDR              0x70184
++#define DSPASTRIDE            0x70188
++#define DSPAPOS                       0x7018C /* reserved */
++#define DSPASIZE              0x70190
++#define DSPASURF              0x7019C /* 965+ only */
++#define DSPATILEOFF           0x701A4 /* 965+ only */
++
++/* VBIOS flags */
++#define SWF00                 0x71410
++#define SWF01                 0x71414
++#define SWF02                 0x71418
++#define SWF03                 0x7141c
++#define SWF04                 0x71420
++#define SWF05                 0x71424
++#define SWF06                 0x71428
++#define SWF10                 0x70410
++#define SWF11                 0x70414
++#define SWF14                 0x71420
++#define SWF30                 0x72414
++#define SWF31                 0x72418
++#define SWF32                 0x7241c
++
++/* Pipe B */
++#define PIPEBDSL              0x71000
++#define PIPEBCONF             0x71008
++#define PIPEBSTAT             0x71024
++#define PIPEBFRAMEHIGH                0x71040
++#define PIPEBFRAMEPIXEL               0x71044
++
++/* Display B control */
++#define DSPBCNTR              0x71180
++#define   DISPPLANE_ALPHA_TRANS_ENABLE                (1<<15)
++#define   DISPPLANE_ALPHA_TRANS_DISABLE               0
++#define   DISPPLANE_SPRITE_ABOVE_DISPLAY      0
++#define   DISPPLANE_SPRITE_ABOVE_OVERLAY      (1)
++#define DSPBADDR              0x71184
++#define DSPBSTRIDE            0x71188
++#define DSPBPOS                       0x7118C
++#define DSPBSIZE              0x71190
++#define DSPBSURF              0x7119C
++#define DSPBTILEOFF           0x711A4
++
++/* VBIOS regs */
++#define VGACNTRL              0x71400
++# define VGA_DISP_DISABLE                     (1 << 31)
++# define VGA_2X_MODE                          (1 << 30)
++# define VGA_PIPE_B_SELECT                    (1 << 29)
++
++/* Chipset type macros */
++
++#define IS_I830(dev) ((dev)->pci_device == 0x3577)
++#define IS_845G(dev) ((dev)->pci_device == 0x2562)
++#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
++#define IS_I855(dev) ((dev)->pci_device == 0x3582)
++#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
++
++#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
++#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
++#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
++#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
++                      (dev)->pci_device == 0x27AE)
++#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
++                     (dev)->pci_device == 0x2982 || \
++                     (dev)->pci_device == 0x2992 || \
++                     (dev)->pci_device == 0x29A2 || \
++                     (dev)->pci_device == 0x2A02 || \
++                     (dev)->pci_device == 0x2A12 || \
++                     (dev)->pci_device == 0x2A42 || \
++                     (dev)->pci_device == 0x2E02 || \
++                     (dev)->pci_device == 0x2E12 || \
++                     (dev)->pci_device == 0x2E22)
++
++#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
++
++#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
++
++#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
++                   (dev)->pci_device == 0x2E12 || \
++                   (dev)->pci_device == 0x2E22)
++
++#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||        \
++                      (dev)->pci_device == 0x29B2 ||  \
++                      (dev)->pci_device == 0x29D2)
++
++#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
++                    IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
++
++#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
++                      IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
++
++#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_execbuf.c git-nokia/drivers/gpu/drm-tungsten/i915_execbuf.c
+--- git/drivers/gpu/drm-tungsten/i915_execbuf.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_execbuf.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,917 @@
++/*
++ * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ *     Dave Airlie
++ *     Keith Packard
++ *     ... ?
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if DRM_DEBUG_CODE
++#define DRM_DEBUG_RELOCATION  (drm_debug != 0)
++#else
++#define DRM_DEBUG_RELOCATION  0
++#endif
++
++enum i915_buf_idle {
++      I915_RELOC_UNCHECKED,
++      I915_RELOC_IDLE,
++      I915_RELOC_BUSY
++};
++
++struct i915_relocatee_info {
++      struct drm_buffer_object *buf;
++      unsigned long offset;
++      uint32_t *data_page;
++      unsigned page_offset;
++      struct drm_bo_kmap_obj kmap;
++      int is_iomem;
++      int dst;
++      int idle;
++      int performed_ring_relocs;
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++      unsigned long pfn;
++      pgprot_t pg_prot;
++#endif
++};
++
++struct drm_i915_validate_buffer {
++      struct drm_buffer_object *buffer;
++      int presumed_offset_correct;
++      void __user *data;
++      int ret;
++      enum i915_buf_idle idle;
++};
++
++/*
++ * I'd like to use MI_STORE_DATA_IMM here, but I can't make
++ * it work. Seems like GART writes are broken with that
++ * instruction. Also I'm not sure that MI_FLUSH will
++ * act as a memory barrier for that instruction. It will
++ * for this single dword 2D blit.
++ */
++
++static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
++                               uint32_t value)
++{
++      struct drm_i915_private *dev_priv =
++          (struct drm_i915_private *)dev->dev_private;
++
++      RING_LOCALS;
++      i915_kernel_lost_context(dev);
++      BEGIN_LP_RING(6);
++      OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
++      OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
++      OUT_RING((0x1 << 16) | (0x4));
++      OUT_RING(offset);
++      OUT_RING(value);
++      OUT_RING(0);
++      ADVANCE_LP_RING();
++}
++
++static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
++                                          *buffers, unsigned num_buffers)
++{
++      while (num_buffers--)
++              drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
++}
++
++int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
++                   struct drm_i915_validate_buffer *buffers,
++                   struct i915_relocatee_info *relocatee, uint32_t * reloc)
++{
++      unsigned index;
++      unsigned long new_cmd_offset;
++      u32 val;
++      int ret, i;
++      int buf_index = -1;
++
++      /*
++       * FIXME: O(relocs * buffers) complexity.
++       */
++
++      for (i = 0; i <= num_buffers; i++)
++              if (buffers[i].buffer)
++                      if (reloc[2] == buffers[i].buffer->base.hash.key)
++                              buf_index = i;
++
++      if (buf_index == -1) {
++              DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
++              return -EINVAL;
++      }
++
++      /*
++       * Short-circuit relocations that were correctly
++       * guessed by the client
++       */
++      if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
++              return 0;
++
++      new_cmd_offset = reloc[0];
++      if (!relocatee->data_page ||
++          !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
++              struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
++
++              drm_bo_kunmap(&relocatee->kmap);
++              relocatee->data_page = NULL;
++              relocatee->offset = new_cmd_offset;
++
++              if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
++                ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
++                      if (ret)
++                              return ret;
++                      relocatee->idle = I915_RELOC_IDLE;
++              }
++
++              if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
++                           (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
++                      drm_bo_evict_cached(relocatee->buf);
++
++              ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
++                                1, &relocatee->kmap);
++              if (ret) {
++                      DRM_ERROR
++                          ("Could not map command buffer to apply relocs\n %08lx",
++                           new_cmd_offset);
++                      return ret;
++              }
++              relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
++                                                     &relocatee->is_iomem);
++              relocatee->page_offset = (relocatee->offset & PAGE_MASK);
++      }
++
++      val = buffers[buf_index].buffer->offset;
++      index = (reloc[0] - relocatee->page_offset) >> 2;
++
++      /* add in validate */
++      val = val + reloc[1];
++
++      if (DRM_DEBUG_RELOCATION) {
++              if (buffers[buf_index].presumed_offset_correct &&
++                  relocatee->data_page[index] != val) {
++                      DRM_DEBUG
++                          ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
++                           reloc[0], reloc[1], buf_index,
++                           relocatee->data_page[index], val);
++              }
++      }
++
++      if (relocatee->is_iomem)
++              iowrite32(val, relocatee->data_page + index);
++      else
++              relocatee->data_page[index] = val;
++      return 0;
++}
++
++int i915_process_relocs(struct drm_file *file_priv,
++                      uint32_t buf_handle,
++                      uint32_t __user ** reloc_user_ptr,
++                      struct i915_relocatee_info *relocatee,
++                      struct drm_i915_validate_buffer *buffers,
++                      uint32_t num_buffers)
++{
++      int ret, reloc_stride;
++      uint32_t cur_offset;
++      uint32_t reloc_count;
++      uint32_t reloc_type;
++      uint32_t reloc_buf_size;
++      uint32_t *reloc_buf = NULL;
++      int i;
++
++      /* do a copy from user from the user ptr */
++      ret = get_user(reloc_count, *reloc_user_ptr);
++      if (ret) {
++              DRM_ERROR("Could not map relocation buffer.\n");
++              goto out;
++      }
++
++      ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
++      if (ret) {
++              DRM_ERROR("Could not map relocation buffer.\n");
++              goto out;
++      }
++
++      if (reloc_type != 0) {
++              DRM_ERROR("Unsupported relocation type requested\n");
++              ret = -EINVAL;
++              goto out;
++      }
++
++      reloc_buf_size =
++          (I915_RELOC_HEADER +
++           (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
++      reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
++      if (!reloc_buf) {
++              DRM_ERROR("Out of memory for reloc buffer\n");
++              ret = -ENOMEM;
++              goto out;
++      }
++
++      if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
++              ret = -EFAULT;
++              goto out;
++      }
++
++      /* get next relocate buffer handle */
++      *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
++
++      reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);   /* may be different for other types of relocs */
++
++      DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
++                *reloc_user_ptr);
++
++      for (i = 0; i < reloc_count; i++) {
++              cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
++
++              ret = i915_apply_reloc(file_priv, num_buffers, buffers,
++                                     relocatee, reloc_buf + cur_offset);
++              if (ret)
++                      goto out;
++      }
++
++      out:
++      if (reloc_buf)
++              kfree(reloc_buf);
++
++      if (relocatee->data_page) {
++              drm_bo_kunmap(&relocatee->kmap);
++              relocatee->data_page = NULL;
++      }
++
++      return ret;
++}
++
++static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
++                         uint32_t __user * reloc_user_ptr,
++                         struct drm_i915_validate_buffer *buffers,
++                         uint32_t buf_count)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      struct i915_relocatee_info relocatee;
++      int ret = 0;
++      int b;
++
++      /*
++       * Short circuit relocations when all previous
++       * buffers offsets were correctly guessed by
++       * the client
++       */
++      if (!DRM_DEBUG_RELOCATION) {
++              for (b = 0; b < buf_count; b++)
++                      if (!buffers[b].presumed_offset_correct)
++                              break;
++
++              if (b == buf_count)
++                      return 0;
++      }
++
++      memset(&relocatee, 0, sizeof(relocatee));
++      relocatee.idle = I915_RELOC_UNCHECKED;
++
++      mutex_lock(&dev->struct_mutex);
++      relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
++      mutex_unlock(&dev->struct_mutex);
++      if (!relocatee.buf) {
++              DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
++              ret = -EINVAL;
++              goto out_err;
++      }
++
++      mutex_lock(&relocatee.buf->mutex);
++      while (reloc_user_ptr) {
++              ret =
++                  i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
++                                      &relocatee, buffers, buf_count);
++              if (ret) {
++                      DRM_ERROR("process relocs failed\n");
++                      goto out_err1;
++              }
++      }
++
++      out_err1:
++      mutex_unlock(&relocatee.buf->mutex);
++      drm_bo_usage_deref_unlocked(&relocatee.buf);
++      out_err:
++      return ret;
++}
++
++static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
++{
++      if (relocatee->data_page) {
++#ifndef DRM_KMAP_ATOMIC_PROT_PFN
++              drm_bo_kunmap(&relocatee->kmap);
++#else
++              kunmap_atomic(relocatee->data_page, KM_USER0);
++#endif
++              relocatee->data_page = NULL;
++      }
++      relocatee->buf = NULL;
++      relocatee->dst = ~0;
++}
++
++static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
++                               struct drm_i915_validate_buffer *buffers,
++                               unsigned int dst, unsigned long dst_offset)
++{
++      int ret;
++
++      if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
++              i915_clear_relocatee(relocatee);
++              relocatee->dst = dst;
++              relocatee->buf = buffers[dst].buffer;
++              relocatee->idle = buffers[dst].idle;
++
++              /*
++               * Check for buffer idle. If the buffer is busy, revert to
++               * ring relocations.
++               */
++
++              if (relocatee->idle == I915_RELOC_UNCHECKED) {
++                      preempt_enable();
++                      mutex_lock(&relocatee->buf->mutex);
++
++                      ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
++                      if (ret == 0)
++                              relocatee->idle = I915_RELOC_IDLE;
++                      else {
++                              relocatee->idle = I915_RELOC_BUSY;
++                              relocatee->performed_ring_relocs = 1;
++                      }
++                      mutex_unlock(&relocatee->buf->mutex);
++                      preempt_disable();
++                      buffers[dst].idle = relocatee->idle;
++              }
++      }
++
++      if (relocatee->idle == I915_RELOC_BUSY)
++              return 0;
++
++      if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
++              DRM_ERROR("Relocation destination out of bounds.\n");
++              return -EINVAL;
++      }
++      if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
++                   NULL == relocatee->data_page)) {
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++              if (NULL != relocatee->data_page) {
++                      kunmap_atomic(relocatee->data_page, KM_USER0);
++                      relocatee->data_page = NULL;
++              }
++              ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
++                                    &relocatee->pfn, &relocatee->pg_prot);
++              if (ret) {
++                      DRM_ERROR("Can't map relocation destination.\n");
++                      return -EINVAL;
++              }
++              relocatee->data_page =
++                  kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
++                                       relocatee->pg_prot);
++#else
++              if (NULL != relocatee->data_page) {
++                      drm_bo_kunmap(&relocatee->kmap);
++                      relocatee->data_page = NULL;
++              }
++
++              ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
++                                1, &relocatee->kmap);
++              if (ret) {
++                      DRM_ERROR("Can't map relocation destination.\n");
++                      return ret;
++              }
++
++              relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
++                                                     &relocatee->is_iomem);
++#endif
++              relocatee->page_offset = dst_offset & PAGE_MASK;
++      }
++      return 0;
++}
++
++static int i915_apply_post_reloc(uint32_t reloc[],
++                               struct drm_i915_validate_buffer *buffers,
++                               uint32_t num_buffers,
++                               struct i915_relocatee_info *relocatee)
++{
++      uint32_t reloc_buffer = reloc[2];
++      uint32_t dst_buffer = reloc[3];
++      uint32_t val;
++      uint32_t index;
++      int ret;
++
++      if (likely(buffers[reloc_buffer].presumed_offset_correct))
++              return 0;
++      if (unlikely(reloc_buffer >= num_buffers)) {
++              DRM_ERROR("Invalid reloc buffer index.\n");
++              return -EINVAL;
++      }
++      if (unlikely(dst_buffer >= num_buffers)) {
++              DRM_ERROR("Invalid dest buffer index.\n");
++              return -EINVAL;
++      }
++
++      ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
++      if (unlikely(ret))
++              return ret;
++
++      val = buffers[reloc_buffer].buffer->offset;
++      index = (reloc[0] - relocatee->page_offset) >> 2;
++      val = val + reloc[1];
++
++      if (relocatee->idle == I915_RELOC_BUSY) {
++              i915_emit_ring_reloc(relocatee->buf->dev,
++                                   relocatee->buf->offset + reloc[0], val);
++              return 0;
++      }
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++      relocatee->data_page[index] = val;
++#else
++      if (likely(relocatee->is_iomem))
++              iowrite32(val, relocatee->data_page + index);
++      else
++              relocatee->data_page[index] = val;
++#endif
++
++      return 0;
++}
++
++static int i915_post_relocs(struct drm_file *file_priv,
++                          uint32_t __user * new_reloc_ptr,
++                          struct drm_i915_validate_buffer *buffers,
++                          unsigned int num_buffers)
++{
++      uint32_t *reloc;
++      uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
++      uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
++      struct i915_relocatee_info relocatee;
++      uint32_t reloc_type;
++      uint32_t num_relocs;
++      uint32_t count;
++      int ret = 0;
++      int i;
++      int short_circuit = 1;
++      uint32_t __user *reloc_ptr;
++      uint64_t new_reloc_data;
++      uint32_t reloc_buf_size;
++      uint32_t *reloc_buf;
++
++      for (i = 0; i < num_buffers; ++i) {
++              if (unlikely(!buffers[i].presumed_offset_correct)) {
++                      short_circuit = 0;
++                      break;
++              }
++      }
++
++      if (likely(short_circuit))
++              return 0;
++
++      memset(&relocatee, 0, sizeof(relocatee));
++
++      while (new_reloc_ptr) {
++              reloc_ptr = new_reloc_ptr;
++
++              ret = get_user(num_relocs, reloc_ptr);
++              if (unlikely(ret))
++                      goto out;
++              if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
++                                      header_size +
++                                      num_relocs * reloc_stride)))
++                      return -EFAULT;
++
++              ret = __get_user(reloc_type, reloc_ptr + 1);
++              if (unlikely(ret))
++                      goto out;
++
++              if (unlikely(reloc_type != 1)) {
++                      DRM_ERROR("Unsupported relocation type requested.\n");
++                      ret = -EINVAL;
++                      goto out;
++              }
++
++              ret = __get_user(new_reloc_data, reloc_ptr + 2);
++              new_reloc_ptr = (uint32_t __user *) (unsigned long)
++                  new_reloc_data;
++
++              reloc_ptr += I915_RELOC_HEADER;
++
++              if (num_relocs == 0)
++                      goto out;
++
++              reloc_buf_size =
++                  (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
++              reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
++              if (!reloc_buf) {
++                      DRM_ERROR("Out of memory for reloc buffer\n");
++                      ret = -ENOMEM;
++                      goto out;
++              }
++
++              if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
++                      ret = -EFAULT;
++                      goto out;
++              }
++              reloc = reloc_buf;
++              preempt_disable();
++              for (count = 0; count < num_relocs; ++count) {
++                      ret = i915_apply_post_reloc(reloc, buffers,
++                                                  num_buffers, &relocatee);
++                      if (unlikely(ret)) {
++                              preempt_enable();
++                              goto out;
++                      }
++                      reloc += I915_RELOC0_STRIDE;
++              }
++              preempt_enable();
++
++              if (reloc_buf) {
++                      kfree(reloc_buf);
++                      reloc_buf = NULL;
++              }
++              i915_clear_relocatee(&relocatee);
++      }
++
++      out:
++      /*
++       * Flush ring relocs so the command parser will pick them up.
++       */
++
++      if (relocatee.performed_ring_relocs)
++              (void)i915_emit_mi_flush(file_priv->minor->dev, 0);
++
++      i915_clear_relocatee(&relocatee);
++      if (reloc_buf) {
++              kfree(reloc_buf);
++              reloc_buf = NULL;
++      }
++
++      return ret;
++}
++
++static int i915_check_presumed(struct drm_i915_op_arg *arg,
++                             struct drm_buffer_object *bo,
++                             uint32_t __user * data, int *presumed_ok)
++{
++      struct drm_bo_op_req *req = &arg->d.req;
++      uint32_t hint_offset;
++      uint32_t hint = req->bo_req.hint;
++
++      *presumed_ok = 0;
++
++      if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
++              return 0;
++      if (bo->offset == req->bo_req.presumed_offset) {
++              *presumed_ok = 1;
++              return 0;
++      }
++
++      /*
++       * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
++       * the user-space IOCTL argument list, since the buffer has moved,
++       * we're about to apply relocations and we might subsequently
++       * hit an -EAGAIN. In that case the argument list will be reused by
++       * user-space, but the presumed offset is no longer valid.
++       *
++       * Needless to say, this is a bit ugly.
++       */
++
++      hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
++      hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
++      return __put_user(hint, data + hint_offset);
++}
++
++/*
++ * Validate, add fence and relocate a block of bos from a userspace list
++ */
++int i915_validate_buffer_list(struct drm_file *file_priv,
++                            unsigned int fence_class, uint64_t data,
++                            struct drm_i915_validate_buffer *buffers,
++                            uint32_t * num_buffers,
++                            uint32_t __user ** post_relocs)
++{
++      struct drm_i915_op_arg arg;
++      struct drm_bo_op_req *req = &arg.d.req;
++      int ret = 0;
++      unsigned buf_count = 0;
++      uint32_t buf_handle;
++      uint32_t __user *reloc_user_ptr;
++      struct drm_i915_validate_buffer *item = buffers;
++      *post_relocs = NULL;
++
++      do {
++              if (buf_count >= *num_buffers) {
++                      DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
++                      ret = -EINVAL;
++                      goto out_err;
++              }
++              item = buffers + buf_count;
++              item->buffer = NULL;
++              item->presumed_offset_correct = 0;
++              item->idle = I915_RELOC_UNCHECKED;
++
++              if (copy_from_user
++                  (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
++                      ret = -EFAULT;
++                      goto out_err;
++              }
++
++              ret = 0;
++              if (req->op != drm_bo_validate) {
++                      DRM_ERROR
++                          ("Buffer object operation wasn't \"validate\".\n");
++                      ret = -EINVAL;
++                      goto out_err;
++              }
++              item->ret = 0;
++              item->data = (void __user *)(unsigned long)data;
++
++              buf_handle = req->bo_req.handle;
++              reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
++
++              /*
++               * Switch mode to post-validation relocations?
++               */
++
++              if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
++                           (reloc_user_ptr != NULL))) {
++                      uint32_t reloc_type;
++
++                      ret = get_user(reloc_type, reloc_user_ptr + 1);
++                      if (ret)
++                              goto out_err;
++
++                      if (reloc_type == 1)
++                              *post_relocs = reloc_user_ptr;
++
++              }
++
++              if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
++                      ret =
++                          i915_exec_reloc(file_priv, buf_handle,
++                                          reloc_user_ptr, buffers, buf_count);
++                      if (ret)
++                              goto out_err;
++                      DRM_MEMORYBARRIER();
++              }
++
++              ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
++                                           req->bo_req.flags,
++                                           req->bo_req.mask, req->bo_req.hint,
++                                           req->bo_req.fence_class,
++                                           NULL, &item->buffer);
++              if (ret) {
++                      DRM_ERROR("error on handle validate %d\n", ret);
++                      goto out_err;
++              }
++
++              buf_count++;
++
++              ret = i915_check_presumed(&arg, item->buffer,
++                                        (uint32_t __user *)
++                                        (unsigned long)data,
++                                        &item->presumed_offset_correct);
++              if (ret)
++                      goto out_err;
++
++              data = arg.next;
++      } while (data != 0);
++      out_err:
++      *num_buffers = buf_count;
++      item->ret = (ret != -EAGAIN) ? ret : 0;
++      return ret;
++}
++
++/*
++ * Remove all buffers from the unfenced list.
++ * If the execbuffer operation was aborted, for example due to a signal,
++ * this also make sure that buffers retain their original state and
++ * fence pointers.
++ * Copy back buffer information to user-space unless we were interrupted
++ * by a signal. In which case the IOCTL must be rerun.
++ */
++
++static int i915_handle_copyback(struct drm_device *dev,
++                              struct drm_i915_validate_buffer *buffers,
++                              unsigned int num_buffers, int ret)
++{
++      int err = ret;
++      int i;
++      struct drm_i915_op_arg arg;
++      struct drm_buffer_object *bo;
++
++      if (ret)
++              drm_putback_buffer_objects(dev);
++
++      if (ret != -EAGAIN) {
++              for (i = 0; i < num_buffers; ++i) {
++                      arg.handled = 1;
++                      arg.d.rep.ret = buffers->ret;
++                      bo = buffers->buffer;
++                      mutex_lock(&bo->mutex);
++                      drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
++                      mutex_unlock(&bo->mutex);
++                      if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
++                              err = -EFAULT;
++                      buffers++;
++              }
++      }
++
++      return err;
++}
++
++/*
++ * Create a fence object, and if that fails, pretend that everything is
++ * OK and just idle the GPU.
++ */
++
++void i915_fence_or_sync(struct drm_file *file_priv,
++                      uint32_t fence_flags,
++                      struct drm_fence_arg *fence_arg,
++                      struct drm_fence_object **fence_p)
++{
++      struct drm_device *dev = file_priv->minor->dev;
++      int ret;
++      struct drm_fence_object *fence;
++
++      ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
++
++      if (ret) {
++
++              /*
++               * Fence creation failed.
++               * Fall back to synchronous operation and idle the engine.
++               */
++
++              (void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
++              (void)i915_quiescent(dev);
++
++              if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
++
++                      /*
++                       * Communicate to user-space that
++                       * fence creation has failed and that
++                       * the engine is idle.
++                       */
++
++                      fence_arg->handle = ~0;
++                      fence_arg->error = ret;
++              }
++              drm_putback_buffer_objects(dev);
++              if (fence_p)
++                      *fence_p = NULL;
++              return;
++      }
++
++      if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
++
++              ret = drm_fence_add_user_object(file_priv, fence,
++                                              fence_flags &
++                                              DRM_FENCE_FLAG_SHAREABLE);
++              if (!ret)
++                      drm_fence_fill_arg(fence, fence_arg);
++              else {
++                      /*
++                       * Fence user object creation failed.
++                       * We must idle the engine here as well, as user-
++                       * space expects a fence object to wait on. Since we
++                       * have a fence object we wait for it to signal
++                       * to indicate engine "sufficiently" idle.
++                       */
++
++                      (void)drm_fence_object_wait(fence, 0, 1, fence->type);
++                      drm_fence_usage_deref_unlocked(&fence);
++                      fence_arg->handle = ~0;
++                      fence_arg->error = ret;
++              }
++      }
++
++      if (fence_p)
++              *fence_p = fence;
++      else if (fence)
++              drm_fence_usage_deref_unlocked(&fence);
++}
++
++int i915_execbuffer(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
++          dev_priv->sarea_priv;
++      struct drm_i915_execbuffer *exec_buf = data;
++      struct drm_i915_batchbuffer *batch = &exec_buf->batch;
++      struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
++      int num_buffers;
++      int ret;
++      uint32_t __user *post_relocs;
++
++      if (!dev_priv->allow_batchbuffer) {
++              DRM_ERROR("Batchbuffer ioctl disabled\n");
++              return -EINVAL;
++      }
++
++      if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
++                                                      batch->num_cliprects *
++                                                      sizeof(struct
++                                                             drm_clip_rect)))
++              return -EFAULT;
++
++      if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
++              return -EINVAL;
++
++      ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
++      if (ret)
++              return ret;
++
++      /*
++       * The cmdbuf_mutex makes sure the validate-submit-fence
++       * operation is atomic.
++       */
++
++      ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++      if (ret) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              return -EAGAIN;
++      }
++
++      num_buffers = exec_buf->num_buffers;
++
++      if (!dev_priv->val_bufs) {
++              dev_priv->val_bufs =
++                  vmalloc(sizeof(struct drm_i915_validate_buffer) *
++                          dev_priv->max_validate_buffers);
++      }
++      if (!dev_priv->val_bufs) {
++              drm_bo_read_unlock(&dev->bm.bm_lock);
++              mutex_unlock(&dev_priv->cmdbuf_mutex);
++              return -ENOMEM;
++      }
++
++      /* validate buffer list + fixup relocations */
++      ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
++                                      dev_priv->val_bufs, &num_buffers,
++                                      &post_relocs);
++      if (ret)
++              goto out_err0;
++
++      if (post_relocs) {
++              ret = i915_post_relocs(file_priv, post_relocs,
++                                     dev_priv->val_bufs, num_buffers);
++              if (ret)
++                      goto out_err0;
++      }
++
++      /* make sure all previous memory operations have passed */
++      DRM_MEMORYBARRIER();
++
++      if (!post_relocs) {
++              drm_agp_chipset_flush(dev);
++              batch->start =
++                  dev_priv->val_bufs[num_buffers - 1].buffer->offset;
++      } else {
++              batch->start += dev_priv->val_bufs[0].buffer->offset;
++      }
++
++      DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
++                batch->start, batch->used, batch->num_cliprects);
++
++      ret = i915_dispatch_batchbuffer(dev, batch);
++      if (ret)
++              goto out_err0;
++      if (sarea_priv)
++              sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++      i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
++
++      out_err0:
++      ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
++      mutex_lock(&dev->struct_mutex);
++      i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
++      mutex_unlock(&dev->struct_mutex);
++      mutex_unlock(&dev_priv->cmdbuf_mutex);
++      drm_bo_read_unlock(&dev->bm.bm_lock);
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_fence.c git-nokia/drivers/gpu/drm-tungsten/i915_fence.c
+--- git/drivers/gpu/drm-tungsten/i915_fence.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_fence.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,273 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/*
++ * Initiate a sync flush if it's not already pending.
++ */
++
++static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
++                                       struct drm_fence_class_manager *fc)
++{
++      if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
++          !dev_priv->flush_pending) {
++              dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
++              dev_priv->flush_flags = fc->pending_flush;
++              dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
++              I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
++              dev_priv->flush_pending = 1;
++              fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
++      }
++}
++
++static inline void i915_report_rwflush(struct drm_device *dev,
++                                     struct drm_i915_private *dev_priv)
++{
++      if (unlikely(dev_priv->flush_pending)) {
++
++              uint32_t flush_flags;
++              uint32_t i_status;
++              uint32_t flush_sequence;
++
++              i_status = READ_HWSP(dev_priv, 0);
++              if ((i_status & (1 << 12)) !=
++                  (dev_priv->saved_flush_status & (1 << 12))) {
++                      flush_flags = dev_priv->flush_flags;
++                      flush_sequence = dev_priv->flush_sequence;
++                      dev_priv->flush_pending = 0;
++                      drm_fence_handler(dev, 0, flush_sequence,
++                                        flush_flags, 0);
++              }
++      }
++}
++
++static void i915_fence_flush(struct drm_device *dev,
++                           uint32_t fence_class)
++{
++      struct drm_i915_private *dev_priv = 
++              (struct drm_i915_private *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++      unsigned long irq_flags;
++
++      if (unlikely(!dev_priv))
++              return;
++
++      write_lock_irqsave(&fm->lock, irq_flags);
++      i915_initiate_rwflush(dev_priv, fc);
++      write_unlock_irqrestore(&fm->lock, irq_flags);
++}
++
++
++static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
++                          uint32_t waiting_types)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++      uint32_t sequence;
++
++      if (unlikely(!dev_priv))
++              return;
++
++      /*
++       * First, report any executed sync flush:
++       */
++
++      i915_report_rwflush(dev, dev_priv);
++
++      /*
++       * Report A new breadcrumb, and adjust IRQs.
++       */
++
++      if (waiting_types & DRM_FENCE_TYPE_EXE) {
++
++              sequence = READ_BREADCRUMB(dev_priv);
++              drm_fence_handler(dev, 0, sequence,
++                                DRM_FENCE_TYPE_EXE, 0);
++
++              if (dev_priv->fence_irq_on &&
++                  !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
++                      i915_user_irq_off(dev_priv);
++                      dev_priv->fence_irq_on = 0;
++              } else if (!dev_priv->fence_irq_on &&
++                         (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
++                      i915_user_irq_on(dev_priv);
++                      dev_priv->fence_irq_on = 1;
++              }
++      }
++
++      /*
++       * There may be new RW flushes pending. Start them.
++       */
++      
++      i915_initiate_rwflush(dev_priv, fc); 
++
++      /*
++       * And possibly, but unlikely, they finish immediately.
++       */
++
++      i915_report_rwflush(dev, dev_priv);
++
++}
++
++static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
++                           uint32_t flags, uint32_t *sequence,
++                           uint32_t *native_type)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      if (unlikely(!dev_priv))
++              return -EINVAL;
++
++      i915_emit_irq(dev);
++      *sequence = (uint32_t) dev_priv->counter;
++      *native_type = DRM_FENCE_TYPE_EXE;
++      if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
++              *native_type |= DRM_I915_FENCE_TYPE_RW;
++
++      return 0;
++}
++
++void i915_fence_handler(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++
++      write_lock(&fm->lock);
++      if (likely(dev_priv->fence_irq_on))
++              i915_fence_poll(dev, 0, fc->waiting_types);
++      write_unlock(&fm->lock);
++}
++
++/*
++ * We need a separate wait function since we need to poll for
++ * sync flushes.
++ */
++
++static int i915_fence_wait(struct drm_fence_object *fence,
++                         int lazy, int interruptible, uint32_t mask)
++{
++      struct drm_device *dev = fence->dev;
++      drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private;
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++      int ret;
++      unsigned long  _end = jiffies + 3 * DRM_HZ;
++
++      drm_fence_object_flush(fence, mask);
++      if (likely(interruptible))
++              ret = wait_event_interruptible_timeout
++                      (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
++                       3 * DRM_HZ);
++      else 
++              ret = wait_event_timeout
++                      (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), 
++                       3 * DRM_HZ);
++
++      if (unlikely(ret == -ERESTARTSYS))
++              return -EAGAIN;
++
++      if (unlikely(ret == 0))
++              return -EBUSY;
++
++      if (likely(mask == DRM_FENCE_TYPE_EXE || 
++                 drm_fence_object_signaled(fence, mask))) 
++              return 0;
++
++      /*
++       * Remove this code snippet when fixed. HWSTAM doesn't let
++       * flush info through...
++       */
++
++      if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
++              unsigned long irq_flags;
++
++              DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
++              msleep(100);
++              dev_priv->flush_pending = 0;
++              write_lock_irqsave(&fm->lock, irq_flags);
++              drm_fence_handler(dev, fence->fence_class, 
++                                fence->sequence, fence->type, 0);
++              write_unlock_irqrestore(&fm->lock, irq_flags);
++      }
++
++      /*
++       * Poll for sync flush completion.
++       */
++
++      return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
++}
++
++static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
++{
++      uint32_t flush_flags = fence->waiting_types & 
++              ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
++
++      if (likely(flush_flags == 0 || 
++                 ((flush_flags & ~fence->native_types) == 0) || 
++                 (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
++              return 0;
++      else {
++              struct drm_device *dev = fence->dev;
++              struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
++              struct drm_fence_driver *driver = dev->driver->fence_driver;
++              
++              if (unlikely(!dev_priv))
++                      return 0;
++
++              if (dev_priv->flush_pending) {
++                      uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & 
++                              driver->sequence_mask;
++
++                      if (diff < driver->wrap_diff)
++                              return 0;
++              }
++      }
++      return flush_flags;
++}
++
++struct drm_fence_driver i915_fence_driver = {
++      .num_classes = 1,
++      .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
++      .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
++      .sequence_mask = BREADCRUMB_MASK,
++      .has_irq = NULL,
++      .emit = i915_fence_emit_sequence,
++      .flush = i915_fence_flush,
++      .poll = i915_fence_poll,
++      .needed_flush = i915_fence_needed_flush,
++      .wait = i915_fence_wait,
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem.c git-nokia/drivers/gpu/drm-tungsten/i915_gem.c
+--- git/drivers/gpu/drm-tungsten/i915_gem.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2502 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include <linux/swap.h>
++
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++                          uint32_t read_domains,
++                          uint32_t write_domain);
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++                               uint64_t offset,
++                               uint64_t size,
++                               uint32_t read_domains,
++                               uint32_t write_domain);
++int
++i915_gem_set_domain(struct drm_gem_object *obj,
++                  struct drm_file *file_priv,
++                  uint32_t read_domains,
++                  uint32_t write_domain);
++static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
++static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
++static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
++
++int
++i915_gem_init_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_init *args = data;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (args->gtt_start >= args->gtt_end ||
++          (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
++          (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
++          args->gtt_end - args->gtt_start);
++
++      dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++
++/**
++ * Creates a new mm object and returns a handle to it.
++ */
++int
++i915_gem_create_ioctl(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      struct drm_i915_gem_create *args = data;
++      struct drm_gem_object *obj;
++      int handle, ret;
++
++      /* Object sizes are managed in whole pages. */
++      args->size = roundup(args->size, PAGE_SIZE);
++
++      /* Allocate the new object */
++      obj = drm_gem_object_alloc(dev, args->size);
++      if (obj == NULL)
++              return -ENOMEM;
++
++      ret = drm_gem_handle_create(file_priv, obj, &handle);
++      mutex_lock(&dev->struct_mutex);
++      /* Drop the allocation reference; the handle created above (if any)
++       * keeps the object alive.  NOTE(review): verify
++       * drm_gem_handle_create's reference semantics on failure. */
++      drm_gem_object_handle_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      if (ret)
++              return ret;
++
++      args->handle = handle;
++
++      return 0;
++}
++
++/**
++ * Reads data from the object referenced by handle.
++ *
++ * On error, the contents of *data are undefined.
++ */
++int
++i915_gem_pread_ioctl(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pread *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      ssize_t read;
++      loff_t offset;
++      int ret;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++      obj_priv = obj->driver_private;
++
++      /* Bounds check source.
++       *
++       * XXX: This could use review for overflow issues...
++       */
++      if (args->offset > obj->size || args->size > obj->size ||
++          args->offset + args->size > obj->size) {
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
++                                             I915_GEM_DOMAIN_CPU, 0);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++      }
++
++      offset = args->offset;
++
++      read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
++                      args->size, &offset);
++      if (read != args->size) {
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              if (read < 0)
++                      return read;
++              else
++                      return -EINVAL;
++      }
++
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/* NOTE(review): duplicate include — "drm_compat.h" is already included at
++ * the top of this file.  Harmless, but could be removed. */
++#include "drm_compat.h"
++
++/**
++ * Fast pwrite path: copies user data straight through the object's GTT
++ * aperture mapping.  Pins the object, moves it to the GTT domain, then
++ * copies the range page by page.
++ */
++static int
++i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++                  struct drm_i915_gem_pwrite *args,
++                  struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      ssize_t remain;
++      loff_t offset;
++      char __user *user_data;
++      char *vaddr;
++      int i, o, l;
++      int ret = 0;
++      unsigned long pfn;
++      unsigned long unwritten;
++
++      user_data = (char __user *) (uintptr_t) args->data_ptr;
++      remain = args->size;
++      if (!access_ok(VERIFY_READ, user_data, remain))
++              return -EFAULT;
++
++
++      mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_object_pin(obj, 0);
++      if (ret) {
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++      ret = i915_gem_set_domain(obj, file_priv,
++                                I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
++      if (ret)
++              goto fail;
++
++      obj_priv = obj->driver_private;
++      offset = obj_priv->gtt_offset + args->offset;
++      /* Writes are landing in the aperture; mark pages dirty for writeback. */
++      obj_priv->dirty = 1;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * i = page number
++               * o = offset within page
++               * l = bytes to copy
++               */
++              i = offset >> PAGE_SHIFT;
++              o = offset & (PAGE_SIZE-1);
++              l = remain;
++              if ((o + l) > PAGE_SIZE)
++                      l = PAGE_SIZE - o;
++
++              /* Page frame of this page within the AGP aperture. */
++              pfn = (dev->agp->base >> PAGE_SHIFT) + i;
++
++              /* Preprocessor-sensitive control flow below: when
++               * DRM_KMAP_ATOMIC_PROT_PFN is defined, the brace-block after
++               * the #endif runs only when the atomic copy faulted
++               * (unwritten != 0); otherwise that block IS the
++               * unconditional ioremap slow path.  Keep the braces and
++               * #ifdef structure exactly as-is.
++               */
++#ifdef DRM_KMAP_ATOMIC_PROT_PFN
++              /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
++               */
++              vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
++                                           __pgprot(__PAGE_KERNEL));
++#if WATCH_PWRITE
++              DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
++                       i, o, l, pfn, vaddr);
++#endif
++              unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
++                                                            user_data, l);
++              kunmap_atomic(vaddr, KM_USER0);
++
++              if (unwritten)
++#endif
++              {
++                      vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
++#if WATCH_PWRITE
++                      DRM_INFO("pwrite slow i %d o %d l %d "
++                               "pfn %ld vaddr %p\n",
++                               i, o, l, pfn, vaddr);
++#endif
++                      if (vaddr == NULL) {
++                              ret = -EFAULT;
++                              goto fail;
++                      }
++                      unwritten = __copy_from_user(vaddr + o, user_data, l);
++#if WATCH_PWRITE
++                      DRM_INFO("unwritten %ld\n", unwritten);
++#endif
++                      iounmap(vaddr);
++                      if (unwritten) {
++                              ret = -EFAULT;
++                              goto fail;
++                      }
++              }
++
++              remain -= l;
++              user_data += l;
++              offset += l;
++      }
++#if WATCH_PWRITE && 1
++      i915_gem_clflush_object(obj);
++      i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
++      i915_gem_clflush_object(obj);
++#endif
++
++fail:
++      i915_gem_object_unpin(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++int
++i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
++                    struct drm_i915_gem_pwrite *args,
++                    struct drm_file *file_priv)
++{
++      int ret;
++      loff_t offset;
++      ssize_t written;
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_set_domain(obj, file_priv,
++                                I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
++      if (ret) {
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      offset = args->offset;
++
++      written = vfs_write(obj->filp,
++                          (char __user *)(uintptr_t) args->data_ptr,
++                          args->size, &offset);
++      if (written != args->size) {
++              mutex_unlock(&dev->struct_mutex);
++              if (written < 0)
++                      return written;
++              else
++                      return -EINVAL;
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++/**
++ * Writes data to the object referenced by handle.
++ *
++ * On error, the contents of the buffer that were to be modified are undefined.
++ */
++int
++i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pwrite *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret = 0;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++      obj_priv = obj->driver_private;
++
++      /* Bounds check destination.
++       *
++       * XXX: This could use review for overflow issues...
++       */
++      if (args->offset > obj->size || args->size > obj->size ||
++          args->offset + args->size > obj->size) {
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++
++      /* We can only do the GTT pwrite on untiled buffers, as otherwise
++       * it would end up going through the fenced access, and we'll get
++       * different detiling behavior between reading and writing.
++       * pread/pwrite currently are reading and writing from the CPU
++       * perspective, requiring manual detiling by the client.
++       */
++      if (obj_priv->tiling_mode == I915_TILING_NONE &&
++          dev->gtt_total != 0)
++              ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
++      else
++              ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
++
++#if WATCH_PWRITE
++      if (ret)
++              DRM_INFO("pwrite failed %d\n", ret);
++#endif
++
++      drm_gem_object_unreference(obj);
++
++      return ret;
++}
++
++/**
++ * Called when user space prepares to use an object
++ */
++int
++i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++      struct drm_i915_gem_set_domain *args = data;
++      struct drm_gem_object *obj;
++      int ret;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++
++      mutex_lock(&dev->struct_mutex);
++#if WATCH_BUF
++      DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
++               obj, obj->size, args->read_domains, args->write_domain);
++#endif
++      ret = i915_gem_set_domain(obj, file_priv,
++                                args->read_domains, args->write_domain);
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Called when user space has done writes to this buffer
++ */
++int
++i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      struct drm_i915_gem_sw_finish *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret = 0;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      /* Unlike the other ioctls here, the lookup happens with
++       * struct_mutex already held. */
++      mutex_lock(&dev->struct_mutex);
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++
++#if WATCH_BUF
++      DRM_INFO("%s: sw_finish %d (%p %d)\n",
++               __func__, args->handle, obj, obj->size);
++#endif
++      obj_priv = obj->driver_private;
++
++      /* Pinned buffers may be scanout, so flush the cache */
++      if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
++              i915_gem_clflush_object(obj);
++              drm_agp_chipset_flush(dev);
++      }
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++/**
++ * Maps the contents of an object, returning the address it is mapped
++ * into.
++ *
++ * While the mapping holds a reference on the contents of the object, it doesn't
++ * imply a ref on the object itself.
++ */
++int
++i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_mmap *args = data;
++      struct drm_gem_object *obj;
++      loff_t offset;
++      unsigned long addr;
++
++      if (!(dev->driver->driver_features & DRIVER_GEM))
++              return -ENODEV;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EBADF;
++
++      offset = args->offset;
++
++      down_write(&current->mm->mmap_sem);
++      addr = do_mmap(obj->filp, 0, args->size,
++                     PROT_READ | PROT_WRITE, MAP_SHARED,
++                     args->offset);
++      up_write(&current->mm->mmap_sem);
++      mutex_lock(&dev->struct_mutex);
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      if (IS_ERR((void *)addr))
++              return addr;
++
++      args->addr_ptr = (uint64_t) addr;
++
++      return 0;
++}
++
++static void
++i915_gem_object_free_page_list(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page_count = obj->size / PAGE_SIZE;
++      int i;
++
++      if (obj_priv->page_list == NULL)
++              return;
++
++
++      for (i = 0; i < page_count; i++)
++              if (obj_priv->page_list[i] != NULL) {
++                      if (obj_priv->dirty)
++                              set_page_dirty(obj_priv->page_list[i]);
++                      mark_page_accessed(obj_priv->page_list[i]);
++                      page_cache_release(obj_priv->page_list[i]);
++              }
++      obj_priv->dirty = 0;
++
++      drm_free(obj_priv->page_list,
++               page_count * sizeof(struct page *),
++               DRM_MEM_DRIVER);
++      obj_priv->page_list = NULL;
++}
++
++static void
++i915_gem_object_move_to_active(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      /* Add a reference if we're newly entering the active list. */
++      if (!obj_priv->active) {
++              drm_gem_object_reference(obj);
++              obj_priv->active = 1;
++      }
++      /* Move from whatever list we were on to the tail of execution. */
++      list_move_tail(&obj_priv->list,
++                     &dev_priv->mm.active_list);
++}
++
++
++static void
++i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++      if (obj_priv->pin_count != 0)
++              list_del_init(&obj_priv->list);
++      else
++              list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++
++      if (obj_priv->active) {
++              obj_priv->active = 0;
++              drm_gem_object_unreference(obj);
++      }
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++/**
++ * Creates a new sequence number, emitting a write of it to the status page
++ * plus an interrupt, which will trigger i915_user_interrupt_handler.
++ *
++ * Must be called with struct_lock held.
++ *
++ * Returned sequence numbers are nonzero on success.
++ */
++static uint32_t
++i915_add_request(struct drm_device *dev, uint32_t flush_domains)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_request *request;
++      uint32_t seqno;
++      int was_empty;
++      RING_LOCALS;
++
++      /* Allocation failure is reported as seqno 0, the reserved
++       * "no seqno" value. */
++      request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
++      if (request == NULL)
++              return 0;
++
++      /* Grab the seqno we're going to make this request be, and bump the
++       * next (skipping 0 so it can be the reserved no-seqno value).
++       */
++      seqno = dev_priv->mm.next_gem_seqno;
++      dev_priv->mm.next_gem_seqno++;
++      if (dev_priv->mm.next_gem_seqno == 0)
++              dev_priv->mm.next_gem_seqno++;
++
++      /* Store the seqno into the hardware status page, then raise a user
++       * interrupt so waiters in i915_wait_request() wake up. */
++      BEGIN_LP_RING(4);
++      OUT_RING(MI_STORE_DWORD_INDEX);
++      OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
++      OUT_RING(seqno);
++
++      OUT_RING(MI_USER_INTERRUPT);
++      ADVANCE_LP_RING();
++
++      DRM_DEBUG("%d\n", seqno);
++
++      request->seqno = seqno;
++      request->emitted_jiffies = jiffies;
++      request->flush_domains = flush_domains;
++      was_empty = list_empty(&dev_priv->mm.request_list);
++      list_add_tail(&request->list, &dev_priv->mm.request_list);
++
++      /* First request after idle: kick off the periodic retire work. */
++      if (was_empty)
++              schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
++      return seqno;
++}
++
++/**
++ * Command execution barrier
++ *
++ * Ensures that all commands in the ring are finished
++ * before signalling the CPU
++ */
++uint32_t
++i915_retire_commands(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++      uint32_t flush_domains = 0;
++      RING_LOCALS;
++
++      /* The sampler always gets flushed on i965 (sigh) */
++      if (IS_I965G(dev))
++              flush_domains |= I915_GEM_DOMAIN_SAMPLER;
++      /* Emit the MI_FLUSH plus a padding noop. */
++      BEGIN_LP_RING(2);
++      OUT_RING(cmd);
++      OUT_RING(0); /* noop */
++      ADVANCE_LP_RING();
++      /* Report which domains this barrier implicitly flushed. */
++      return flush_domains;
++}
++
++/**
++ * Moves buffers associated only with the given active seqno from the active
++ * to inactive list, potentially freeing them.
++ */
++static void
++i915_gem_retire_request(struct drm_device *dev,
++                      struct drm_i915_gem_request *request)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      if (request->flush_domains != 0) {
++              struct drm_i915_gem_object *obj_priv, *next;
++
++              /* First clear any buffers that were only waiting for a flush
++               * matching the one just retired.
++               */
++
++              list_for_each_entry_safe(obj_priv, next,
++                                       &dev_priv->mm.flushing_list, list) {
++                      struct drm_gem_object *obj = obj_priv->obj;
++
++                      if (obj->write_domain & request->flush_domains) {
++                              obj->write_domain = 0;
++                              i915_gem_object_move_to_inactive(obj);
++                      }
++              }
++
++      }
++
++      /* Move any buffers on the active list that are no longer referenced
++       * by the ringbuffer to the flushing/inactive lists as appropriate.
++       */
++      while (!list_empty(&dev_priv->mm.active_list)) {
++              struct drm_gem_object *obj;
++              struct drm_i915_gem_object *obj_priv;
++
++              obj_priv = list_first_entry(&dev_priv->mm.active_list,
++                                          struct drm_i915_gem_object,
++                                          list);
++              obj = obj_priv->obj;
++
++              /* If the seqno being retired doesn't match the oldest in the
++               * list, then the oldest in the list must still be newer than
++               * this seqno.
++               */
++              /* Note: this early return (not break) relies on the active
++               * list being ordered oldest-first by seqno. */
++              if (obj_priv->last_rendering_seqno != request->seqno)
++                      return;
++#if WATCH_LRU
++              DRM_INFO("%s: retire %d moves to inactive list %p\n",
++                       __func__, request->seqno, obj);
++#endif
++
++              /* If this request flushes the write domain,
++               * clear the write domain from the object now
++               */
++              if (request->flush_domains & obj->write_domain)
++                  obj->write_domain = 0;
++
++              /* Still-dirty objects park on the flushing list until a
++               * matching flush retires; clean ones go inactive now. */
++              if (obj->write_domain != 0) {
++                      list_move_tail(&obj_priv->list,
++                                     &dev_priv->mm.flushing_list);
++              } else {
++                      i915_gem_object_move_to_inactive(obj);
++              }
++      }
++}
++
/**
 * Returns true if seq1 is later than seq2.
 *
 * Uses signed two's-complement wrap-around arithmetic, so the comparison
 * stays correct when the 32-bit sequence counter overflows.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	int32_t delta = (int32_t)(seq1 - seq2);

	return delta >= 0;
}
++
++/* Returns the last seqno the GPU wrote to the hardware status page
++ * (the slot written by i915_add_request()). */
++uint32_t
++i915_get_gem_seqno(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
++}
++
++/**
++ * This function clears the request list as sequence numbers are passed.
++ */
++void
++i915_gem_retire_requests(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t seqno;
++
++      seqno = i915_get_gem_seqno(dev);
++
++      while (!list_empty(&dev_priv->mm.request_list)) {
++              struct drm_i915_gem_request *request;
++              uint32_t retiring_seqno;
++
++              request = list_first_entry(&dev_priv->mm.request_list,
++                                         struct drm_i915_gem_request,
++                                         list);
++              retiring_seqno = request->seqno;
++
++              if (i915_seqno_passed(seqno, retiring_seqno) ||
++                  dev_priv->mm.wedged) {
++                      i915_gem_retire_request(dev, request);
++
++                      list_del(&request->list);
++                      drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
++              } else
++                      break;
++      }
++}
++
++void
++i915_gem_retire_work_handler(struct work_struct *work)
++{
++      drm_i915_private_t *dev_priv;
++      struct drm_device *dev;
++
++      dev_priv = container_of(work, drm_i915_private_t,
++                              mm.retire_work.work);
++      dev = dev_priv->dev;
++
++      mutex_lock(&dev->struct_mutex);
++      i915_gem_retire_requests(dev);
++      if (!list_empty(&dev_priv->mm.request_list))
++              schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
++      mutex_unlock(&dev->struct_mutex);
++}
++
++/**
++ * Waits for a sequence number to be signaled, and cleans up the
++ * request and object lists appropriately for that event.
++ *
++ * NOTE(review): appears to require dev->struct_mutex held (it retires
++ * requests and the lists are mutated elsewhere under that lock) —
++ * confirm at call sites.
++ */
++int
++i915_wait_request(struct drm_device *dev, uint32_t seqno)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret = 0;
++
++      BUG_ON(seqno == 0);
++
++      /* Sleep interruptibly with user interrupts enabled until the
++       * hardware passes the seqno or the GPU is declared wedged. */
++      if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
++              dev_priv->mm.waiting_gem_seqno = seqno;
++              i915_user_irq_on(dev_priv);
++              ret = wait_event_interruptible(dev_priv->irq_queue,
++                                             i915_seqno_passed(i915_get_gem_seqno(dev),
++                                                               seqno) ||
++                                             dev_priv->mm.wedged);
++              i915_user_irq_off(dev_priv);
++              dev_priv->mm.waiting_gem_seqno = 0;
++      }
++      if (dev_priv->mm.wedged)
++              ret = -EIO;
++
++      if (ret && ret != -ERESTARTSYS)
++              DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
++                        __func__, ret, seqno, i915_get_gem_seqno(dev));
++
++      /* Directly dispatch request retiring.  While we have the work queue
++       * to handle this, the waiter on a request often wants an associated
++       * buffer to have made it to the inactive list, and we would need
++       * a separate wait queue to handle that.
++       */
++      if (ret == 0)
++              i915_gem_retire_requests(dev);
++
++      return ret;
++}
++
++/* Emits the flush/invalidate commands implied by the given domain
++ * masks: a chipset flush for the CPU domain, and an MI_FLUSH variant on
++ * the ring for any GPU domains. */
++static void
++i915_gem_flush(struct drm_device *dev,
++             uint32_t invalidate_domains,
++             uint32_t flush_domains)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t cmd;
++      RING_LOCALS;
++
++#if WATCH_EXEC
++      DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
++                invalidate_domains, flush_domains);
++#endif
++
++      if (flush_domains & I915_GEM_DOMAIN_CPU)
++              drm_agp_chipset_flush(dev);
++
++      /* Only GPU domains (anything beyond CPU/GTT) need a ring command. */
++      if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
++                                                   I915_GEM_DOMAIN_GTT)) {
++              /*
++               * read/write caches:
++               *
++               * I915_GEM_DOMAIN_RENDER is always invalidated, but is
++               * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
++               * also flushed at 2d versus 3d pipeline switches.
++               *
++               * read-only caches:
++               *
++               * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
++               * MI_READ_FLUSH is set, and is always flushed on 965.
++               *
++               * I915_GEM_DOMAIN_COMMAND may not exist?
++               *
++               * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
++               * invalidated when MI_EXE_FLUSH is set.
++               *
++               * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
++               * invalidated with every MI_FLUSH.
++               *
++               * TLBs:
++               *
++               * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
++               * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
++               * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
++               * are flushed at any MI_FLUSH.
++               */
++
++              cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
++              if ((invalidate_domains|flush_domains) &
++                  I915_GEM_DOMAIN_RENDER)
++                      cmd &= ~MI_NO_WRITE_FLUSH;
++              if (!IS_I965G(dev)) {
++                      /*
++                       * On the 965, the sampler cache always gets flushed
++                       * and this bit is reserved.
++                       */
++                      if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
++                              cmd |= MI_READ_FLUSH;
++              }
++              if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
++                      cmd |= MI_EXE_FLUSH;
++
++#if WATCH_EXEC
++              DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
++#endif
++              BEGIN_LP_RING(2);
++              OUT_RING(cmd);
++              OUT_RING(0); /* noop */
++              ADVANCE_LP_RING();
++      }
++}
++
++/**
++ * Ensures that all rendering to the object has completed and the object is
++ * safe to unbind from the GTT or access from the CPU.
++ */
++static int
++i915_gem_object_wait_rendering(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret;
++      uint32_t write_domain;
++
++      /* If there are writes queued to the buffer, flush and
++       * create a new seqno to wait for.
++       */
++      /* CPU/GTT writes are masked out — only GPU domains need a ring
++       * flush before waiting. */
++      write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
++      if (write_domain) {
++#if WATCH_BUF
++              DRM_INFO("%s: flushing object %p from write domain %08x\n",
++                        __func__, obj, write_domain);
++#endif
++              i915_gem_flush(dev, 0, write_domain);
++
++              /* Tie the object to the new request so the wait below
++               * covers this flush. */
++              i915_gem_object_move_to_active(obj);
++              obj_priv->last_rendering_seqno = i915_add_request(dev,
++                                                                write_domain);
++              BUG_ON(obj_priv->last_rendering_seqno == 0);
++#if WATCH_LRU
++              DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
++#endif
++      }
++
++      /* If there is rendering queued on the buffer being evicted, wait for
++       * it.
++       */
++      if (obj_priv->active) {
++#if WATCH_BUF
++              DRM_INFO("%s: object %p wait for seqno %08x\n",
++                        __func__, obj, obj_priv->last_rendering_seqno);
++#endif
++              ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
++              if (ret != 0)
++                      return ret;
++      }
++
++      return 0;
++}
++
++/**
++ * Unbinds an object from the GTT aperture.
++ *
++ * Order matters here: wait for rendering, move to the CPU domain,
++ * release the AGP binding, drop the backing pages, then give the GTT
++ * space back to the allocator.
++ */
++static int
++i915_gem_object_unbind(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret = 0;
++
++#if WATCH_BUF
++      DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
++      DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
++#endif
++      /* Not bound: nothing to do. */
++      if (obj_priv->gtt_space == NULL)
++              return 0;
++
++      if (obj_priv->pin_count != 0) {
++              DRM_ERROR("Attempting to unbind pinned buffer\n");
++              return -EINVAL;
++      }
++
++      /* Wait for any rendering to complete
++       */
++      ret = i915_gem_object_wait_rendering(obj);
++      if (ret) {
++              DRM_ERROR("wait_rendering failed: %d\n", ret);
++              return ret;
++      }
++
++      /* Move the object to the CPU domain to ensure that
++       * any possible CPU writes while it's not in the GTT
++       * are flushed when we go to remap it. This will
++       * also ensure that all pending GPU writes are finished
++       * before we unbind.
++       */
++      ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
++                                       I915_GEM_DOMAIN_CPU);
++      if (ret) {
++              DRM_ERROR("set_domain failed: %d\n", ret);
++              return ret;
++      }
++
++      /* Tear down the AGP binding backing the GTT mapping. */
++      if (obj_priv->agp_mem != NULL) {
++              drm_unbind_agp(obj_priv->agp_mem);
++              drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
++              obj_priv->agp_mem = NULL;
++      }
++
++      BUG_ON(obj_priv->active);
++
++      i915_gem_object_free_page_list(obj);
++
++      /* Return the aperture range to the allocator and update the
++       * device-wide accounting. */
++      if (obj_priv->gtt_space) {
++              atomic_dec(&dev->gtt_count);
++              atomic_sub(obj->size, &dev->gtt_memory);
++
++              drm_mm_put_block(obj_priv->gtt_space);
++              obj_priv->gtt_space = NULL;
++      }
++
++      /* Remove ourselves from the LRU list if present. */
++      if (!list_empty(&obj_priv->list))
++              list_del_init(&obj_priv->list);
++
++      return 0;
++}
++
++/* Frees GTT space by evicting one object: take an inactive object if
++ * one exists, otherwise wait on an outstanding request or emit a flush
++ * to produce one.  NOTE(review): assumes dev->struct_mutex is held by
++ * the caller — confirm at call sites. */
++static int
++i915_gem_evict_something(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret = 0;
++
++      for (;;) {
++              /* If there's an inactive buffer available now, grab it
++               * and be done.
++               */
++              if (!list_empty(&dev_priv->mm.inactive_list)) {
++                      obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
++                                                  struct drm_i915_gem_object,
++                                                  list);
++                      obj = obj_priv->obj;
++                      BUG_ON(obj_priv->pin_count != 0);
++#if WATCH_LRU
++                      DRM_INFO("%s: evicting %p\n", __func__, obj);
++#endif
++                      BUG_ON(obj_priv->active);
++
++                      /* Wait on the rendering and unbind the buffer. */
++                      ret = i915_gem_object_unbind(obj);
++                      break;
++              }
++
++              /* If we didn't get anything, but the ring is still processing
++               * things, wait for one of those things to finish and hopefully
++               * leave us a buffer to evict.
++               */
++              if (!list_empty(&dev_priv->mm.request_list)) {
++                      struct drm_i915_gem_request *request;
++
++                      request = list_first_entry(&dev_priv->mm.request_list,
++                                                 struct drm_i915_gem_request,
++                                                 list);
++
++                      ret = i915_wait_request(dev, request->seqno);
++                      if (ret)
++                              break;
++
++                      /* if waiting caused an object to become inactive,
++                       * then loop around and wait for it. Otherwise, we
++                       * assume that waiting freed and unbound something,
++                       * so there should now be some space in the GTT
++                       */
++                      if (!list_empty(&dev_priv->mm.inactive_list))
++                              continue;
++                      break;
++              }
++
++              /* If we didn't have anything on the request list but there
++               * are buffers awaiting a flush, emit one and try again.
++               * When we wait on it, those buffers waiting for that flush
++               * will get moved to inactive.
++               */
++              if (!list_empty(&dev_priv->mm.flushing_list)) {
++                      obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
++                                                  struct drm_i915_gem_object,
++                                                  list);
++                      obj = obj_priv->obj;
++
++                      i915_gem_flush(dev,
++                                     obj->write_domain,
++                                     obj->write_domain);
++                      i915_add_request(dev, obj->write_domain);
++
++                      /* Loop again; the next iteration will wait on the
++                       * request just queued. */
++                      obj = NULL;
++                      continue;
++              }
++
++              DRM_ERROR("inactive empty %d request empty %d "
++                        "flushing empty %d\n",
++                        list_empty(&dev_priv->mm.inactive_list),
++                        list_empty(&dev_priv->mm.request_list),
++                        list_empty(&dev_priv->mm.flushing_list));
++              /* If we didn't do any of the above, there's nothing to be done
++               * and we just can't fit it in.
++               */
++              return -ENOMEM;
++      }
++      return ret;
++}
++
++static int
++i915_gem_object_get_page_list(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page_count, i;
++      struct address_space *mapping;
++      struct inode *inode;
++      struct page *page;
++      int ret;
++
++      if (obj_priv->page_list)
++              return 0;
++
++      /* Get the list of pages out of our struct file.  They'll be pinned
++       * at this point until we release them.
++       */
++      page_count = obj->size / PAGE_SIZE;
++      BUG_ON(obj_priv->page_list != NULL);
++      obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
++                                       DRM_MEM_DRIVER);
++      if (obj_priv->page_list == NULL) {
++              DRM_ERROR("Failed to allocate page list\n");
++              return -ENOMEM;
++      }
++
++      inode = obj->filp->f_path.dentry->d_inode;
++      mapping = inode->i_mapping;
++      for (i = 0; i < page_count; i++) {
++              page = read_mapping_page(mapping, i, NULL);
++              if (IS_ERR(page)) {
++                      ret = PTR_ERR(page);
++                      DRM_ERROR("read_mapping_page failed: %d\n", ret);
++                      i915_gem_object_free_page_list(obj);
++                      return ret;
++              }
++              obj_priv->page_list[i] = page;
++      }
++      return 0;
++}
++
++/**
++ * Finds free space in the GTT aperture and binds the object there.
++ */
++static int
++i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      struct drm_mm_node *free_space;
++      int page_count, ret;
++
++      if (alignment == 0)
++              alignment = PAGE_SIZE;
++      if (alignment & (PAGE_SIZE - 1)) {
++              DRM_ERROR("Invalid object alignment requested %u\n", alignment);
++              return -EINVAL;
++      }
++
++ search_free:
++      free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
++                                      obj->size, alignment, 0);
++      if (free_space != NULL) {
++              obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
++                                                     alignment);
++              if (obj_priv->gtt_space != NULL) {
++                      obj_priv->gtt_space->private = obj;
++                      obj_priv->gtt_offset = obj_priv->gtt_space->start;
++              }
++      }
++      if (obj_priv->gtt_space == NULL) {
++              /* If the gtt is empty and we're still having trouble
++               * fitting our object in, we're out of memory.
++               */
++#if WATCH_LRU
++              DRM_INFO("%s: GTT full, evicting something\n", __func__);
++#endif
++              if (list_empty(&dev_priv->mm.inactive_list) &&
++                  list_empty(&dev_priv->mm.flushing_list) &&
++                  list_empty(&dev_priv->mm.active_list)) {
++                      DRM_ERROR("GTT full, but LRU list empty\n");
++                      return -ENOMEM;
++              }
++
++              ret = i915_gem_evict_something(dev);
++              if (ret != 0) {
++                      DRM_ERROR("Failed to evict a buffer %d\n", ret);
++                      return ret;
++              }
++              goto search_free;
++      }
++
++#if WATCH_BUF
++      DRM_INFO("Binding object of size %d at 0x%08x\n",
++               obj->size, obj_priv->gtt_offset);
++#endif
++      ret = i915_gem_object_get_page_list(obj);
++      if (ret) {
++              drm_mm_put_block(obj_priv->gtt_space);
++              obj_priv->gtt_space = NULL;
++              return ret;
++      }
++
++      page_count = obj->size / PAGE_SIZE;
++      /* Create an AGP memory structure pointing at our pages, and bind it
++       * into the GTT.
++       */
++      obj_priv->agp_mem = drm_agp_bind_pages(dev,
++                                             obj_priv->page_list,
++                                             page_count,
++                                             obj_priv->gtt_offset);
++      if (obj_priv->agp_mem == NULL) {
++              i915_gem_object_free_page_list(obj);
++              drm_mm_put_block(obj_priv->gtt_space);
++              obj_priv->gtt_space = NULL;
++              return -ENOMEM;
++      }
++      atomic_inc(&dev->gtt_count);
++      atomic_add(obj->size, &dev->gtt_memory);
++
++      /* Assert that the object is not currently in any GPU domain. As it
++       * wasn't in the GTT, there shouldn't be any way it could have been in
++       * a GPU cache
++       */
++      BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++      BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++
++      return 0;
++}
++
++void
++i915_gem_clflush_object(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object      *obj_priv = obj->driver_private;
++
++      /* If we don't have a page list set up, then we're not pinned
++       * to GPU, and we can ignore the cache flush because it'll happen
++       * again at bind time.
++       */
++      if (obj_priv->page_list == NULL)
++              return;
++
++      drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
++}
++
++/*
++ * Set the next domain for the specified object. This
++ * may not actually perform the necessary flushing/invalidating though,
++ * as that may want to be batched with other set_domain operations
++ *
++ * This is (we hope) the only really tricky part of gem. The goal
++ * is fairly simple -- track which caches hold bits of the object
++ * and make sure they remain coherent. A few concrete examples may
++ * help to explain how it works. For shorthand, we use the notation
++ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
++ * a pair of read and write domain masks.
++ *
++ * Case 1: the batch buffer
++ *
++ *    1. Allocated
++ *    2. Written by CPU
++ *    3. Mapped to GTT
++ *    4. Read by GPU
++ *    5. Unmapped from GTT
++ *    6. Freed
++ *
++ *    Let's take these a step at a time
++ *
++ *    1. Allocated
++ *            Pages allocated from the kernel may still have
++ *            cache contents, so we set them to (CPU, CPU) always.
++ *    2. Written by CPU (using pwrite)
++ *            The pwrite function calls set_domain (CPU, CPU) and
++ *            this function does nothing (as nothing changes)
++ *    3. Mapped by GTT
++ *            This function asserts that the object is not
++ *            currently in any GPU-based read or write domains
++ *    4. Read by GPU
++ *            i915_gem_execbuffer calls set_domain (COMMAND, 0).
++ *            As write_domain is zero, this function adds in the
++ *            current read domains (CPU+COMMAND, 0).
++ *            flush_domains is set to CPU.
++ *            invalidate_domains is set to COMMAND
++ *            clflush is run to get data out of the CPU caches
++ *            then i915_dev_set_domain calls i915_gem_flush to
++ *            emit an MI_FLUSH and drm_agp_chipset_flush
++ *    5. Unmapped from GTT
++ *            i915_gem_object_unbind calls set_domain (CPU, CPU)
++ *            flush_domains and invalidate_domains end up both zero
++ *            so no flushing/invalidating happens
++ *    6. Freed
++ *            yay, done
++ *
++ * Case 2: The shared render buffer
++ *
++ *    1. Allocated
++ *    2. Mapped to GTT
++ *    3. Read/written by GPU
++ *    4. set_domain to (CPU,CPU)
++ *    5. Read/written by CPU
++ *    6. Read/written by GPU
++ *
++ *    1. Allocated
++ *            Same as last example, (CPU, CPU)
++ *    2. Mapped to GTT
++ *            Nothing changes (assertions find that it is not in the GPU)
++ *    3. Read/written by GPU
++ *            execbuffer calls set_domain (RENDER, RENDER)
++ *            flush_domains gets CPU
++ *            invalidate_domains gets GPU
++ *            clflush (obj)
++ *            MI_FLUSH and drm_agp_chipset_flush
++ *    4. set_domain (CPU, CPU)
++ *            flush_domains gets GPU
++ *            invalidate_domains gets CPU
++ *            wait_rendering (obj) to make sure all drawing is complete.
++ *            This will include an MI_FLUSH to get the data from GPU
++ *            to memory
++ *            clflush (obj) to invalidate the CPU cache
++ *            Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
++ *    5. Read/written by CPU
++ *            cache lines are loaded and dirtied
++ *    6. Read written by GPU
++ *            Same as last GPU access
++ *
++ * Case 3: The constant buffer
++ *
++ *    1. Allocated
++ *    2. Written by CPU
++ *    3. Read by GPU
++ *    4. Updated (written) by CPU again
++ *    5. Read by GPU
++ *
++ *    1. Allocated
++ *            (CPU, CPU)
++ *    2. Written by CPU
++ *            (CPU, CPU)
++ *    3. Read by GPU
++ *            (CPU+RENDER, 0)
++ *            flush_domains = CPU
++ *            invalidate_domains = RENDER
++ *            clflush (obj)
++ *            MI_FLUSH
++ *            drm_agp_chipset_flush
++ *    4. Updated (written) by CPU again
++ *            (CPU, CPU)
++ *            flush_domains = 0 (no previous write domain)
++ *            invalidate_domains = 0 (no new read domains)
++ *    5. Read by GPU
++ *            (CPU+RENDER, 0)
++ *            flush_domains = CPU
++ *            invalidate_domains = RENDER
++ *            clflush (obj)
++ *            MI_FLUSH
++ *            drm_agp_chipset_flush
++ */
++static int
++i915_gem_object_set_domain(struct drm_gem_object *obj,
++                          uint32_t read_domains,
++                          uint32_t write_domain)
++{
++      struct drm_device               *dev = obj->dev;
++      struct drm_i915_gem_object      *obj_priv = obj->driver_private;
++      uint32_t                        invalidate_domains = 0;
++      uint32_t                        flush_domains = 0;
++      int                             ret;
++
++#if WATCH_BUF
++      DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
++               __func__, obj,
++               obj->read_domains, read_domains,
++               obj->write_domain, write_domain);
++#endif
++      /*
++       * If the object isn't moving to a new write domain,
++       * let the object stay in multiple read domains
++       */
++      if (write_domain == 0)
++              read_domains |= obj->read_domains;
++      else
++              obj_priv->dirty = 1;
++
++      /*
++       * Flush the current write domain if
++       * the new read domains don't match. Invalidate
++       * any read domains which differ from the old
++       * write domain
++       */
++      if (obj->write_domain && obj->write_domain != read_domains) {
++              flush_domains |= obj->write_domain;
++              invalidate_domains |= read_domains & ~obj->write_domain;
++      }
++      /*
++       * Invalidate any read caches which may have
++       * stale data. That is, any new read domains.
++       */
++      invalidate_domains |= read_domains & ~obj->read_domains;
++      if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
++#if WATCH_BUF
++              DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
++                       __func__, flush_domains, invalidate_domains);
++#endif
++              /*
++               * If we're invalidating the CPU cache and flushing a GPU cache,
++               * then pause for rendering so that the GPU caches will be
++               * flushed before the cpu cache is invalidated
++               */
++              if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
++                  (flush_domains & ~(I915_GEM_DOMAIN_CPU |
++                                     I915_GEM_DOMAIN_GTT))) {
++                      ret = i915_gem_object_wait_rendering(obj);
++                      if (ret)
++                              return ret;
++              }
++              i915_gem_clflush_object(obj);
++      }
++
++      if ((write_domain | flush_domains) != 0)
++              obj->write_domain = write_domain;
++
++      /* If we're invalidating the CPU domain, clear the per-page CPU
++       * domain list as well.
++       */
++      if (obj_priv->page_cpu_valid != NULL &&
++          (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
++          ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
++              memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
++      }
++      obj->read_domains = read_domains;
++
++      dev->invalidate_domains |= invalidate_domains;
++      dev->flush_domains |= flush_domains;
++#if WATCH_BUF
++      DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
++               __func__,
++               obj->read_domains, obj->write_domain,
++               dev->invalidate_domains, dev->flush_domains);
++#endif
++      return 0;
++}
++
++/**
++ * Set the read/write domain on a range of the object.
++ *
++ * Currently only implemented for CPU reads, otherwise drops to normal
++ * i915_gem_object_set_domain().
++ */
++static int
++i915_gem_object_set_domain_range(struct drm_gem_object *obj,
++                               uint64_t offset,
++                               uint64_t size,
++                               uint32_t read_domains,
++                               uint32_t write_domain)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret, i;
++
++      if (obj->read_domains & I915_GEM_DOMAIN_CPU)
++              return 0;
++
++      if (read_domains != I915_GEM_DOMAIN_CPU ||
++          write_domain != 0)
++              return i915_gem_object_set_domain(obj,
++                                                read_domains, write_domain);
++
++      /* Wait on any GPU rendering to the object to be flushed. */
++      if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
++              ret = i915_gem_object_wait_rendering(obj);
++              if (ret)
++                      return ret;
++      }
++
++      if (obj_priv->page_cpu_valid == NULL) {
++              obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
++                                                    DRM_MEM_DRIVER);
++      }
++
++      /* Flush the cache on any pages that are still invalid from the CPU's
++       * perspective.
++       */
++      for (i = offset / PAGE_SIZE; i < (offset + size - 1) / PAGE_SIZE; i++) {
++              if (obj_priv->page_cpu_valid[i])
++                      continue;
++
++              drm_ttm_cache_flush(obj_priv->page_list + i, 1);
++
++              obj_priv->page_cpu_valid[i] = 1;
++      }
++
++      return 0;
++}
++
++/**
++ * Once all of the objects have been set in the proper domain,
++ * perform the necessary flush and invalidate operations.
++ *
++ * Returns the write domains flushed, for use in flush tracking.
++ */
++static uint32_t
++i915_gem_dev_set_domain(struct drm_device *dev)
++{
++      uint32_t flush_domains = dev->flush_domains;
++
++      /*
++       * Now that all the buffers are synced to the proper domains,
++       * flush and invalidate the collected domains
++       */
++      if (dev->invalidate_domains | dev->flush_domains) {
++#if WATCH_EXEC
++              DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
++                        __func__,
++                       dev->invalidate_domains,
++                       dev->flush_domains);
++#endif
++              i915_gem_flush(dev,
++                             dev->invalidate_domains,
++                             dev->flush_domains);
++              dev->invalidate_domains = 0;
++              dev->flush_domains = 0;
++      }
++
++      return flush_domains;
++}
++
++/**
++ * Pin an object to the GTT and evaluate the relocations landing in it.
++ */
++static int
++i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
++                               struct drm_file *file_priv,
++                               struct drm_i915_gem_exec_object *entry)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_relocation_entry reloc;
++      struct drm_i915_gem_relocation_entry __user *relocs;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int i, ret;
++      uint32_t last_reloc_offset = -1;
++      void *reloc_page = NULL;
++
++      /* Choose the GTT offset for our buffer and put it there. */
++      ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
++      if (ret)
++              return ret;
++
++      entry->offset = obj_priv->gtt_offset;
++
++      relocs = (struct drm_i915_gem_relocation_entry __user *)
++               (uintptr_t) entry->relocs_ptr;
++      /* Apply the relocations, using the GTT aperture to avoid cache
++       * flushing requirements.
++       */
++      for (i = 0; i < entry->relocation_count; i++) {
++              struct drm_gem_object *target_obj;
++              struct drm_i915_gem_object *target_obj_priv;
++              uint32_t reloc_val, reloc_offset, *reloc_entry;
++              int ret;
++
++              ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
++              if (ret != 0) {
++                      i915_gem_object_unpin(obj);
++                      return ret;
++              }
++
++              target_obj = drm_gem_object_lookup(obj->dev, file_priv,
++                                                 reloc.target_handle);
++              if (target_obj == NULL) {
++                      i915_gem_object_unpin(obj);
++                      return -EBADF;
++              }
++              target_obj_priv = target_obj->driver_private;
++
++              /* The target buffer should have appeared before us in the
++               * exec_object list, so it should have a GTT space bound by now.
++               */
++              if (target_obj_priv->gtt_space == NULL) {
++                      DRM_ERROR("No GTT space found for object %d\n",
++                                reloc.target_handle);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++
++              if (reloc.offset > obj->size - 4) {
++                      DRM_ERROR("Relocation beyond object bounds: "
++                                "obj %p target %d offset %d size %d.\n",
++                                obj, reloc.target_handle,
++                                (int) reloc.offset, (int) obj->size);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++              if (reloc.offset & 3) {
++                      DRM_ERROR("Relocation not 4-byte aligned: "
++                                "obj %p target %d offset %d.\n",
++                                obj, reloc.target_handle,
++                                (int) reloc.offset);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++
++              if (reloc.write_domain && target_obj->pending_write_domain &&
++                  reloc.write_domain != target_obj->pending_write_domain) {
++                      DRM_ERROR("Write domain conflict: "
++                                "obj %p target %d offset %d "
++                                "new %08x old %08x\n",
++                                obj, reloc.target_handle,
++                                (int) reloc.offset,
++                                reloc.write_domain,
++                                target_obj->pending_write_domain);
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return -EINVAL;
++              }
++
++#if WATCH_RELOC
++              DRM_INFO("%s: obj %p offset %08x target %d "
++                       "read %08x write %08x gtt %08x "
++                       "presumed %08x delta %08x\n",
++                       __func__,
++                       obj,
++                       (int) reloc.offset,
++                       (int) reloc.target_handle,
++                       (int) reloc.read_domains,
++                       (int) reloc.write_domain,
++                       (int) target_obj_priv->gtt_offset,
++                       (int) reloc.presumed_offset,
++                       reloc.delta);
++#endif
++
++              target_obj->pending_read_domains |= reloc.read_domains;
++              target_obj->pending_write_domain |= reloc.write_domain;
++
++              /* If the relocation already has the right value in it, no
++               * more work needs to be done.
++               */
++              if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
++                      drm_gem_object_unreference(target_obj);
++                      continue;
++              }
++
++              /* Now that we're going to actually write some data in,
++               * make sure that any rendering using this buffer's contents
++               * is completed.
++               */
++              i915_gem_object_wait_rendering(obj);
++
++              /* As we're writing through the gtt, flush
++               * any CPU writes before we write the relocations
++               */
++              if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
++                      i915_gem_clflush_object(obj);
++                      drm_agp_chipset_flush(dev);
++                      obj->write_domain = 0;
++              }
++
++              /* Map the page containing the relocation we're going to
++               * perform.
++               */
++              reloc_offset = obj_priv->gtt_offset + reloc.offset;
++              if (reloc_page == NULL ||
++                  (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
++                  (reloc_offset & ~(PAGE_SIZE - 1))) {
++                      if (reloc_page != NULL)
++                              iounmap(reloc_page);
++
++                      reloc_page = ioremap(dev->agp->base +
++                                           (reloc_offset & ~(PAGE_SIZE - 1)),
++                                           PAGE_SIZE);
++                      last_reloc_offset = reloc_offset;
++                      if (reloc_page == NULL) {
++                              drm_gem_object_unreference(target_obj);
++                              i915_gem_object_unpin(obj);
++                              return -ENOMEM;
++                      }
++              }
++
++              reloc_entry = (uint32_t *)((char *)reloc_page +
++                                         (reloc_offset & (PAGE_SIZE - 1)));
++              reloc_val = target_obj_priv->gtt_offset + reloc.delta;
++
++#if WATCH_BUF
++              DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
++                        obj, (unsigned int) reloc.offset,
++                        readl(reloc_entry), reloc_val);
++#endif
++              writel(reloc_val, reloc_entry);
++
++              /* Write the updated presumed offset for this entry back out
++               * to the user.
++               */
++              reloc.presumed_offset = target_obj_priv->gtt_offset;
++              ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
++              if (ret != 0) {
++                      drm_gem_object_unreference(target_obj);
++                      i915_gem_object_unpin(obj);
++                      return ret;
++              }
++
++              drm_gem_object_unreference(target_obj);
++      }
++
++      if (reloc_page != NULL)
++              iounmap(reloc_page);
++
++#if WATCH_BUF
++      if (0)
++              i915_gem_dump_object(obj, 128, __func__, ~0);
++#endif
++      return 0;
++}
++
++/** Dispatch a batchbuffer to the ring
++ */
++static int
++i915_dispatch_gem_execbuffer(struct drm_device *dev,
++                            struct drm_i915_gem_execbuffer *exec,
++                            uint64_t exec_offset)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
++                                           (uintptr_t) exec->cliprects_ptr;
++      int nbox = exec->num_cliprects;
++      int i = 0, count;
++      uint32_t        exec_start, exec_len;
++      RING_LOCALS;
++
++      exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
++      exec_len = (uint32_t) exec->batch_len;
++
++      if ((exec_start | exec_len) & 0x7) {
++              DRM_ERROR("alignment\n");
++              return -EINVAL;
++      }
++
++      if (!exec_start)
++              return -EINVAL;
++
++      count = nbox ? nbox : 1;
++
++      for (i = 0; i < count; i++) {
++              if (i < nbox) {
++                      int ret = i915_emit_box(dev, boxes, i,
++                                              exec->DR1, exec->DR4);
++                      if (ret)
++                              return ret;
++              }
++
++              if (IS_I830(dev) || IS_845G(dev)) {
++                      BEGIN_LP_RING(4);
++                      OUT_RING(MI_BATCH_BUFFER);
++                      OUT_RING(exec_start | MI_BATCH_NON_SECURE);
++                      OUT_RING(exec_start + exec_len - 4);
++                      OUT_RING(0);
++                      ADVANCE_LP_RING();
++              } else {
++                      BEGIN_LP_RING(2);
++                      if (IS_I965G(dev)) {
++                              OUT_RING(MI_BATCH_BUFFER_START |
++                                       (2 << 6) |
++                                       MI_BATCH_NON_SECURE_I965);
++                              OUT_RING(exec_start);
++                      } else {
++                              OUT_RING(MI_BATCH_BUFFER_START |
++                                       (2 << 6));
++                              OUT_RING(exec_start | MI_BATCH_NON_SECURE);
++                      }
++                      ADVANCE_LP_RING();
++              }
++      }
++
++      /* XXX breadcrumb */
++      return 0;
++}
++
++/* Throttle our rendering by waiting until the ring has completed our requests
++ * emitted over 20 msec ago.
++ *
++ * This should get us reasonable parallelism between CPU and GPU but also
++ * relatively low latency when blocking on a particular request to finish.
++ */
++static int
++i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++      int ret = 0;
++      uint32_t seqno;
++
++      mutex_lock(&dev->struct_mutex);
++      seqno = i915_file_priv->mm.last_gem_throttle_seqno;
++      i915_file_priv->mm.last_gem_throttle_seqno =
++              i915_file_priv->mm.last_gem_seqno;
++      if (seqno)
++              ret = i915_wait_request(dev, seqno);
++      mutex_unlock(&dev->struct_mutex);
++      return ret;
++}
++
++int
++i915_gem_execbuffer(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
++      struct drm_i915_gem_execbuffer *args = data;
++      struct drm_i915_gem_exec_object *exec_list = NULL;
++      struct drm_gem_object **object_list = NULL;
++      struct drm_gem_object *batch_obj;
++      int ret, i, pinned = 0;
++      uint64_t exec_offset;
++      uint32_t seqno, flush_domains;
++
++#if WATCH_EXEC
++      DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
++                (int) args->buffers_ptr, args->buffer_count, args->batch_len);
++#endif
++
++      /* Copy in the exec list from userland */
++      exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
++                             DRM_MEM_DRIVER);
++      object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
++                               DRM_MEM_DRIVER);
++      if (exec_list == NULL || object_list == NULL) {
++              DRM_ERROR("Failed to allocate exec or object list "
++                        "for %d buffers\n",
++                        args->buffer_count);
++              ret = -ENOMEM;
++              goto pre_mutex_err;
++      }
++      ret = copy_from_user(exec_list,
++                           (struct drm_i915_relocation_entry __user *)
++                           (uintptr_t) args->buffers_ptr,
++                           sizeof(*exec_list) * args->buffer_count);
++      if (ret != 0) {
++              DRM_ERROR("copy %d exec entries failed %d\n",
++                        args->buffer_count, ret);
++              goto pre_mutex_err;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      if (dev_priv->mm.wedged) {
++              DRM_ERROR("Execbuf while wedged\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EIO;
++      }
++
++      if (dev_priv->mm.suspended) {
++              DRM_ERROR("Execbuf while VT-switched.\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EBUSY;
++      }
++
++      /* Zero the global flush/invalidate flags. These
++       * will be modified as each object is bound to the
++       * gtt
++       */
++      dev->invalidate_domains = 0;
++      dev->flush_domains = 0;
++
++      /* Look up object handles and perform the relocations */
++      for (i = 0; i < args->buffer_count; i++) {
++              object_list[i] = drm_gem_object_lookup(dev, file_priv,
++                                                     exec_list[i].handle);
++              if (object_list[i] == NULL) {
++                      DRM_ERROR("Invalid object handle %d at index %d\n",
++                                 exec_list[i].handle, i);
++                      ret = -EBADF;
++                      goto err;
++              }
++
++              object_list[i]->pending_read_domains = 0;
++              object_list[i]->pending_write_domain = 0;
++              ret = i915_gem_object_pin_and_relocate(object_list[i],
++                                                     file_priv,
++                                                     &exec_list[i]);
++              if (ret) {
++                      DRM_ERROR("object bind and relocate failed %d\n", ret);
++                      goto err;
++              }
++              pinned = i + 1;
++      }
++
++      /* Set the pending read domains for the batch buffer to COMMAND */
++      batch_obj = object_list[args->buffer_count-1];
++      batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
++      batch_obj->pending_write_domain = 0;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      for (i = 0; i < args->buffer_count; i++) {
++              struct drm_gem_object *obj = object_list[i];
++              struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++              if (obj_priv->gtt_space == NULL) {
++                      /* We evicted the buffer in the process of validating
++                       * our set of buffers in.  We could try to recover by
++                       * kicking them everything out and trying again from
++                       * the start.
++                       */
++                      ret = -ENOMEM;
++                      goto err;
++              }
++
++              /* make sure all previous memory operations have passed */
++              ret = i915_gem_object_set_domain(obj,
++                                               obj->pending_read_domains,
++                                               obj->pending_write_domain);
++              if (ret)
++                      goto err;
++      }
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      /* Flush/invalidate caches and chipset buffer */
++      flush_domains = i915_gem_dev_set_domain(dev);
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++#if WATCH_COHERENCY
++      for (i = 0; i < args->buffer_count; i++) {
++              i915_gem_object_check_coherency(object_list[i],
++                                              exec_list[i].handle);
++      }
++#endif
++
++      exec_offset = exec_list[args->buffer_count - 1].offset;
++
++#if WATCH_EXEC
++      i915_gem_dump_object(object_list[args->buffer_count - 1],
++                            args->batch_len,
++                            __func__,
++                            ~0);
++#endif
++
++      /* Exec the batchbuffer */
++      ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
++      if (ret) {
++              DRM_ERROR("dispatch failed %d\n", ret);
++              goto err;
++      }
++
++      /*
++       * Ensure that the commands in the batch buffer are
++       * finished before the interrupt fires
++       */
++      flush_domains |= i915_retire_commands(dev);
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      /*
++       * Get a seqno representing the execution of the current buffer,
++       * which we can wait on.  We would like to mitigate these interrupts,
++       * likely by only creating seqnos occasionally (so that we have
++       * *some* interrupts representing completion of buffers that we can
++       * wait on when trying to clear up gtt space).
++       */
++      seqno = i915_add_request(dev, flush_domains);
++      BUG_ON(seqno == 0);
++      i915_file_priv->mm.last_gem_seqno = seqno;
++      for (i = 0; i < args->buffer_count; i++) {
++              struct drm_gem_object *obj = object_list[i];
++              struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++              i915_gem_object_move_to_active(obj);
++              obj_priv->last_rendering_seqno = seqno;
++#if WATCH_LRU
++              DRM_INFO("%s: move to exec list %p\n", __func__, obj);
++#endif
++      }
++#if WATCH_LRU
++      i915_dump_lru(dev, __func__);
++#endif
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      /* Copy the new buffer offsets back to the user's exec list. */
++      ret = copy_to_user((struct drm_i915_relocation_entry __user *)
++                         (uintptr_t) args->buffers_ptr,
++                         exec_list,
++                         sizeof(*exec_list) * args->buffer_count);
++      if (ret)
++              DRM_ERROR("failed to copy %d exec entries "
++                        "back to user (%d)\n",
++                         args->buffer_count, ret);
++err:
++      if (object_list != NULL) {
++              for (i = 0; i < pinned; i++)
++                      i915_gem_object_unpin(object_list[i]);
++
++              for (i = 0; i < args->buffer_count; i++)
++                      drm_gem_object_unreference(object_list[i]);
++      }
++      mutex_unlock(&dev->struct_mutex);
++
++pre_mutex_err:
++      drm_free(object_list, sizeof(*object_list) * args->buffer_count,
++               DRM_MEM_DRIVER);
++      drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
++               DRM_MEM_DRIVER);
++
++      return ret;
++}
++
++int
++i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int ret;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++      if (obj_priv->gtt_space == NULL) {
++              ret = i915_gem_object_bind_to_gtt(obj, alignment);
++              if (ret != 0) {
++                      DRM_ERROR("Failure to bind: %d", ret);
++                      return ret;
++              }
++      }
++      obj_priv->pin_count++;
++
++      /* If the object is not active and not pending a flush,
++       * remove it from the inactive list
++       */
++      if (obj_priv->pin_count == 1) {
++              atomic_inc(&dev->pin_count);
++              atomic_add(obj->size, &dev->pin_memory);
++              if (!obj_priv->active &&
++                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++                                         I915_GEM_DOMAIN_GTT)) == 0 &&
++                  !list_empty(&obj_priv->list))
++                      list_del_init(&obj_priv->list);
++      }
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++
++      return 0;
++}
++
++void
++i915_gem_object_unpin(struct drm_gem_object *obj)
++{
++      struct drm_device *dev = obj->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++      obj_priv->pin_count--;
++      BUG_ON(obj_priv->pin_count < 0);
++      BUG_ON(obj_priv->gtt_space == NULL);
++
++      /* If the object is no longer pinned, and is
++       * neither active nor being flushed, then stick it on
++       * the inactive list
++       */
++      if (obj_priv->pin_count == 0) {
++              if (!obj_priv->active &&
++                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++                                         I915_GEM_DOMAIN_GTT)) == 0)
++                      list_move_tail(&obj_priv->list,
++                                     &dev_priv->mm.inactive_list);
++              atomic_dec(&dev->pin_count);
++              atomic_sub(obj->size, &dev->pin_memory);
++      }
++      i915_verify_inactive(dev, __FILE__, __LINE__);
++}
++
++int
++i915_gem_pin_ioctl(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pin *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
++                        args->handle);
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++      obj_priv = obj->driver_private;
++
++      ret = i915_gem_object_pin(obj, args->alignment);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      /* XXX - flush the CPU caches for pinned objects
++       * as the X server doesn't manage domains yet
++       */
++      if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
++              i915_gem_clflush_object(obj);
++              drm_agp_chipset_flush(dev);
++              obj->write_domain = 0;
++      }
++      args->offset = obj_priv->gtt_offset;
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++int
++i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      struct drm_i915_gem_pin *args = data;
++      struct drm_gem_object *obj;
++
++      mutex_lock(&dev->struct_mutex);
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
++                        args->handle);
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++
++      i915_gem_object_unpin(obj);
++
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
++
++int
++i915_gem_busy_ioctl(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_i915_gem_busy *args = data;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      mutex_lock(&dev->struct_mutex);
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL) {
++              DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
++                        args->handle);
++              mutex_unlock(&dev->struct_mutex);
++              return -EBADF;
++      }
++
++      obj_priv = obj->driver_private;
++      args->busy = obj_priv->active;
++
++      drm_gem_object_unreference(obj);
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
++
++int
++i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++    return i915_gem_ring_throttle(dev, file_priv);
++}
++
++int i915_gem_init_object(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv;
++
++      obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
++      if (obj_priv == NULL)
++              return -ENOMEM;
++
++      /*
++       * We've just allocated pages from the kernel,
++       * so they've just been written by the CPU with
++       * zeros. They'll need to be clflushed before we
++       * use them with the GPU.
++       */
++      obj->write_domain = I915_GEM_DOMAIN_CPU;
++      obj->read_domains = I915_GEM_DOMAIN_CPU;
++
++      obj->driver_private = obj_priv;
++      obj_priv->obj = obj;
++      INIT_LIST_HEAD(&obj_priv->list);
++      return 0;
++}
++
++void i915_gem_free_object(struct drm_gem_object *obj)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++
++      while (obj_priv->pin_count > 0)
++              i915_gem_object_unpin(obj);
++
++      i915_gem_object_unbind(obj);
++
++      drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
++      drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
++}
++
++int
++i915_gem_set_domain(struct drm_gem_object *obj,
++                  struct drm_file *file_priv,
++                  uint32_t read_domains,
++                  uint32_t write_domain)
++{
++      struct drm_device *dev = obj->dev;
++      int ret;
++      uint32_t flush_domains;
++
++      BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++
++      ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
++      if (ret)
++              return ret;
++      flush_domains = i915_gem_dev_set_domain(obj->dev);
++
++      if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
++              (void) i915_add_request(dev, flush_domains);
++
++      return 0;
++}
++
++/** Unbinds all objects that are on the given buffer list. */
++static int
++i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
++{
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      while (!list_empty(head)) {
++              obj_priv = list_first_entry(head,
++                                          struct drm_i915_gem_object,
++                                          list);
++              obj = obj_priv->obj;
++
++              if (obj_priv->pin_count != 0) {
++                      DRM_ERROR("Pinned object in unbind list\n");
++                      mutex_unlock(&dev->struct_mutex);
++                      return -EINVAL;
++              }
++
++              ret = i915_gem_object_unbind(obj);
++              if (ret != 0) {
++                      DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
++                                ret);
++                      mutex_unlock(&dev->struct_mutex);
++                      return ret;
++              }
++      }
++
++
++      return 0;
++}
++
++static int
++i915_gem_idle(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      uint32_t seqno, cur_seqno, last_seqno;
++      int stuck;
++
++      if (dev_priv->mm.suspended)
++              return 0;
++
++      /* Hack!  Don't let anybody do execbuf while we don't control the chip.
++       * We need to replace this with a semaphore, or something.
++       */
++      dev_priv->mm.suspended = 1;
++
++      i915_kernel_lost_context(dev);
++
++      /* Flush the GPU along with all non-CPU write domains
++       */
++      i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
++                     ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
++      seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
++                                      I915_GEM_DOMAIN_GTT));
++
++      if (seqno == 0) {
++              mutex_unlock(&dev->struct_mutex);
++              return -ENOMEM;
++      }
++
++      dev_priv->mm.waiting_gem_seqno = seqno;
++      last_seqno = 0;
++      stuck = 0;
++      for (;;) {
++              cur_seqno = i915_get_gem_seqno(dev);
++              if (i915_seqno_passed(cur_seqno, seqno))
++                      break;
++              if (last_seqno == cur_seqno) {
++                      if (stuck++ > 100) {
++                              DRM_ERROR("hardware wedged\n");
++                              dev_priv->mm.wedged = 1;
++                              DRM_WAKEUP(&dev_priv->irq_queue);
++                              break;
++                      }
++              }
++              msleep(10);
++              last_seqno = cur_seqno;
++      }
++      dev_priv->mm.waiting_gem_seqno = 0;
++
++      i915_gem_retire_requests(dev);
++
++      /* Active and flushing should now be empty as we've
++       * waited for a sequence higher than any pending execbuffer
++       */
++      BUG_ON(!list_empty(&dev_priv->mm.active_list));
++      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++
++      /* Request should now be empty as we've also waited
++       * for the last request in the list
++       */
++      BUG_ON(!list_empty(&dev_priv->mm.request_list));
++
++      /* Move all buffers out of the GTT. */
++      i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
++
++      BUG_ON(!list_empty(&dev_priv->mm.active_list));
++      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++      BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
++      BUG_ON(!list_empty(&dev_priv->mm.request_list));
++      return 0;
++}
++
++static int
++i915_gem_init_hws(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      /* If we need a physical address for the status page, it's already
++       * initialized at driver load time.
++       */
++      if (!I915_NEED_GFX_HWS(dev))
++              return 0;
++
++      obj = drm_gem_object_alloc(dev, 4096);
++      if (obj == NULL) {
++              DRM_ERROR("Failed to allocate status page\n");
++              return -ENOMEM;
++      }
++      obj_priv = obj->driver_private;
++
++      ret = i915_gem_object_pin(obj, 4096);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              return ret;
++      }
++
++      dev_priv->status_gfx_addr = obj_priv->gtt_offset;
++      dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
++      dev_priv->hws_map.size = 4096;
++      dev_priv->hws_map.type = 0;
++      dev_priv->hws_map.flags = 0;
++      dev_priv->hws_map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->hws_map, dev);
++      if (dev_priv->hws_map.handle == NULL) {
++              DRM_ERROR("Failed to map status page.\n");
++              memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++      dev_priv->hws_obj = obj;
++      dev_priv->hw_status_page = dev_priv->hws_map.handle;
++      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
++      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
++      DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
++
++      return 0;
++}
++
++static int
++i915_gem_init_ringbuffer(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++      int ret;
++
++      ret = i915_gem_init_hws(dev);
++      if (ret != 0)
++              return ret;
++
++      obj = drm_gem_object_alloc(dev, 128 * 1024);
++      if (obj == NULL) {
++              DRM_ERROR("Failed to allocate ringbuffer\n");
++              return -ENOMEM;
++      }
++      obj_priv = obj->driver_private;
++
++      ret = i915_gem_object_pin(obj, 4096);
++      if (ret != 0) {
++              drm_gem_object_unreference(obj);
++              return ret;
++      }
++
++      /* Set up the kernel mapping for the ring. */
++      dev_priv->ring.Size = obj->size;
++      dev_priv->ring.tail_mask = obj->size - 1;
++
++      dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
++      dev_priv->ring.map.size = obj->size;
++      dev_priv->ring.map.type = 0;
++      dev_priv->ring.map.flags = 0;
++      dev_priv->ring.map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->ring.map, dev);
++      if (dev_priv->ring.map.handle == NULL) {
++              DRM_ERROR("Failed to map ringbuffer.\n");
++              memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
++              drm_gem_object_unreference(obj);
++              return -EINVAL;
++      }
++      dev_priv->ring.ring_obj = obj;
++      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++      /* Stop the ring if it's running. */
++      I915_WRITE(PRB0_CTL, 0);
++      I915_WRITE(PRB0_HEAD, 0);
++      I915_WRITE(PRB0_TAIL, 0);
++      I915_WRITE(PRB0_START, 0);
++
++      /* Initialize the ring. */
++      I915_WRITE(PRB0_START, obj_priv->gtt_offset);
++      I915_WRITE(PRB0_CTL,
++                 ((obj->size - 4096) & RING_NR_PAGES) |
++                 RING_NO_REPORT |
++                 RING_VALID);
++
++      /* Update our cache of the ring state */
++      i915_kernel_lost_context(dev);
++
++      return 0;
++}
++
++static void
++i915_gem_cleanup_ringbuffer(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      if (dev_priv->ring.ring_obj == NULL)
++              return;
++
++      drm_core_ioremapfree(&dev_priv->ring.map, dev);
++
++      i915_gem_object_unpin(dev_priv->ring.ring_obj);
++      drm_gem_object_unreference(dev_priv->ring.ring_obj);
++      dev_priv->ring.ring_obj = NULL;
++      memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
++
++      if (dev_priv->hws_obj != NULL) {
++              i915_gem_object_unpin(dev_priv->hws_obj);
++              drm_gem_object_unreference(dev_priv->hws_obj);
++              dev_priv->hws_obj = NULL;
++              memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
++
++              /* Write high address into HWS_PGA when disabling. */
++              I915_WRITE(HWS_PGA, 0x1ffff000);
++      }
++}
++
++int
++i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int ret;
++
++      if (dev_priv->mm.wedged) {
++              DRM_ERROR("Reenabling wedged hardware, good luck\n");
++              dev_priv->mm.wedged = 0;
++      }
++
++      ret = i915_gem_init_ringbuffer(dev);
++      if (ret != 0)
++              return ret;
++
++      mutex_lock(&dev->struct_mutex);
++      BUG_ON(!list_empty(&dev_priv->mm.active_list));
++      BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++      BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
++      BUG_ON(!list_empty(&dev_priv->mm.request_list));
++      dev_priv->mm.suspended = 0;
++      mutex_unlock(&dev->struct_mutex);
++      return 0;
++}
++
++int
++i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_idle(dev);
++      if (ret == 0)
++              i915_gem_cleanup_ringbuffer(dev);
++      mutex_unlock(&dev->struct_mutex);
++
++      return 0;
++}
++
++void
++i915_gem_lastclose(struct drm_device *dev)
++{
++      int ret;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (dev_priv->ring.ring_obj != NULL) {
++              ret = i915_gem_idle(dev);
++              if (ret)
++                      DRM_ERROR("failed to idle hardware: %d\n", ret);
++
++              i915_gem_cleanup_ringbuffer(dev);
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++}
++
++void i915_gem_load(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      INIT_LIST_HEAD(&dev_priv->mm.active_list);
++      INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
++      INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
++      INIT_LIST_HEAD(&dev_priv->mm.request_list);
++      INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
++                        i915_gem_retire_work_handler);
++      dev_priv->mm.next_gem_seqno = 1;
++
++      i915_gem_detect_bit_6_swizzle(dev);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_debug.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_debug.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_debug.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_debug.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,202 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Packard <keithp@keithp.com>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if WATCH_INACTIVE
++void
++i915_verify_inactive(struct drm_device *dev, char *file, int line)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++              obj = obj_priv->obj;
++              if (obj_priv->pin_count || obj_priv->active ||
++                  (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
++                                         I915_GEM_DOMAIN_GTT)))
++                      DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
++                                obj,
++                                obj_priv->pin_count, obj_priv->active,
++                                obj->write_domain, file, line);
++      }
++}
++#endif /* WATCH_INACTIVE */
++
++
++#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
++static void
++i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
++                 uint32_t bias, uint32_t mark)
++{
++      uint32_t *mem = kmap_atomic(page, KM_USER0);
++      int i;
++      for (i = start; i < end; i += 4)
++              DRM_INFO("%08x: %08x%s\n",
++                        (int) (bias + i), mem[i / 4],
++                        (bias + i == mark) ? " ********" : "");
++      kunmap_atomic(mem, KM_USER0);
++      /* give syslog time to catch up */
++      msleep(1);
++}
++
++void
++i915_gem_dump_object(struct drm_gem_object *obj, int len,
++                   const char *where, uint32_t mark)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page;
++
++      DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
++      for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
++              int page_len, chunk, chunk_len;
++
++              page_len = len - page * PAGE_SIZE;
++              if (page_len > PAGE_SIZE)
++                      page_len = PAGE_SIZE;
++
++              for (chunk = 0; chunk < page_len; chunk += 128) {
++                      chunk_len = page_len - chunk;
++                      if (chunk_len > 128)
++                              chunk_len = 128;
++                      i915_gem_dump_page(obj_priv->page_list[page],
++                                         chunk, chunk + chunk_len,
++                                         obj_priv->gtt_offset +
++                                         page * PAGE_SIZE,
++                                         mark);
++              }
++      }
++}
++#endif
++
++#if WATCH_LRU
++void
++i915_dump_lru(struct drm_device *dev, const char *where)
++{
++      drm_i915_private_t              *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object      *obj_priv;
++
++      DRM_INFO("active list %s {\n", where);
++      list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
++                          list)
++      {
++              DRM_INFO("    %p: %08x\n", obj_priv,
++                       obj_priv->last_rendering_seqno);
++      }
++      DRM_INFO("}\n");
++      DRM_INFO("flushing list %s {\n", where);
++      list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
++                          list)
++      {
++              DRM_INFO("    %p: %08x\n", obj_priv,
++                       obj_priv->last_rendering_seqno);
++      }
++      DRM_INFO("}\n");
++      DRM_INFO("inactive %s {\n", where);
++      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++              DRM_INFO("    %p: %08x\n", obj_priv,
++                       obj_priv->last_rendering_seqno);
++      }
++      DRM_INFO("}\n");
++}
++#endif
++
++
++#if WATCH_COHERENCY
++void
++i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
++{
++      struct drm_device *dev = obj->dev;
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      int page;
++      uint32_t *gtt_mapping;
++      uint32_t *backing_map = NULL;
++      int bad_count = 0;
++
++      DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
++               __func__, obj, obj_priv->gtt_offset, handle,
++               obj->size / 1024);
++
++      gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
++                            obj->size);
++      if (gtt_mapping == NULL) {
++              DRM_ERROR("failed to map GTT space\n");
++              return;
++      }
++
++      for (page = 0; page < obj->size / PAGE_SIZE; page++) {
++              int i;
++
++              backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
++
++              if (backing_map == NULL) {
++                      DRM_ERROR("failed to map backing page\n");
++                      goto out;
++              }
++
++              for (i = 0; i < PAGE_SIZE / 4; i++) {
++                      uint32_t cpuval = backing_map[i];
++                      uint32_t gttval = readl(gtt_mapping +
++                                              page * 1024 + i);
++
++                      if (cpuval != gttval) {
++                              DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
++                                       "0x%08x vs 0x%08x\n",
++                                       (int)(obj_priv->gtt_offset +
++                                             page * PAGE_SIZE + i * 4),
++                                       cpuval, gttval);
++                              if (bad_count++ >= 8) {
++                                      DRM_INFO("...\n");
++                                      goto out;
++                              }
++                      }
++              }
++              kunmap_atomic(backing_map, KM_USER0);
++              backing_map = NULL;
++      }
++
++ out:
++      if (backing_map != NULL)
++              kunmap_atomic(backing_map, KM_USER0);
++      iounmap(gtt_mapping);
++
++      /* give syslog time to catch up */
++      msleep(1);
++
++      /* Directly flush the object, since we just loaded values with the CPU
++       * from the backing pages and we don't want to disturb the cache
++       * management that we're trying to observe.
++       */
++
++      i915_gem_clflush_object(obj);
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_proc.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_proc.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_proc.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_proc.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,293 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *    Keith Packard <keithp@keithp.com>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_compat.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static int i915_gem_active_info(char *buf, char **start, off_t offset,
++                              int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Active:\n");
++      list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
++                          list)
++      {
++              struct drm_gem_object *obj = obj_priv->obj;
++              if (obj->name) {
++                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
++                                     obj, obj->name,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              } else {
++                      DRM_PROC_PRINT("       %p: %08x %08x %d\n",
++                                     obj,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              }
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
++                                int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Flushing:\n");
++      list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
++                          list)
++      {
++              struct drm_gem_object *obj = obj_priv->obj;
++              if (obj->name) {
++                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
++                                     obj, obj->name,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              } else {
++                      DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              }
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
++                                int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_object *obj_priv;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Inactive:\n");
++      list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
++                          list)
++      {
++              struct drm_gem_object *obj = obj_priv->obj;
++              if (obj->name) {
++                      DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
++                                     obj, obj->name,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              } else {
++                      DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
++                                     obj->read_domains, obj->write_domain,
++                                     obj_priv->last_rendering_seqno);
++              }
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_request_info(char *buf, char **start, off_t offset,
++                               int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_i915_gem_request *gem_request;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Request:\n");
++      list_for_each_entry(gem_request, &dev_priv->mm.request_list,
++                          list)
++      {
++              DRM_PROC_PRINT("    %d @ %d %08x\n",
++                             gem_request->seqno,
++                             (int) (jiffies - gem_request->emitted_jiffies),
++                             gem_request->flush_domains);
++      }
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
++      DRM_PROC_PRINT("Waiter sequence:  %d\n",
++                     dev_priv->mm.waiting_gem_seqno);
++      DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++
++static int i915_interrupt_info(char *buf, char **start, off_t offset,
++                             int request, int *eof, void *data)
++{
++      struct drm_minor *minor = (struct drm_minor *) data;
++      struct drm_device *dev = minor->dev;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      int len = 0;
++
++      if (offset > DRM_PROC_LIMIT) {
++              *eof = 1;
++              return 0;
++      }
++
++      *start = &buf[offset];
++      *eof = 0;
++      DRM_PROC_PRINT("Interrupt enable:    %08x\n",
++                     I915_READ(IER));
++      DRM_PROC_PRINT("Interrupt identity:  %08x\n",
++                     I915_READ(IIR));
++      DRM_PROC_PRINT("Interrupt mask:      %08x\n",
++                     I915_READ(IMR));
++      DRM_PROC_PRINT("Pipe A stat:         %08x\n",
++                     I915_READ(PIPEASTAT));
++      DRM_PROC_PRINT("Pipe B stat:         %08x\n",
++                     I915_READ(PIPEBSTAT));
++      DRM_PROC_PRINT("Interrupts received: %d\n",
++                     atomic_read(&dev_priv->irq_received));
++      DRM_PROC_PRINT("Current sequence:    %d\n",
++                     i915_get_gem_seqno(dev));
++      DRM_PROC_PRINT("Waiter sequence:     %d\n",
++                     dev_priv->mm.waiting_gem_seqno);
++      DRM_PROC_PRINT("IRQ sequence:        %d\n",
++                     dev_priv->mm.irq_gem_seqno);
++      if (len > request + offset)
++              return request;
++      *eof = 1;
++      return len - offset;
++}
++
++static struct drm_proc_list {
++      /** file name */
++      const char *name;
++      /** proc callback*/
++      int (*f) (char *, char **, off_t, int, int *, void *);
++} i915_gem_proc_list[] = {
++      {"i915_gem_active", i915_gem_active_info},
++      {"i915_gem_flushing", i915_gem_flushing_info},
++      {"i915_gem_inactive", i915_gem_inactive_info},
++      {"i915_gem_request", i915_gem_request_info},
++      {"i915_gem_seqno", i915_gem_seqno_info},
++      {"i915_gem_interrupt", i915_interrupt_info},
++};
++
++#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
++
++int i915_gem_proc_init(struct drm_minor *minor)
++{
++      struct proc_dir_entry *ent;
++      int i, j;
++
++      for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
++              ent = create_proc_entry(i915_gem_proc_list[i].name,
++                                      S_IFREG | S_IRUGO, minor->dev_root);
++              if (!ent) {
++                      DRM_ERROR("Cannot create /proc/dri/.../%s\n",
++                                i915_gem_proc_list[i].name);
++                      for (j = 0; j < i; j++)
++                              remove_proc_entry(i915_gem_proc_list[i].name,
++                                                minor->dev_root);
++                      return -1;
++              }
++              ent->read_proc = i915_gem_proc_list[i].f;
++              ent->data = minor;
++      }
++      return 0;
++}
++
++void i915_gem_proc_cleanup(struct drm_minor *minor)
++{
++      int i;
++
++      if (!minor->dev_root)
++              return;
++
++      for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
++              remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_gem_tiling.c git-nokia/drivers/gpu/drm-tungsten/i915_gem_tiling.c
+--- git/drivers/gpu/drm-tungsten/i915_gem_tiling.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_gem_tiling.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,309 @@
++/*
++ * Copyright Â© 2008 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric@anholt.net>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/** @file i915_gem_tiling.c
++ *
++ * Support for managing tiling state of buffer objects.
++ *
++ * The idea behind tiling is to increase cache hit rates by rearranging
++ * pixel data so that a group of pixel accesses are in the same cacheline.
++ * Performance improvement from doing this on the back/depth buffer are on
++ * the order of 30%.
++ *
++ * Intel architectures make this somewhat more complicated, though, by
++ * adjustments made to addressing of data when the memory is in interleaved
++ * mode (matched pairs of DIMMS) to improve memory bandwidth.
++ * For interleaved memory, the CPU sends every sequential 64 bytes
++ * to an alternate memory channel so it can get the bandwidth from both.
++ *
++ * The GPU also rearranges its accesses for increased bandwidth to interleaved
++ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
++ * it does it a little differently, since one walks addresses not just in the
++ * X direction but also Y.  So, along with alternating channels when bit
++ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
++ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
++ * are common to both the 915 and 965-class hardware.
++ *
++ * The CPU also sometimes XORs in higher bits as well, to improve
++ * bandwidth doing strided access like we do so frequently in graphics.  This
++ * is called "Channel XOR Randomization" in the MCH documentation.  The result
++ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
++ * decode.
++ *
++ * All of this bit 6 XORing has an effect on our memory management,
++ * as we need to make sure that the 3d driver can correctly address object
++ * contents.
++ *
++ * If we don't have interleaved memory, all tiling is safe and no swizzling is
++ * required.
++ *
++ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
++ * 17 is not just a page offset, so as we page an objet out and back in,
++ * individual pages in it will have different bit 17 addresses, resulting in
++ * each 64 bytes being swapped with its neighbor!
++ *
++ * Otherwise, if interleaved, we have to tell the 3d driver what the address
++ * swizzling it needs to do is, since it's writing with the CPU to the pages
++ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
++ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
++ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
++ * to match what the GPU expects.
++ */
++
++/**
++ * Detects bit 6 swizzling of address lookup between IGD access and CPU
++ * access through main memory.
++ */
++void
++i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct pci_dev *bridge;
++      uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++      uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++      int mchbar_offset;
++      char __iomem *mchbar;
++      int ret;
++
++      bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
++      if (bridge == NULL) {
++              DRM_ERROR("Couldn't get bridge device\n");
++              return;
++      }
++
++      ret = pci_enable_device(bridge);
++      if (ret != 0) {
++              DRM_ERROR("pci_enable_device failed: %d\n", ret);
++              return;
++      }
++
++      if (IS_I965G(dev))
++              mchbar_offset = 0x48;
++      else
++              mchbar_offset = 0x44;
++
++      /* Use resource 2 for our BAR that's stashed in a nonstandard location,
++       * since the bridge would only ever use standard BARs 0-1 (though it
++       * doesn't anyway)
++       */
++      ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
++      if (ret != 0) {
++              DRM_ERROR("pci_read_base failed: %d\n", ret);
++              return;
++      }
++
++      mchbar = ioremap(pci_resource_start(bridge, 2),
++                       pci_resource_len(bridge, 2));
++      if (mchbar == NULL) {
++              DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
++              return;
++      }
++
++      if (IS_I965G(dev) && !IS_I965GM(dev)) {
++              uint32_t chdecmisc;
++
++              /* On the 965, channel interleave appears to be determined by
++               * the flex bit.  If flex is set, then the ranks (sides of a
++               * DIMM) of memory will be "stacked" (physical addresses walk
++               * through one rank then move on to the next, flipping channels
++               * or not depending on rank configuration).  The GPU in this
++               * case does exactly the same addressing as the CPU.
++               *
++               * Unlike the 945, channel randomization based does not
++               * appear to be available.
++               *
++               * XXX: While the G965 doesn't appear to do any interleaving
++               * when the DIMMs are not exactly matched, the G4x chipsets
++               * might be for "L-shaped" configurations, and will need to be
++               * detected.
++               *
++               * L-shaped configuration:
++               *
++               * +-----+
++               * |     |
++               * |DIMM2|         <-- non-interleaved
++               * +-----+
++               * +-----+ +-----+
++               * |     | |     |
++               * |DIMM0| |DIMM1| <-- interleaved area
++               * +-----+ +-----+
++               */
++              chdecmisc = readb(mchbar + CHDECMISC);
++
++              if (chdecmisc == 0xff) {
++                      DRM_ERROR("Couldn't read from MCHBAR.  "
++                                "Disabling tiling.\n");
++              } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
++                      swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++                      swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++              } else {
++                      swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++                      swizzle_y = I915_BIT_6_SWIZZLE_9;
++              }
++      } else if (IS_I9XX(dev)) {
++              uint32_t dcc;
++
++              /* On 915-945 and GM965, channel interleave by the CPU is
++               * determined by DCC.  The CPU will alternate based on bit 6
++               * in interleaved mode, and the GPU will then also alternate
++               * on bit 6, 9, and 10 for X, but the CPU may also optionally
++               * alternate based on bit 17 (XOR not disabled and XOR
++               * bit == 17).
++               */
++              dcc = readl(mchbar + DCC);
++              switch (dcc & DCC_ADDRESSING_MODE_MASK) {
++              case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
++              case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
++                      swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++                      swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++                      break;
++              case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
++                      if (IS_I915G(dev) || IS_I915GM(dev) ||
++                          dcc & DCC_CHANNEL_XOR_DISABLE) {
++                              swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++                              swizzle_y = I915_BIT_6_SWIZZLE_9;
++                      } else if (IS_I965GM(dev)) {
++                              /* GM965 only does bit 11-based channel
++                               * randomization
++                               */
++                              swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
++                              swizzle_y = I915_BIT_6_SWIZZLE_9_11;
++                      } else {
++                              /* Bit 17 or perhaps other swizzling */
++                              swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++                              swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++                      }
++                      break;
++              }
++              if (dcc == 0xffffffff) {
++                      DRM_ERROR("Couldn't read from MCHBAR.  "
++                                "Disabling tiling.\n");
++                      swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
++                      swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
++              }
++      } else {
++              /* As far as we know, the 865 doesn't have these bit 6
++               * swizzling issues.
++               */
++              swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++              swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++      }
++
++      iounmap(mchbar);
++
++      dev_priv->mm.bit_6_swizzle_x = swizzle_x;
++      dev_priv->mm.bit_6_swizzle_y = swizzle_y;
++}
++
++/**
++ * Sets the tiling mode of an object, returning the required swizzling of
++ * bit 6 of addresses in the object.
++ */
++int
++i915_gem_set_tiling(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_set_tiling *args = data;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EINVAL;
++      obj_priv = obj->driver_private;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (args->tiling_mode == I915_TILING_NONE) {
++              obj_priv->tiling_mode = I915_TILING_NONE;
++              args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++      } else {
++              if (args->tiling_mode == I915_TILING_X)
++                      args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++              else
++                      args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++              /* If we can't handle the swizzling, make it untiled. */
++              if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
++                      args->tiling_mode = I915_TILING_NONE;
++                      args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++              }
++      }
++      obj_priv->tiling_mode = args->tiling_mode;
++
++      mutex_unlock(&dev->struct_mutex);
++
++      drm_gem_object_unreference(obj);
++
++      return 0;
++}
++
++/**
++ * Returns the current tiling mode and required bit 6 swizzling for the object.
++ */
++int
++i915_gem_get_tiling(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      struct drm_i915_gem_get_tiling *args = data;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      struct drm_gem_object *obj;
++      struct drm_i915_gem_object *obj_priv;
++
++      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
++      if (obj == NULL)
++              return -EINVAL;
++      obj_priv = obj->driver_private;
++
++      mutex_lock(&dev->struct_mutex);
++
++      args->tiling_mode = obj_priv->tiling_mode;
++      switch (obj_priv->tiling_mode) {
++      case I915_TILING_X:
++              args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
++              break;
++      case I915_TILING_Y:
++              args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
++              break;
++      case I915_TILING_NONE:
++              args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
++              break;
++      default:
++              DRM_ERROR("unknown tiling mode\n");
++      }
++
++      mutex_unlock(&dev->struct_mutex);
++
++      drm_gem_object_unreference(obj);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_ioc32.c git-nokia/drivers/gpu/drm-tungsten/i915_ioc32.c
+--- git/drivers/gpu/drm-tungsten/i915_ioc32.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_ioc32.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,284 @@
++/**
++ * \file i915_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the i915 DRM.
++ *
++ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Alan Hourihane 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++typedef struct _drm_i915_batchbuffer32 {
++      int start;              /* agp offset */
++      int used;               /* nr bytes in use */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* mulitpass with multiple cliprects? */
++      u32 cliprects;  /* pointer to userspace cliprects */
++} drm_i915_batchbuffer32_t;
++
++static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
++                                 unsigned long arg)
++{
++      drm_i915_batchbuffer32_t batchbuffer32;
++      drm_i915_batchbuffer_t __user *batchbuffer;
++
++      if (copy_from_user
++          (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
++              return -EFAULT;
++
++      batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
++      if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
++          || __put_user(batchbuffer32.start, &batchbuffer->start)
++          || __put_user(batchbuffer32.used, &batchbuffer->used)
++          || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
++          || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
++          || __put_user(batchbuffer32.num_cliprects,
++                        &batchbuffer->num_cliprects)
++          || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
++                        &batchbuffer->cliprects))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_BATCHBUFFER,
++                       (unsigned long) batchbuffer);
++}
++
++typedef struct _drm_i915_cmdbuffer32 {
++      u32 buf;        /* pointer to userspace command buffer */
++      int sz;                 /* nr bytes in buf */
++      int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
++      int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
++      int num_cliprects;      /* mulitpass with multiple cliprects? */
++      u32 cliprects;  /* pointer to userspace cliprects */
++} drm_i915_cmdbuffer32_t;
++
++static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_i915_cmdbuffer32_t cmdbuffer32;
++      drm_i915_cmdbuffer_t __user *cmdbuffer;
++
++      if (copy_from_user
++          (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
++              return -EFAULT;
++
++      cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
++      if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
++          || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
++                        &cmdbuffer->buf)
++          || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
++          || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
++          || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
++          || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
++          || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
++                        &cmdbuffer->cliprects))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
++}
++
++typedef struct drm_i915_irq_emit32 {
++      u32 irq_seq;
++} drm_i915_irq_emit32_t;
++
++static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_i915_irq_emit32_t req32;
++      drm_i915_irq_emit_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user((int __user *)(unsigned long)req32.irq_seq,
++                        &request->irq_seq))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request);
++}
++typedef struct drm_i915_getparam32 {
++      int param;
++      u32 value;
++} drm_i915_getparam32_t;
++
++static int compat_i915_getparam(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_i915_getparam32_t req32;
++      drm_i915_getparam_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.param, &request->param)
++          || __put_user((void __user *)(unsigned long)req32.value,
++                        &request->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_GETPARAM, (unsigned long) request);
++}
++
++typedef struct drm_i915_mem_alloc32 {
++      int region;
++      int alignment;
++      int size;
++      u32 region_offset;      /* offset from start of fb or agp */
++} drm_i915_mem_alloc32_t;
++
++static int compat_i915_alloc(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_i915_mem_alloc32_t req32;
++      drm_i915_mem_alloc_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.region, &request->region)
++          || __put_user(req32.alignment, &request->alignment)
++          || __put_user(req32.size, &request->size)
++          || __put_user((void __user *)(unsigned long)req32.region_offset,
++                        &request->region_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_ALLOC, (unsigned long) request);
++}
++
++typedef struct drm_i915_execbuffer32 {
++      uint64_t ops_list;
++      uint32_t num_buffers;
++      struct _drm_i915_batchbuffer32 batch;
++      drm_context_t context; 
++      struct drm_fence_arg fence_arg;
++} drm_i915_execbuffer32_t;
++
++static int compat_i915_execbuffer(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_i915_execbuffer32_t req32;
++      struct drm_i915_execbuffer __user *request;
++      int err;
++
++      if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++       || __put_user(req32.ops_list, &request->ops_list)
++       || __put_user(req32.num_buffers, &request->num_buffers)
++       || __put_user(req32.context, &request->context)
++       || __copy_to_user(&request->fence_arg, &req32.fence_arg, 
++                         sizeof(req32.fence_arg))
++       || __put_user(req32.batch.start, &request->batch.start)
++       || __put_user(req32.batch.used, &request->batch.used)
++       || __put_user(req32.batch.DR1, &request->batch.DR1)
++       || __put_user(req32.batch.DR4, &request->batch.DR4)
++       || __put_user(req32.batch.num_cliprects,
++                     &request->batch.num_cliprects)
++       || __put_user((int __user *)(unsigned long)req32.batch.cliprects,
++                     &request->batch.cliprects))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
++
++      if (err)
++              return err;
++
++      if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
++          || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
++          || __get_user(req32.fence_arg.type, &request->fence_arg.type)
++          || __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
++          || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
++          || __get_user(req32.fence_arg.error, &request->fence_arg.error)
++          || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
++              return -EFAULT;
++
++      if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++
++drm_ioctl_compat_t *i915_compat_ioctls[] = {
++      [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
++      [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
++      [DRM_I915_GETPARAM] = compat_i915_getparam,
++      [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
++      [DRM_I915_ALLOC] = compat_i915_alloc,
++#ifdef I915_HAVE_BUFFER
++      [DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
++#endif
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
++              fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_irq.c git-nokia/drivers/gpu/drm-tungsten/i915_irq.c
+--- git/drivers/gpu/drm-tungsten/i915_irq.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_irq.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1005 @@
++/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
++ */
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#define MAX_NOPID ((u32)~0)
++
++/*
++ * These are the interrupts used by the driver
++ */
++#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
++                                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
++                                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
++
++static inline void
++i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
++{
++      if ((dev_priv->irq_mask_reg & mask) != 0) {
++              dev_priv->irq_mask_reg &= ~mask;
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++              (void) I915_READ(IMR);
++      }
++}
++
++static inline void
++i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
++{
++      if ((dev_priv->irq_mask_reg & mask) != mask) {
++              dev_priv->irq_mask_reg |= mask;
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++              (void) I915_READ(IMR);
++      }
++}
++
++/**
++ * i915_get_pipe - return the pipe associated with a given plane
++ * @dev: DRM device
++ * @plane: plane to look for
++ *
++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
++ * rather than a pipe number, since they may not always be equal.  This routine
++ * maps the given @plane back to a pipe number.
++ */
++static int
++i915_get_pipe(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      u32 dspcntr;
++
++      dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
++
++      return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
++}
++
++/**
++ * i915_get_plane - return the plane associated with a given pipe
++ * @dev: DRM device
++ * @pipe: pipe to look for
++ *
++ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
++ * rather than a pipe number, since they may not always be equal.  This routine
++ * maps the given @pipe back to a plane number.
++ */
++static int
++i915_get_plane(struct drm_device *dev, int pipe)
++{
++      if (i915_get_pipe(dev, 0) == pipe)
++              return 0;
++      return 1;
++}
++
++/**
++ * i915_pipe_enabled - check if a pipe is enabled
++ * @dev: DRM device
++ * @pipe: pipe to check
++ *
++ * Reading certain registers when the pipe is disabled can hang the chip.
++ * Use this routine to make sure the PLL is running and the pipe is active
++ * before reading such registers if unsure.
++ */
++static int
++i915_pipe_enabled(struct drm_device *dev, int pipe)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
++
++      if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
++              return 1;
++
++      return 0;
++}
++
++/**
++ * Emit a synchronous flip.
++ *
++ * This function must be called with the drawable spinlock held.
++ */
++static void
++i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
++                       int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u16 x1, y1, x2, y2;
++      int pf_planes = 1 << plane;
++
++      DRM_SPINLOCK_ASSERT(&dev->drw_lock);
++
++      /* If the window is visible on the other plane, we have to flip on that
++       * plane as well.
++       */
++      if (plane == 1) {
++              x1 = sarea_priv->planeA_x;
++              y1 = sarea_priv->planeA_y;
++              x2 = x1 + sarea_priv->planeA_w;
++              y2 = y1 + sarea_priv->planeA_h;
++      } else {
++              x1 = sarea_priv->planeB_x;
++              y1 = sarea_priv->planeB_y;
++              x2 = x1 + sarea_priv->planeB_w;
++              y2 = y1 + sarea_priv->planeB_h;
++      }
++
++      if (x2 > 0 && y2 > 0) {
++              int i, num_rects = drw->num_rects;
++              struct drm_clip_rect *rect = drw->rects;
++
++              for (i = 0; i < num_rects; i++)
++                      if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
++                            rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
++                              pf_planes = 0x3;
++
++                              break;
++                      }
++      }
++
++      i915_dispatch_flip(dev, pf_planes, 1);
++}
++
++/**
++ * Emit blits for scheduled buffer swaps.
++ *
++ * This function will be called with the HW lock held.
++ */
++static void i915_vblank_tasklet(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      struct list_head *list, *tmp, hits, *hit;
++      int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
++      unsigned counter[2];
++      struct drm_drawable_info *drw;
++      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 cpp = dev_priv->cpp,  offsets[3];
++      u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
++                              XY_SRC_COPY_BLT_WRITE_ALPHA |
++                              XY_SRC_COPY_BLT_WRITE_RGB)
++                           : XY_SRC_COPY_BLT_CMD;
++      u32 src_pitch = sarea_priv->pitch * cpp;
++      u32 dst_pitch = sarea_priv->pitch * cpp;
++      /* COPY rop (0xcc), map cpp to magic color depth constants */
++      u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
++      RING_LOCALS;
++      
++      if (IS_I965G(dev) && sarea_priv->front_tiled) {
++              cmd |= XY_SRC_COPY_BLT_DST_TILED;
++              dst_pitch >>= 2;
++      }
++      if (IS_I965G(dev) && sarea_priv->back_tiled) {
++              cmd |= XY_SRC_COPY_BLT_SRC_TILED;
++              src_pitch >>= 2;
++      }
++      
++      counter[0] = drm_vblank_count(dev, 0);
++      counter[1] = drm_vblank_count(dev, 1);
++
++      DRM_DEBUG("\n");
++
++      INIT_LIST_HEAD(&hits);
++
++      nhits = nrects = 0;
++
++      /* No irqsave/restore necessary.  This tasklet may be run in an
++       * interrupt context or normal context, but we don't have to worry
++       * about getting interrupted by something acquiring the lock, because
++       * we are the interrupt context thing that acquires the lock.
++       */
++      DRM_SPINLOCK(&dev_priv->swaps_lock);
++
++      /* Find buffer swaps scheduled for this vertical blank */
++      list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
++              drm_i915_vbl_swap_t *vbl_swap =
++                      list_entry(list, drm_i915_vbl_swap_t, head);
++              int pipe = i915_get_pipe(dev, vbl_swap->plane);
++
++              if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
++                      continue;
++
++              list_del(list);
++              dev_priv->swaps_pending--;
++              drm_vblank_put(dev, pipe);
++
++              DRM_SPINUNLOCK(&dev_priv->swaps_lock);
++              DRM_SPINLOCK(&dev->drw_lock);
++
++              drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
++
++              if (!drw) {
++                      DRM_SPINUNLOCK(&dev->drw_lock);
++                      drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
++                      DRM_SPINLOCK(&dev_priv->swaps_lock);
++                      continue;
++              }
++
++              list_for_each(hit, &hits) {
++                      drm_i915_vbl_swap_t *swap_cmp =
++                              list_entry(hit, drm_i915_vbl_swap_t, head);
++                      struct drm_drawable_info *drw_cmp =
++                              drm_get_drawable_info(dev, swap_cmp->drw_id);
++
++                      if (drw_cmp &&
++                          drw_cmp->rects[0].y1 > drw->rects[0].y1) {
++                              list_add_tail(list, hit);
++                              break;
++                      }
++              }
++
++              DRM_SPINUNLOCK(&dev->drw_lock);
++
++              /* List of hits was empty, or we reached the end of it */
++              if (hit == &hits)
++                      list_add_tail(list, hits.prev);
++
++              nhits++;
++
++              DRM_SPINLOCK(&dev_priv->swaps_lock);
++      }
++
++      DRM_SPINUNLOCK(&dev_priv->swaps_lock);
++
++      if (nhits == 0) {
++              return;
++      }
++
++      i915_kernel_lost_context(dev);
++
++      upper[0] = upper[1] = 0;
++      slice[0] = max(sarea_priv->planeA_h / nhits, 1);
++      slice[1] = max(sarea_priv->planeB_h / nhits, 1);
++      lower[0] = sarea_priv->planeA_y + slice[0];
++      lower[1] = sarea_priv->planeB_y + slice[0];
++
++      offsets[0] = sarea_priv->front_offset;
++      offsets[1] = sarea_priv->back_offset;
++      offsets[2] = sarea_priv->third_offset;
++      num_pages = sarea_priv->third_handle ? 3 : 2;
++
++      DRM_SPINLOCK(&dev->drw_lock);
++
++      /* Emit blits for buffer swaps, partitioning both outputs into as many
++       * slices as there are buffer swaps scheduled in order to avoid tearing
++       * (based on the assumption that a single buffer swap would always
++       * complete before scanout starts).
++       */
++      for (i = 0; i++ < nhits;
++           upper[0] = lower[0], lower[0] += slice[0],
++           upper[1] = lower[1], lower[1] += slice[1]) {
++              int init_drawrect = 1;
++
++              if (i == nhits)
++                      lower[0] = lower[1] = sarea_priv->height;
++
++              list_for_each(hit, &hits) {
++                      drm_i915_vbl_swap_t *swap_hit =
++                              list_entry(hit, drm_i915_vbl_swap_t, head);
++                      struct drm_clip_rect *rect;
++                      int num_rects, plane, front, back;
++                      unsigned short top, bottom;
++
++                      drw = drm_get_drawable_info(dev, swap_hit->drw_id);
++
++                      if (!drw)
++                              continue;
++
++                      plane = swap_hit->plane;
++
++                      if (swap_hit->flip) {
++                              i915_dispatch_vsync_flip(dev, drw, plane);
++                              continue;
++                      }
++
++                      if (init_drawrect) {
++                              int width  = sarea_priv->width;
++                              int height = sarea_priv->height;
++                              if (IS_I965G(dev)) {
++                                      BEGIN_LP_RING(4);
++
++                                      OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
++                                      OUT_RING(0);
++                                      OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
++                                      OUT_RING(0);
++                                      
++                                      ADVANCE_LP_RING();
++                              } else {
++                                      BEGIN_LP_RING(6);
++      
++                                      OUT_RING(GFX_OP_DRAWRECT_INFO);
++                                      OUT_RING(0);
++                                      OUT_RING(0);
++                                      OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16));
++                                      OUT_RING(0);
++                                      OUT_RING(0);
++                                      
++                                      ADVANCE_LP_RING();
++                              }
++
++                              sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
++
++                              init_drawrect = 0;
++                      }
++
++                      rect = drw->rects;
++                      top = upper[plane];
++                      bottom = lower[plane];
++
++                      front = (dev_priv->sarea_priv->pf_current_page >>
++                               (2 * plane)) & 0x3;
++                      back = (front + 1) % num_pages;
++
++                      for (num_rects = drw->num_rects; num_rects--; rect++) {
++                              int y1 = max(rect->y1, top);
++                              int y2 = min(rect->y2, bottom);
++
++                              if (y1 >= y2)
++                                      continue;
++
++                              BEGIN_LP_RING(8);
++
++                              OUT_RING(cmd);
++                              OUT_RING(ropcpp | dst_pitch);
++                              OUT_RING((y1 << 16) | rect->x1);
++                              OUT_RING((y2 << 16) | rect->x2);
++                              OUT_RING(offsets[front]);
++                              OUT_RING((y1 << 16) | rect->x1);
++                              OUT_RING(src_pitch);
++                              OUT_RING(offsets[back]);
++
++                              ADVANCE_LP_RING();
++                      }
++              }
++      }
++
++      DRM_SPINUNLOCK(&dev->drw_lock);
++
++      list_for_each_safe(hit, tmp, &hits) {
++              drm_i915_vbl_swap_t *swap_hit =
++                      list_entry(hit, drm_i915_vbl_swap_t, head);
++
++              list_del(hit);
++
++              drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
++      }
++}
++
++u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      unsigned long high_frame;
++      unsigned long low_frame;
++      u32 high1, high2, low, count;
++      int pipe;
++
++      pipe = i915_get_pipe(dev, plane);
++      high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
++      low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
++
++      if (!i915_pipe_enabled(dev, pipe)) {
++          DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++          return 0;
++      }
++
++      /*
++       * High & low register fields aren't synchronized, so make sure
++       * we get a low value that's stable across two reads of the high
++       * register.
++       */
++      do {
++              high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++                       PIPE_FRAME_HIGH_SHIFT);
++              low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++                      PIPE_FRAME_LOW_SHIFT);
++              high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++                       PIPE_FRAME_HIGH_SHIFT);
++      } while (high1 != high2);
++
++      count = (high1 << 8) | low;
++
++      return count;
++}
++
++irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      u32 iir;
++      u32 pipea_stats = 0, pipeb_stats = 0;
++      int vblank = 0;
++#ifdef __linux__
++      if (dev->pdev->msi_enabled)
++              I915_WRITE(IMR, ~0);
++#endif
++      iir = I915_READ(IIR);
++#if 0
++      DRM_DEBUG("flag=%08x\n", iir);
++#endif
++      atomic_inc(&dev_priv->irq_received);
++      if (iir == 0) {
++#ifdef __linux__
++              if (dev->pdev->msi_enabled) {
++                      I915_WRITE(IMR, dev_priv->irq_mask_reg);
++                      (void) I915_READ(IMR);
++              }
++#endif
++              return IRQ_NONE;
++      }
++
++      /*
++       * Clear the PIPE(A|B)STAT regs before the IIR otherwise
++       * we may get extra interrupts.
++       */
++      if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
++              pipea_stats = I915_READ(PIPEASTAT);
++              if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++                                 PIPE_VBLANK_INTERRUPT_STATUS))
++              {
++                      vblank++;
++                      drm_handle_vblank(dev, i915_get_plane(dev, 0));
++              }
++              I915_WRITE(PIPEASTAT, pipea_stats);
++      }
++      if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
++              pipeb_stats = I915_READ(PIPEBSTAT);
++              /* Ack the event */
++              I915_WRITE(PIPEBSTAT, pipeb_stats);
++
++              /* The vblank interrupt gets enabled even if we didn't ask for
++                 it, so make sure it's shut down again */
++              if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
++                      pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE);
++
++              if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
++                                 PIPE_VBLANK_INTERRUPT_STATUS))
++              {
++                      vblank++;
++                      drm_handle_vblank(dev, i915_get_plane(dev, 1));
++              }
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++              if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE)
++                      opregion_asle_intr(dev);
++#endif
++#endif
++              I915_WRITE(PIPEBSTAT, pipeb_stats);
++      }
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      if (iir & I915_ASLE_INTERRUPT)
++              opregion_asle_intr(dev);
++#endif
++#endif
++
++      if (dev_priv->sarea_priv)
++          dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++
++      I915_WRITE(IIR, iir);
++#ifdef __linux__
++      if (dev->pdev->msi_enabled)
++              I915_WRITE(IMR, dev_priv->irq_mask_reg);
++#endif
++      (void) I915_READ(IIR); /* Flush posted writes */
++
++      if (iir & I915_USER_INTERRUPT) {
++#ifdef I915_HAVE_GEM
++              dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
++#endif
++              DRM_WAKEUP(&dev_priv->irq_queue);
++#ifdef I915_HAVE_FENCE
++              i915_fence_handler(dev);
++#endif
++      }
++
++      if (vblank) {
++              if (dev_priv->swaps_pending > 0)
++                      drm_locked_tasklet(dev, i915_vblank_tasklet);
++      }
++
++      return IRQ_HANDLED;
++}
++
++int i915_emit_irq(struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      i915_kernel_lost_context(dev);
++
++      DRM_DEBUG("\n");
++
++      i915_emit_breadcrumb(dev);
++
++      BEGIN_LP_RING(2);
++      OUT_RING(0);
++      OUT_RING(MI_USER_INTERRUPT);
++      ADVANCE_LP_RING();
++
++      return dev_priv->counter;
++}
++
++void i915_user_irq_on(drm_i915_private_t *dev_priv)
++{
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++      if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
++              i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++}
++
++void i915_user_irq_off(drm_i915_private_t *dev_priv)
++{
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++#ifdef __linux__
++      BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
++#endif
++      if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
++              i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++}
++
++
++int i915_wait_irq(struct drm_device * dev, int irq_nr)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int ret = 0;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
++                READ_BREADCRUMB(dev_priv));
++
++      if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
++              if (dev_priv->sarea_priv)
++                      dev_priv->sarea_priv->last_dispatch =
++                              READ_BREADCRUMB(dev_priv);
++              return 0;
++      }
++
++      i915_user_irq_on(dev_priv);
++      DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
++                  READ_BREADCRUMB(dev_priv) >= irq_nr);
++      i915_user_irq_off(dev_priv);
++
++      if (ret == -EBUSY) {
++              DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
++                        READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
++      }
++
++      if (dev_priv->sarea_priv)
++              dev_priv->sarea_priv->last_dispatch =
++                      READ_BREADCRUMB(dev_priv);
++      return ret;
++}
++
++/* Needs the lock as it touches the ring.
++ */
++int i915_irq_emit(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_irq_emit_t *emit = data;
++      int result;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      result = i915_emit_irq(dev);
++
++      if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++/* Doesn't need the hardware lock.
++ */
++int i915_irq_wait(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_irq_wait_t *irqwait = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      return i915_wait_irq(dev, irqwait->irq_seq);
++}
++
++int i915_enable_vblank(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int pipe = i915_get_pipe(dev, plane);
++      u32     pipestat_reg = 0;
++      u32     mask_reg = 0;
++      u32     pipestat;
++
++      switch (pipe) {
++      case 0:
++              pipestat_reg = PIPEASTAT;
++              mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
++              break;
++      case 1:
++              pipestat_reg = PIPEBSTAT;
++              mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              break;
++      default:
++              DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
++                        pipe);
++              break;
++      }
++
++      if (pipestat_reg)
++      {
++              pipestat = I915_READ (pipestat_reg);
++              /*
++               * Older chips didn't have the start vblank interrupt,
++               * but 
++               */
++              if (IS_I965G (dev))
++                      pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
++              else
++                      pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
++              /*
++               * Clear any pending status
++               */
++              pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
++                           PIPE_VBLANK_INTERRUPT_STATUS);
++              I915_WRITE(pipestat_reg, pipestat);
++      }
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++      i915_enable_irq(dev_priv, mask_reg);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++
++      return 0;
++}
++
++void i915_disable_vblank(struct drm_device *dev, int plane)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int pipe = i915_get_pipe(dev, plane);
++      u32     pipestat_reg = 0;
++      u32     mask_reg = 0;
++      u32     pipestat;
++
++      switch (pipe) {
++      case 0:
++              pipestat_reg = PIPEASTAT;
++              mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
++              break;
++      case 1:
++              pipestat_reg = PIPEBSTAT;
++              mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              break;
++      default:
++              DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
++                        pipe);
++              break;
++      }
++
++      DRM_SPINLOCK(&dev_priv->user_irq_lock);
++      i915_disable_irq(dev_priv, mask_reg);
++      DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
++
++      if (pipestat_reg)
++      {
++              pipestat = I915_READ (pipestat_reg);
++              pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
++                            PIPE_VBLANK_INTERRUPT_ENABLE);
++              /*
++               * Clear any pending status
++               */
++              pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
++                           PIPE_VBLANK_INTERRUPT_STATUS);
++              I915_WRITE(pipestat_reg, pipestat);
++              (void) I915_READ(pipestat_reg);
++      }
++}
++
++static void i915_enable_interrupt (struct drm_device *dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++      dev_priv->irq_mask_reg = ~0;
++      I915_WRITE(IMR, dev_priv->irq_mask_reg);
++      I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
++      (void) I915_READ (IER);
++
++#ifdef __linux__
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++      opregion_enable_asle(dev);
++#endif
++#endif
++
++      dev_priv->irq_enabled = 1;
++}
++
++/* Set the vblank monitor pipe
++ */
++int i915_vblank_pipe_set(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++int i915_vblank_pipe_get(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_vblank_pipe_t *pipe = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
++
++      return 0;
++}
++
++/**
++ * Schedule buffer swap at given vertical blank.
++ */
++int i915_vblank_swap(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_vblank_swap_t *swap = data;
++      drm_i915_vbl_swap_t *vbl_swap;
++      unsigned int pipe, seqtype, curseq, plane;
++      unsigned long irqflags;
++      struct list_head *list;
++      int ret;
++
++      if (!dev_priv) {
++              DRM_ERROR("%s called with no initialization\n", __func__);
++              return -EINVAL;
++      }
++
++      if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) {
++              DRM_DEBUG("Rotation not supported\n");
++              return -EINVAL;
++      }
++
++      if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
++                           _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
++                           _DRM_VBLANK_FLIP)) {
++              DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
++              return -EINVAL;
++      }
++
++      plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
++      pipe = i915_get_pipe(dev, plane);
++
++      seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
++
++      if (!(dev_priv->vblank_pipe & (1 << pipe))) {
++              DRM_ERROR("Invalid pipe %d\n", pipe);
++              return -EINVAL;
++      }
++
++      DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
++
++      /* It makes no sense to schedule a swap for a drawable that doesn't have
++       * valid information at this point. E.g. this could mean that the X
++       * server is too old to push drawable information to the DRM, in which
++       * case all such swaps would become ineffective.
++       */
++      if (!drm_get_drawable_info(dev, swap->drawable)) {
++              DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
++              DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
++              return -EINVAL;
++      }
++
++      DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
++
++      /*
++       * We take the ref here and put it when the swap actually completes
++       * in the tasklet.
++       */
++      ret = drm_vblank_get(dev, pipe);
++      if (ret)
++              return ret;
++      curseq = drm_vblank_count(dev, pipe);
++
++      if (seqtype == _DRM_VBLANK_RELATIVE)
++              swap->sequence += curseq;
++
++      if ((curseq - swap->sequence) <= (1<<23)) {
++              if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
++                      swap->sequence = curseq + 1;
++              } else {
++                      DRM_DEBUG("Missed target sequence\n");
++                      drm_vblank_put(dev, pipe);
++                      return -EINVAL;
++              }
++      }
++
++      if (swap->seqtype & _DRM_VBLANK_FLIP) {
++              swap->sequence--;
++
++              if ((curseq - swap->sequence) <= (1<<23)) {
++                      struct drm_drawable_info *drw;
++
++                      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++                      DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
++
++                      drw = drm_get_drawable_info(dev, swap->drawable);
++
++                      if (!drw) {
++                              DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
++                                  irqflags);
++                              DRM_DEBUG("Invalid drawable ID %d\n",
++                                        swap->drawable);
++                              drm_vblank_put(dev, pipe);
++                              return -EINVAL;
++                      }
++
++                      i915_dispatch_vsync_flip(dev, drw, plane);
++
++                      DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
++
++                      drm_vblank_put(dev, pipe);
++                      return 0;
++              }
++      }
++
++      DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
++
++      list_for_each(list, &dev_priv->vbl_swaps.head) {
++              vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
++
++              if (vbl_swap->drw_id == swap->drawable &&
++                  vbl_swap->plane == plane &&
++                  vbl_swap->sequence == swap->sequence) {
++                      vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
++                      DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
++                      DRM_DEBUG("Already scheduled\n");
++                      return 0;
++              }
++      }
++
++      DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
++
++      if (dev_priv->swaps_pending >= 100) {
++              DRM_DEBUG("Too many swaps queued\n");
++              drm_vblank_put(dev, pipe);
++              return -EBUSY;
++      }
++
++      vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
++
++      if (!vbl_swap) {
++              DRM_ERROR("Failed to allocate memory to queue swap\n");
++              drm_vblank_put(dev, pipe);
++              return -ENOMEM;
++      }
++
++      DRM_DEBUG("\n");
++
++      vbl_swap->drw_id = swap->drawable;
++      vbl_swap->plane = plane;
++      vbl_swap->sequence = swap->sequence;
++      vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
++
++      if (vbl_swap->flip)
++              swap->sequence++;
++
++      DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
++
++      list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
++      dev_priv->swaps_pending++;
++
++      DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
++
++      return 0;
++}
++
++/* drm_dma.h hooks
++*/
++void i915_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++      I915_WRITE16(HWSTAM, 0xeffe);
++      I915_WRITE16(IMR, 0x0);
++      I915_WRITE16(IER, 0x0);
++}
++
++int i915_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      int ret, num_pipes = 2;
++
++      INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
++      dev_priv->swaps_pending = 0;
++
++      dev_priv->user_irq_refcount = 0;
++      dev_priv->irq_mask_reg = ~0;
++
++      ret = drm_vblank_init(dev, num_pipes);
++      if (ret)
++              return ret;
++
++      dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
++      dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++
++      i915_enable_interrupt(dev);
++      DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
++
++      /*
++       * Initialize the hardware status page IRQ location.
++       */
++
++      I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
++      return 0;
++}
++
++void i915_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++      u32 temp;
++
++      if (!dev_priv)
++              return;
++
++      dev_priv->vblank_pipe = 0;
++
++      dev_priv->irq_enabled = 0;
++      I915_WRITE(HWSTAM, 0xffffffff);
++      I915_WRITE(IMR, 0xffffffff);
++      I915_WRITE(IER, 0x0);
++
++      temp = I915_READ(PIPEASTAT);
++      I915_WRITE(PIPEASTAT, temp);
++      temp = I915_READ(PIPEBSTAT);
++      I915_WRITE(PIPEBSTAT, temp);
++      temp = I915_READ(IIR);
++      I915_WRITE(IIR, temp);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_mem.c git-nokia/drivers/gpu/drm-tungsten/i915_mem.c
+--- git/drivers/gpu/drm-tungsten/i915_mem.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_mem.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,386 @@
++/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
++ */
++/*
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++/* This memory manager is integrated into the global/local lru
++ * mechanisms used by the clients.  Specifically, it operates by
++ * setting the 'in_use' fields of the global LRU to indicate whether
++ * this region is privately allocated to a client.
++ *
++ * This does require the client to actually respect that field.
++ *
++ * Currently no effort is made to allocate 'private' memory in any
++ * clever way - the LRU information isn't used to determine which
++ * block to allocate, and the ring is drained prior to allocations --
++ * in other words allocation is expensive.
++ */
++static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      struct drm_tex_region *list;
++      unsigned shift, nr;
++      unsigned start;
++      unsigned end;
++      unsigned i;
++      int age;
++
++      shift = dev_priv->tex_lru_log_granularity;
++      nr = I915_NR_TEX_REGIONS;
++
++      start = p->start >> shift;
++      end = (p->start + p->size - 1) >> shift;
++
++      age = ++sarea_priv->texAge;
++      list = sarea_priv->texList;
++
++      /* Mark the regions with the new flag and update their age.  Move
++       * them to head of list to preserve LRU semantics.
++       */
++      for (i = start; i <= end; i++) {
++              list[i].in_use = in_use;
++              list[i].age = age;
++
++              /* remove_from_list(i)
++               */
++              list[(unsigned)list[i].next].prev = list[i].prev;
++              list[(unsigned)list[i].prev].next = list[i].next;
++
++              /* insert_at_head(list, i)
++               */
++              list[i].prev = nr;
++              list[i].next = list[nr].next;
++              list[(unsigned)list[nr].next].prev = i;
++              list[nr].next = i;
++      }
++}
++
++/* Very simple allocator for agp memory, working on a static range
++ * already mapped into each client's address space.
++ */
++
++static struct mem_block *split_block(struct mem_block *p, int start, int size,
++                                   struct drm_file *file_priv)
++{
++      /* Maybe cut off the start of an existing block */
++      if (start > p->start) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start;
++              newblock->size = p->size - (start - p->start);
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size -= newblock->size;
++              p = newblock;
++      }
++
++      /* Maybe cut off the end of an existing block */
++      if (size < p->size) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start + size;
++              newblock->size = p->size - size;
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size = size;
++      }
++
++      out:
++      /* Our block is in the middle */
++      p->file_priv = file_priv;
++      return p;
++}
++
++static struct mem_block *alloc_block(struct mem_block *heap, int size,
++                                   int align2, struct drm_file *file_priv)
++{
++      struct mem_block *p;
++      int mask = (1 << align2) - 1;
++
++      for (p = heap->next; p != heap; p = p->next) {
++              int start = (p->start + mask) & ~mask;
++              if (p->file_priv == NULL && start + size <= p->start + p->size)
++                      return split_block(p, start, size, file_priv);
++      }
++
++      return NULL;
++}
++
++static struct mem_block *find_block(struct mem_block *heap, int start)
++{
++      struct mem_block *p;
++
++      for (p = heap->next; p != heap; p = p->next)
++              if (p->start == start)
++                      return p;
++
++      return NULL;
++}
++
++static void free_block(struct mem_block *p)
++{
++      p->file_priv = NULL;
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      if (p->next->file_priv == NULL) {
++              struct mem_block *q = p->next;
++              p->size += q->size;
++              p->next = q->next;
++              p->next->prev = p;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
++      }
++
++      if (p->prev->file_priv == NULL) {
++              struct mem_block *q = p->prev;
++              q->size += p->size;
++              q->next = p->next;
++              q->next->prev = q;
++              drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
++      }
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++static int init_heap(struct mem_block **heap, int start, int size)
++{
++      struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
++
++      if (!blocks)
++              return -ENOMEM;
++
++      *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
++      if (!*heap) {
++              drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
++              return -ENOMEM;
++      }
++
++      blocks->start = start;
++      blocks->size = size;
++      blocks->file_priv = NULL;
++      blocks->next = blocks->prev = *heap;
++
++      memset(*heap, 0, sizeof(**heap));
++      (*heap)->file_priv = (struct drm_file *) - 1;
++      (*heap)->next = (*heap)->prev = blocks;
++      return 0;
++}
++
++/* Free all blocks associated with the releasing file.
++ */
++void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
++                    struct mem_block *heap)
++{
++      struct mem_block *p;
++
++      if (!heap || !heap->next)
++              return;
++
++      for (p = heap->next; p != heap; p = p->next) {
++              if (p->file_priv == file_priv) {
++                      p->file_priv = NULL;
++                      mark_block(dev, p, 0);
++              }
++      }
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      for (p = heap->next; p != heap; p = p->next) {
++              while (p->file_priv == NULL && p->next->file_priv == NULL) {
++                      struct mem_block *q = p->next;
++                      p->size += q->size;
++                      p->next = q->next;
++                      p->next->prev = p;
++                      drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
++              }
++      }
++}
++
++/* Shutdown.
++ */
++void i915_mem_takedown(struct mem_block **heap)
++{
++      struct mem_block *p;
++
++      if (!*heap)
++              return;
++
++      for (p = (*heap)->next; p != *heap;) {
++              struct mem_block *q = p;
++              p = p->next;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
++      }
++
++      drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
++      *heap = NULL;
++}
++
++static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
++{
++      switch (region) {
++      case I915_MEM_REGION_AGP:
++              return &dev_priv->agp_heap;
++      default:
++              return NULL;
++      }
++}
++
++/* IOCTL HANDLERS */
++
++int i915_mem_alloc(struct drm_device *dev, void *data,
++                 struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_alloc_t *alloc = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, alloc->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      /* Make things easier on ourselves: all allocations at least
++       * 4k aligned.
++       */
++      if (alloc->alignment < 12)
++              alloc->alignment = 12;
++
++      block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
++
++      if (!block)
++              return -ENOMEM;
++
++      mark_block(dev, block, 1);
++
++      if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
++                           sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++int i915_mem_free(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_free_t *memfree = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, memfree->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      block = find_block(*heap, memfree->region_offset);
++      if (!block)
++              return -EFAULT;
++
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      mark_block(dev, block, 0);
++      free_block(block);
++      return 0;
++}
++
++int i915_mem_init_heap(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_init_heap_t *initheap = data;
++      struct mem_block **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, initheap->region);
++      if (!heap)
++              return -EFAULT;
++
++      if (*heap) {
++              DRM_ERROR("heap already initialized?");
++              return -EFAULT;
++      }
++
++      return init_heap(heap, initheap->start, initheap->size);
++}
++
++int i915_mem_destroy_heap( struct drm_device *dev, void *data,
++                         struct drm_file *file_priv )
++{
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      drm_i915_mem_destroy_heap_t *destroyheap = data;
++      struct mem_block **heap;
++
++      if ( !dev_priv ) {
++              DRM_ERROR( "called with no initialization\n" );
++              return -EINVAL;
++      }
++
++      heap = get_heap( dev_priv, destroyheap->region );
++      if (!heap) {
++              DRM_ERROR("get_heap failed");
++              return -EFAULT;
++      }
++
++      if (!*heap) {
++              DRM_ERROR("heap not initialized?");
++              return -EFAULT;
++      }
++
++      i915_mem_takedown( heap );
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_opregion.c git-nokia/drivers/gpu/drm-tungsten/i915_opregion.c
+--- git/drivers/gpu/drm-tungsten/i915_opregion.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_opregion.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,389 @@
++/*
++ *
++ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
++ * Copyright 2008 Red Hat <mjg@redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/acpi.h>
++
++#include "drmP.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
++#define PCI_ASLE 0xe4
++#define PCI_ASLS 0xfc
++
++#define OPREGION_SZ            (8*1024)
++#define OPREGION_HEADER_OFFSET 0
++#define OPREGION_ACPI_OFFSET   0x100
++#define OPREGION_SWSCI_OFFSET  0x200
++#define OPREGION_ASLE_OFFSET   0x300
++#define OPREGION_VBT_OFFSET    0x1000
++
++#define OPREGION_SIGNATURE "IntelGraphicsMem"
++#define MBOX_ACPI      (1<<0)
++#define MBOX_SWSCI     (1<<1)
++#define MBOX_ASLE      (1<<2)
++
++/* _DOD id definitions */
++#define OUTPUT_CONNECTOR_MSK   0xf000
++#define OUTPUT_CONNECTOR_OFFSET        12
++
++#define OUTPUT_PORT_MSK                0x00f0
++#define OUTPUT_PORT_OFFSET     4
++  #define OUTPUT_PORT_ANALOG   0
++  #define OUTPUT_PORT_LVDS     1
++  #define OUTPUT_PORT_SDVOB    2
++  #define OUTPUT_PORT_SDVOC    3
++  #define OUTPUT_PORT_TV       4
++
++#define OUTPUT_DISPLAY_MSK     0x0f00
++#define OUTPUT_DISPLAY_OFFSET  8
++  #define OUTPUT_DISPLAY_OTHER         0
++  #define OUTPUT_DISPLAY_VGA           1
++  #define OUTPUT_DISPLAY_TV            2
++  #define OUTPUT_DISPLAY_DIGI          3
++  #define OUTPUT_DISPLAY_FLAT_PANEL    4
++
++/* predefined id for integrated LVDS and VGA connector */
++#define OUTPUT_INT_LVDS        0x00000110
++#define OUTPUT_INT_VGA 0x80000100
++
++struct opregion_header {
++       u8 signature[16];
++       u32 size;
++       u32 opregion_ver;
++       u8 bios_ver[32];
++       u8 vbios_ver[16];
++       u8 driver_ver[16];
++       u32 mboxes;
++       u8 reserved[164];
++} __attribute__((packed));
++
++/* OpRegion mailbox #1: public ACPI methods */
++struct opregion_acpi {
++       u32 drdy;       /* driver readiness */
++       u32 csts;       /* notification status */
++       u32 cevt;       /* current event */
++       u8 rsvd1[20];
++       u32 didl[8];    /* supported display devices ID list */
++       u32 cpdl[8];    /* currently presented display list */
++       u32 cadl[8];    /* currently active display list */
++       u32 nadl[8];    /* next active devices list */
++       u32 aslp;       /* ASL sleep time-out */
++       u32 tidx;       /* toggle table index */
++       u32 chpd;       /* current hotplug enable indicator */
++       u32 clid;       /* current lid state*/
++       u32 cdck;       /* current docking state */
++       u32 sxsw;       /* Sx state resume */
++       u32 evts;       /* ASL supported events */
++       u32 cnot;       /* current OS notification */
++       u32 nrdy;       /* driver status */
++       u8 rsvd2[60];
++} __attribute__((packed));
++
++/* OpRegion mailbox #2: SWSCI */
++struct opregion_swsci {
++       u32 scic;       /* SWSCI command|status|data */
++       u32 parm;       /* command parameters */
++       u32 dslp;       /* driver sleep time-out */
++       u8 rsvd[244];
++} __attribute__((packed));
++
++/* OpRegion mailbox #3: ASLE */
++struct opregion_asle {
++       u32 ardy;       /* driver readiness */
++       u32 aslc;       /* ASLE interrupt command */
++       u32 tche;       /* technology enabled indicator */
++       u32 alsi;       /* current ALS illuminance reading */
++       u32 bclp;       /* backlight brightness to set */
++       u32 pfit;       /* panel fitting state */
++       u32 cblv;       /* current brightness level */
++       u16 bclm[20];   /* backlight level duty cycle mapping table */
++       u32 cpfm;       /* current panel fitting mode */
++       u32 epfm;       /* enabled panel fitting modes */
++       u8 plut[74];    /* panel LUT and identifier */
++       u32 pfmb;       /* PWM freq and min brightness */
++       u8 rsvd[102];
++} __attribute__((packed));
++
++/* ASLE irq request bits */
++#define ASLE_SET_ALS_ILLUM     (1 << 0)
++#define ASLE_SET_BACKLIGHT     (1 << 1)
++#define ASLE_SET_PFIT          (1 << 2)
++#define ASLE_SET_PWM_FREQ      (1 << 3)
++#define ASLE_REQ_MSK           0xf
++
++/* response bits of ASLE irq request */
++#define ASLE_ALS_ILLUM_FAIL    (2<<10)
++#define ASLE_BACKLIGHT_FAIL    (2<<12)
++#define ASLE_PFIT_FAIL         (2<<14)
++#define ASLE_PWM_FREQ_FAIL     (2<<16)
++
++/* ASLE backlight brightness to set */
++#define ASLE_BCLP_VALID                (1<<31)
++#define ASLE_BCLP_MSK          (~(1<<31))
++
++/* ASLE panel fitting request */
++#define ASLE_PFIT_VALID         (1<<31)
++#define ASLE_PFIT_CENTER (1<<0)
++#define ASLE_PFIT_STRETCH_TEXT (1<<1)
++#define ASLE_PFIT_STRETCH_GFX (1<<2)
++
++/* PWM frequency and minimum brightness */
++#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
++#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
++#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
++#define ASLE_PFMB_PWM_VALID (1<<31)
++
++#define ASLE_CBLV_VALID         (1<<31)
++
++static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct opregion_asle *asle = dev_priv->opregion.asle;
++      u32 blc_pwm_ctl;
++      
++      if (!(bclp & ASLE_BCLP_VALID))
++              return ASLE_BACKLIGHT_FAIL;
++      
++      bclp &= ASLE_BCLP_MSK;
++      if (bclp < 0 || bclp > 255)
++              return ASLE_BACKLIGHT_FAIL;
++      
++      blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++      blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
++      I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1));
++      asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++      
++      return 0;
++}
++
++static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
++{
++      return 0;
++}
++
++static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      if (pfmb & ASLE_PFMB_PWM_VALID) {
++              u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
++              u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
++              blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
++              pwm = pwm >> 9;
++              // FIXME - what do we do with the PWM?
++      }
++      return 0;
++}
++
++static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
++{
++      if (!(pfit & ASLE_PFIT_VALID))
++              return ASLE_PFIT_FAIL;
++      return 0;
++}
++
++void opregion_asle_intr(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct opregion_asle *asle = dev_priv->opregion.asle;
++      u32 asle_stat = 0;
++      u32 asle_req;
++
++      if (!asle)
++              return;
++
++      asle_req = asle->aslc & ASLE_REQ_MSK;
++      
++      if (!asle_req) {
++              DRM_DEBUG("non asle set request??\n");
++              return;
++      }
++
++      if (asle_req & ASLE_SET_ALS_ILLUM)
++              asle_stat |= asle_set_als_illum(dev, asle->alsi);
++      
++      if (asle_req & ASLE_SET_BACKLIGHT)
++              asle_stat |= asle_set_backlight(dev, asle->bclp);
++      
++      if (asle_req & ASLE_SET_PFIT)
++              asle_stat |= asle_set_pfit(dev, asle->pfit);
++      
++      if (asle_req & ASLE_SET_PWM_FREQ)
++              asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
++      
++      asle->aslc = asle_stat;
++}
++
++#define ASLE_ALS_EN    (1<<0)
++#define ASLE_BLC_EN    (1<<1)
++#define ASLE_PFIT_EN   (1<<2)
++#define ASLE_PFMB_EN   (1<<3)
++
++void opregion_enable_asle(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct opregion_asle *asle = dev_priv->opregion.asle;
++
++      if (asle) {
++              if (IS_MOBILE(dev)) {
++                      u32 pipeb_stats = I915_READ(PIPEBSTAT);
++                      /* Some hardware uses the legacy backlight controller
++                         to signal interrupts, so we need to set up pipe B
++                         to generate an IRQ on writes */
++                      pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
++                      I915_WRITE(PIPEBSTAT, pipeb_stats);
++
++                      dev_priv->irq_mask_reg &=
++                              ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
++              }
++
++              dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
++
++              asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 
++                      ASLE_PFMB_EN;
++              asle->ardy = 1;
++      }
++}
++
++#define ACPI_EV_DISPLAY_SWITCH (1<<0)
++#define ACPI_EV_LID            (1<<1)
++#define ACPI_EV_DOCK           (1<<2)
++
++static struct intel_opregion *system_opregion;
++
++int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
++                               void *data)
++{
++      /* The only video events relevant to opregion are 0x80. These indicate
++         either a docking event, lid switch or display switch request. In
++         Linux, these are handled by the dock, button and video drivers.
++         We might want to fix the video driver to be opregion-aware in
++         future, but right now we just indicate to the firmware that the
++         request has been handled */
++      
++      struct opregion_acpi *acpi;
++
++      if (!system_opregion)
++              return NOTIFY_DONE;
++      
++      acpi = system_opregion->acpi;
++      acpi->csts = 0;
++
++      return NOTIFY_OK;
++}
++
++static struct notifier_block intel_opregion_notifier = {
++      .notifier_call = intel_opregion_video_event,
++};
++
++int intel_opregion_init(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct intel_opregion *opregion = &dev_priv->opregion;
++      void *base;
++      u32 asls, mboxes;
++      int err = 0;
++      
++      pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
++      DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
++      if (asls == 0) {
++              DRM_DEBUG("ACPI OpRegion not supported!\n");
++              return -ENOTSUPP;
++      }
++      
++      base = ioremap(asls, OPREGION_SZ);
++      if (!base)
++              return -ENOMEM;
++      
++      opregion->header = base;
++      if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
++              DRM_DEBUG("opregion signature mismatch\n");
++              err = -EINVAL;
++              goto err_out;
++      }
++      
++      mboxes = opregion->header->mboxes;
++      if (mboxes & MBOX_ACPI) {
++              DRM_DEBUG("Public ACPI methods supported\n");
++              opregion->acpi = base + OPREGION_ACPI_OFFSET;
++      } else {
++              DRM_DEBUG("Public ACPI methods not supported\n");
++              err = -ENOTSUPP;
++              goto err_out;
++      }
++      opregion->enabled = 1;
++      
++      if (mboxes & MBOX_SWSCI) {
++              DRM_DEBUG("SWSCI supported\n");
++              opregion->swsci = base + OPREGION_SWSCI_OFFSET;
++      }
++      if (mboxes & MBOX_ASLE) {
++              DRM_DEBUG("ASLE supported\n");
++              opregion->asle = base + OPREGION_ASLE_OFFSET;
++      }
++      
++      /* Notify BIOS we are ready to handle ACPI video ext notifs.
++       * Right now, all the events are handled by the ACPI video module.
++       * We don't actually need to do anything with them. */
++      opregion->acpi->csts = 0;
++      opregion->acpi->drdy = 1;
++
++      system_opregion = opregion;
++      register_acpi_notifier(&intel_opregion_notifier);
++      
++      return 0;
++      
++err_out:
++      iounmap(opregion->header);
++      opregion->header = NULL;
++      return err;
++}
++
++void intel_opregion_free(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      struct intel_opregion *opregion = &dev_priv->opregion;
++      
++      if (!opregion->enabled)
++              return;
++      
++      opregion->acpi->drdy = 0;
++      
++      system_opregion = NULL;
++      unregister_acpi_notifier(&intel_opregion_notifier);
++      
++      /* just clear all opregion memory pointers now */
++      iounmap(opregion->header);
++      opregion->header = NULL;
++      opregion->acpi = NULL;
++      opregion->swsci = NULL;
++      opregion->asle = NULL;
++      
++      opregion->enabled = 0;
++}
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/i915_suspend.c git-nokia/drivers/gpu/drm-tungsten/i915_suspend.c
+--- git/drivers/gpu/drm-tungsten/i915_suspend.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/i915_suspend.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,520 @@
++/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
++ */
++/*
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      if (pipe == PIPE_A)
++              return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
++      else
++              return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
++}
++
++static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
++      u32 *array;
++      int i;
++
++      if (!i915_pipe_enabled(dev, pipe))
++              return;
++
++      if (pipe == PIPE_A)
++              array = dev_priv->save_palette_a;
++      else
++              array = dev_priv->save_palette_b;
++
++      for(i = 0; i < 256; i++)
++              array[i] = I915_READ(reg + (i << 2));
++}
++
++static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
++      u32 *array;
++      int i;
++
++      if (!i915_pipe_enabled(dev, pipe))
++              return;
++
++      if (pipe == PIPE_A)
++              array = dev_priv->save_palette_a;
++      else
++              array = dev_priv->save_palette_b;
++
++      for(i = 0; i < 256; i++)
++              I915_WRITE(reg + (i << 2), array[i]);
++}
++
++static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_WRITE8(index_port, reg);
++      return I915_READ8(data_port);
++}
++
++static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_READ8(st01);
++      I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
++      return I915_READ8(VGA_AR_DATA_READ);
++}
++
++static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_READ8(st01);
++      I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
++      I915_WRITE8(VGA_AR_DATA_WRITE, val);
++}
++
++static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
++      I915_WRITE8(index_port, reg);
++      I915_WRITE8(data_port, val);
++}
++
++static void i915_save_vga(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++      u16 cr_index, cr_data, st01;
++
++      /* VGA color palette registers */
++      dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
++      /* DACCRX automatically increments during read */
++      I915_WRITE8(VGA_DACRX, 0);
++      /* Read 3 bytes of color data from each index */
++      for (i = 0; i < 256 * 3; i++)
++              dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
++
++      /* MSR bits */
++      dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
++      if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
++              cr_index = VGA_CR_INDEX_CGA;
++              cr_data = VGA_CR_DATA_CGA;
++              st01 = VGA_ST01_CGA;
++      } else {
++              cr_index = VGA_CR_INDEX_MDA;
++              cr_data = VGA_CR_DATA_MDA;
++              st01 = VGA_ST01_MDA;
++      }
++
++      /* CRT controller regs */
++      i915_write_indexed(dev, cr_index, cr_data, 0x11,
++                         i915_read_indexed(dev, cr_index, cr_data, 0x11) &
++                         (~0x80));
++      for (i = 0; i <= 0x24; i++)
++              dev_priv->saveCR[i] =
++                      i915_read_indexed(dev, cr_index, cr_data, i);
++      /* Make sure we don't turn off CR group 0 writes */
++      dev_priv->saveCR[0x11] &= ~0x80;
++
++      /* Attribute controller registers */
++      I915_READ8(st01);
++      dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
++      for (i = 0; i <= 0x14; i++)
++              dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
++      I915_READ8(st01);
++      I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
++      I915_READ8(st01);
++
++      /* Graphics controller registers */
++      for (i = 0; i < 9; i++)
++              dev_priv->saveGR[i] =
++                      i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
++
++      dev_priv->saveGR[0x10] =
++              i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
++      dev_priv->saveGR[0x11] =
++              i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
++      dev_priv->saveGR[0x18] =
++              i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
++
++      /* Sequencer registers */
++      for (i = 0; i < 8; i++)
++              dev_priv->saveSR[i] =
++                      i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
++}
++
++static void i915_restore_vga(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++      u16 cr_index, cr_data, st01;
++
++      /* MSR bits */
++      I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
++      if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
++              cr_index = VGA_CR_INDEX_CGA;
++              cr_data = VGA_CR_DATA_CGA;
++              st01 = VGA_ST01_CGA;
++      } else {
++              cr_index = VGA_CR_INDEX_MDA;
++              cr_data = VGA_CR_DATA_MDA;
++              st01 = VGA_ST01_MDA;
++      }
++
++      /* Sequencer registers, don't write SR07 */
++      for (i = 0; i < 7; i++)
++              i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
++                                 dev_priv->saveSR[i]);
++
++      /* CRT controller regs */
++      /* Enable CR group 0 writes */
++      i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
++      for (i = 0; i <= 0x24; i++)
++              i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
++
++      /* Graphics controller regs */
++      for (i = 0; i < 9; i++)
++              i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
++                                 dev_priv->saveGR[i]);
++
++      i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
++                         dev_priv->saveGR[0x10]);
++      i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
++                         dev_priv->saveGR[0x11]);
++      i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
++                         dev_priv->saveGR[0x18]);
++
++      /* Attribute controller registers */
++      I915_READ8(st01); /* switch back to index mode */
++      for (i = 0; i <= 0x14; i++)
++              i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
++      I915_READ8(st01); /* switch back to index mode */
++      I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
++      I915_READ8(st01);
++
++      /* VGA color palette registers */
++      I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
++      /* DACCRX automatically increments during read */
++      I915_WRITE8(VGA_DACWX, 0);
++      /* Read 3 bytes of color data from each index */
++      for (i = 0; i < 256 * 3; i++)
++              I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
++
++}
++
++int i915_save_state(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++
++#if defined(__FreeBSD__)
++      dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1);
++#else
++      pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
++#endif
++
++      /* Display arbitration control */
++      dev_priv->saveDSPARB = I915_READ(DSPARB);
++
++      /* Pipe & plane A info */
++      dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
++      dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
++      dev_priv->saveFPA0 = I915_READ(FPA0);
++      dev_priv->saveFPA1 = I915_READ(FPA1);
++      dev_priv->saveDPLL_A = I915_READ(DPLL_A);
++      if (IS_I965G(dev))
++              dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
++      dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
++      dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
++      dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
++      dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
++      dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
++      dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
++      dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
++
++      dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
++      dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
++      dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
++      dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
++      dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
++      if (IS_I965G(dev)) {
++              dev_priv->saveDSPASURF = I915_READ(DSPASURF);
++              dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
++      }
++      i915_save_palette(dev, PIPE_A);
++      dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
++
++      /* Pipe & plane B info */
++      dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
++      dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
++      dev_priv->saveFPB0 = I915_READ(FPB0);
++      dev_priv->saveFPB1 = I915_READ(FPB1);
++      dev_priv->saveDPLL_B = I915_READ(DPLL_B);
++      if (IS_I965G(dev))
++              dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
++      dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
++      dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
++      dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
++      dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
++      dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
++      dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
++      dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
++
++      dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
++      dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
++      dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
++      dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
++      dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
++      if (IS_I965GM(dev) || IS_GM45(dev)) {
++              dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
++              dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
++      }
++      i915_save_palette(dev, PIPE_B);
++      dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
++
++      /* CRT state */
++      dev_priv->saveADPA = I915_READ(ADPA);
++
++      /* LVDS state */
++      dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
++      dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
++      dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
++      if (IS_I965G(dev))
++              dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
++      if (IS_MOBILE(dev) && !IS_I830(dev))
++              dev_priv->saveLVDS = I915_READ(LVDS);
++      if (!IS_I830(dev) && !IS_845G(dev))
++              dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
++      dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
++      dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
++      dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
++
++      /* FIXME: save TV & SDVO state */
++
++      /* FBC state */
++      dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
++      dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
++      dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
++      dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
++
++      /* Interrupt state */
++      dev_priv->saveIIR = I915_READ(IIR);
++      dev_priv->saveIER = I915_READ(IER);
++      dev_priv->saveIMR = I915_READ(IMR);
++
++      /* VGA state */
++      dev_priv->saveVGA0 = I915_READ(VGA0);
++      dev_priv->saveVGA1 = I915_READ(VGA1);
++      dev_priv->saveVGA_PD = I915_READ(VGA_PD);
++      dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
++
++      /* Clock gating state */
++      dev_priv->saveD_STATE = I915_READ(D_STATE);
++      dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
++
++      /* Cache mode state */
++      dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
++
++      /* Memory Arbitration state */
++      dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
++
++      /* Scratch space */
++      for (i = 0; i < 16; i++) {
++              dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
++              dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
++      }
++      for (i = 0; i < 3; i++)
++              dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
++
++      i915_save_vga(dev);
++
++      return 0;
++}
++
++int i915_restore_state(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++      int i;
++
++#if defined(__FreeBSD__)
++      pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
++#else
++      pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
++#endif
++
++      I915_WRITE(DSPARB, dev_priv->saveDSPARB);
++
++      /* Pipe & plane A info */
++      /* Prime the clock */
++      if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
++              I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
++                         ~DPLL_VCO_ENABLE);
++              DRM_UDELAY(150);
++      }
++      I915_WRITE(FPA0, dev_priv->saveFPA0);
++      I915_WRITE(FPA1, dev_priv->saveFPA1);
++      /* Actually enable it */
++      I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
++      DRM_UDELAY(150);
++      if (IS_I965G(dev))
++              I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
++      DRM_UDELAY(150);
++
++      /* Restore mode */
++      I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
++      I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
++      I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
++      I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
++      I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
++      I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
++      I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
++
++      /* Restore plane info */
++      I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
++      I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
++      I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
++      I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
++      I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
++      if (IS_I965G(dev)) {
++              I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
++              I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
++      }
++
++      I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
++
++      i915_restore_palette(dev, PIPE_A);
++      /* Enable the plane */
++      I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
++      I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
++
++      /* Pipe & plane B info */
++      if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
++              I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
++                         ~DPLL_VCO_ENABLE);
++              DRM_UDELAY(150);
++      }
++      I915_WRITE(FPB0, dev_priv->saveFPB0);
++      I915_WRITE(FPB1, dev_priv->saveFPB1);
++      /* Actually enable it */
++      I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
++      DRM_UDELAY(150);
++      if (IS_I965G(dev))
++              I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
++      DRM_UDELAY(150);
++
++      /* Restore mode */
++      I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
++      I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
++      I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
++      I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
++      I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
++      I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
++      I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
++
++      /* Restore plane info */
++      I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
++      I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
++      I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
++      I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
++      I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
++      if (IS_I965G(dev)) {
++              I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
++              I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
++      }
++
++      I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
++
++      i915_restore_palette(dev, PIPE_B);
++      /* Enable the plane */
++      I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
++      I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
++
++      /* CRT state */
++      I915_WRITE(ADPA, dev_priv->saveADPA);
++
++      /* LVDS state */
++      if (IS_I965G(dev))
++              I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
++      if (IS_MOBILE(dev) && !IS_I830(dev))
++              I915_WRITE(LVDS, dev_priv->saveLVDS);
++      if (!IS_I830(dev) && !IS_845G(dev))
++              I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
++
++      I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
++      I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
++      I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
++      I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
++      I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
++      I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
++
++      /* FIXME: restore TV & SDVO state */
++
++      /* FBC info */
++      I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
++      I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
++      I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
++      I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
++
++      /* VGA state */
++      I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
++      I915_WRITE(VGA0, dev_priv->saveVGA0);
++      I915_WRITE(VGA1, dev_priv->saveVGA1);
++      I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
++      DRM_UDELAY(150);
++
++      /* Clock gating state */
++      I915_WRITE (D_STATE, dev_priv->saveD_STATE);
++      I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
++
++      /* Cache mode state */
++      I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
++
++      /* Memory arbitration state */
++      I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
++
++      for (i = 0; i < 16; i++) {
++              I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
++              I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
++      }
++      for (i = 0; i < 3; i++)
++              I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
++
++      i915_restore_vga(dev);
++
++      return 0;
++}
++
+diff -Nurd git/drivers/gpu/drm-tungsten/imagine_drv.c git-nokia/drivers/gpu/drm-tungsten/imagine_drv.c
+--- git/drivers/gpu/drm-tungsten/imagine_drv.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/imagine_drv.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,85 @@
++/*
++ * Copyright 2005 Adam Jackson.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* derived from tdfx_drv.c */
++
++#include "drmP.h"
++#include "imagine_drv.h"
++
++#include "drm_pciids.h"
++
++static struct drm_driver driver;
++
++static struct pci_device_id pciidlist[] = {
++    imagine_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++    return drm_get_dev(pdev, ent, &driver);
++}
++
++static struct drm_driver driver = {
++    .driver_features = DRIVER_USE_MTRR,
++    .reclaim_buffers = drm_core_reclaim_buffers,
++    .get_map_ofs = drm_core_get_map_ofs,
++    .get_reg_ofs = drm_core_get_reg_ofs,
++    .fops = {
++        .owner = THIS_MODULE,
++        .open = drm_open,
++        .release = drm_release,
++        .ioctl = drm_ioctl,
++        .mmap = drm_mmap,
++        .poll = drm_poll,
++        .fasync = drm_fasync,
++    },
++    .pci_driver = {
++        .name = DRIVER_NAME,
++        .id_table = pciidlist,
++        .probe = probe,
++        .remove = __devexit_p(drm_cleanup_pci),
++    },
++
++    .name = DRIVER_NAME,
++    .desc = DRIVER_DESC,
++    .date = DRIVER_DATE,
++    .major = DRIVER_MAJOR,
++    .minor = DRIVER_MINOR,
++    .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int __init imagine_init(void)
++{
++    return drm_init(&driver, pciidlist);
++}
++
++static void __exit imagine_exit(void)
++{
++    drm_exit(&driver);
++}
++
++module_init(imagine_init);
++module_exit(imagine_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/Kconfig git-nokia/drivers/gpu/drm-tungsten/Kconfig
+--- git/drivers/gpu/drm-tungsten/Kconfig       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/Kconfig 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++#
++# DRM device configuration from Tungsten Graphics
++#
++# This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++# The driver is the Tungsten alternative of the original DRM driver.
++#
++
++menuconfig DRM_TUNGSTEN
++      tristate "Direct Rendering Manager (Tungsten - XFree86 4.1.0 and higher DRI support)"
++      help
++        Kernel-level support for the Direct Rendering Infrastructure (DRI)
++        introduced in XFree86 4.0. If you say Y here, you need to select
++        the module that's right for your graphics card from the list below.
++        These modules provide support for synchronization, security, and
++        DMA transfers. Please see <http://dri.sourceforge.net/> for more
++        details.  You should also select and configure AGP
++        (/dev/agpgart) support.
++
++config DRM_TUNGSTEN_PVR2D
++      tristate "PVR2D kernel helper"
++      depends on DRM_TUNGSTEN && PVR
++      help
++        Choose this option if you want to give DRI access to your card
++        handled by the Imagination PowerVR framework. If M is selected,
++        the module will be called pvr2d.
++
++if DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
++
++config DRM_TUNGSTEN_TDFX
++      tristate "3dfx Banshee/Voodoo3+"
++      help
++        Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
++        graphics card.  If M is selected, the module will be called tdfx.
++
++config DRM_TUNGSTEN_R128
++      tristate "ATI Rage 128"
++      help
++        Choose this option if you have an ATI Rage 128 graphics card.  If M
++        is selected, the module will be called r128.  AGP support for
++        this card is strongly suggested (unless you have a PCI version).
++
++config DRM_TUNGSTEN_RADEON
++      tristate "ATI Radeon"
++      help
++        Choose this option if you have an ATI Radeon graphics card.  There
++        are both PCI and AGP versions.  You don't need to choose this to
++        run the Radeon in plain VGA mode.
++
++        If M is selected, the module will be called radeon.
++
++config DRM_TUNGSTEN_I810
++      tristate "Intel I810"
++      depends on AGP && AGP_INTEL
++      help
++        Choose this option if you have an Intel I810 graphics card.  If M is
++        selected, the module will be called i810.  AGP support is required
++        for this driver to work.
++
++config DRM_TUNGSTEN_I915
++      tristate "i915 driver"
++      depends on AGP && AGP_INTEL
++      help
++        Choose this option if you have a system that has Intel 830M, 845G,
++        852GM, 855GM 865G or 915G integrated graphics.  If M is selected, the
++        module will be called i915.  AGP support is required for this driver
++        to work. This driver is used by the Intel driver in X.org 6.8 and
++        XFree86 4.4 and above. If unsure, build this and i830 as modules and
++        the X server will load the correct one.
++
++config DRM_TUNGSTEN_MGA
++      tristate "Matrox g200/g400"
++      help
++        Choose this option if you have a Matrox G200, G400 or G450 graphics
++        card.  If M is selected, the module will be called mga.  AGP
++        support is required for this driver to work.
++
++config DRM_TUNGSTEN_SIS
++      tristate "SiS video cards"
++      depends on AGP
++      help
++        Choose this option if you have a SiS 630 or compatible video
++          chipset. If M is selected the module will be called sis. AGP
++          support is required for this driver to work.
++
++config DRM_TUNGSTEN_VIA
++      tristate "Via unichrome video cards"
++      help
++        Choose this option if you have a Via unichrome or compatible video
++        chipset. If M is selected the module will be called via.
++
++config DRM_TUNGSTEN_SAVAGE
++      tristate "Savage video cards"
++      help
++        Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
++        chipset. If M is selected the module will be called savage.
++
++config DRM_TUNGSTEN_FFB
++      tristate "Creator/Creator3D direct rendering"
++      help
++        Choose this option to include the Creator/Creator3D direct rendering
++        driver. If M is selected the module will be called ffb.
++
++config DRM_TUNGSTEN_MACH64
++      tristate "MACH64 Rage Pro video card"
++      help
++        Choose this option if you have a Mach64 Rage Pro chipset.
++        If M is selected the module will be called mach64.
++
++config DRM_TUNGSTEN_NV
++      tristate "Nvidia video card (NV driver)"
++      help
++        Choose this option if you have a Nvidia chipset and want to use the
++        original nv driver. If M is selected the module will be called nv.
++
++config DRM_TUNGSTEN_NOUVEAU
++      tristate "Nvidia video card (Nouveau driver)"
++      help
++        Choose this option if you have a Nvidia chipset and want to use the
++        nouveau driver. If M is selected the module will be called nouveau.
++
++config DRM_TUNGSTEN_XGI
++      tristate "XGI video card"
++      help
++        Choose this option if you have a XGI chipset. If M is selected the
++        module will be called xgi.
++
++endif # DRM_TUNGSTEN && (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
++
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_dma.c git-nokia/drivers/gpu/drm-tungsten/mach64_dma.c
+--- git/drivers/gpu/drm-tungsten/mach64_dma.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_dma.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1778 @@
++/* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
++/**
++ * \file mach64_dma.c
++ * DMA support for mach64 (Rage Pro) driver
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ * \author Frank C. Earl <fearl@airmail.net>
++ * \author Leif Delgass <ldelgass@retinalburn.net>
++ * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
++ */
++
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002 Frank C. Earl
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++/*******************************************************************/
++/** \name Engine, FIFO control */
++/*@{*/
++
++/**
++ * Waits for free entries in the FIFO.
++ *
++ * \note Most writes to Mach64 registers are automatically routed through
++ * command FIFO which is 16 entry deep. Prior to writing to any draw engine
++ * register one has to ensure that enough FIFO entries are available by calling
++ * this function.  Failure to do so may cause the engine to lock.
++ *
++ * \param dev_priv pointer to device private data structure.
++ * \param entries number of free entries in the FIFO to wait for.
++ *
++ * \returns zero on success, or -EBUSY if the timeout (specificed by
++ * drm_mach64_private::usec_timeout) occurs.
++ */
++int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)
++{
++      int slots = 0, i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
++              if (slots <= (0x8000 >> entries))
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++      DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);
++      return -EBUSY;
++}
++
++/**
++ * Wait for the draw engine to be idle.
++ */
++int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)
++{
++      int i, ret;
++
++      ret = mach64_do_wait_for_fifo(dev_priv, 16);
++      if (ret < 0)
++              return ret;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++      DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
++      mach64_dump_ring_info(dev_priv);
++      return -EBUSY;
++}
++
++/**
++ * Wait for free entries in the ring buffer.
++ *
++ * The Mach64 bus master can be configured to act as a virtual FIFO, using a
++ * circular buffer (commonly referred as "ring buffer" in other drivers) with
++ * pointers to engine commands. This allows the CPU to do other things while
++ * the graphics engine is busy, i.e., DMA mode.
++ *
++ * This function should be called before writing new entries to the ring
++ * buffer.
++ *
++ * \param dev_priv pointer to device private data structure.
++ * \param n number of free entries in the ring buffer to wait for.
++ *
++ * \returns zero on success, or -EBUSY if the timeout (specificed by
++ * drm_mach64_private_t::usec_timeout) occurs.
++ *
++ * \sa mach64_dump_ring_info()
++ */
++int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              mach64_update_ring_snapshot(dev_priv);
++              if (ring->space >= n) {
++                      if (i > 0)
++                              DRM_DEBUG("%d usecs\n", i);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++      /* FIXME: This is being ignored... */
++      DRM_ERROR("failed!\n");
++      mach64_dump_ring_info(dev_priv);
++      return -EBUSY;
++}
++
++/**
++ * Wait until all DMA requests have been processed...
++ *
++ * \sa mach64_wait_ring()
++ */
++static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      u32 head;
++      int i;
++
++      head = ring->head;
++      i = 0;
++      while (i < dev_priv->usec_timeout) {
++              mach64_update_ring_snapshot(dev_priv);
++              if (ring->head == ring->tail &&
++                  !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
++                      if (i > 0)
++                              DRM_DEBUG("%d usecs\n", i);
++                      return 0;
++              }
++              if (ring->head == head) {
++                      ++i;
++              } else {
++                      head = ring->head;
++                      i = 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++      DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
++      mach64_dump_ring_info(dev_priv);
++      return -EBUSY;
++}
++
++/**
++ * Reset the ring buffer descriptors.
++ *
++ * \sa mach64_do_engine_reset()
++ */
++static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++
++      mach64_do_release_used_buffers(dev_priv);
++      ring->head_addr = ring->start_addr;
++      ring->head = ring->tail = 0;
++      ring->space = ring->size;
++
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      dev_priv->ring_running = 0;
++}
++
++/**
++ * Ensure that all the queued commands will be processed.
++ */
++int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
++{
++      /* FIXME: It's not necessary to wait for idle when flushing
++       * we just need to ensure the ring will be completely processed
++       * in finite time without another ioctl
++       */
++      return mach64_ring_idle(dev_priv);
++}
++
++/**
++ * Stop all DMA activity.
++ */
++int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
++{
++      int ret;
++
++      /* wait for completion */
++      if ((ret = mach64_ring_idle(dev_priv)) < 0) {
++              DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
++                        MACH64_READ(MACH64_BM_GUI_TABLE),
++                        dev_priv->ring.tail);
++              return ret;
++      }
++
++      mach64_ring_stop(dev_priv);
++
++      /* clean up after pass */
++      mach64_do_release_used_buffers(dev_priv);
++      return 0;
++}
++
++/**
++ * Reset the engine.  This will stop the DMA if it is running.
++ */
++int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
++{
++      u32 tmp;
++
++      DRM_DEBUG("\n");
++
++      /* Kill off any outstanding DMA transfers.
++       */
++      tmp = MACH64_READ(MACH64_BUS_CNTL);
++      MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
++
++      /* Reset the GUI engine (high to low transition).
++       */
++      tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
++      MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
++      /* Enable the GUI engine
++       */
++      tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
++      MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
++
++      /* ensure engine is not locked up by clearing any FIFO or HOST errors
++       */
++      tmp = MACH64_READ(MACH64_BUS_CNTL);
++      MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
++
++      /* Once GUI engine is restored, disable bus mastering */
++      MACH64_WRITE(MACH64_SRC_CNTL, 0);
++
++      /* Reset descriptor ring */
++      mach64_ring_reset(dev_priv);
++
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name Debugging output */
++/*@{*/
++
++/**
++ * Dump engine registers values.
++ */
++void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
++{
++      DRM_INFO("\n");
++      if (!dev_priv->is_pci) {
++              DRM_INFO("           AGP_BASE = 0x%08x\n",
++                       MACH64_READ(MACH64_AGP_BASE));
++              DRM_INFO("           AGP_CNTL = 0x%08x\n",
++                       MACH64_READ(MACH64_AGP_CNTL));
++      }
++      DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_ALPHA_TST_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("         BM_COMMAND = 0x%08x\n",
++               MACH64_READ(MACH64_BM_COMMAND));
++      DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
++               MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
++      DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
++               MACH64_READ(MACH64_BM_GUI_TABLE));
++      DRM_INFO("          BM_STATUS = 0x%08x\n",
++               MACH64_READ(MACH64_BM_STATUS));
++      DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
++               MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
++      DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
++               MACH64_READ(MACH64_BM_SYSTEM_TABLE));
++      DRM_INFO("           BUS_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_BUS_CNTL));
++      DRM_INFO("\n");
++      /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
++      DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
++               MACH64_READ(MACH64_CLR_CMP_CLR));
++      DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_CLR_CMP_CNTL));
++      /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
++      DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_CHIP_ID));
++      DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_CNTL));
++      DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_STAT0));
++      DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_STAT1));
++      DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
++               MACH64_READ(MACH64_CONFIG_STAT2));
++      DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
++      DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
++      DRM_INFO("\n");
++      /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
++      /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
++      DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
++               MACH64_READ(MACH64_DP_BKGD_CLR));
++      DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
++               MACH64_READ(MACH64_DP_FRGD_CLR));
++      DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
++      DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
++               MACH64_READ(MACH64_DP_PIX_WIDTH));
++      DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
++      DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
++               MACH64_READ(MACH64_DP_WRITE_MASK));
++      DRM_INFO("         DSP_CONFIG = 0x%08x\n",
++               MACH64_READ(MACH64_DSP_CONFIG));
++      DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
++               MACH64_READ(MACH64_DSP_ON_OFF));
++      DRM_INFO("           DST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_DST_CNTL));
++      DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
++               MACH64_READ(MACH64_DST_OFF_PITCH));
++      DRM_INFO("\n");
++      /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
++      DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_EXT_MEM_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("          FIFO_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_FIFO_STAT));
++      DRM_INFO("\n");
++      DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_GEN_TEST_CNTL));
++      /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
++      DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
++      DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
++      DRM_INFO("           GUI_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_CNTL));
++      DRM_INFO("           GUI_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_STAT));
++      DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_TRAJ_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("          HOST_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_HOST_CNTL));
++      DRM_INFO("           HW_DEBUG = 0x%08x\n",
++               MACH64_READ(MACH64_HW_DEBUG));
++      DRM_INFO("\n");
++      DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
++               MACH64_READ(MACH64_MEM_ADDR_CONFIG));
++      DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_MEM_BUF_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("           PAT_REG0 = 0x%08x\n",
++               MACH64_READ(MACH64_PAT_REG0));
++      DRM_INFO("           PAT_REG1 = 0x%08x\n",
++               MACH64_READ(MACH64_PAT_REG1));
++      DRM_INFO("\n");
++      DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
++      DRM_INFO("           SC_RIGHT = 0x%08x\n",
++               MACH64_READ(MACH64_SC_RIGHT));
++      DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
++      DRM_INFO("          SC_BOTTOM = 0x%08x\n",
++               MACH64_READ(MACH64_SC_BOTTOM));
++      DRM_INFO("\n");
++      DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SCALE_3D_CNTL));
++      DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
++               MACH64_READ(MACH64_SCRATCH_REG0));
++      DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
++               MACH64_READ(MACH64_SCRATCH_REG1));
++      DRM_INFO("         SETUP_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SETUP_CNTL));
++      DRM_INFO("           SRC_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SRC_CNTL));
++      DRM_INFO("\n");
++      DRM_INFO("           TEX_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_TEX_CNTL));
++      DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
++               MACH64_READ(MACH64_TEX_SIZE_PITCH));
++      DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
++               MACH64_READ(MACH64_TIMER_CONFIG));
++      DRM_INFO("\n");
++      DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
++      DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
++               MACH64_READ(MACH64_Z_OFF_PITCH));
++      DRM_INFO("\n");
++}
++
++#define MACH64_DUMP_CONTEXT   3
++
++/**
++ * Used by mach64_dump_ring_info() to dump the contents of the current buffer
++ * pointed by the ring head.
++ */
++static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
++                               struct drm_buf *buf)
++{
++      u32 addr = GETBUFADDR(buf);
++      u32 used = buf->used >> 2;
++      u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
++      u32 *p = GETBUFPTR(buf);
++      int skipped = 0;
++
++      DRM_INFO("buffer contents:\n");
++
++      while (used) {
++              u32 reg, count;
++
++              reg = le32_to_cpu(*p++);
++              if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
++                  (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
++                   addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
++                  addr >=
++                  GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
++                      DRM_INFO("%08x:  0x%08x\n", addr, reg);
++              }
++              addr += 4;
++              used--;
++
++              count = (reg >> 16) + 1;
++              reg = reg & 0xffff;
++              reg = MMSELECT(reg);
++              while (count && used) {
++                      if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
++                          (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
++                           addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
++                          addr >=
++                          GETBUFADDR(buf) + buf->used -
++                          MACH64_DUMP_CONTEXT * 4) {
++                              DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
++                                       reg, le32_to_cpu(*p));
++                              skipped = 0;
++                      } else {
++                              if (!skipped) {
++                                      DRM_INFO("  ...\n");
++                                      skipped = 1;
++                              }
++                      }
++                      p++;
++                      addr += 4;
++                      used--;
++
++                      reg += 4;
++                      count--;
++              }
++      }
++
++      DRM_INFO("\n");
++}
++
++/**
++ * Dump the ring state and contents, including the contents of the buffer being
++ * processed by the graphics engine.
++ */
++void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      int i, skipped;
++
++      DRM_INFO("\n");
++
++      DRM_INFO("ring contents:\n");
++      DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
++               ring->head_addr, ring->head, ring->tail);
++
++      skipped = 0;
++      for (i = 0; i < ring->size / sizeof(u32); i += 4) {
++              if (i <= MACH64_DUMP_CONTEXT * 4 ||
++                  i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
++                  (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
++                   i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
++                  (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
++                   i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
++                      DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
++                               (u32)(ring->start_addr + i * sizeof(u32)),
++                               le32_to_cpu(((u32 *) ring->start)[i + 0]),
++                               le32_to_cpu(((u32 *) ring->start)[i + 1]),
++                               le32_to_cpu(((u32 *) ring->start)[i + 2]),
++                               le32_to_cpu(((u32 *) ring->start)[i + 3]),
++                               i == ring->head ? " (head)" : "",
++                               i == ring->tail ? " (tail)" : "");
++                      skipped = 0;
++              } else {
++                      if (!skipped) {
++                              DRM_INFO("  ...\n");
++                              skipped = 1;
++                      }
++              }
++      }
++
++      DRM_INFO("\n");
++
++      if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) {
++              struct list_head *ptr;
++              u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
++
++              list_for_each(ptr, &dev_priv->pending) {
++                      drm_mach64_freelist_t *entry =
++                          list_entry(ptr, drm_mach64_freelist_t, list);
++                      struct drm_buf *buf = entry->buf;
++
++                      u32 buf_addr = GETBUFADDR(buf);
++
++                      if (buf_addr <= addr && addr < buf_addr + buf->used)
++                              mach64_dump_buf_info(dev_priv, buf);
++              }
++      }
++
++      DRM_INFO("\n");
++      DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
++               MACH64_READ(MACH64_BM_GUI_TABLE));
++      DRM_INFO("\n");
++      DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
++               MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
++      DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
++               MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
++      DRM_INFO("         BM_COMMAND = 0x%08x\n",
++               MACH64_READ(MACH64_BM_COMMAND));
++      DRM_INFO("\n");
++      DRM_INFO("          BM_STATUS = 0x%08x\n",
++               MACH64_READ(MACH64_BM_STATUS));
++      DRM_INFO("           BUS_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_BUS_CNTL));
++      DRM_INFO("          FIFO_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_FIFO_STAT));
++      DRM_INFO("           GUI_STAT = 0x%08x\n",
++               MACH64_READ(MACH64_GUI_STAT));
++      DRM_INFO("           SRC_CNTL = 0x%08x\n",
++               MACH64_READ(MACH64_SRC_CNTL));
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA descriptor ring macros */
++/*@{*/
++
++/**
++ * Add the end mark to the ring's new tail position.
++ *
++ * The bus master engine will keep processing the DMA buffers listed in the ring
++ * until it finds this mark, making it stop.
++ *
++ * \sa mach64_clear_dma_eol
++ */ 
++static __inline__ void mach64_set_dma_eol(volatile u32 *addr)
++{
++#if defined(__i386__)
++      int nr = 31;
++
++      /* Taken from include/asm-i386/bitops.h linux header */
++      __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
++                           :"Ir"(nr));
++#elif defined(__powerpc__)
++      u32 old;
++      u32 mask = cpu_to_le32(MACH64_DMA_EOL);
++
++      /* Taken from the include/asm-ppc/bitops.h linux header */
++      __asm__ __volatile__("\n\
++1:    lwarx   %0,0,%3 \n\
++      or      %0,%0,%2 \n\
++      stwcx.  %0,0,%3 \n\
++      bne-    1b":"=&r"(old), "=m"(*addr)
++                           :"r"(mask), "r"(addr), "m"(*addr)
++                           :"cc");
++#elif defined(__alpha__)
++      u32 temp;
++      u32 mask = MACH64_DMA_EOL;
++
++      /* Taken from the include/asm-alpha/bitops.h linux header */
++      __asm__ __volatile__("1:        ldl_l %0,%3\n"
++                           "  bis %0,%2,%0\n"
++                           "  stl_c %0,%1\n"
++                           "  beq %0,2f\n"
++                           ".subsection 2\n"
++                           "2:        br 1b\n"
++                           ".previous":"=&r"(temp), "=m"(*addr)
++                           :"Ir"(mask), "m"(*addr));
++#else
++      u32 mask = cpu_to_le32(MACH64_DMA_EOL);
++
++      *addr |= mask;
++#endif
++}
++
++/**
++ * Remove the end mark from the ring's old tail position.
++ *
++ * It should be called after calling mach64_set_dma_eol to mark the ring's new
++ * tail position.
++ *
++ * We update the end marks while the bus master engine is in operation. Since
++ * the bus master engine may potentially be reading from the same position
++ * that we write, we must change atomically to avoid having intermediary bad
++ * data.
++ */
++static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
++{
++#if defined(__i386__)
++      int nr = 31;
++
++      /* Taken from include/asm-i386/bitops.h linux header */
++      __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
++                           :"Ir"(nr));
++#elif defined(__powerpc__)
++      u32 old;
++      u32 mask = cpu_to_le32(MACH64_DMA_EOL);
++
++      /* Taken from the include/asm-ppc/bitops.h linux header */
++      __asm__ __volatile__("\n\
++1:    lwarx   %0,0,%3 \n\
++      andc    %0,%0,%2 \n\
++      stwcx.  %0,0,%3 \n\
++      bne-    1b":"=&r"(old), "=m"(*addr)
++                           :"r"(mask), "r"(addr), "m"(*addr)
++                           :"cc");
++#elif defined(__alpha__)
++      u32 temp;
++      u32 mask = ~MACH64_DMA_EOL;
++
++      /* Taken from the include/asm-alpha/bitops.h linux header */
++      __asm__ __volatile__("1:        ldl_l %0,%3\n"
++                           "  and %0,%2,%0\n"
++                           "  stl_c %0,%1\n"
++                           "  beq %0,2f\n"
++                           ".subsection 2\n"
++                           "2:        br 1b\n"
++                           ".previous":"=&r"(temp), "=m"(*addr)
++                           :"Ir"(mask), "m"(*addr));
++#else
++      u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
++
++      *addr &= mask;
++#endif
++}
++
++#define RING_LOCALS                                                   \
++      int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
++
++#define RING_WRITE_OFS  _ring_write
++
++#define BEGIN_RING(n)                                                 \
++      do {                                                            \
++              if (MACH64_VERBOSE) {                                   \
++                      DRM_INFO( "BEGIN_RING( %d ) \n",                \
++                                (n) );                                \
++              }                                                       \
++              if (dev_priv->ring.space <= (n) * sizeof(u32)) {        \
++                      int ret;                                        \
++                      if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
++                              DRM_ERROR( "wait_ring failed, resetting engine\n"); \
++                              mach64_dump_engine_info( dev_priv );    \
++                              mach64_do_engine_reset( dev_priv );     \
++                              return ret;                             \
++                      }                                               \
++              }                                                       \
++              dev_priv->ring.space -= (n) * sizeof(u32);              \
++              _ring = (u32 *) dev_priv->ring.start;                   \
++              _ring_tail = _ring_write = dev_priv->ring.tail;         \
++              _ring_mask = dev_priv->ring.tail_mask;                  \
++      } while (0)
++
++#define OUT_RING( x )                                         \
++do {                                                          \
++      if (MACH64_VERBOSE) {                                   \
++              DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",    \
++                         (unsigned int)(x), _ring_write );    \
++      }                                                       \
++      _ring[_ring_write++] = cpu_to_le32( x );                \
++      _ring_write &= _ring_mask;                              \
++} while (0)
++
++#define ADVANCE_RING()                                                        \
++do {                                                                  \
++      if (MACH64_VERBOSE) {                                           \
++              DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
++                        _ring_write, _ring_tail );                    \
++      }                                                               \
++      DRM_MEMORYBARRIER();                                            \
++      mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );  \
++      DRM_MEMORYBARRIER();                                            \
++      dev_priv->ring.tail = _ring_write;                              \
++      mach64_ring_tick( dev_priv, &(dev_priv)->ring );                \
++} while (0)
++
++/**
++ * Queue a DMA buffer of register writes into the ring buffer.
++ */ 
++int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
++                           drm_mach64_freelist_t *entry)
++{
++      int bytes, pages, remainder;
++      u32 address, page;
++      int i;
++      struct drm_buf *buf = entry->buf;
++      RING_LOCALS;
++
++      bytes = buf->used;
++      address = GETBUFADDR( buf );
++      pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
++
++      BEGIN_RING( pages * 4 );
++
++      for ( i = 0 ; i < pages-1 ; i++ ) {
++              page = address + i * MACH64_DMA_CHUNKSIZE;
++              OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
++              OUT_RING( page );
++              OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
++              OUT_RING( 0 );
++      }
++
++      /* generate the final descriptor for any remaining commands in this buffer */
++      page = address + i * MACH64_DMA_CHUNKSIZE;
++      remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
++
++      /* Save dword offset of last descriptor for this buffer.
++       * This is needed to check for completion of the buffer in freelist_get
++       */
++      entry->ring_ofs = RING_WRITE_OFS;
++
++      OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
++      OUT_RING( page );
++      OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
++      OUT_RING( 0 );
++
++      ADVANCE_RING();
++      
++      return 0;
++}
++
++/**
++ * Queue DMA buffer controlling host data transfers (e.g., blit).
++ * 
++ * Almost identical to mach64_add_buf_to_ring.
++ */
++int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
++                                    drm_mach64_freelist_t *entry)
++{
++      int bytes, pages, remainder;
++      u32 address, page;
++      int i;
++      struct drm_buf *buf = entry->buf;
++      RING_LOCALS;
++      
++      bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
++      pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
++      address = GETBUFADDR( buf );
++      
++      BEGIN_RING( 4 + pages * 4 );
++      
++      OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
++      OUT_RING( address );
++      OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
++      OUT_RING( 0 );
++      address += MACH64_HOSTDATA_BLIT_OFFSET;
++      
++      for ( i = 0 ; i < pages-1 ; i++ ) {
++              page = address + i * MACH64_DMA_CHUNKSIZE;
++              OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
++              OUT_RING( page );
++              OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
++              OUT_RING( 0 );
++      }
++      
++      /* generate the final descriptor for any remaining commands in this buffer */
++      page = address + i * MACH64_DMA_CHUNKSIZE;
++      remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
++      
++      /* Save dword offset of last descriptor for this buffer.
++       * This is needed to check for completion of the buffer in freelist_get
++       */
++      entry->ring_ofs = RING_WRITE_OFS;
++      
++      OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
++      OUT_RING( page );
++      OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
++      OUT_RING( 0 );
++      
++      ADVANCE_RING();
++      
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA test and initialization */
++/*@{*/
++
++/**
++ * Perform a simple DMA operation using the pattern registers to test whether
++ * DMA works.
++ *
++ * \return zero if successful.
++ *
++ * \note This function was the testbed for many experiments regarding Mach64
++ * DMA operation. It is left here since it is so tricky to get DMA operating
++ * properly in some architectures and hardware.
++ */
++static int mach64_bm_dma_test(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_dma_handle_t *cpu_addr_dmah;
++      u32 data_addr;
++      u32 *table, *data;
++      u32 expected[2];
++      u32 src_cntl, pat_reg0, pat_reg1;
++      int i, count, failed;
++
++      DRM_DEBUG("\n");
++
++      table = (u32 *) dev_priv->ring.start;
++
++      /* FIXME: get a dma buffer from the freelist here */
++      DRM_DEBUG("Allocating data memory ...\n");
++#ifdef __FreeBSD__
++      DRM_UNLOCK();
++#endif
++      cpu_addr_dmah =
++          drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
++#ifdef __FreeBSD__
++      DRM_LOCK();
++#endif
++      if (!cpu_addr_dmah) {
++              DRM_INFO("data-memory allocation failed!\n");
++              return -ENOMEM;
++      } else {
++              data = (u32 *) cpu_addr_dmah->vaddr;
++              data_addr = (u32) cpu_addr_dmah->busaddr;
++      }
++
++      /* Save the X server's value for SRC_CNTL and restore it
++       * in case our test fails.  This prevents the X server
++       * from disabling its cache for this register
++       */
++      src_cntl = MACH64_READ(MACH64_SRC_CNTL);
++      pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
++      pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
++
++      mach64_do_wait_for_fifo(dev_priv, 3);
++
++      MACH64_WRITE(MACH64_SRC_CNTL, 0);
++      MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
++      MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
++
++      mach64_do_wait_for_idle(dev_priv);
++
++      for (i = 0; i < 2; i++) {
++              u32 reg;
++              reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
++              DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
++              if (reg != 0x11111111) {
++                      DRM_INFO("Error initializing test registers\n");
++                      DRM_INFO("resetting engine ...\n");
++                      mach64_do_engine_reset(dev_priv);
++                      DRM_INFO("freeing data buffer memory.\n");
++                      drm_pci_free(dev, cpu_addr_dmah);
++                      return -EIO;
++              }
++      }
++
++      /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
++      count = 0;
++
++      data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
++      data[count++] = expected[0] = 0x22222222;
++      data[count++] = expected[1] = 0xaaaaaaaa;
++
++      while (count < 1020) {
++              data[count++] =
++                  cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
++              data[count++] = 0x22222222;
++              data[count++] = 0xaaaaaaaa;
++      }
++      data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
++      data[count++] = 0;
++
++      DRM_DEBUG("Preparing table ...\n");
++      table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
++                                                       MACH64_APERTURE_OFFSET);
++      table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
++      table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
++                                              | MACH64_DMA_HOLD_OFFSET
++                                              | MACH64_DMA_EOL);
++      table[MACH64_DMA_RESERVED] = 0;
++
++      DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
++      DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
++      DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
++      DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
++
++      for (i = 0; i < 6; i++) {
++              DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
++      }
++      DRM_DEBUG(" ...\n");
++      for (i = count - 5; i < count; i++) {
++              DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
++      }
++
++      DRM_MEMORYBARRIER();
++
++      DRM_DEBUG("waiting for idle...\n");
++      if ((i = mach64_do_wait_for_idle(dev_priv))) {
++              DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
++              DRM_INFO("resetting engine ...\n");
++              mach64_do_engine_reset(dev_priv);
++              mach64_do_wait_for_fifo(dev_priv, 3);
++              MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
++              MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
++              MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
++              DRM_INFO("freeing data buffer memory.\n");
++              drm_pci_free(dev, cpu_addr_dmah);
++              return i;
++      }
++      DRM_DEBUG("waiting for idle...done\n");
++
++      DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
++      DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
++      DRM_DEBUG("\n");
++      DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
++      DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
++
++      DRM_DEBUG("starting DMA transfer...\n");
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      MACH64_WRITE(MACH64_SRC_CNTL,
++                   MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
++                   MACH64_SRC_BM_OP_SYSTEM_TO_REG);
++
++      /* Kick off the transfer */
++      DRM_DEBUG("starting DMA transfer... done.\n");
++      MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
++
++      DRM_DEBUG("waiting for idle...\n");
++
++      if ((i = mach64_do_wait_for_idle(dev_priv))) {
++              /* engine locked up, dump register state and reset */
++              DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
++              mach64_dump_engine_info(dev_priv);
++              DRM_INFO("resetting engine ...\n");
++              mach64_do_engine_reset(dev_priv);
++              mach64_do_wait_for_fifo(dev_priv, 3);
++              MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
++              MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
++              MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
++              DRM_INFO("freeing data buffer memory.\n");
++              drm_pci_free(dev, cpu_addr_dmah);
++              return i;
++      }
++
++      DRM_DEBUG("waiting for idle...done\n");
++
++      /* restore SRC_CNTL */
++      mach64_do_wait_for_fifo(dev_priv, 1);
++      MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
++
++      failed = 0;
++
++      /* Check register values to see if the GUI master operation succeeded */
++      for (i = 0; i < 2; i++) {
++              u32 reg;
++              reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
++              DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
++              if (reg != expected[i]) {
++                      failed = -1;
++              }
++      }
++
++      /* restore pattern registers */
++      mach64_do_wait_for_fifo(dev_priv, 2);
++      MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
++      MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
++
++      DRM_DEBUG("freeing data buffer memory.\n");
++      drm_pci_free(dev, cpu_addr_dmah);
++      DRM_DEBUG("returning ...\n");
++
++      return failed;
++}
++
++/**
++ * Called during the DMA initialization ioctl to initialize all the necessary
++ * software and hardware state for DMA operation.
++ */
++static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
++{
++      drm_mach64_private_t *dev_priv;
++      u32 tmp;
++      int i, ret;
++
++      DRM_DEBUG("\n");
++
++      dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_mach64_private_t));
++
++      dev_priv->is_pci = init->is_pci;
++
++      dev_priv->fb_bpp = init->fb_bpp;
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      dev_priv->depth_bpp = init->depth_bpp;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
++                                      (dev_priv->front_offset >> 3));
++      dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
++                                     (dev_priv->back_offset >> 3));
++      dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
++                                      (dev_priv->depth_offset >> 3));
++
++      dev_priv->usec_timeout = 1000000;
++
++      /* Set up the freelist, placeholder list and pending list */
++      INIT_LIST_HEAD(&dev_priv->free_list);
++      INIT_LIST_HEAD(&dev_priv->placeholders);
++      INIT_LIST_HEAD(&dev_priv->pending);
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("can not find sarea!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++      dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
++      if (!dev_priv->fb) {
++              DRM_ERROR("can not find frame buffer map!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++      dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio) {
++              DRM_ERROR("can not find mmio map!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
++      if (!dev_priv->ring_map) {
++              DRM_ERROR("can not find ring map!\n");
++              dev->dev_private = (void *)dev_priv;
++              mach64_do_cleanup_dma(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->sarea_priv = (drm_mach64_sarea_t *)
++          ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
++
++      if (!dev_priv->is_pci) {
++              drm_core_ioremap(dev_priv->ring_map, dev);
++              if (!dev_priv->ring_map->handle) {
++                      DRM_ERROR("can not ioremap virtual address for"
++                                " descriptor ring\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -ENOMEM;
++              }
++              dev->agp_buffer_token = init->buffers_offset;
++              dev->agp_buffer_map =
++                  drm_core_findmap(dev, init->buffers_offset);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("can not find dma buffer map!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -EINVAL;
++              }
++              /* there might be a nicer way to do this -
++                 dev isn't passed all the way through the mach64 - DA */
++              dev_priv->dev_buffers = dev->agp_buffer_map;
++
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev->agp_buffer_map->handle) {
++                      DRM_ERROR("can not ioremap virtual address for"
++                                " dma buffer\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -ENOMEM;
++              }
++              dev_priv->agp_textures =
++                  drm_core_findmap(dev, init->agp_textures_offset);
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("can not find agp texture region!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      mach64_do_cleanup_dma(dev);
++                      return -EINVAL;
++              }
++      }
++
++      dev->dev_private = (void *)dev_priv;
++
++      dev_priv->driver_mode = init->dma_mode;
++
++      /* changing the FIFO size from the default causes problems with DMA */
++      tmp = MACH64_READ(MACH64_GUI_CNTL);
++      if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
++              DRM_INFO("Setting FIFO size to 128 entries\n");
++              /* FIFO must be empty to change the FIFO depth */
++              if ((ret = mach64_do_wait_for_idle(dev_priv))) {
++                      DRM_ERROR
++                          ("wait for idle failed before changing FIFO depth!\n");
++                      mach64_do_cleanup_dma(dev);
++                      return ret;
++              }
++              MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
++                                             | MACH64_CMDFIFO_SIZE_128));
++              /* need to read GUI_STAT for proper sync according to docs */
++              if ((ret = mach64_do_wait_for_idle(dev_priv))) {
++                      DRM_ERROR
++                          ("wait for idle failed when changing FIFO depth!\n");
++                      mach64_do_cleanup_dma(dev);
++                      return ret;
++              }
++      }
++
++      dev_priv->ring.size = 0x4000;   /* 16KB */
++      dev_priv->ring.start = dev_priv->ring_map->handle;
++      dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
++
++      memset(dev_priv->ring.start, 0, dev_priv->ring.size);
++      DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
++               dev_priv->ring.start, dev_priv->ring.start_addr);
++
++      ret = 0;
++      if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
++
++              /* enable block 1 registers and bus mastering */
++              MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
++                                              | MACH64_BUS_EXT_REG_EN)
++                                             & ~MACH64_BUS_MASTER_DIS));
++
++              /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
++              DRM_DEBUG("Starting DMA test...\n");
++              if ((ret = mach64_bm_dma_test(dev))) {
++                      dev_priv->driver_mode = MACH64_MODE_MMIO;
++              }
++      }
++
++      switch (dev_priv->driver_mode) {
++      case MACH64_MODE_MMIO:
++              MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
++                                             | MACH64_BUS_EXT_REG_EN
++                                             | MACH64_BUS_MASTER_DIS));
++              if (init->dma_mode == MACH64_MODE_MMIO)
++                      DRM_INFO("Forcing pseudo-DMA mode\n");
++              else
++                      DRM_INFO
++                          ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
++                           ret);
++              break;
++      case MACH64_MODE_DMA_SYNC:
++              DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
++              break;
++      case MACH64_MODE_DMA_ASYNC:
++      default:
++              DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
++      }
++
++      dev_priv->ring_running = 0;
++
++      /* setup offsets for physical address of table start and end */
++      dev_priv->ring.head_addr = dev_priv->ring.start_addr;
++      dev_priv->ring.head = dev_priv->ring.tail = 0;
++      dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
++      dev_priv->ring.space = dev_priv->ring.size;
++
++      /* setup physical address and size of descriptor table */
++      mach64_do_wait_for_fifo(dev_priv, 1);
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   (dev_priv->ring.
++                    head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
++
++      /* init frame counter */
++      dev_priv->sarea_priv->frames_queued = 0;
++      for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++              dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
++      }
++
++      /* Allocate the DMA buffer freelist */
++      if ((ret = mach64_init_freelist(dev))) {
++              DRM_ERROR("Freelist allocation failed\n");
++              mach64_do_cleanup_dma(dev);
++              return ret;
++      }
++
++      return 0;
++}
++
++/*******************************************************************/
++/** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
++ */
++
++int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      volatile u32 *ring_read;
++      struct list_head *ptr;
++      drm_mach64_freelist_t *entry;
++      struct drm_buf *buf = NULL;
++      u32 *buf_ptr;
++      u32 used, reg, target;
++      int fifo, count, found, ret, no_idle_wait;
++
++      fifo = count = reg = no_idle_wait = 0;
++      target = MACH64_BM_ADDR;
++
++      if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
++              DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
++              mach64_dump_engine_info(dev_priv);
++              mach64_do_engine_reset(dev_priv);
++              return ret;
++      }
++
++      ring_read = (u32 *) ring->start;
++
++      while (ring->tail != ring->head) {
++              u32 buf_addr, new_target, offset;
++              u32 bytes, remaining, head, eol;
++
++              head = ring->head;
++
++              new_target =
++                  le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
++              buf_addr = le32_to_cpu(ring_read[head++]);
++              eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
++              bytes = le32_to_cpu(ring_read[head++])
++                  & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
++              head++;
++              head &= ring->tail_mask;
++
++              /* can't wait for idle between a blit setup descriptor
++               * and a HOSTDATA descriptor or the engine will lock
++               */
++              if (new_target == MACH64_BM_HOSTDATA
++                  && target == MACH64_BM_ADDR)
++                      no_idle_wait = 1;
++
++              target = new_target;
++
++              found = 0;
++              offset = 0;
++              list_for_each(ptr, &dev_priv->pending) {
++                      entry = list_entry(ptr, drm_mach64_freelist_t, list);
++                      buf = entry->buf;
++                      offset = buf_addr - GETBUFADDR(buf);
++                      if (offset >= 0 && offset < MACH64_BUFFER_SIZE) {
++                              found = 1;
++                              break;
++                      }
++              }
++
++              if (!found || buf == NULL) {
++                      DRM_ERROR
++                          ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
++                           head, ring->tail, buf_addr, (eol ? "eol" : ""));
++                      mach64_dump_ring_info(dev_priv);
++                      mach64_do_engine_reset(dev_priv);
++                      return -EINVAL;
++              }
++
++              /* Hand feed the buffer to the card via MMIO, waiting for the fifo
++               * every 16 writes
++               */
++              DRM_DEBUG("target: (0x%08x) %s\n", target,
++                        (target ==
++                         MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
++              DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
++                        buf->used);
++
++              remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
++              used = bytes >> 2;      /* dwords in buffer for this descriptor */
++              buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
++
++              while (used) {
++
++                      if (count == 0) {
++                              if (target == MACH64_BM_HOSTDATA) {
++                                      reg = DMAREG(MACH64_HOST_DATA0);
++                                      count =
++                                          (remaining > 16) ? 16 : remaining;
++                                      fifo = 0;
++                              } else {
++                                      reg = le32_to_cpu(*buf_ptr++);
++                                      used--;
++                                      count = (reg >> 16) + 1;
++                              }
++
++                              reg = reg & 0xffff;
++                              reg = MMSELECT(reg);
++                      }
++                      while (count && used) {
++                              if (!fifo) {
++                                      if (no_idle_wait) {
++                                              if ((ret =
++                                                   mach64_do_wait_for_fifo
++                                                   (dev_priv, 16)) < 0) {
++                                                      no_idle_wait = 0;
++                                                      return ret;
++                                              }
++                                      } else {
++                                              if ((ret =
++                                                   mach64_do_wait_for_idle
++                                                   (dev_priv)) < 0) {
++                                                      return ret;
++                                              }
++                                      }
++                                      fifo = 16;
++                              }
++                              --fifo;
++                              MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
++                              used--;
++                              remaining--;
++
++                              reg += 4;
++                              count--;
++                      }
++              }
++              ring->head = head;
++              ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
++              ring->space += (4 * sizeof(u32));
++      }
++
++      if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
++              return ret;
++      }
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      DRM_DEBUG("completed\n");
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA cleanup */
++/*@{*/
++
++int mach64_do_cleanup_dma(struct drm_device * dev)
++{
++      DRM_DEBUG("\n");
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              drm_mach64_private_t *dev_priv = dev->dev_private;
++
++              if (!dev_priv->is_pci) {
++                      if (dev_priv->ring_map)
++                              drm_core_ioremapfree(dev_priv->ring_map, dev);
++
++                      if (dev->agp_buffer_map) {
++                              drm_core_ioremapfree(dev->agp_buffer_map, dev);
++                              dev->agp_buffer_map = NULL;
++                      }
++              }
++
++              mach64_destroy_freelist(dev);
++
++              drm_free(dev_priv, sizeof(drm_mach64_private_t),
++                       DRM_MEM_DRIVER);
++              dev->dev_private = NULL;
++      }
++
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name IOCTL handlers */
++/*@{*/
++
++int mach64_dma_init(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_init_t *init = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case DRM_MACH64_INIT_DMA:
++              return mach64_do_dma_init(dev, init);
++      case DRM_MACH64_CLEANUP_DMA:
++              return mach64_do_cleanup_dma(dev);
++      }
++
++      return -EINVAL;
++}
++
++int mach64_dma_idle(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mach64_do_dma_idle(dev_priv);
++}
++
++int mach64_dma_flush(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mach64_do_dma_flush(dev_priv);
++}
++
++int mach64_engine_reset(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mach64_do_engine_reset(dev_priv);
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name Freelist management */
++/*@{*/
++
++int mach64_init_freelist(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_freelist_t *entry;
++      struct list_head *ptr;
++      int i;
++
++      DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              if ((entry =
++                   (drm_mach64_freelist_t *)
++                   drm_alloc(sizeof(drm_mach64_freelist_t),
++                             DRM_MEM_BUFLISTS)) == NULL)
++                      return -ENOMEM;
++              memset(entry, 0, sizeof(drm_mach64_freelist_t));
++              entry->buf = dma->buflist[i];
++              ptr = &entry->list;
++              list_add_tail(ptr, &dev_priv->free_list);
++      }
++
++      return 0;
++}
++
++void mach64_destroy_freelist(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_freelist_t *entry;
++      struct list_head *ptr;
++      struct list_head *tmp;
++
++      DRM_DEBUG("\n");
++
++      list_for_each_safe(ptr, tmp, &dev_priv->pending) {
++              list_del(ptr);
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
++      }
++      list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
++              list_del(ptr);
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
++      }
++
++      list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
++              list_del(ptr);
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
++      }
++}
++
++/* IMPORTANT: This function should only be called when the engine is idle or locked up,
++ * as it assumes all buffers in the pending list have been completed by the hardware.
++ */
++int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
++{
++      struct list_head *ptr;
++      struct list_head *tmp;
++      drm_mach64_freelist_t *entry;
++      int i;
++
++      if (list_empty(&dev_priv->pending))
++              return 0;
++
++      /* Iterate the pending list and move all buffers into the freelist... */
++      i = 0;
++      list_for_each_safe(ptr, tmp, &dev_priv->pending) {
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              if (entry->discard) {
++                      entry->buf->pending = 0;
++                      list_del(ptr);
++                      list_add_tail(ptr, &dev_priv->free_list);
++                      i++;
++              }
++      }
++
++      DRM_DEBUG("released %d buffers from pending list\n", i);
++
++      return 0;
++}
++
++static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      struct list_head *ptr;
++      struct list_head *tmp;
++      drm_mach64_freelist_t *entry;
++      u32 head, tail, ofs;
++
++      mach64_ring_tick(dev_priv, ring);
++      head = ring->head;
++      tail = ring->tail;
++
++      if (head == tail) {
++#if MACH64_EXTRA_CHECKING
++              if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
++                      DRM_ERROR("Empty ring with non-idle engine!\n");
++                      mach64_dump_ring_info(dev_priv);
++                      return -1;
++              }
++#endif
++              /* last pass is complete, so release everything */
++              mach64_do_release_used_buffers(dev_priv);
++              DRM_DEBUG("idle engine, freed all buffers.\n");
++              if (list_empty(&dev_priv->free_list)) {
++                      DRM_ERROR("Freelist empty with idle engine\n");
++                      return -1;
++              }
++              return 0;
++      }
++      /* Look for a completed buffer and bail out of the loop
++       * as soon as we find one -- don't waste time trying
++       * to free extra bufs here, leave that to do_release_used_buffers
++       */
++      list_for_each_safe(ptr, tmp, &dev_priv->pending) {
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              ofs = entry->ring_ofs;
++              if (entry->discard &&
++                  ((head < tail && (ofs < head || ofs >= tail)) ||
++                   (head > tail && (ofs < head && ofs >= tail)))) {
++#if MACH64_EXTRA_CHECKING
++                      int i;
++
++                      for (i = head; i != tail; i = (i + 4) & ring->tail_mask)
++                      {
++                              u32 o1 = le32_to_cpu(((u32 *) ring->
++                                               start)[i + 1]);
++                              u32 o2 = GETBUFADDR(entry->buf);
++
++                              if (o1 == o2) {
++                                      DRM_ERROR
++                                          ("Attempting to free used buffer: "
++                                           "i=%d  buf=0x%08x\n",
++                                           i, o1);
++                                      mach64_dump_ring_info(dev_priv);
++                                      return -1;
++                              }
++                      }
++#endif
++                      /* found a processed buffer */
++                      entry->buf->pending = 0;
++                      list_del(ptr);
++                      list_add_tail(ptr, &dev_priv->free_list);
++                      DRM_DEBUG
++                          ("freed processed buffer (head=%d tail=%d "
++                           "buf ring ofs=%d).\n",
++                           head, tail, ofs);
++                      return 0;
++              }
++      }
++
++      return 1;
++}
++
++struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      drm_mach64_freelist_t *entry;
++      struct list_head *ptr;
++      int t;
++
++      if (list_empty(&dev_priv->free_list)) {
++              if (list_empty(&dev_priv->pending)) {
++                      DRM_ERROR
++                          ("Couldn't get buffer - pending and free lists empty\n");
++                      t = 0;
++                      list_for_each(ptr, &dev_priv->placeholders) {
++                              t++;
++                      }
++                      DRM_INFO("Placeholders: %d\n", t);
++                      return NULL;
++              }
++
++              for (t = 0; t < dev_priv->usec_timeout; t++) {
++                      int ret;
++
++                      ret = mach64_do_reclaim_completed(dev_priv);
++                      if (ret == 0)
++                              goto _freelist_entry_found;
++                      if (ret < 0)
++                              return NULL;
++
++                      DRM_UDELAY(1);
++              }
++              mach64_dump_ring_info(dev_priv);
++              DRM_ERROR
++                  ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
++                   ring->head_addr, ring->head, ring->tail);
++              return NULL;
++      }
++
++      _freelist_entry_found:
++      ptr = dev_priv->free_list.next;
++      list_del(ptr);
++      entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      entry->buf->used = 0;
++      list_add_tail(ptr, &dev_priv->placeholders);
++      return entry->buf;
++}
++
++int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
++{
++      struct list_head *ptr;
++      drm_mach64_freelist_t *entry;
++
++#if MACH64_EXTRA_CHECKING
++      list_for_each(ptr, &dev_priv->pending) {
++              entry = list_entry(ptr, drm_mach64_freelist_t, list);
++              if (copy_buf == entry->buf) {
++                      DRM_ERROR("Trying to release a pending buf\n");
++                      return -EFAULT;
++              }
++      }
++#endif
++      ptr = dev_priv->placeholders.next;
++      entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      copy_buf->pending = 0;
++      copy_buf->used = 0;
++      entry->buf = copy_buf;
++      entry->discard = 1;
++      list_del(ptr);
++      list_add_tail(ptr, &dev_priv->free_list);
++
++      return 0;
++}
++
++/*@}*/
++
++
++/*******************************************************************/
++/** \name DMA buffer request and submission IOCTL handler */
++/*@{*/
++
++static int mach64_dma_get_buffers(struct drm_device *dev,
++                                struct drm_file *file_priv,
++                                struct drm_dma * d)
++{
++      int i;
++      struct drm_buf *buf;
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = mach64_freelist_get(dev_priv);
++#if MACH64_EXTRA_CHECKING
++              if (!buf)
++                      return -EFAULT;
++#else
++              if (!buf)
++                      return -EAGAIN;
++#endif
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
++                                   sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
++                                   sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int mach64_dma_buffers(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_dma *d = data;
++      int ret = 0;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              ret = -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = mach64_dma_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++void mach64_driver_lastclose(struct drm_device * dev)
++{
++      mach64_do_cleanup_dma(dev);
++}
++
++/*@}*/
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drm.h git-nokia/drivers/gpu/drm-tungsten/mach64_drm.h
+--- git/drivers/gpu/drm-tungsten/mach64_drm.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drm.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,256 @@
++/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*-
++ * Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002 Frank C. Earl
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Frank C. Earl <fearl@airmail.net>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++#ifndef __MACH64_DRM_H__
++#define __MACH64_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mach64_sarea.h)
++ */
++#ifndef __MACH64_SAREA_DEFINES__
++#define __MACH64_SAREA_DEFINES__
++
++/* What needs to be changed for the current vertex buffer?
++ * GH: We're going to be pedantic about this.  We want the card to do as
++ * little as possible, so let's avoid having it fetch a whole bunch of
++ * register values that don't change all that often, if at all.
++ */
++#define MACH64_UPLOAD_DST_OFF_PITCH   0x0001
++#define MACH64_UPLOAD_Z_OFF_PITCH     0x0002
++#define MACH64_UPLOAD_Z_ALPHA_CNTL    0x0004
++#define MACH64_UPLOAD_SCALE_3D_CNTL   0x0008
++#define MACH64_UPLOAD_DP_FOG_CLR      0x0010
++#define MACH64_UPLOAD_DP_WRITE_MASK   0x0020
++#define MACH64_UPLOAD_DP_PIX_WIDTH    0x0040
++#define MACH64_UPLOAD_SETUP_CNTL      0x0080
++#define MACH64_UPLOAD_MISC            0x0100
++#define MACH64_UPLOAD_TEXTURE         0x0200
++#define MACH64_UPLOAD_TEX0IMAGE               0x0400
++#define MACH64_UPLOAD_TEX1IMAGE               0x0800
++#define MACH64_UPLOAD_CLIPRECTS               0x1000  /* handled client-side */
++#define MACH64_UPLOAD_CONTEXT         0x00ff
++#define MACH64_UPLOAD_ALL             0x1fff
++
++/* DMA buffer size
++ */
++#define MACH64_BUFFER_SIZE            16384
++
++/* Max number of swaps allowed on the ring
++ * before the client must wait
++ */
++#define MACH64_MAX_QUEUED_FRAMES        3U
++
++/* Byte offsets for host blit buffer data
++ */
++#define MACH64_HOSTDATA_BLIT_OFFSET   104
++
++/* Keep these small for testing.
++ */
++#define MACH64_NR_SAREA_CLIPRECTS     8
++
++#define MACH64_CARD_HEAP              0
++#define MACH64_AGP_HEAP                       1
++#define MACH64_NR_TEX_HEAPS           2
++#define MACH64_NR_TEX_REGIONS         64
++#define MACH64_LOG_TEX_GRANULARITY    16
++
++#define MACH64_TEX_MAXLEVELS          1
++
++#define MACH64_NR_CONTEXT_REGS                15
++#define MACH64_NR_TEXTURE_REGS                4
++
++#endif                                /* __MACH64_SAREA_DEFINES__ */
++
++typedef struct {
++      unsigned int dst_off_pitch;
++
++      unsigned int z_off_pitch;
++      unsigned int z_cntl;
++      unsigned int alpha_tst_cntl;
++
++      unsigned int scale_3d_cntl;
++
++      unsigned int sc_left_right;
++      unsigned int sc_top_bottom;
++
++      unsigned int dp_fog_clr;
++      unsigned int dp_write_mask;
++      unsigned int dp_pix_width;
++      unsigned int dp_mix;
++      unsigned int dp_src;
++
++      unsigned int clr_cmp_cntl;
++      unsigned int gui_traj_cntl;
++
++      unsigned int setup_cntl;
++
++      unsigned int tex_size_pitch;
++      unsigned int tex_cntl;
++      unsigned int secondary_tex_off;
++      unsigned int tex_offset;
++} drm_mach64_context_regs_t;
++
++typedef struct drm_mach64_sarea {
++      /* The channel for communication of state information to the kernel
++       * on firing a vertex dma buffer.
++       */
++      drm_mach64_context_regs_t context_state;
++      unsigned int dirty;
++      unsigned int vertsize;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Counters for client-side throttling of rendering clients.
++       */
++      unsigned int frames_queued;
++
++      /* Texture memory LRU.
++       */
++      struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
++                                                     1];
++      unsigned int tex_age[MACH64_NR_TEX_HEAPS];
++      int ctx_owner;
++} drm_mach64_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mach64_common.h)
++ */
++
++/* Mach64 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++
++#define DRM_MACH64_INIT           0x00
++#define DRM_MACH64_IDLE           0x01
++#define DRM_MACH64_RESET          0x02
++#define DRM_MACH64_SWAP           0x03
++#define DRM_MACH64_CLEAR          0x04
++#define DRM_MACH64_VERTEX         0x05
++#define DRM_MACH64_BLIT           0x06
++#define DRM_MACH64_FLUSH          0x07
++#define DRM_MACH64_GETPARAM       0x08
++
++#define DRM_IOCTL_MACH64_INIT           DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
++#define DRM_IOCTL_MACH64_IDLE           DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_IDLE )
++#define DRM_IOCTL_MACH64_RESET          DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_RESET )
++#define DRM_IOCTL_MACH64_SWAP           DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_SWAP )
++#define DRM_IOCTL_MACH64_CLEAR          DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t)
++#define DRM_IOCTL_MACH64_VERTEX         DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
++#define DRM_IOCTL_MACH64_BLIT           DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
++#define DRM_IOCTL_MACH64_FLUSH          DRM_IO(  DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
++#define DRM_IOCTL_MACH64_GETPARAM       DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
++
++/* Buffer flags for clears
++ */
++#define MACH64_FRONT                  0x1
++#define MACH64_BACK                   0x2
++#define MACH64_DEPTH                  0x4
++
++/* Primitive types for vertex buffers
++ */
++#define MACH64_PRIM_POINTS            0x00000000
++#define MACH64_PRIM_LINES             0x00000001
++#define MACH64_PRIM_LINE_LOOP         0x00000002
++#define MACH64_PRIM_LINE_STRIP                0x00000003
++#define MACH64_PRIM_TRIANGLES         0x00000004
++#define MACH64_PRIM_TRIANGLE_STRIP    0x00000005
++#define MACH64_PRIM_TRIANGLE_FAN      0x00000006
++#define MACH64_PRIM_QUADS             0x00000007
++#define MACH64_PRIM_QUAD_STRIP                0x00000008
++#define MACH64_PRIM_POLYGON           0x00000009
++
++typedef enum _drm_mach64_dma_mode_t {
++      MACH64_MODE_DMA_ASYNC,
++      MACH64_MODE_DMA_SYNC,
++      MACH64_MODE_MMIO
++} drm_mach64_dma_mode_t;
++
++typedef struct drm_mach64_init {
++      enum {
++              DRM_MACH64_INIT_DMA = 0x01,
++              DRM_MACH64_CLEANUP_DMA = 0x02
++      } func;
++
++      unsigned long sarea_priv_offset;
++      int is_pci;
++      drm_mach64_dma_mode_t dma_mode;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long ring_offset;
++      unsigned long buffers_offset;
++      unsigned long agp_textures_offset;
++} drm_mach64_init_t;
++
++typedef struct drm_mach64_clear {
++      unsigned int flags;
++      int x, y, w, h;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++} drm_mach64_clear_t;
++
++typedef struct drm_mach64_vertex {
++      int prim;
++      void *buf;              /* Address of vertex buffer */
++      unsigned long used;     /* Number of bytes in buffer */
++      int discard;            /* Client finished with buffer? */
++} drm_mach64_vertex_t;
++
++typedef struct drm_mach64_blit {
++      void *buf;
++      int pitch;
++      int offset;
++      int format;
++      unsigned short x, y;
++      unsigned short width, height;
++} drm_mach64_blit_t;
++
++typedef struct drm_mach64_getparam {
++      enum {
++              MACH64_PARAM_FRAMES_QUEUED = 0x01,
++              MACH64_PARAM_IRQ_NR = 0x02
++      } param;
++      void *value;
++} drm_mach64_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drv.c git-nokia/drivers/gpu/drm-tungsten/mach64_drv.c
+--- git/drivers/gpu/drm-tungsten/mach64_drv.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drv.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,105 @@
++/* mach64_drv.c -- mach64 (Rage Pro) driver -*- linux-c -*-
++ * Created: Fri Nov 24 18:34:32 2000 by gareth@valinux.com
++ *
++ * Copyright 2000 Gareth Hughes
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * GARETH HUGHES BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      mach64_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
++          | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .lastclose = mach64_driver_lastclose,
++      .get_vblank_counter = mach64_get_vblank_counter,
++      .enable_vblank = mach64_enable_vblank,
++      .disable_vblank = mach64_disable_vblank,
++      .irq_preinstall = mach64_driver_irq_preinstall,
++      .irq_postinstall = mach64_driver_irq_postinstall,
++      .irq_uninstall = mach64_driver_irq_uninstall,
++      .irq_handler = mach64_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = mach64_ioctls,
++      .dma_ioctl = mach64_dma_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init mach64_init(void)
++{
++      driver.num_ioctls = mach64_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit mach64_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(mach64_init);
++module_exit(mach64_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_drv.h git-nokia/drivers/gpu/drm-tungsten/mach64_drv.h
+--- git/drivers/gpu/drm-tungsten/mach64_drv.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_drv.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,859 @@
++/* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*-
++ * Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002 Frank C. Earl
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Frank C. Earl <fearl@airmail.net>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ *    José Fonseca <j_r_fonseca@yahoo.co.uk>
++ */
++
++#ifndef __MACH64_DRV_H__
++#define __MACH64_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Gareth Hughes, Leif Delgass, José Fonseca"
++
++#define DRIVER_NAME           "mach64"
++#define DRIVER_DESC           "DRM module for the ATI Rage Pro"
++#define DRIVER_DATE           "20060718"
++
++#define DRIVER_MAJOR          2
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     0
++
++/* FIXME: remove these when not needed */
++/* Development driver options */
++#define MACH64_EXTRA_CHECKING     0   /* Extra sanity checks for DMA/freelist management */
++#define MACH64_VERBOSE                  0     /* Verbose debugging output */
++
++typedef struct drm_mach64_freelist {
++      struct list_head list;  /* List pointers for free_list, placeholders, or pending list */
++      struct drm_buf *buf;            /* Pointer to the buffer */
++      int discard;            /* This flag is set when we're done (re)using a buffer */
++      u32 ring_ofs;           /* dword offset in ring of last descriptor for this buffer */
++} drm_mach64_freelist_t;
++
++typedef struct drm_mach64_descriptor_ring {
++      void *start;            /* write pointer (cpu address) to start of descriptor ring */
++      u32 start_addr;         /* bus address of beginning of descriptor ring */
++      int size;               /* size of ring in bytes */
++
++      u32 head_addr;          /* bus address of descriptor ring head */
++      u32 head;               /* dword offset of descriptor ring head */
++      u32 tail;               /* dword offset of descriptor ring tail */
++      u32 tail_mask;          /* mask used to wrap ring */
++      int space;              /* number of free bytes in ring */
++} drm_mach64_descriptor_ring_t;
++
++typedef struct drm_mach64_private {
++      drm_mach64_sarea_t *sarea_priv;
++
++      int is_pci;
++      drm_mach64_dma_mode_t driver_mode;      /* Async DMA, sync DMA, or MMIO */
++
++      int usec_timeout;       /* Timeout for the wait functions */
++
++      drm_mach64_descriptor_ring_t ring;      /* DMA descriptor table (ring buffer) */
++      int ring_running;       /* Is bus mastering is enabled */
++
++      struct list_head free_list;     /* Free-list head */
++      struct list_head placeholders;  /* Placeholder list for buffers held by clients */
++      struct list_head pending;       /* Buffers pending completion */
++
++      u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES];        /* dword ring offsets of most recent frame swaps */
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      atomic_t vbl_received;          /**< Number of vblanks received. */
++
++      u32 front_offset_pitch;
++      u32 back_offset_pitch;
++      u32 depth_offset_pitch;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *fb;
++      drm_local_map_t *mmio;
++      drm_local_map_t *ring_map;
++      drm_local_map_t *dev_buffers;   /* this is a pointer to a structure in dev */
++      drm_local_map_t *agp_textures;
++} drm_mach64_private_t;
++
++extern struct drm_ioctl_desc mach64_ioctls[];
++extern int mach64_max_ioctl;
++
++                              /* mach64_dma.c */
++extern int mach64_dma_init(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_dma_idle(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_dma_flush(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int mach64_engine_reset(struct drm_device *dev, void *data,
++                             struct drm_file *file_priv);
++extern int mach64_dma_buffers(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv);
++extern void mach64_driver_lastclose(struct drm_device * dev);
++
++extern int mach64_init_freelist(struct drm_device * dev);
++extern void mach64_destroy_freelist(struct drm_device * dev);
++extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv);
++extern int mach64_freelist_put(drm_mach64_private_t * dev_priv,
++                             struct drm_buf * copy_buf);
++
++extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
++                                 int entries);
++extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv);
++extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n);
++extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv);
++extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv);
++extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);
++extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);
++extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
++
++extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
++                                  drm_mach64_freelist_t *_entry);
++extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
++                                           drm_mach64_freelist_t *_entry);
++
++extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
++extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
++extern int mach64_do_cleanup_dma(struct drm_device * dev);
++
++                              /* mach64_state.c */
++extern int mach64_dma_clear(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++extern int mach64_dma_swap(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_dma_vertex(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv);
++extern int mach64_dma_blit(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mach64_get_param(struct drm_device *dev, void *data,
++                          struct drm_file *file_priv);
++
++extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int mach64_enable_vblank(struct drm_device *dev, int crtc);
++extern void mach64_disable_vblank(struct drm_device *dev, int crtc);
++extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
++extern void mach64_driver_irq_preinstall(struct drm_device *dev);
++extern int mach64_driver_irq_postinstall(struct drm_device *dev);
++extern void mach64_driver_irq_uninstall(struct drm_device *dev);
++
++/* ================================================================
++ * Registers
++ */
++
++#define MACH64_AGP_BASE                               0x0148
++#define MACH64_AGP_CNTL                               0x014c
++#define MACH64_ALPHA_TST_CNTL                 0x0550
++
++#define MACH64_DSP_CONFIG                     0x0420
++#define MACH64_DSP_ON_OFF                     0x0424
++#define MACH64_EXT_MEM_CNTL                   0x04ac
++#define MACH64_GEN_TEST_CNTL                  0x04d0
++#define MACH64_HW_DEBUG                               0x047c
++#define MACH64_MEM_ADDR_CONFIG                        0x0434
++#define MACH64_MEM_BUF_CNTL                   0x042c
++#define MACH64_MEM_CNTL                               0x04b0
++
++#define MACH64_BM_ADDR                                0x0648
++#define MACH64_BM_COMMAND                     0x0188
++#define MACH64_BM_DATA                                0x0648
++#define MACH64_BM_FRAME_BUF_OFFSET            0x0180
++#define MACH64_BM_GUI_TABLE                   0x01b8
++#define MACH64_BM_GUI_TABLE_CMD                       0x064c
++#     define MACH64_CIRCULAR_BUF_SIZE_16KB            (0 << 0)
++#     define MACH64_CIRCULAR_BUF_SIZE_32KB            (1 << 0)
++#     define MACH64_CIRCULAR_BUF_SIZE_64KB            (2 << 0)
++#     define MACH64_CIRCULAR_BUF_SIZE_128KB           (3 << 0)
++#     define MACH64_LAST_DESCRIPTOR                   (1 << 31)
++#define MACH64_BM_HOSTDATA                    0x0644
++#define MACH64_BM_STATUS                      0x018c
++#define MACH64_BM_SYSTEM_MEM_ADDR             0x0184
++#define MACH64_BM_SYSTEM_TABLE                        0x01bc
++#define MACH64_BUS_CNTL                               0x04a0
++#     define MACH64_BUS_MSTR_RESET                    (1 << 1)
++#     define MACH64_BUS_APER_REG_DIS                  (1 << 4)
++#     define MACH64_BUS_FLUSH_BUF                     (1 << 2)
++#     define MACH64_BUS_MASTER_DIS                    (1 << 6)
++#     define MACH64_BUS_EXT_REG_EN                    (1 << 27)
++
++#define MACH64_CLR_CMP_CLR                    0x0700
++#define MACH64_CLR_CMP_CNTL                   0x0708
++#define MACH64_CLR_CMP_MASK                   0x0704
++#define MACH64_CONFIG_CHIP_ID                 0x04e0
++#define MACH64_CONFIG_CNTL                    0x04dc
++#define MACH64_CONFIG_STAT0                   0x04e4
++#define MACH64_CONFIG_STAT1                   0x0494
++#define MACH64_CONFIG_STAT2                   0x0498
++#define MACH64_CONTEXT_LOAD_CNTL              0x072c
++#define MACH64_CONTEXT_MASK                   0x0720
++#define MACH64_COMPOSITE_SHADOW_ID            0x0798
++#define MACH64_CRC_SIG                                0x04e8
++#define MACH64_CUSTOM_MACRO_CNTL              0x04d4
++
++#define MACH64_DP_BKGD_CLR                    0x06c0
++#define MACH64_DP_FOG_CLR                     0x06c4
++#define MACH64_DP_FGRD_BKGD_CLR                       0x06e0
++#define MACH64_DP_FRGD_CLR                    0x06c4
++#define MACH64_DP_FGRD_CLR_MIX                        0x06dc
++
++#define MACH64_DP_MIX                         0x06d4
++#     define BKGD_MIX_NOT_D                           (0 << 0)
++#     define BKGD_MIX_ZERO                            (1 << 0)
++#     define BKGD_MIX_ONE                             (2 << 0)
++#     define MACH64_BKGD_MIX_D                        (3 << 0)
++#     define BKGD_MIX_NOT_S                           (4 << 0)
++#     define BKGD_MIX_D_XOR_S                         (5 << 0)
++#     define BKGD_MIX_NOT_D_XOR_S                     (6 << 0)
++#     define MACH64_BKGD_MIX_S                        (7 << 0)
++#     define BKGD_MIX_NOT_D_OR_NOT_S                  (8 << 0)
++#     define BKGD_MIX_D_OR_NOT_S                      (9 << 0)
++#     define BKGD_MIX_NOT_D_OR_S                      (10 << 0)
++#     define BKGD_MIX_D_OR_S                          (11 << 0)
++#     define BKGD_MIX_D_AND_S                         (12 << 0)
++#     define BKGD_MIX_NOT_D_AND_S                     (13 << 0)
++#     define BKGD_MIX_D_AND_NOT_S                     (14 << 0)
++#     define BKGD_MIX_NOT_D_AND_NOT_S                 (15 << 0)
++#     define BKGD_MIX_D_PLUS_S_DIV2                   (23 << 0)
++#     define FRGD_MIX_NOT_D                           (0 << 16)
++#     define FRGD_MIX_ZERO                            (1 << 16)
++#     define FRGD_MIX_ONE                             (2 << 16)
++#     define FRGD_MIX_D                               (3 << 16)
++#     define FRGD_MIX_NOT_S                           (4 << 16)
++#     define FRGD_MIX_D_XOR_S                         (5 << 16)
++#     define FRGD_MIX_NOT_D_XOR_S                     (6 << 16)
++#     define MACH64_FRGD_MIX_S                        (7 << 16)
++#     define FRGD_MIX_NOT_D_OR_NOT_S                  (8 << 16)
++#     define FRGD_MIX_D_OR_NOT_S                      (9 << 16)
++#     define FRGD_MIX_NOT_D_OR_S                      (10 << 16)
++#     define FRGD_MIX_D_OR_S                          (11 << 16)
++#     define FRGD_MIX_D_AND_S                         (12 << 16)
++#     define FRGD_MIX_NOT_D_AND_S                     (13 << 16)
++#     define FRGD_MIX_D_AND_NOT_S                     (14 << 16)
++#     define FRGD_MIX_NOT_D_AND_NOT_S                 (15 << 16)
++#     define FRGD_MIX_D_PLUS_S_DIV2                   (23 << 16)
++
++#define MACH64_DP_PIX_WIDTH                   0x06d0
++#     define MACH64_HOST_TRIPLE_ENABLE                (1 << 13)
++#     define MACH64_BYTE_ORDER_MSB_TO_LSB             (0 << 24)
++#     define MACH64_BYTE_ORDER_LSB_TO_MSB             (1 << 24)
++
++#define MACH64_DP_SRC                         0x06d8
++#     define MACH64_BKGD_SRC_BKGD_CLR                 (0 << 0)
++#     define MACH64_BKGD_SRC_FRGD_CLR                 (1 << 0)
++#     define MACH64_BKGD_SRC_HOST                     (2 << 0)
++#     define MACH64_BKGD_SRC_BLIT                     (3 << 0)
++#     define MACH64_BKGD_SRC_PATTERN                  (4 << 0)
++#     define MACH64_BKGD_SRC_3D                       (5 << 0)
++#     define MACH64_FRGD_SRC_BKGD_CLR                 (0 << 8)
++#     define MACH64_FRGD_SRC_FRGD_CLR                 (1 << 8)
++#     define MACH64_FRGD_SRC_HOST                     (2 << 8)
++#     define MACH64_FRGD_SRC_BLIT                     (3 << 8)
++#     define MACH64_FRGD_SRC_PATTERN                  (4 << 8)
++#     define MACH64_FRGD_SRC_3D                       (5 << 8)
++#     define MACH64_MONO_SRC_ONE                      (0 << 16)
++#     define MACH64_MONO_SRC_PATTERN                  (1 << 16)
++#     define MACH64_MONO_SRC_HOST                     (2 << 16)
++#     define MACH64_MONO_SRC_BLIT                     (3 << 16)
++
++#define MACH64_DP_WRITE_MASK                  0x06c8
++
++#define MACH64_DST_CNTL                               0x0530
++#     define MACH64_DST_X_RIGHT_TO_LEFT               (0 << 0)
++#     define MACH64_DST_X_LEFT_TO_RIGHT               (1 << 0)
++#     define MACH64_DST_Y_BOTTOM_TO_TOP               (0 << 1)
++#     define MACH64_DST_Y_TOP_TO_BOTTOM               (1 << 1)
++#     define MACH64_DST_X_MAJOR                       (0 << 2)
++#     define MACH64_DST_Y_MAJOR                       (1 << 2)
++#     define MACH64_DST_X_TILE                        (1 << 3)
++#     define MACH64_DST_Y_TILE                        (1 << 4)
++#     define MACH64_DST_LAST_PEL                      (1 << 5)
++#     define MACH64_DST_POLYGON_ENABLE                (1 << 6)
++#     define MACH64_DST_24_ROTATION_ENABLE            (1 << 7)
++
++#define MACH64_DST_HEIGHT_WIDTH                       0x0518
++#define MACH64_DST_OFF_PITCH                  0x0500
++#define MACH64_DST_WIDTH_HEIGHT                       0x06ec
++#define MACH64_DST_X_Y                                0x06e8
++#define MACH64_DST_Y_X                                0x050c
++
++#define MACH64_FIFO_STAT                      0x0710
++#     define MACH64_FIFO_SLOT_MASK                    0x0000ffff
++#     define MACH64_FIFO_ERR                          (1 << 31)
++
++#define MACH64_GEN_TEST_CNTL                  0x04d0
++#     define MACH64_GUI_ENGINE_ENABLE                 (1 << 8)
++#define MACH64_GUI_CMDFIFO_DEBUG              0x0170
++#define MACH64_GUI_CMDFIFO_DATA                       0x0174
++#define MACH64_GUI_CNTL                               0x0178
++#       define MACH64_CMDFIFO_SIZE_MASK                 0x00000003ul
++#       define MACH64_CMDFIFO_SIZE_192                  0x00000000ul
++#       define MACH64_CMDFIFO_SIZE_128                  0x00000001ul
++#       define MACH64_CMDFIFO_SIZE_64                   0x00000002ul
++#define MACH64_GUI_STAT                               0x0738
++#     define MACH64_GUI_ACTIVE                        (1 << 0)
++#define MACH64_GUI_TRAJ_CNTL                  0x0730
++
++#define MACH64_HOST_CNTL                      0x0640
++#define MACH64_HOST_DATA0                     0x0600
++
++#define MACH64_ONE_OVER_AREA                  0x029c
++#define MACH64_ONE_OVER_AREA_UC                       0x0300
++
++#define MACH64_PAT_REG0                               0x0680
++#define MACH64_PAT_REG1                               0x0684
++
++#define MACH64_SC_LEFT                          0x06a0
++#define MACH64_SC_RIGHT                         0x06a4
++#define MACH64_SC_LEFT_RIGHT                    0x06a8
++#define MACH64_SC_TOP                           0x06ac
++#define MACH64_SC_BOTTOM                        0x06b0
++#define MACH64_SC_TOP_BOTTOM                    0x06b4
++
++#define MACH64_SCALE_3D_CNTL                  0x05fc
++#define MACH64_SCRATCH_REG0                   0x0480
++#define MACH64_SCRATCH_REG1                   0x0484
++#define MACH64_SECONDARY_TEX_OFF              0x0778
++#define MACH64_SETUP_CNTL                     0x0304
++#define MACH64_SRC_CNTL                               0x05b4
++#     define MACH64_SRC_BM_ENABLE                     (1 << 8)
++#     define MACH64_SRC_BM_SYNC                       (1 << 9)
++#     define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM         (0 << 10)
++#     define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME         (1 << 10)
++#     define MACH64_SRC_BM_OP_REG_TO_SYSTEM           (2 << 10)
++#     define MACH64_SRC_BM_OP_SYSTEM_TO_REG           (3 << 10)
++#define MACH64_SRC_HEIGHT1                    0x0594
++#define MACH64_SRC_HEIGHT2                    0x05ac
++#define MACH64_SRC_HEIGHT1_WIDTH1             0x0598
++#define MACH64_SRC_HEIGHT2_WIDTH2             0x05b0
++#define MACH64_SRC_OFF_PITCH                  0x0580
++#define MACH64_SRC_WIDTH1                     0x0590
++#define MACH64_SRC_Y_X                                0x058c
++
++#define MACH64_TEX_0_OFF                      0x05c0
++#define MACH64_TEX_CNTL                               0x0774
++#define MACH64_TEX_SIZE_PITCH                 0x0770
++#define MACH64_TIMER_CONFIG                   0x0428
++
++#define MACH64_VERTEX_1_ARGB                  0x0254
++#define MACH64_VERTEX_1_S                     0x0240
++#define MACH64_VERTEX_1_SECONDARY_S           0x0328
++#define MACH64_VERTEX_1_SECONDARY_T           0x032c
++#define MACH64_VERTEX_1_SECONDARY_W           0x0330
++#define MACH64_VERTEX_1_SPEC_ARGB             0x024c
++#define MACH64_VERTEX_1_T                     0x0244
++#define MACH64_VERTEX_1_W                     0x0248
++#define MACH64_VERTEX_1_X_Y                   0x0258
++#define MACH64_VERTEX_1_Z                     0x0250
++#define MACH64_VERTEX_2_ARGB                  0x0274
++#define MACH64_VERTEX_2_S                     0x0260
++#define MACH64_VERTEX_2_SECONDARY_S           0x0334
++#define MACH64_VERTEX_2_SECONDARY_T           0x0338
++#define MACH64_VERTEX_2_SECONDARY_W           0x033c
++#define MACH64_VERTEX_2_SPEC_ARGB             0x026c
++#define MACH64_VERTEX_2_T                     0x0264
++#define MACH64_VERTEX_2_W                     0x0268
++#define MACH64_VERTEX_2_X_Y                   0x0278
++#define MACH64_VERTEX_2_Z                     0x0270
++#define MACH64_VERTEX_3_ARGB                  0x0294
++#define MACH64_VERTEX_3_S                     0x0280
++#define MACH64_VERTEX_3_SECONDARY_S           0x02a0
++#define MACH64_VERTEX_3_SECONDARY_T           0x02a4
++#define MACH64_VERTEX_3_SECONDARY_W           0x02a8
++#define MACH64_VERTEX_3_SPEC_ARGB             0x028c
++#define MACH64_VERTEX_3_T                     0x0284
++#define MACH64_VERTEX_3_W                     0x0288
++#define MACH64_VERTEX_3_X_Y                   0x0298
++#define MACH64_VERTEX_3_Z                     0x0290
++
++#define MACH64_Z_CNTL                         0x054c
++#define MACH64_Z_OFF_PITCH                    0x0548
++
++#define MACH64_CRTC_VLINE_CRNT_VLINE          0x0410
++#     define MACH64_CRTC_VLINE_MASK                   0x000007ff
++#     define MACH64_CRTC_CRNT_VLINE_MASK              0x07ff0000
++#define MACH64_CRTC_OFF_PITCH                 0x0414
++#define MACH64_CRTC_INT_CNTL                  0x0418
++#     define MACH64_CRTC_VBLANK                       (1 << 0)
++#     define MACH64_CRTC_VBLANK_INT_EN                (1 << 1)
++#     define MACH64_CRTC_VBLANK_INT                   (1 << 2)
++#     define MACH64_CRTC_VLINE_INT_EN                 (1 << 3)
++#     define MACH64_CRTC_VLINE_INT                    (1 << 4)
++#     define MACH64_CRTC_VLINE_SYNC                   (1 << 5)        /* 0=even, 1=odd */
++#     define MACH64_CRTC_FRAME                        (1 << 6)        /* 0=even, 1=odd */
++#     define MACH64_CRTC_SNAPSHOT_INT_EN              (1 << 7)
++#     define MACH64_CRTC_SNAPSHOT_INT                 (1 << 8)
++#     define MACH64_CRTC_I2C_INT_EN                   (1 << 9)
++#     define MACH64_CRTC_I2C_INT                      (1 << 10)
++#     define MACH64_CRTC2_VBLANK                      (1 << 11)       /* LT Pro */
++#     define MACH64_CRTC2_VBLANK_INT_EN               (1 << 12)       /* LT Pro */
++#     define MACH64_CRTC2_VBLANK_INT                  (1 << 13)       /* LT Pro */
++#     define MACH64_CRTC2_VLINE_INT_EN                (1 << 14)       /* LT Pro */
++#     define MACH64_CRTC2_VLINE_INT                   (1 << 15)       /* LT Pro */
++#     define MACH64_CRTC_CAPBUF0_INT_EN               (1 << 16)
++#     define MACH64_CRTC_CAPBUF0_INT                  (1 << 17)
++#     define MACH64_CRTC_CAPBUF1_INT_EN               (1 << 18)
++#     define MACH64_CRTC_CAPBUF1_INT                  (1 << 19)
++#     define MACH64_CRTC_OVERLAY_EOF_INT_EN           (1 << 20)
++#     define MACH64_CRTC_OVERLAY_EOF_INT              (1 << 21)
++#     define MACH64_CRTC_ONESHOT_CAP_INT_EN           (1 << 22)
++#     define MACH64_CRTC_ONESHOT_CAP_INT              (1 << 23)
++#     define MACH64_CRTC_BUSMASTER_EOL_INT_EN         (1 << 24)
++#     define MACH64_CRTC_BUSMASTER_EOL_INT            (1 << 25)
++#     define MACH64_CRTC_GP_INT_EN                    (1 << 26)
++#     define MACH64_CRTC_GP_INT                       (1 << 27)
++#     define MACH64_CRTC2_VLINE_SYNC                  (1 << 28) /* LT Pro */  /* 0=even, 1=odd */
++#     define MACH64_CRTC_SNAPSHOT2_INT_EN             (1 << 29)       /* LT Pro */
++#     define MACH64_CRTC_SNAPSHOT2_INT                (1 << 30)       /* LT Pro */
++#     define MACH64_CRTC_VBLANK2_INT                  (1 << 31)
++#     define MACH64_CRTC_INT_ENS                              \
++              (                                               \
++                      MACH64_CRTC_VBLANK_INT_EN |             \
++                      MACH64_CRTC_VLINE_INT_EN |              \
++                      MACH64_CRTC_SNAPSHOT_INT_EN |           \
++                      MACH64_CRTC_I2C_INT_EN |                \
++                      MACH64_CRTC2_VBLANK_INT_EN |            \
++                      MACH64_CRTC2_VLINE_INT_EN |             \
++                      MACH64_CRTC_CAPBUF0_INT_EN |            \
++                      MACH64_CRTC_CAPBUF1_INT_EN |            \
++                      MACH64_CRTC_OVERLAY_EOF_INT_EN |        \
++                      MACH64_CRTC_ONESHOT_CAP_INT_EN |        \
++                      MACH64_CRTC_BUSMASTER_EOL_INT_EN |      \
++                      MACH64_CRTC_GP_INT_EN |                 \
++                      MACH64_CRTC_SNAPSHOT2_INT_EN |          \
++                      0                                       \
++              )
++#     define MACH64_CRTC_INT_ACKS                     \
++              (                                       \
++                      MACH64_CRTC_VBLANK_INT |        \
++                      MACH64_CRTC_VLINE_INT |         \
++                      MACH64_CRTC_SNAPSHOT_INT |      \
++                      MACH64_CRTC_I2C_INT |           \
++                      MACH64_CRTC2_VBLANK_INT |       \
++                      MACH64_CRTC2_VLINE_INT |        \
++                      MACH64_CRTC_CAPBUF0_INT |       \
++                      MACH64_CRTC_CAPBUF1_INT |       \
++                      MACH64_CRTC_OVERLAY_EOF_INT |   \
++                      MACH64_CRTC_ONESHOT_CAP_INT |   \
++                      MACH64_CRTC_BUSMASTER_EOL_INT | \
++                      MACH64_CRTC_GP_INT |            \
++                      MACH64_CRTC_SNAPSHOT2_INT |     \
++                      MACH64_CRTC_VBLANK2_INT |       \
++                      0                               \
++              )
++
++#define MACH64_DATATYPE_CI8                           2
++#define MACH64_DATATYPE_ARGB1555                      3
++#define MACH64_DATATYPE_RGB565                                4
++#define MACH64_DATATYPE_ARGB8888                      6
++#define MACH64_DATATYPE_RGB332                                7
++#define MACH64_DATATYPE_Y8                            8
++#define MACH64_DATATYPE_RGB8                          9
++#define MACH64_DATATYPE_VYUY422                               11
++#define MACH64_DATATYPE_YVYU422                               12
++#define MACH64_DATATYPE_AYUV444                               14
++#define MACH64_DATATYPE_ARGB4444                      15
++
++#define MACH64_READ(reg)      DRM_READ32(dev_priv->mmio, (reg) )
++#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) )
++
++#define DWMREG0               0x0400
++#define DWMREG0_END   0x07ff
++#define DWMREG1               0x0000
++#define DWMREG1_END   0x03ff
++
++#define ISREG0(r)     (((r) >= DWMREG0) && ((r) <= DWMREG0_END))
++#define DMAREG0(r)    (((r) - DWMREG0) >> 2)
++#define DMAREG1(r)    ((((r) - DWMREG1) >> 2 ) | 0x0100)
++#define DMAREG(r)     (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
++
++#define MMREG0                0x0000
++#define MMREG0_END    0x00ff
++
++#define ISMMREG0(r)   (((r) >= MMREG0) && ((r) <= MMREG0_END))
++#define MMSELECT0(r)  (((r) << 2) + DWMREG0)
++#define MMSELECT1(r)  (((((r) & 0xff) << 2) + DWMREG1))
++#define MMSELECT(r)   (ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r))
++
++/* ================================================================
++ * DMA constants
++ */
++
++/* DMA descriptor field indices:
++ * The descriptor fields are loaded into the read-only
++ * BM_* system bus master registers during a bus-master operation
++ */
++#define MACH64_DMA_FRAME_BUF_OFFSET   0       /* BM_FRAME_BUF_OFFSET */
++#define MACH64_DMA_SYS_MEM_ADDR               1       /* BM_SYSTEM_MEM_ADDR */
++#define MACH64_DMA_COMMAND            2       /* BM_COMMAND */
++#define MACH64_DMA_RESERVED           3       /* BM_STATUS */
++
++/* BM_COMMAND descriptor field flags */
++#define MACH64_DMA_HOLD_OFFSET                (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */
++#define MACH64_DMA_EOL                        (1<<31) /* End of descriptor list flag */
++
++#define MACH64_DMA_CHUNKSIZE          0x1000  /* 4kB per DMA descriptor */
++#define MACH64_APERTURE_OFFSET                0x7ff800        /* frame-buffer offset for gui-masters */
++
++/* ================================================================
++ * Ring operations
++ *
++ * Since the Mach64 bus master engine requires polling, these functions end
++ * up being called frequently, hence being inline.
++ */
++
++static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                ring->head_addr, ring->head, ring->tail, ring->space);
++
++      if (mach64_do_wait_for_idle(dev_priv) < 0) {
++              mach64_do_engine_reset(dev_priv);
++      }
++
++      if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
++              /* enable bus mastering and block 1 registers */
++              MACH64_WRITE(MACH64_BUS_CNTL,
++                           (MACH64_READ(MACH64_BUS_CNTL) &
++                            ~MACH64_BUS_MASTER_DIS)
++                           | MACH64_BUS_EXT_REG_EN);
++              mach64_do_wait_for_idle(dev_priv);
++      }
++
++      /* reset descriptor table ring head */
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      dev_priv->ring_running = 1;
++}
++
++static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
++                                        drm_mach64_descriptor_ring_t * ring)
++{
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                ring->head_addr, ring->head, ring->tail, ring->space);
++
++      /* reset descriptor table ring head */
++      MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
++                   ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
++
++      if (dev_priv->driver_mode == MACH64_MODE_MMIO) {
++              mach64_do_dispatch_pseudo_dma(dev_priv);
++      } else {
++              /* enable GUI bus mastering, and sync the bus master to the GUI */
++              MACH64_WRITE(MACH64_SRC_CNTL,
++                           MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
++                           MACH64_SRC_BM_OP_SYSTEM_TO_REG);
++
++              /* kick off the transfer */
++              MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
++              if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) {
++                      if ((mach64_do_wait_for_idle(dev_priv)) < 0) {
++                              DRM_ERROR("idle failed, resetting engine\n");
++                              mach64_dump_engine_info(dev_priv);
++                              mach64_do_engine_reset(dev_priv);
++                              return;
++                      }
++                      mach64_do_release_used_buffers(dev_priv);
++              }
++      }
++}
++
++/**
++ * Poll the ring head and make sure the bus master is alive.
++ * 
++ * Mach64's bus master engine will stop if there are no more entries to process.
++ * This function polls the engine for the last processed entry and calls 
++ * mach64_ring_resume if there is an unprocessed entry.
++ * 
++ * Note also that, since we update the ring tail while the bus master engine is 
++ * in operation, it is possible that the last tail update was too late to be 
++ * processed, and the bus master engine stops at the previous tail position. 
++ * Therefore it is important to call this function frequently. 
++ */
++static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
++                                      drm_mach64_descriptor_ring_t * ring)
++{
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                ring->head_addr, ring->head, ring->tail, ring->space);
++
++      if (!dev_priv->ring_running) {
++              mach64_ring_start(dev_priv);
++
++              if (ring->head != ring->tail) {
++                      mach64_ring_resume(dev_priv, ring);
++              }
++      } else {
++              /* GUI_ACTIVE must be read before BM_GUI_TABLE to
++               * correctly determine the ring head
++               */
++              int gui_active =
++                  MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
++
++              ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0;
++
++              if (gui_active) {
++                      /* If not idle, BM_GUI_TABLE points one descriptor
++                       * past the current head
++                       */
++                      if (ring->head_addr == ring->start_addr) {
++                              ring->head_addr += ring->size;
++                      }
++                      ring->head_addr -= 4 * sizeof(u32);
++              }
++
++              if (ring->head_addr < ring->start_addr ||
++                  ring->head_addr >= ring->start_addr + ring->size) {
++                      DRM_ERROR("bad ring head address: 0x%08x\n",
++                                ring->head_addr);
++                      mach64_dump_ring_info(dev_priv);
++                      mach64_do_engine_reset(dev_priv);
++                      return;
++              }
++
++              ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32);
++
++              if (!gui_active && ring->head != ring->tail) {
++                      mach64_ring_resume(dev_priv, ring);
++              }
++      }
++}
++
++static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv)
++{
++      DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
++                dev_priv->ring.head_addr, dev_priv->ring.head,
++                dev_priv->ring.tail, dev_priv->ring.space);
++
++      /* restore previous SRC_CNTL to disable busmastering */
++      mach64_do_wait_for_fifo(dev_priv, 1);
++      MACH64_WRITE(MACH64_SRC_CNTL, 0);
++
++      /* disable busmastering but keep the block 1 registers enabled */
++      mach64_do_wait_for_idle(dev_priv);
++      MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL)
++                   | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN);
++
++      dev_priv->ring_running = 0;
++}
++
++static __inline__ void
++mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++
++      DRM_DEBUG("\n");
++
++      mach64_ring_tick(dev_priv, ring);
++
++      ring->space = (ring->head - ring->tail) * sizeof(u32);
++      if (ring->space <= 0) {
++              ring->space += ring->size;
++      }
++}
++
++/* ================================================================
++ * DMA macros
++ * 
++ * Mach64's ring buffer doesn't take register writes directly. These 
++ * have to be written indirectly in DMA buffers. These macros simplify 
++ * the task of setting up a buffer, writing commands to it, and 
++ * queuing the buffer in the ring. 
++ */
++
++#define DMALOCALS                             \
++      drm_mach64_freelist_t *_entry = NULL;   \
++      struct drm_buf *_buf = NULL;            \
++      u32 *_buf_wptr; int _outcount
++
++#define GETBUFPTR( __buf )                                            \
++((dev_priv->is_pci) ?                                                 \
++      ((u32 *)(__buf)->address) :                                     \
++      ((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))
++
++#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)
++
++#define GETRINGOFFSET() (_entry->ring_ofs)
++
++static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
++                                                  dev_priv,
++                                                  drm_mach64_freelist_t **
++                                                  entry, struct drm_buf * buf)
++{
++      struct list_head *ptr;
++#if MACH64_EXTRA_CHECKING
++      if (list_empty(&dev_priv->pending)) {
++              DRM_ERROR("Empty pending list in \n");
++              return -EINVAL;
++      }
++#endif
++      ptr = dev_priv->pending.prev;
++      *entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      while ((*entry)->buf != buf) {
++              if (ptr == &dev_priv->pending) {
++                      return -EFAULT;
++              }
++              ptr = ptr->prev;
++              *entry = list_entry(ptr, drm_mach64_freelist_t, list);
++      }
++      return 0;
++}
++
++#define DMASETPTR( _p )                               \
++do {                                          \
++      _buf = (_p);                            \
++      _outcount = 0;                          \
++      _buf_wptr = GETBUFPTR( _buf );          \
++} while(0)
++
++/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
++#define DMAGETPTR( file_priv, dev_priv, n )                           \
++do {                                                                  \
++      if ( MACH64_VERBOSE ) {                                         \
++              DRM_INFO( "DMAGETPTR( %d )\n", (n) );                   \
++      }                                                               \
++      _buf = mach64_freelist_get( dev_priv );                         \
++      if (_buf == NULL) {                                             \
++              DRM_ERROR("couldn't get buffer in DMAGETPTR\n");        \
++              return -EAGAIN;                                 \
++      }                                                               \
++      if (_buf->pending) {                                            \
++              DRM_ERROR("pending buf in DMAGETPTR\n");                \
++              return -EFAULT;                                 \
++      }                                                               \
++      _buf->file_priv = file_priv;                                    \
++      _outcount = 0;                                                  \
++                                                                      \
++        _buf_wptr = GETBUFPTR( _buf );                                        \
++} while (0)
++
++#define DMAOUTREG( reg, val )                                 \
++do {                                                          \
++      if ( MACH64_VERBOSE ) {                                 \
++              DRM_INFO( "   DMAOUTREG( 0x%x = 0x%08x )\n",    \
++                        reg, val );                           \
++      }                                                       \
++      _buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg));      \
++      _buf_wptr[_outcount++] = cpu_to_le32((val));            \
++      _buf->used += 8;                                        \
++} while (0)
++
++#define DMAADVANCE( dev_priv, _discard )                              \
++      do {                                                            \
++              struct list_head *ptr;                                  \
++              int ret;                                                \
++                                                                      \
++              if ( MACH64_VERBOSE ) {                                 \
++                      DRM_INFO( "DMAADVANCE() in \n" );               \
++              }                                                       \
++                                                                      \
++              if (_buf->used <= 0) {                                  \
++                      DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \
++                                 _buf->idx );                         \
++                      return -EFAULT;                                 \
++              }                                                       \
++              if (_buf->pending) {                                    \
++                      /* This is a resued buffer, so we need to find it in the pending list */ \
++                      if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
++                              DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \
++                              return ret;                             \
++                      }                                               \
++                      if (_entry->discard) {                          \
++                              DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \
++                              return -EFAULT;                         \
++                      }                                               \
++              } else {                                                \
++                      if (list_empty(&dev_priv->placeholders)) {      \
++                              DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \
++                              return -EFAULT;                         \
++                      }                                               \
++                      ptr = dev_priv->placeholders.next;              \
++                      list_del(ptr);                                  \
++                      _entry = list_entry(ptr, drm_mach64_freelist_t, list); \
++                      _buf->pending = 1;                              \
++                      _entry->buf = _buf;                             \
++                      list_add_tail(ptr, &dev_priv->pending);         \
++              }                                                       \
++              _entry->discard = (_discard);                           \
++              if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \
++                      return ret;                                     \
++      } while (0)
++
++#define DMADISCARDBUF()                                                       \
++      do {                                                            \
++              if (_entry == NULL) {                                   \
++                      int ret;                                        \
++                      if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
++                              DRM_ERROR( "couldn't find pending buf %d\n", \
++                                         _buf->idx );                 \
++                              return ret;                             \
++                      }                                               \
++              }                                                       \
++              _entry->discard = 1;                                    \
++      } while(0)
++
++#define DMAADVANCEHOSTDATA( dev_priv )                                        \
++      do {                                                            \
++              struct list_head *ptr;                                  \
++              int ret;                                                \
++                                                                      \
++              if ( MACH64_VERBOSE ) {                                 \
++                      DRM_INFO( "DMAADVANCEHOSTDATA() in \n" );       \
++              }                                                       \
++                                                                      \
++              if (_buf->used <= 0) {                                  \
++                      DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \
++                      return -EFAULT;                                 \
++              }                                                       \
++              if (list_empty(&dev_priv->placeholders)) {              \
++                      DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \
++                      return -EFAULT;                                 \
++              }                                                       \
++                                                                      \
++              ptr = dev_priv->placeholders.next;                      \
++              list_del(ptr);                                          \
++              _entry = list_entry(ptr, drm_mach64_freelist_t, list);  \
++              _entry->buf = _buf;                                     \
++              _entry->buf->pending = 1;                               \
++              list_add_tail(ptr, &dev_priv->pending);                 \
++              _entry->discard = 1;                                    \
++              if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \
++                      return ret;                                     \
++      } while (0)
++
++#endif                                /* __MACH64_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_irq.c git-nokia/drivers/gpu/drm-tungsten/mach64_irq.c
+--- git/drivers/gpu/drm-tungsten/mach64_irq.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_irq.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,159 @@
++/* mach64_irq.c -- IRQ handling for ATI Mach64 -*- linux-c -*-
++ * Created: Tue Feb 25, 2003 by Leif Delgass, based on radeon_irq.c/r128_irq.c
++ */
++/*-
++ * Copyright (C) The Weather Channel, Inc.  2002.
++ * Copyright 2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Eric Anholt <anholt@FreeBSD.org>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = arg;
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      int status;
++
++      status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      /* VBLANK interrupt */
++      if (status & MACH64_CRTC_VBLANK_INT) {
++              /* Mask off all interrupt ack bits before setting the ack bit, since
++               * there may be other handlers outside the DRM.
++               *
++               * NOTE: On mach64, you need to keep the enable bits set when doing
++               * the ack, despite what the docs say about not acking and enabling
++               * in a single write.
++               */
++              MACH64_WRITE(MACH64_CRTC_INT_CNTL,
++                           (status & ~MACH64_CRTC_INT_ACKS)
++                           | MACH64_CRTC_VBLANK_INT);
++
++              atomic_inc(&dev_priv->vbl_received);
++              drm_handle_vblank(dev, 0);
++              return IRQ_HANDLED;
++      }
++      return IRQ_NONE;
++}
++
++u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)
++{
++      const drm_mach64_private_t *const dev_priv = dev->dev_private;
++
++      if (crtc != 0)
++              return 0;
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++int mach64_enable_vblank(struct drm_device * dev, int crtc)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      if (crtc != 0) {
++              DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                        crtc);
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("before enable vblank CRTC_INT_CTNL: 0x%08x\n", status);
++
++      /* Turn on VBLANK interrupt */
++      MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
++                   | MACH64_CRTC_VBLANK_INT_EN);
++
++      return 0;
++}
++
++void mach64_disable_vblank(struct drm_device * dev, int crtc)
++{
++      if (crtc != 0) {
++              DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                        crtc);
++              return;
++      }
++
++      /*
++       * FIXME: implement proper interrupt disable by using the vblank
++       * counter register (if available).
++       */
++}
++
++static void mach64_disable_vblank_local(struct drm_device * dev, int crtc)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      if (crtc != 0) {
++              DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                        crtc);
++              return;
++      }
++
++      DRM_DEBUG("before disable vblank CRTC_INT_CTNL: 0x%08x\n", status);
++
++      /* Disable and clear VBLANK interrupt */
++      MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
++                   | MACH64_CRTC_VBLANK_INT);
++}
++
++void mach64_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++
++      u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
++
++      DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status);
++
++      mach64_disable_vblank_local(dev, 0);
++}
++
++int mach64_driver_irq_postinstall(struct drm_device * dev)
++{
++      return drm_vblank_init(dev, 1);
++}
++
++void mach64_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      mach64_disable_vblank_local(dev, 0);
++
++      DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
++                MACH64_READ(MACH64_CRTC_INT_CNTL));
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mach64_state.c git-nokia/drivers/gpu/drm-tungsten/mach64_state.c
+--- git/drivers/gpu/drm-tungsten/mach64_state.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mach64_state.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,910 @@
++/* mach64_state.c -- State support for mach64 (Rage Pro) driver -*- linux-c -*-
++ * Created: Sun Dec 03 19:20:26 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 Gareth Hughes
++ * Copyright 2002-2003 Leif Delgass
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Leif Delgass <ldelgass@retinalburn.net>
++ *    José Fonseca <j_r_fonseca@yahoo.co.uk>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mach64_drm.h"
++#include "mach64_drv.h"
++
++/* Interface history:
++ *
++ * 1.0 - Initial mach64 DRM
++ *
++ */
++struct drm_ioctl_desc mach64_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
++};
++
++int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
++
++/* ================================================================
++ * DMA hardware state programming functions
++ */
++
++static void mach64_print_dirty(const char *msg, unsigned int flags)
++{
++      DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
++                msg,
++                flags,
++                (flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " :
++                "",
++                (flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "",
++                (flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " :
++                "", (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
++                (flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " :
++                "",
++                (flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "",
++                (flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "",
++                (flags & MACH64_UPLOAD_MISC) ? "misc, " : "",
++                (flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "",
++                (flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "",
++                (flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "",
++                (flags & MACH64_UPLOAD_CLIPRECTS) ? "cliprects, " : "");
++}
++
++/* Mach64 doesn't have hardware cliprects, just one hardware scissor,
++ * so the GL scissor is intersected with each cliprect here
++ */
++/* This function returns 0 on success, 1 for no intersection, and
++ * negative for an error
++ */
++static int mach64_emit_cliprect(struct drm_file *file_priv,
++                              drm_mach64_private_t * dev_priv,
++                              struct drm_clip_rect * box)
++{
++      u32 sc_left_right, sc_top_bottom;
++      struct drm_clip_rect scissor;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
++      DMALOCALS;
++
++      DRM_DEBUG("box=%p\n", box);
++
++      /* Get GL scissor */
++      /* FIXME: store scissor in SAREA as a cliprect instead of in
++       * hardware format, or do intersection client-side
++       */
++      scissor.x1 = regs->sc_left_right & 0xffff;
++      scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16;
++      scissor.y1 = regs->sc_top_bottom & 0xffff;
++      scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16;
++
++      /* Intersect GL scissor with cliprect */
++      if (box->x1 > scissor.x1)
++              scissor.x1 = box->x1;
++      if (box->y1 > scissor.y1)
++              scissor.y1 = box->y1;
++      if (box->x2 < scissor.x2)
++              scissor.x2 = box->x2;
++      if (box->y2 < scissor.y2)
++              scissor.y2 = box->y2;
++      /* positive return means skip */
++      if (scissor.x1 >= scissor.x2)
++              return 1;
++      if (scissor.y1 >= scissor.y2)
++              return 1;
++
++      DMAGETPTR(file_priv, dev_priv, 2);      /* returns on failure to get buffer */
++
++      sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
++      sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));
++
++      DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right);
++      DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom);
++
++      DMAADVANCE(dev_priv, 1);
++
++      return 0;
++}
++
++static __inline__ int mach64_emit_state(struct drm_file *file_priv,
++                                      drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
++      unsigned int dirty = sarea_priv->dirty;
++      u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2);
++      DMALOCALS;
++
++      if (MACH64_VERBOSE) {
++              mach64_print_dirty(__FUNCTION__, dirty);
++      } else {
++              DRM_DEBUG("dirty=0x%08x\n", dirty);
++      }
++
++      DMAGETPTR(file_priv, dev_priv, 17);     /* returns on failure to get buffer */
++
++      if (dirty & MACH64_UPLOAD_MISC) {
++              DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
++              DMAOUTREG(MACH64_DP_SRC, regs->dp_src);
++              DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl);
++              DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_MISC;
++      }
++
++      if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) {
++              DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH;
++      }
++      if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) {
++              DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH;
++      }
++      if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) {
++              DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl);
++              DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL;
++      }
++      if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) {
++              DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL;
++      }
++      if (dirty & MACH64_UPLOAD_DP_FOG_CLR) {
++              DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR;
++      }
++      if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) {
++              DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK;
++      }
++      if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) {
++              DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH;
++      }
++      if (dirty & MACH64_UPLOAD_SETUP_CNTL) {
++              DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL;
++      }
++
++      if (dirty & MACH64_UPLOAD_TEXTURE) {
++              DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch);
++              DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl);
++              DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off);
++              DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset);
++              sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE;
++      }
++
++      DMAADVANCE(dev_priv, 1);
++
++      sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS;
++
++      return 0;
++
++}
++
++/* ================================================================
++ * DMA command dispatch functions
++ */
++
++static int mach64_dma_dispatch_clear(struct drm_device * dev,
++                                   struct drm_file *file_priv,
++                                   unsigned int flags,
++                                   int cx, int cy, int cw, int ch,
++                                   unsigned int clear_color,
++                                   unsigned int clear_depth)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      u32 fb_bpp, depth_bpp;
++      int i;
++      DMALOCALS;
++
++      DRM_DEBUG("\n");
++
++      switch (dev_priv->fb_bpp) {
++      case 16:
++              fb_bpp = MACH64_DATATYPE_RGB565;
++              break;
++      case 32:
++              fb_bpp = MACH64_DATATYPE_ARGB8888;
++              break;
++      default:
++              return -EINVAL;
++      }
++      switch (dev_priv->depth_bpp) {
++      case 16:
++              depth_bpp = MACH64_DATATYPE_RGB565;
++              break;
++      case 24:
++      case 32:
++              depth_bpp = MACH64_DATATYPE_ARGB8888;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (!nbox)
++              return 0;
++
++      DMAGETPTR(file_priv, dev_priv, nbox * 31);      /* returns on failure to get buffer */
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
++                        pbox[i].x1, pbox[i].y1,
++                        pbox[i].x2, pbox[i].y2, flags);
++
++              if (flags & (MACH64_FRONT | MACH64_BACK)) {
++                      /* Setup for color buffer clears
++                       */
++
++                      DMAOUTREG(MACH64_Z_CNTL, 0);
++                      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++                      DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
++                      DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
++
++                      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++                      DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++                                (MACH64_DST_X_LEFT_TO_RIGHT |
++                                 MACH64_DST_Y_TOP_TO_BOTTOM));
++
++                      DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
++                                                      (fb_bpp << 4) |
++                                                      (fb_bpp << 8) |
++                                                      (fb_bpp << 16) |
++                                                      (fb_bpp << 28)));
++
++                      DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color);
++                      DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask);
++                      DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
++                                                MACH64_FRGD_MIX_S));
++                      DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
++                                                MACH64_FRGD_SRC_FRGD_CLR |
++                                                MACH64_MONO_SRC_ONE));
++
++              }
++
++              if (flags & MACH64_FRONT) {
++
++                      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                                dev_priv->front_offset_pitch);
++                      DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++                      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++              }
++
++              if (flags & MACH64_BACK) {
++
++                      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                                dev_priv->back_offset_pitch);
++                      DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++                      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++              }
++
++              if (flags & MACH64_DEPTH) {
++                      /* Setup for depth buffer clear
++                       */
++                      DMAOUTREG(MACH64_Z_CNTL, 0);
++                      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++                      DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
++                      DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
++
++                      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++                      DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++                                (MACH64_DST_X_LEFT_TO_RIGHT |
++                                 MACH64_DST_Y_TOP_TO_BOTTOM));
++
++                      DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) |
++                                                      (depth_bpp << 4) |
++                                                      (depth_bpp << 8) |
++                                                      (depth_bpp << 16) |
++                                                      (depth_bpp << 28)));
++
++                      DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth);
++                      DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
++                      DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
++                                                MACH64_FRGD_MIX_S));
++                      DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
++                                                MACH64_FRGD_SRC_FRGD_CLR |
++                                                MACH64_MONO_SRC_ONE));
++
++                      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                                dev_priv->depth_offset_pitch);
++                      DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
++                      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++              }
++      }
++
++      DMAADVANCE(dev_priv, 1);
++
++      return 0;
++}
++
++static int mach64_dma_dispatch_swap(struct drm_device * dev,
++                                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      u32 fb_bpp;
++      int i;
++      DMALOCALS;
++
++      DRM_DEBUG("\n");
++
++      switch (dev_priv->fb_bpp) {
++      case 16:
++              fb_bpp = MACH64_DATATYPE_RGB565;
++              break;
++      case 32:
++      default:
++              fb_bpp = MACH64_DATATYPE_ARGB8888;
++              break;
++      }
++
++      if (!nbox)
++              return 0;
++
++      DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4);  /* returns on failure to get buffer */
++
++      DMAOUTREG(MACH64_Z_CNTL, 0);
++      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++      DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));      /* no scissor */
++      DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
++
++      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
++      DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT |
++                                       MACH64_DST_Y_TOP_TO_BOTTOM));
++
++      DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
++                                      (fb_bpp << 4) |
++                                      (fb_bpp << 8) |
++                                      (fb_bpp << 16) | (fb_bpp << 28)));
++
++      DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
++      DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S));
++      DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR |
++                                MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE));
++
++      DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch);
++      DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("dispatch swap %d,%d-%d,%d\n",
++                        pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);
++
++              DMAOUTREG(MACH64_SRC_WIDTH1, w);
++              DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y);
++              DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y);
++              DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
++
++      }
++
++      DMAADVANCE(dev_priv, 1);
++
++      if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) {
++              for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) {
++                      dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1];
++              }
++              dev_priv->frame_ofs[i] = GETRINGOFFSET();
++
++              dev_priv->sarea_priv->frames_queued++;
++      }
++
++      return 0;
++}
++
++static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
++{
++      drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int i, start;
++      u32 head, tail, ofs;
++
++      DRM_DEBUG("\n");
++
++      if (sarea_priv->frames_queued == 0)
++              return 0;
++
++      tail = ring->tail;
++      mach64_ring_tick(dev_priv, ring);
++      head = ring->head;
++
++      start = (MACH64_MAX_QUEUED_FRAMES -
++               DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued));
++
++      if (head == tail) {
++              sarea_priv->frames_queued = 0;
++              for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++                      dev_priv->frame_ofs[i] = ~0;
++              }
++              return 0;
++      }
++
++      for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
++              ofs = dev_priv->frame_ofs[i];
++              DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs);
++              if (ofs == ~0 ||
++                  (head < tail && (ofs < head || ofs >= tail)) ||
++                  (head > tail && (ofs < head && ofs >= tail))) {
++                      sarea_priv->frames_queued =
++                          (MACH64_MAX_QUEUED_FRAMES - 1) - i;
++                      dev_priv->frame_ofs[i] = ~0;
++              }
++      }
++
++      return sarea_priv->frames_queued;
++}
++
++/* Copy and verify a client submitted buffer.
++ * FIXME: Make an assembly optimized version
++ */
++static __inline__ int copy_from_user_vertex(u32 *to,
++                                          const u32 __user *ufrom,
++                                          unsigned long bytes)
++{
++      unsigned long n = bytes;        /* dwords remaining in buffer */
++      u32 *from, *orig_from;
++
++      from = drm_alloc(bytes, DRM_MEM_DRIVER);
++      if (from == NULL)
++              return -ENOMEM;
++
++      if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
++              drm_free(from, bytes, DRM_MEM_DRIVER);
++              return -EFAULT;
++      }
++      orig_from = from; /* we'll be modifying the "from" ptr, so save it */
++
++      n >>= 2;
++
++      while (n > 1) {
++              u32 data, reg, count;
++
++              data = *from++;
++
++              n--;
++
++              reg = le32_to_cpu(data);
++              count = (reg >> 16) + 1;
++              if (count <= n) {
++                      n -= count;
++                      reg &= 0xffff;
++
++                      /* This is an exact match of Mach64's Setup Engine registers,
++                       * excluding SETUP_CNTL (1_C1).
++                       */
++                      if ((reg >= 0x0190 && reg < 0x01c1) ||
++                          (reg >= 0x01ca && reg <= 0x01cf)) {
++                              *to++ = data;
++                              memcpy(to, from, count << 2);
++                              from += count;
++                              to += count;
++                      } else {
++                              DRM_ERROR("Got bad command: 0x%04x\n", reg);
++                              drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++                              return -EACCES;
++                      }
++              } else {
++                      DRM_ERROR
++                          ("Got bad command count(=%u) dwords remaining=%lu\n",
++                           count, n);
++                      drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++                      return -EINVAL;
++              }
++      }
++
++      drm_free(orig_from, bytes, DRM_MEM_DRIVER);
++      if (n == 0)
++              return 0;
++      else {
++              DRM_ERROR("Bad buf->used(=%lu)\n", bytes);
++              return -EINVAL;
++      }
++}
++
++static int mach64_dma_dispatch_vertex(struct drm_device * dev,
++                                    struct drm_file *file_priv,
++                                    drm_mach64_vertex_t * vertex)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      struct drm_buf *copy_buf;
++      void *buf = vertex->buf;
++      unsigned long used = vertex->used;
++      int ret = 0;
++      int i = 0;
++      int done = 0;
++      int verify_ret = 0;
++      DMALOCALS;
++
++      DRM_DEBUG("buf=%p used=%lu nbox=%d\n",
++                buf, used, sarea_priv->nbox);
++
++      if (!used)
++              goto _vertex_done;
++
++      copy_buf = mach64_freelist_get(dev_priv);
++      if (copy_buf == NULL) {
++              DRM_ERROR("couldn't get buffer\n");
++              return -EAGAIN;
++      }
++
++      /* Mach64's vertex data is actually register writes. To avoid security
++       * compromises these register writes have to be verified and copied from
++       * user space into a private DMA buffer.
++       */
++      verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
++
++      if (verify_ret != 0) {
++              mach64_freelist_put(dev_priv, copy_buf);
++              goto _vertex_done;
++      }
++
++      copy_buf->used = used;
++
++      DMASETPTR(copy_buf);
++
++      if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
++              ret = mach64_emit_state(file_priv, dev_priv);
++              if (ret < 0)
++                      return ret;
++      }
++
++      do {
++              /* Emit the next cliprect */
++              if (i < sarea_priv->nbox) {
++                      ret = mach64_emit_cliprect(file_priv, dev_priv,
++                                                 &sarea_priv->boxes[i]);
++                      if (ret < 0) {
++                              /* failed to get buffer */
++                              return ret;
++                      } else if (ret != 0) {
++                              /* null intersection with scissor */
++                              continue;
++                      }
++              }
++              if ((i >= sarea_priv->nbox - 1))
++                      done = 1;
++
++              /* Add the buffer to the DMA queue */
++              DMAADVANCE(dev_priv, done);
++
++      } while (++i < sarea_priv->nbox);
++
++      if (!done) {
++              if (copy_buf->pending) {
++                      DMADISCARDBUF();
++              } else {
++                      /* This buffer wasn't used (no cliprects), so place it
++                       * back on the free list
++                       */
++                      mach64_freelist_put(dev_priv, copy_buf);
++              }
++      }
++
++_vertex_done:
++      sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
++      sarea_priv->nbox = 0;
++
++      return verify_ret;
++}
++
++static __inline__ int copy_from_user_blit(u32 *to,
++                                        const u32 __user *ufrom,
++                                        unsigned long bytes)
++{
++      to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
++
++      if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int mach64_dma_dispatch_blit(struct drm_device * dev,
++                                  struct drm_file *file_priv,
++                                  drm_mach64_blit_t * blit)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      int dword_shift, dwords;
++      unsigned long used;
++      struct drm_buf *copy_buf;
++      int verify_ret = 0;
++      DMALOCALS;
++
++      /* The compiler won't optimize away a division by a variable,
++       * even if the only legal values are powers of two.  Thus, we'll
++       * use a shift instead.
++       */
++      switch (blit->format) {
++      case MACH64_DATATYPE_ARGB8888:
++              dword_shift = 0;
++              break;
++      case MACH64_DATATYPE_ARGB1555:
++      case MACH64_DATATYPE_RGB565:
++      case MACH64_DATATYPE_VYUY422:
++      case MACH64_DATATYPE_YVYU422:
++      case MACH64_DATATYPE_ARGB4444:
++              dword_shift = 1;
++              break;
++      case MACH64_DATATYPE_CI8:
++      case MACH64_DATATYPE_RGB8:
++              dword_shift = 2;
++              break;
++      default:
++              DRM_ERROR("invalid blit format %d\n", blit->format);
++              return -EINVAL;
++      }
++
++      /* Set buf->used to the bytes of blit data based on the blit dimensions
++       * and verify the size.  When the setup is emitted to the buffer with
++       * the DMA* macros below, buf->used is incremented to include the bytes
++       * used for setup as well as the blit data.
++       */
++      dwords = (blit->width * blit->height) >> dword_shift;
++      used = dwords << 2;
++      if (used <= 0 ||
++          used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
++              DRM_ERROR("Invalid blit size: %lu bytes\n", used);
++              return -EINVAL;
++      }
++
++      copy_buf = mach64_freelist_get(dev_priv);
++      if (copy_buf == NULL) {
++              DRM_ERROR("couldn't get buffer\n");
++              return -EAGAIN;
++      }
++
++      /* Copy the blit data from userspace.
++       * 
++       * XXX: This is overkill. The most efficient solution would be having 
++       * two sets of buffers (one set private for vertex data, the other set 
++       * client-writable for blits). However that would bring more complexity 
++       * and would break backward compatibility. The solution currently 
++       * implemented is keeping all buffers private, allowing to secure the
++       * driver, without increasing complexity at the expense of some speed 
++       * transferring data.
++       */
++      verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
++
++      if (verify_ret != 0) {
++              mach64_freelist_put(dev_priv, copy_buf);
++              goto _blit_done;
++      }
++
++      copy_buf->used = used;
++
++      /* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
++       * continuation buffers?
++       */
++
++      /* Blit via BM_HOSTDATA (gui-master) - like HOST_DATA[0-15], but doesn't require
++       * a register command every 16 dwords.  State setup is added at the start of the
++       * buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
++       */
++      DMASETPTR(copy_buf);
++
++      DMAOUTREG(MACH64_Z_CNTL, 0);
++      DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
++
++      DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));      /* no scissor */
++      DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
++
++      DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);      /* disable */
++      DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
++                MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM);
++
++      DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0)      /* dst pix width */
++                |(blit->format << 4)  /* composite pix width */
++                |(blit->format << 8)  /* src pix width */
++                |(blit->format << 16) /* host data pix width */
++                |(blit->format << 28) /* scaler/3D pix width */
++          );
++
++      DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);    /* enable all planes */
++      DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S);
++      DMAOUTREG(MACH64_DP_SRC,
++                MACH64_BKGD_SRC_BKGD_CLR
++                | MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE);
++
++      DMAOUTREG(MACH64_DST_OFF_PITCH,
++                (blit->pitch << 22) | (blit->offset >> 3));
++      DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
++      DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
++
++      DRM_DEBUG("%lu bytes\n", used);
++
++      /* Add the buffer to the queue */
++      DMAADVANCEHOSTDATA(dev_priv);
++
++_blit_done:
++      return verify_ret;
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++
++int mach64_dma_clear(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_clear_t *clear = data;
++      int ret;
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++      ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
++                                      clear->x, clear->y, clear->w, clear->h,
++                                      clear->clear_color,
++                                      clear->clear_depth);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
++      return ret;
++}
++
++int mach64_dma_swap(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int ret;
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++      ret = mach64_dma_dispatch_swap(dev, file_priv);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
++      return ret;
++}
++
++int mach64_dma_vertex(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n",
++                DRM_CURRENTPID,
++                vertex->buf, vertex->used, vertex->discard);
++
++      if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
++              DRM_ERROR("buffer prim %d\n", vertex->prim);
++              return -EINVAL;
++      }
++
++      if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
++              DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
++                        vertex->used);
++              return -EINVAL;
++      }
++
++      if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
++
++      return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
++}
++
++int mach64_dma_blit(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mach64_blit_t *blit = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      ret = mach64_dma_dispatch_blit(dev, file_priv, blit);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
++                            MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS);
++
++      return ret;
++}
++
++int mach64_get_param(struct drm_device *dev, void *data,
++                   struct drm_file *file_priv)
++{
++      drm_mach64_private_t *dev_priv = dev->dev_private;
++      drm_mach64_getparam_t *param = data;
++      int value;
++
++      DRM_DEBUG("\n");
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (param->param) {
++      case MACH64_PARAM_FRAMES_QUEUED:
++              /* Needs lock since it calls mach64_ring_tick() */
++              LOCK_TEST_WITH_RETURN(dev, file_priv);
++              value = mach64_do_get_frames_queued(dev_priv);
++              break;
++      case MACH64_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/Makefile git-nokia/drivers/gpu/drm-tungsten/Makefile
+--- git/drivers/gpu/drm-tungsten/Makefile      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/Makefile        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,74 @@
++#
++# Makefile for the drm device driver.  This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++#
++# Based on David Woodhouse's mtd build.
++#
++# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $
++#
++
++drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
++              drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
++              drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
++              drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
++              drm_memory_debug.o ati_pcigart.o drm_sman.o \
++              drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
++              drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
++              drm_regman.o drm_vm_nopage_compat.o drm_gem.o
++pvr2d-objs  := pvr2d_drv.o
++tdfx-objs   := tdfx_drv.o
++r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
++mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
++i810-objs   := i810_drv.o i810_dma.o
++i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
++              i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
++              i915_opregion.o \
++              i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
++nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
++              nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
++              nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
++              nv04_timer.o \
++              nv04_mc.o nv40_mc.o nv50_mc.o \
++              nv04_fb.o nv10_fb.o nv40_fb.o \
++              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
++              nv04_graph.o nv10_graph.o nv20_graph.o \
++              nv40_graph.o nv50_graph.o \
++              nv04_instmem.o nv50_instmem.o
++radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
++sis-objs    := sis_drv.o sis_mm.o
++ffb-objs    := ffb_drv.o ffb_context.o
++savage-objs := savage_drv.o savage_bci.o savage_state.o
++via-objs    := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
++              via_video.o via_dmablit.o via_fence.o via_buffer.o
++mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
++nv-objs := nv_drv.o
++xgi-objs    := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
++              xgi_fence.o
++
++ifeq ($(CONFIG_COMPAT),y)
++drm-objs    += drm_ioc32.o
++radeon-objs += radeon_ioc32.o
++mga-objs    += mga_ioc32.o
++r128-objs   += r128_ioc32.o
++i915-objs   += i915_ioc32.o
++nouveau-objs += nouveau_ioc32.o
++xgi-objs    += xgi_ioc32.o
++endif
++
++obj-m                                 += drm.o
++obj-$(CONFIG_DRM_TUNGSTEN_PVR2D)      += pvr2d.o
++obj-$(CONFIG_DRM_TUNGSTEN_TDFX)               += tdfx.o
++obj-$(CONFIG_DRM_TUNGSTEN_R128)               += r128.o
++obj-$(CONFIG_DRM_TUNGSTEN_RADEON)     += radeon.o
++obj-$(CONFIG_DRM_TUNGSTEN_MGA)                += mga.o
++obj-$(CONFIG_DRM_TUNGSTEN_I810)               += i810.o
++obj-$(CONFIG_DRM_TUNGSTEN_I915)               += i915.o
++obj-$(CONFIG_DRM_TUNGSTEN_SIS)                += sis.o
++obj-$(CONFIG_DRM_TUNGSTEN_FFB)                += ffb.o
++obj-$(CONFIG_DRM_TUNGSTEN_SAVAGE)     += savage.o
++obj-$(CONFIG_DRM_TUNGSTEN_VIA)                += via.o
++obj-$(CONFIG_DRM_TUNGSTEN_MACH64)     += mach64.o
++obj-$(CONFIG_DRM_TUNGSTEN_NV)         += nv.o
++obj-$(CONFIG_DRM_TUNGSTEN_NOUVEAU)    += nouveau.o
++obj-$(CONFIG_DRM_TUNGSTEN_XGI)                += xgi.o
++
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_dma.c git-nokia/drivers/gpu/drm-tungsten/mga_dma.c
+--- git/drivers/gpu/drm-tungsten/mga_dma.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_dma.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1161 @@
++/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ */
++/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++/**
++ * \file mga_dma.c
++ * DMA support for MGA G200 / G400.
++ *
++ * \author Rickard E. (Rik) Faith <faith@valinux.com>
++ * \author Jeff Hartmann <jhartmann@valinux.com>
++ * \author Keith Whitwell <keith@tungstengraphics.com>
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++#define MGA_DEFAULT_USEC_TIMEOUT      10000
++#define MGA_FREELIST_DEBUG            0
++
++#define MINIMAL_CLEANUP    0
++#define FULL_CLEANUP       1
++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
++
++/* ================================================================
++ * Engine control
++ */
++
++int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
++{
++      u32 status = 0;
++      int i;
++      DRM_DEBUG("\n");
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++              if (status == MGA_ENDPRDMASTS) {
++                      MGA_WRITE8(MGA_CRTC_INDEX, 0);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++#if MGA_DMA_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x\n", status);
++#endif
++      return -EBUSY;
++}
++
++static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++
++      DRM_DEBUG("\n");
++
++      /* The primary DMA stream should look like new right about now.
++       */
++      primary->tail = 0;
++      primary->space = primary->size;
++      primary->last_flush = 0;
++
++      sarea_priv->last_wrap = 0;
++
++      /* FIXME: Reset counters, buffer ages etc...
++       */
++
++      /* FIXME: What else do we need to reinitialize?  WARP stuff?
++       */
++
++      return 0;
++}
++
++/* ================================================================
++ * Primary DMA stream
++ */
++
++void mga_do_dma_flush(drm_mga_private_t * dev_priv)
++{
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++      u32 head, tail;
++      u32 status = 0;
++      int i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* We need to wait so that we can do an safe flush */
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++              if (status == MGA_ENDPRDMASTS)
++                      break;
++              DRM_UDELAY(1);
++      }
++
++      if (primary->tail == primary->last_flush) {
++              DRM_DEBUG("   bailing out...\n");
++              return;
++      }
++
++      tail = primary->tail + dev_priv->primary->offset;
++
++      /* We need to pad the stream between flushes, as the card
++       * actually (partially?) reads the first of these commands.
++       * See page 4-16 in the G400 manual, middle of the page or so.
++       */
++      BEGIN_DMA(1);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++
++      primary->last_flush = primary->tail;
++
++      head = MGA_READ(MGA_PRIMADDRESS);
++
++      if (head <= tail) {
++              primary->space = primary->size - primary->tail;
++      } else {
++              primary->space = head - tail;
++      }
++
++      DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
++      DRM_DEBUG("   tail = 0x%06lx\n", tail - dev_priv->primary->offset);
++      DRM_DEBUG("  space = 0x%06x\n", primary->space);
++
++      mga_flush_write_combine();
++      MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
++
++      DRM_DEBUG("done.\n");
++}
++
++void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
++{
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++      u32 head, tail;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_DMA_WRAP();
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++
++      tail = primary->tail + dev_priv->primary->offset;
++
++      primary->tail = 0;
++      primary->last_flush = 0;
++      primary->last_wrap++;
++
++      head = MGA_READ(MGA_PRIMADDRESS);
++
++      if (head == dev_priv->primary->offset) {
++              primary->space = primary->size;
++      } else {
++              primary->space = head - dev_priv->primary->offset;
++      }
++
++      DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
++      DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
++      DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
++      DRM_DEBUG("  space = 0x%06x\n", primary->space);
++
++      mga_flush_write_combine();
++      MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
++
++      set_bit(0, &primary->wrapped);
++      DRM_DEBUG("done.\n");
++}
++
++void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
++{
++      drm_mga_primary_buffer_t *primary = &dev_priv->prim;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 head = dev_priv->primary->offset;
++      DRM_DEBUG("\n");
++
++      sarea_priv->last_wrap++;
++      DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);
++
++      mga_flush_write_combine();
++      MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
++
++      clear_bit(0, &primary->wrapped);
++      DRM_DEBUG("done.\n");
++}
++
++/* ================================================================
++ * Freelist management
++ */
++
++#define MGA_BUFFER_USED               ~0
++#define MGA_BUFFER_FREE               0
++
++#if MGA_FREELIST_DEBUG
++static void mga_freelist_print(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_freelist_t *entry;
++
++      DRM_INFO("\n");
++      DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
++               dev_priv->sarea_priv->last_dispatch,
++               (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
++                              dev_priv->primary->offset));
++      DRM_INFO("current freelist:\n");
++
++      for (entry = dev_priv->head->next; entry; entry = entry->next) {
++              DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
++                       entry, entry->buf->idx, entry->age.head,
++                       entry->age.head - dev_priv->primary->offset);
++      }
++      DRM_INFO("\n");
++}
++#endif
++
++static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_freelist_t *entry;
++      int i;
++      DRM_DEBUG("count=%d\n", dma->buf_count);
++
++      dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++      if (dev_priv->head == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
++      SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++
++              entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++              if (entry == NULL)
++                      return -ENOMEM;
++
++              memset(entry, 0, sizeof(drm_mga_freelist_t));
++
++              entry->next = dev_priv->head->next;
++              entry->prev = dev_priv->head;
++              SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
++              entry->buf = buf;
++
++              if (dev_priv->head->next != NULL)
++                      dev_priv->head->next->prev = entry;
++              if (entry->next == NULL)
++                      dev_priv->tail = entry;
++
++              buf_priv->list_entry = entry;
++              buf_priv->discard = 0;
++              buf_priv->dispatched = 0;
++
++              dev_priv->head->next = entry;
++      }
++
++      return 0;
++}
++
++static void mga_freelist_cleanup(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_freelist_t *entry;
++      drm_mga_freelist_t *next;
++      DRM_DEBUG("\n");
++
++      entry = dev_priv->head;
++      while (entry) {
++              next = entry->next;
++              drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
++              entry = next;
++      }
++
++      dev_priv->head = dev_priv->tail = NULL;
++}
++
++#if 0
++/* FIXME: Still needed?
++ */
++static void mga_freelist_reset(struct drm_device * dev)
++{
++      drm_device_dma_t *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      int i;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++              SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
++      }
++}
++#endif
++
++static struct drm_buf *mga_freelist_get(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_freelist_t *next;
++      drm_mga_freelist_t *prev;
++      drm_mga_freelist_t *tail = dev_priv->tail;
++      u32 head, wrap;
++      DRM_DEBUG("\n");
++
++      head = MGA_READ(MGA_PRIMADDRESS);
++      wrap = dev_priv->sarea_priv->last_wrap;
++
++      DRM_DEBUG("   tail=0x%06lx %d\n",
++                tail->age.head ?
++                tail->age.head - dev_priv->primary->offset : 0,
++                tail->age.wrap);
++      DRM_DEBUG("   head=0x%06lx %d\n",
++                head - dev_priv->primary->offset, wrap);
++
++      if (TEST_AGE(&tail->age, head, wrap)) {
++              prev = dev_priv->tail->prev;
++              next = dev_priv->tail;
++              prev->next = NULL;
++              next->prev = next->next = NULL;
++              dev_priv->tail = prev;
++              SET_AGE(&next->age, MGA_BUFFER_USED, 0);
++              return next->buf;
++      }
++
++      DRM_DEBUG("returning NULL!\n");
++      return NULL;
++}
++
++int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_freelist_t *head, *entry, *prev;
++
++      DRM_DEBUG("age=0x%06lx wrap=%d\n",
++                buf_priv->list_entry->age.head -
++                dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
++
++      entry = buf_priv->list_entry;
++      head = dev_priv->head;
++
++      if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
++              SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
++              prev = dev_priv->tail;
++              prev->next = entry;
++              entry->prev = prev;
++              entry->next = NULL;
++      } else {
++              prev = head->next;
++              head->next = entry;
++              prev->prev = entry;
++              entry->prev = head;
++              entry->next = prev;
++      }
++
++      return 0;
++}
++
++/* ================================================================
++ * DMA initialization, cleanup
++ */
++
++int mga_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      drm_mga_private_t *dev_priv;
++
++      dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
++      if (!dev_priv)
++              return -ENOMEM;
++
++      dev->dev_private = (void *)dev_priv;
++      memset(dev_priv, 0, sizeof(drm_mga_private_t));
++
++      dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
++      dev_priv->chipset = flags;
++
++      dev_priv->mmio_base = drm_get_resource_start(dev, 1);
++      dev_priv->mmio_size = drm_get_resource_len(dev, 1);
++
++      dev->counters += 3;
++      dev->types[6] = _DRM_STAT_IRQ;
++      dev->types[7] = _DRM_STAT_PRIMARY;
++      dev->types[8] = _DRM_STAT_SECONDARY;
++
++      return 0;
++}
++
++/**
++ * Bootstrap the driver for AGP DMA.
++ *
++ * \todo
++ * Investigate whether there is any benifit to storing the WARP microcode in
++ * AGP memory.  If not, the microcode may as well always be put in PCI
++ * memory.
++ *
++ * \todo
++ * This routine needs to set dma_bs->agp_mode to the mode actually configured
++ * in the hardware.  Looking just at the Linux AGP driver code, I don't see
++ * an easy way to determine this.
++ *
++ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
++ */
++static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
++                                  drm_mga_dma_bootstrap_t * dma_bs)
++{
++      drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *)dev->dev_private;
++      unsigned int warp_size = mga_warp_microcode_size(dev_priv);
++      int err;
++      unsigned offset;
++      const unsigned secondary_size = dma_bs->secondary_bin_count
++              * dma_bs->secondary_bin_size;
++      const unsigned agp_size = (dma_bs->agp_size << 20);
++      struct drm_buf_desc req;
++      struct drm_agp_mode mode;
++      struct drm_agp_info info;
++      struct drm_agp_buffer agp_req;
++      struct drm_agp_binding bind_req;
++
++      /* Acquire AGP. */
++      err = drm_agp_acquire(dev);
++      if (err) {
++              DRM_ERROR("Unable to acquire AGP: %d\n", err);
++              return err;
++      }
++
++      err = drm_agp_info(dev, &info);
++      if (err) {
++              DRM_ERROR("Unable to get AGP info: %d\n", err);
++              return err;
++      }
++
++      mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
++      err = drm_agp_enable(dev, mode);
++      if (err) {
++              DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
++              return err;
++      }
++
++      /* In addition to the usual AGP mode configuration, the G200 AGP cards
++       * need to have the AGP mode "manually" set.
++       */
++
++      if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
++              if (mode.mode & 0x02) {
++                      MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
++              } else {
++                      MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
++              }
++      }
++
++      /* Allocate and bind AGP memory. */
++      agp_req.size = agp_size;
++      agp_req.type = 0;
++      err = drm_agp_alloc(dev, &agp_req);
++      if (err) {
++              dev_priv->agp_size = 0;
++              DRM_ERROR("Unable to allocate %uMB AGP memory\n",
++                        dma_bs->agp_size);
++              return err;
++      }
++
++      dev_priv->agp_size = agp_size;
++      dev_priv->agp_handle = agp_req.handle;
++
++      bind_req.handle = agp_req.handle;
++      bind_req.offset = 0;
++      err = drm_agp_bind( dev, &bind_req );
++      if (err) {
++              DRM_ERROR("Unable to bind AGP memory: %d\n", err);
++              return err;
++      }
++
++      /* Make drm_addbufs happy by not trying to create a mapping for less
++       * than a page.
++       */
++      if (warp_size < PAGE_SIZE)
++              warp_size = PAGE_SIZE;
++
++      offset = 0;
++      err = drm_addmap(dev, offset, warp_size,
++                       _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
++      if (err) {
++              DRM_ERROR("Unable to map WARP microcode: %d\n", err);
++              return err;
++      }
++
++      offset += warp_size;
++      err = drm_addmap(dev, offset, dma_bs->primary_size,
++                       _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary);
++      if (err) {
++              DRM_ERROR("Unable to map primary DMA region: %d\n", err);
++              return err;
++      }
++
++      offset += dma_bs->primary_size;
++      err = drm_addmap(dev, offset, secondary_size,
++                       _DRM_AGP, 0, & dev->agp_buffer_map);
++      if (err) {
++              DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
++              return err;
++      }
++
++      (void)memset( &req, 0, sizeof(req) );
++      req.count = dma_bs->secondary_bin_count;
++      req.size = dma_bs->secondary_bin_size;
++      req.flags = _DRM_AGP_BUFFER;
++      req.agp_start = offset;
++
++      err = drm_addbufs_agp(dev, &req);
++      if (err) {
++              DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
++              return err;
++      }
++
++#ifdef __linux__
++      {
++              struct drm_map_list *_entry;
++              unsigned long agp_token = 0;
++
++              list_for_each_entry(_entry, &dev->maplist, head) {
++                      if (_entry->map == dev->agp_buffer_map)
++                              agp_token = _entry->user_token;
++              }
++              if (!agp_token)
++                      return -EFAULT;
++
++              dev->agp_buffer_token = agp_token;
++      }
++#endif
++
++      offset += secondary_size;
++      err = drm_addmap(dev, offset, agp_size - offset,
++                       _DRM_AGP, 0, & dev_priv->agp_textures);
++      if (err) {
++              DRM_ERROR("Unable to map AGP texture region: %d\n", err);
++              return err;
++      }
++
++      drm_core_ioremap(dev_priv->warp, dev);
++      drm_core_ioremap(dev_priv->primary, dev);
++      drm_core_ioremap(dev->agp_buffer_map, dev);
++
++      if (!dev_priv->warp->handle ||
++          !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
++              DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
++                        dev_priv->warp->handle, dev_priv->primary->handle,
++                        dev->agp_buffer_map->handle);
++              return -ENOMEM;
++      }
++
++      dev_priv->dma_access = MGA_PAGPXFER;
++      dev_priv->wagp_enable = MGA_WAGP_ENABLE;
++
++      DRM_INFO("Initialized card for AGP DMA.\n");
++      return 0;
++}
++
++/**
++ * Bootstrap the driver for PCI DMA.
++ *
++ * \todo
++ * The algorithm for decreasing the size of the primary DMA buffer could be
++ * better.  The size should be rounded up to the nearest page size, then
++ * decrease the request size by a single page each pass through the loop.
++ *
++ * \todo
++ * Determine whether the maximum address passed to drm_pci_alloc is correct.
++ * The same goes for drm_addbufs_pci.
++ *
++ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
++ */
++static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
++                                  drm_mga_dma_bootstrap_t * dma_bs)
++{
++      drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *) dev->dev_private;
++      unsigned int warp_size = mga_warp_microcode_size(dev_priv);
++      unsigned int primary_size;
++      unsigned int bin_count;
++      int err;
++      struct drm_buf_desc req;
++
++
++      if (dev->dma == NULL) {
++              DRM_ERROR("dev->dma is NULL\n");
++              return -EFAULT;
++      }
++
++      /* Make drm_addbufs happy by not trying to create a mapping for less
++       * than a page.
++       */
++      if (warp_size < PAGE_SIZE)
++              warp_size = PAGE_SIZE;
++
++      /* The proper alignment is 0x100 for this mapping */
++      err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
++                       _DRM_READ_ONLY, &dev_priv->warp);
++      if (err != 0) {
++              DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
++                        err);
++              return err;
++      }
++
++      /* Other than the bottom two bits being used to encode other
++       * information, there don't appear to be any restrictions on the
++       * alignment of the primary or secondary DMA buffers.
++       */
++
++      for (primary_size = dma_bs->primary_size; primary_size != 0;
++           primary_size >>= 1 ) {
++              /* The proper alignment for this mapping is 0x04 */
++              err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
++                               _DRM_READ_ONLY, &dev_priv->primary);
++              if (!err)
++                      break;
++      }
++
++      if (err != 0) {
++              DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
++              return -ENOMEM;
++      }
++
++      if (dev_priv->primary->size != dma_bs->primary_size) {
++              DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
++                       dma_bs->primary_size,
++                       (unsigned)dev_priv->primary->size);
++              dma_bs->primary_size = dev_priv->primary->size;
++      }
++
++      for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
++           bin_count-- ) {
++              (void)memset(&req, 0, sizeof(req));
++              req.count = bin_count;
++              req.size = dma_bs->secondary_bin_size;
++
++              err = drm_addbufs_pci(dev, &req);
++              if (!err) {
++                      break;
++              }
++      }
++
++      if (bin_count == 0) {
++              DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
++              return err;
++      }
++
++      if (bin_count != dma_bs->secondary_bin_count) {
++              DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
++                       "to %u.\n", dma_bs->secondary_bin_count, bin_count);
++
++              dma_bs->secondary_bin_count = bin_count;
++      }
++
++      dev_priv->dma_access = 0;
++      dev_priv->wagp_enable = 0;
++
++      dma_bs->agp_mode = 0;
++
++      DRM_INFO("Initialized card for PCI DMA.\n");
++      return 0;
++}
++
++
++static int mga_do_dma_bootstrap(struct drm_device *dev,
++                              drm_mga_dma_bootstrap_t *dma_bs)
++{
++      const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
++      int err;
++      drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *) dev->dev_private;
++
++
++      dev_priv->used_new_dma_init = 1;
++
++      /* The first steps are the same for both PCI and AGP based DMA.  Map
++       * the cards MMIO registers and map a status page.
++       */
++      err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
++                       _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio);
++      if (err) {
++              DRM_ERROR("Unable to map MMIO region: %d\n", err);
++              return err;
++      }
++
++
++      err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
++                       _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
++                       & dev_priv->status);
++      if (err) {
++              DRM_ERROR("Unable to map status region: %d\n", err);
++              return err;
++      }
++
++
++      /* The DMA initialization procedure is slightly different for PCI and
++       * AGP cards.  AGP cards just allocate a large block of AGP memory and
++       * carve off portions of it for internal uses.  The remaining memory
++       * is returned to user-mode to be used for AGP textures.
++       */
++
++      if (is_agp) {
++              err = mga_do_agp_dma_bootstrap(dev, dma_bs);
++      }
++
++      /* If we attempted to initialize the card for AGP DMA but failed,
++       * clean-up any mess that may have been created.
++       */
++
++      if (err) {
++              mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
++      }
++
++
++      /* Not only do we want to try and initialized PCI cards for PCI DMA,
++       * but we also try to initialized AGP cards that could not be
++       * initialized for AGP DMA.  This covers the case where we have an AGP
++       * card in a system with an unsupported AGP chipset.  In that case the
++       * card will be detected as AGP, but we won't be able to allocate any
++       * AGP memory, etc.
++       */
++
++      if (!is_agp || err) {
++              err = mga_do_pci_dma_bootstrap(dev, dma_bs);
++      }
++
++
++      return err;
++}
++
++int mga_dma_bootstrap(struct drm_device *dev, void *data,
++                    struct drm_file *file_priv)
++{
++      drm_mga_dma_bootstrap_t *bootstrap = data;
++      int err;
++      static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
++      const drm_mga_private_t *const dev_priv =
++              (drm_mga_private_t *) dev->dev_private;
++
++
++      err = mga_do_dma_bootstrap(dev, bootstrap);
++      if (err) {
++              mga_do_cleanup_dma(dev, FULL_CLEANUP);
++              return err;
++      }
++
++      if (dev_priv->agp_textures != NULL) {
++              bootstrap->texture_handle = dev_priv->agp_textures->offset;
++              bootstrap->texture_size = dev_priv->agp_textures->size;
++      } else {
++              bootstrap->texture_handle = 0;
++              bootstrap->texture_size = 0;
++      }
++
++      bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
++
++      return 0;
++}
++
++
++static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
++{
++      drm_mga_private_t *dev_priv;
++      int ret;
++      DRM_DEBUG("\n");
++
++
++      dev_priv = dev->dev_private;
++
++      if (init->sgram) {
++              dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
++      } else {
++              dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
++      }
++      dev_priv->maccess = init->maccess;
++
++      dev_priv->fb_cpp = init->fb_cpp;
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      dev_priv->depth_cpp = init->depth_cpp;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      /* FIXME: Need to support AGP textures...
++       */
++      dev_priv->texture_offset = init->texture_offset[0];
++      dev_priv->texture_size = init->texture_size[0];
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("failed to find sarea!\n");
++              return -EINVAL;
++      }
++
++      if (!dev_priv->used_new_dma_init) {
++
++              dev_priv->dma_access = MGA_PAGPXFER;
++              dev_priv->wagp_enable = MGA_WAGP_ENABLE;
++
++              dev_priv->status = drm_core_findmap(dev, init->status_offset);
++              if (!dev_priv->status) {
++                      DRM_ERROR("failed to find status page!\n");
++                      return -EINVAL;
++              }
++              dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++              if (!dev_priv->mmio) {
++                      DRM_ERROR("failed to find mmio region!\n");
++                      return -EINVAL;
++              }
++              dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
++              if (!dev_priv->warp) {
++                      DRM_ERROR("failed to find warp microcode region!\n");
++                      return -EINVAL;
++              }
++              dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
++              if (!dev_priv->primary) {
++                      DRM_ERROR("failed to find primary dma region!\n");
++                      return -EINVAL;
++              }
++              dev->agp_buffer_token = init->buffers_offset;
++              dev->agp_buffer_map =
++                      drm_core_findmap(dev, init->buffers_offset);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("failed to find dma buffer region!\n");
++                      return -EINVAL;
++              }
++
++              drm_core_ioremap(dev_priv->warp, dev);
++              drm_core_ioremap(dev_priv->primary, dev);
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++      }
++
++      dev_priv->sarea_priv =
++          (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                               init->sarea_priv_offset);
++
++      if (!dev_priv->warp->handle ||
++          !dev_priv->primary->handle ||
++          ((dev_priv->dma_access != 0) &&
++           ((dev->agp_buffer_map == NULL) ||
++            (dev->agp_buffer_map->handle == NULL)))) {
++              DRM_ERROR("failed to ioremap agp regions!\n");
++              return -ENOMEM;
++      }
++
++      ret = mga_warp_install_microcode(dev_priv);
++      if (ret != 0) {
++              DRM_ERROR("failed to install WARP ucode: %d!\n", ret);
++              return ret;
++      }
++
++      ret = mga_warp_init(dev_priv);
++      if (ret != 0) {
++              DRM_ERROR("failed to init WARP engine: %d!\n", ret);
++              return ret;
++      }
++
++      dev_priv->prim.status = (u32 *) dev_priv->status->handle;
++
++      mga_do_wait_for_idle(dev_priv);
++
++      /* Init the primary DMA registers.
++       */
++      MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
++
++      dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
++      dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
++                            + dev_priv->primary->size);
++      dev_priv->prim.size = dev_priv->primary->size;
++
++      dev_priv->prim.tail = 0;
++      dev_priv->prim.space = dev_priv->prim.size;
++      dev_priv->prim.wrapped = 0;
++
++      dev_priv->prim.last_flush = 0;
++      dev_priv->prim.last_wrap = 0;
++
++      dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
++
++      dev_priv->prim.status[0] = dev_priv->primary->offset;
++      dev_priv->prim.status[1] = 0;
++
++      dev_priv->sarea_priv->last_wrap = 0;
++      dev_priv->sarea_priv->last_frame.head = 0;
++      dev_priv->sarea_priv->last_frame.wrap = 0;
++
++      if (mga_freelist_init(dev, dev_priv) < 0) {
++              DRM_ERROR("could not initialize freelist\n");
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
++{
++      int err = 0;
++      DRM_DEBUG("\n");
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              drm_mga_private_t *dev_priv = dev->dev_private;
++
++              if ((dev_priv->warp != NULL)
++                  && (dev_priv->warp->type != _DRM_CONSISTENT))
++                      drm_core_ioremapfree(dev_priv->warp, dev);
++
++              if ((dev_priv->primary != NULL)
++                  && (dev_priv->primary->type != _DRM_CONSISTENT))
++                      drm_core_ioremapfree(dev_priv->primary, dev);
++
++              if (dev->agp_buffer_map != NULL)
++                      drm_core_ioremapfree(dev->agp_buffer_map, dev);
++
++              if (dev_priv->used_new_dma_init) {
++                      if (dev_priv->agp_handle != 0) {
++                              struct drm_agp_binding unbind_req;
++                              struct drm_agp_buffer free_req;
++
++                              unbind_req.handle = dev_priv->agp_handle;
++                              drm_agp_unbind(dev, &unbind_req);
++
++                              free_req.handle = dev_priv->agp_handle;
++                              drm_agp_free(dev, &free_req);
++
++                              dev_priv->agp_textures = NULL;
++                              dev_priv->agp_size = 0;
++                              dev_priv->agp_handle = 0;
++                      }
++
++                      if ((dev->agp != NULL) && dev->agp->acquired) {
++                              err = drm_agp_release(dev);
++                      }
++              }
++
++              dev_priv->warp = NULL;
++              dev_priv->primary = NULL;
++              dev_priv->sarea = NULL;
++              dev_priv->sarea_priv = NULL;
++              dev->agp_buffer_map = NULL;
++
++              if (full_cleanup) {
++                      dev_priv->mmio = NULL;
++                      dev_priv->status = NULL;
++                      dev_priv->used_new_dma_init = 0;
++              }
++
++              memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
++              dev_priv->warp_pipe = 0;
++              memset(dev_priv->warp_pipe_phys, 0,
++                     sizeof(dev_priv->warp_pipe_phys));
++
++              if (dev_priv->head != NULL) {
++                      mga_freelist_cleanup(dev);
++              }
++      }
++
++      return err;
++}
++
++int mga_dma_init(struct drm_device *dev, void *data,
++               struct drm_file *file_priv)
++{
++      drm_mga_init_t *init = data;
++      int err;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case MGA_INIT_DMA:
++              err = mga_do_init_dma(dev, init);
++              if (err) {
++                      (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
++              }
++              return err;
++      case MGA_CLEANUP_DMA:
++              return mga_do_cleanup_dma(dev, FULL_CLEANUP);
++      }
++
++      return -EINVAL;
++}
++
++/* ================================================================
++ * Primary DMA stream management
++ */
++
++int mga_dma_flush(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      struct drm_lock *lock = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("%s%s%s\n",
++                (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
++                (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
++                (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
++
++      WRAP_WAIT_WITH_RETURN(dev_priv);
++
++      if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
++              mga_do_dma_flush(dev_priv);
++      }
++
++      if (lock->flags & _DRM_LOCK_QUIESCENT) {
++#if MGA_DMA_DEBUG
++              int ret = mga_do_wait_for_idle(dev_priv);
++              if (ret < 0)
++                      DRM_INFO("-EBUSY\n");
++              return ret;
++#else
++              return mga_do_wait_for_idle(dev_priv);
++#endif
++      } else {
++              return 0;
++      }
++}
++
++int mga_dma_reset(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return mga_do_dma_reset(dev_priv);
++}
++
++/* ================================================================
++ * DMA buffer management
++ */
++
++static int mga_dma_get_buffers(struct drm_device * dev,
++                             struct drm_file *file_priv, struct drm_dma * d)
++{
++      struct drm_buf *buf;
++      int i;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = mga_freelist_get(dev);
++              if (!buf)
++                      return -EAGAIN;
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i],
++                                   &buf->idx, sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i],
++                                   &buf->total, sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int mga_dma_buffers(struct drm_device *dev, void *data,
++                  struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      struct drm_dma *d = data;
++      int ret = 0;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = mga_dma_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++/**
++ * Called just before the module is unloaded.
++ */
++int mga_driver_unload(struct drm_device * dev)
++{
++      drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++
++      return 0;
++}
++
++/**
++ * Called when the last opener of the device is closed.
++ */
++void mga_driver_lastclose(struct drm_device * dev)
++{
++      mga_do_cleanup_dma(dev, FULL_CLEANUP);
++}
++
++int mga_driver_dma_quiescent(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      return mga_do_wait_for_idle(dev_priv);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drm.h git-nokia/drivers/gpu/drm-tungsten/mga_drm.h
+--- git/drivers/gpu/drm-tungsten/mga_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,425 @@
++/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
++ * Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Jeff Hartmann <jhartmann@valinux.com>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *
++ * Rewritten by:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __MGA_DRM_H__
++#define __MGA_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (mga_sarea.h)
++ */
++
++#ifndef __MGA_SAREA_DEFINES__
++#define __MGA_SAREA_DEFINES__
++
++/* WARP pipe flags
++ */
++#define MGA_F                 0x1     /* fog */
++#define MGA_A                 0x2     /* alpha */
++#define MGA_S                 0x4     /* specular */
++#define MGA_T2                        0x8     /* multitexture */
++
++#define MGA_WARP_TGZ          0
++#define MGA_WARP_TGZF         (MGA_F)
++#define MGA_WARP_TGZA         (MGA_A)
++#define MGA_WARP_TGZAF                (MGA_F|MGA_A)
++#define MGA_WARP_TGZS         (MGA_S)
++#define MGA_WARP_TGZSF                (MGA_S|MGA_F)
++#define MGA_WARP_TGZSA                (MGA_S|MGA_A)
++#define MGA_WARP_TGZSAF               (MGA_S|MGA_F|MGA_A)
++#define MGA_WARP_T2GZ         (MGA_T2)
++#define MGA_WARP_T2GZF                (MGA_T2|MGA_F)
++#define MGA_WARP_T2GZA                (MGA_T2|MGA_A)
++#define MGA_WARP_T2GZAF               (MGA_T2|MGA_A|MGA_F)
++#define MGA_WARP_T2GZS                (MGA_T2|MGA_S)
++#define MGA_WARP_T2GZSF               (MGA_T2|MGA_S|MGA_F)
++#define MGA_WARP_T2GZSA               (MGA_T2|MGA_S|MGA_A)
++#define MGA_WARP_T2GZSAF      (MGA_T2|MGA_S|MGA_F|MGA_A)
++
++#define MGA_MAX_G200_PIPES    8       /* no multitex */
++#define MGA_MAX_G400_PIPES    16
++#define MGA_MAX_WARP_PIPES    MGA_MAX_G400_PIPES
++#define MGA_WARP_UCODE_SIZE   32768   /* in bytes */
++
++#define MGA_CARD_TYPE_G200    1
++#define MGA_CARD_TYPE_G400    2
++#define MGA_CARD_TYPE_G450    3       /* not currently used */
++#define MGA_CARD_TYPE_G550    4
++
++#define MGA_FRONT             0x1
++#define MGA_BACK              0x2
++#define MGA_DEPTH             0x4
++
++/* What needs to be changed for the current vertex dma buffer?
++ */
++#define MGA_UPLOAD_CONTEXT    0x1
++#define MGA_UPLOAD_TEX0               0x2
++#define MGA_UPLOAD_TEX1               0x4
++#define MGA_UPLOAD_PIPE               0x8
++#define MGA_UPLOAD_TEX0IMAGE  0x10    /* handled client-side */
++#define MGA_UPLOAD_TEX1IMAGE  0x20    /* handled client-side */
++#define MGA_UPLOAD_2D         0x40
++#define MGA_WAIT_AGE          0x80    /* handled client-side */
++#define MGA_UPLOAD_CLIPRECTS  0x100   /* handled client-side */
++#if 0
++#define MGA_DMA_FLUSH         0x200   /* set when someone gets the lock
++                                         quiescent */
++#endif
++
++/* 128 buffers of 64k each, total 8 meg.
++ */
++#define MGA_BUFFER_SIZE               (1 << 16)
++#define MGA_NUM_BUFFERS               128
++
++/* Keep these small for testing.
++ */
++#define MGA_NR_SAREA_CLIPRECTS        8
++
++/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
++ * regions, subject to a minimum region size of (1<<16) == 64k.
++ *
++ * Clients may subdivide regions internally, but when sharing between
++ * clients, the region size is the minimum granularity.
++ */
++
++#define MGA_CARD_HEAP                 0
++#define MGA_AGP_HEAP                  1
++#define MGA_NR_TEX_HEAPS              2
++#define MGA_NR_TEX_REGIONS            16
++#define MGA_LOG_MIN_TEX_REGION_SIZE   16
++
++#define  DRM_MGA_IDLE_RETRY          2048
++
++#endif                                /* __MGA_SAREA_DEFINES__ */
++
++/* Setup registers for 3D context
++ */
++typedef struct {
++      unsigned int dstorg;
++      unsigned int maccess;
++      unsigned int plnwt;
++      unsigned int dwgctl;
++      unsigned int alphactrl;
++      unsigned int fogcolor;
++      unsigned int wflag;
++      unsigned int tdualstage0;
++      unsigned int tdualstage1;
++      unsigned int fcol;
++      unsigned int stencil;
++      unsigned int stencilctl;
++} drm_mga_context_regs_t;
++
++/* Setup registers for 2D, X server
++ */
++typedef struct {
++      unsigned int pitch;
++} drm_mga_server_regs_t;
++
++/* Setup registers for each texture unit
++ */
++typedef struct {
++      unsigned int texctl;
++      unsigned int texctl2;
++      unsigned int texfilter;
++      unsigned int texbordercol;
++      unsigned int texorg;
++      unsigned int texwidth;
++      unsigned int texheight;
++      unsigned int texorg1;
++      unsigned int texorg2;
++      unsigned int texorg3;
++      unsigned int texorg4;
++} drm_mga_texture_regs_t;
++
++/* General aging mechanism
++ */
++typedef struct {
++      unsigned int head;      /* Position of head pointer          */
++      unsigned int wrap;      /* Primary DMA wrap count            */
++} drm_mga_age_t;
++
++typedef struct _drm_mga_sarea {
++      /* The channel for communication of state information to the kernel
++       * on firing a vertex dma buffer.
++       */
++      drm_mga_context_regs_t context_state;
++      drm_mga_server_regs_t server_state;
++      drm_mga_texture_regs_t tex_state[2];
++      unsigned int warp_pipe;
++      unsigned int dirty;
++      unsigned int vertsize;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Information about the most recently used 3d drawable.  The
++       * client fills in the req_* fields, the server fills in the
++       * exported_ fields and puts the cliprects into boxes, above.
++       *
++       * The client clears the exported_drawable field before
++       * clobbering the boxes data.
++       */
++      unsigned int req_drawable;      /* the X drawable id */
++      unsigned int req_draw_buffer;   /* MGA_FRONT or MGA_BACK */
++
++      unsigned int exported_drawable;
++      unsigned int exported_index;
++      unsigned int exported_stamp;
++      unsigned int exported_buffers;
++      unsigned int exported_nfront;
++      unsigned int exported_nback;
++      int exported_back_x, exported_front_x, exported_w;
++      int exported_back_y, exported_front_y, exported_h;
++      struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
++
++      /* Counters for aging textures and for client-side throttling.
++       */
++      unsigned int status[4];
++      unsigned int last_wrap;
++
++      drm_mga_age_t last_frame;
++      unsigned int last_enqueue;      /* last time a buffer was enqueued */
++      unsigned int last_dispatch;     /* age of the most recently dispatched buffer */
++      unsigned int last_quiescent;    /*  */
++
++      /* LRU lists for texture memory in agp space and on the card.
++       */
++      struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
++      unsigned int texAge[MGA_NR_TEX_HEAPS];
++
++      /* Mechanism to validate card state.
++       */
++      int ctxOwner;
++} drm_mga_sarea_t;
++
++
++/* MGA specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_MGA_INIT     0x00
++#define DRM_MGA_FLUSH    0x01
++#define DRM_MGA_RESET    0x02
++#define DRM_MGA_SWAP     0x03
++#define DRM_MGA_CLEAR    0x04
++#define DRM_MGA_VERTEX   0x05
++#define DRM_MGA_INDICES  0x06
++#define DRM_MGA_ILOAD    0x07
++#define DRM_MGA_BLIT     0x08
++#define DRM_MGA_GETPARAM 0x09
++
++/* 3.2:
++ * ioctls for operating on fences.
++ */
++#define DRM_MGA_SET_FENCE      0x0a
++#define DRM_MGA_WAIT_FENCE     0x0b
++#define DRM_MGA_DMA_BOOTSTRAP  0x0c
++
++
++#define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
++#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
++#define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
++#define DRM_IOCTL_MGA_SWAP     DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
++#define DRM_IOCTL_MGA_CLEAR    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
++#define DRM_IOCTL_MGA_VERTEX   DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
++#define DRM_IOCTL_MGA_INDICES  DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
++#define DRM_IOCTL_MGA_ILOAD    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
++#define DRM_IOCTL_MGA_BLIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
++#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
++#define DRM_IOCTL_MGA_SET_FENCE     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
++#define DRM_IOCTL_MGA_WAIT_FENCE    DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
++#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
++
++typedef struct _drm_mga_warp_index {
++      int installed;
++      unsigned long phys_addr;
++      int size;
++} drm_mga_warp_index_t;
++
++typedef struct drm_mga_init {
++      enum {
++              MGA_INIT_DMA = 0x01,
++              MGA_CLEANUP_DMA = 0x02
++      } func;
++
++      unsigned long sarea_priv_offset;
++
++      int chipset;
++      int sgram;
++
++      unsigned int maccess;
++
++      unsigned int fb_cpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++
++      unsigned int depth_cpp;
++      unsigned int depth_offset, depth_pitch;
++
++      unsigned int texture_offset[MGA_NR_TEX_HEAPS];
++      unsigned int texture_size[MGA_NR_TEX_HEAPS];
++
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long status_offset;
++      unsigned long warp_offset;
++      unsigned long primary_offset;
++      unsigned long buffers_offset;
++} drm_mga_init_t;
++
++
++typedef struct drm_mga_dma_bootstrap {
++      /**
++       * \name AGP texture region
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
++       * be filled in with the actual AGP texture settings.
++       *
++       * \warning
++       * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
++       * is zero, it means that PCI memory (most likely through the use of
++       * an IOMMU) is being used for "AGP" textures.
++       */
++      /*@{*/
++      unsigned long texture_handle;  /**< Handle used to map AGP textures. */
++      uint32_t     texture_size;    /**< Size of the AGP texture region. */
++      /*@}*/
++
++
++      /**
++       * Requested size of the primary DMA region.
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++       * filled in with the actual size of the primary DMA region allocated.
++       */
++      uint32_t primary_size;
++
++
++      /**
++       * Requested number of secondary DMA buffers.
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++       * filled in with the actual number of secondary DMA buffers
++       * allocated.  Particularly when PCI DMA is used, this may be
++       * (substantially) less than the number requested.
++       */
++      uint32_t secondary_bin_count;
++
++
++      /**
++       * Requested size of each secondary DMA buffer.
++       *
++       * While the kernel \b is free to reduce
++       * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
++       * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
++       */
++      uint32_t secondary_bin_size;
++
++
++      /**
++       * Bit-wise mask of AGPSTAT2_* values.  Currently only \c AGPSTAT2_1X,
++       * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported.  If this value is
++       * zero, it means that PCI DMA should be used, even if AGP is
++       * possible.
++       *
++       * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
++       * filled in with the actual AGP mode.  If AGP was not available
++       * (i.e., PCI DMA was used), this value will be zero.
++       */
++      uint32_t agp_mode;
++
++
++      /**
++       * Desired AGP GART size, measured in megabytes.
++       */
++      uint8_t agp_size;
++} drm_mga_dma_bootstrap_t;
++
++typedef struct drm_mga_clear {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;
++} drm_mga_clear_t;
++
++typedef struct drm_mga_vertex {
++      int idx;                /* buffer to queue */
++      int used;               /* bytes in use */
++      int discard;            /* client finished with buffer?  */
++} drm_mga_vertex_t;
++
++typedef struct drm_mga_indices {
++      int idx;                /* buffer to queue */
++      unsigned int start;
++      unsigned int end;
++      int discard;            /* client finished with buffer?  */
++} drm_mga_indices_t;
++
++typedef struct drm_mga_iload {
++      int idx;
++      unsigned int dstorg;
++      unsigned int length;
++} drm_mga_iload_t;
++
++typedef struct _drm_mga_blit {
++      unsigned int planemask;
++      unsigned int srcorg;
++      unsigned int dstorg;
++      int src_pitch, dst_pitch;
++      int delta_sx, delta_sy;
++      int delta_dx, delta_dy;
++      int height, ydir;       /* flip image vertically */
++      int source_pitch, dest_pitch;
++} drm_mga_blit_t;
++
++/* 3.1: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define MGA_PARAM_IRQ_NR            1
++
++/* 3.2: Query the actual card type.  The DDX only distinguishes between
++ * G200 chips and non-G200 chips, which it calls G400.  It turns out that
++ * there are some very subtle differences between the G4x0 chips and the G550
++ * chips.  Using this parameter query, a client-side driver can detect the
++ * difference between a G4x0 and a G550.
++ */
++#define MGA_PARAM_CARD_TYPE         2
++
++typedef struct drm_mga_getparam {
++      int param;
++      void __user *value;
++} drm_mga_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drv.c git-nokia/drivers/gpu/drm-tungsten/mga_drv.c
+--- git/drivers/gpu/drm-tungsten/mga_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++#include "drm_pciids.h"
++
++static int mga_driver_device_is_agp(struct drm_device * dev);
++
++static struct pci_device_id pciidlist[] = {
++      mga_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
++          DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .dev_priv_size = sizeof (drm_mga_buf_priv_t),
++      .load = mga_driver_load,
++      .unload = mga_driver_unload,
++      .lastclose = mga_driver_lastclose,
++      .dma_quiescent = mga_driver_dma_quiescent,
++      .device_is_agp = mga_driver_device_is_agp,
++      .get_vblank_counter = mga_get_vblank_counter,
++      .enable_vblank = mga_enable_vblank,
++      .disable_vblank = mga_disable_vblank,
++      .irq_preinstall = mga_driver_irq_preinstall,
++      .irq_postinstall = mga_driver_irq_postinstall,
++      .irq_uninstall = mga_driver_irq_uninstall,
++      .irq_handler = mga_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = mga_ioctls,
++      .dma_ioctl = mga_dma_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = mga_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init mga_init(void)
++{
++      driver.num_ioctls = mga_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit mga_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(mga_init);
++module_exit(mga_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
++
++/**
++ * Determine if the device really is AGP or not.
++ *
++ * In addition to the usual tests performed by \c drm_device_is_agp, this
++ * function detects PCI G450 cards that appear to the system exactly like
++ * AGP G450 cards.
++ *
++ * \param dev   The device to be tested.
++ *
++ * \returns
++ * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
++ */
++static int mga_driver_device_is_agp(struct drm_device * dev)
++{
++      const struct pci_dev * const pdev = dev->pdev;
++
++
++      /* There are PCI versions of the G450.  These cards have the
++       * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
++       * bridge chip.  We detect these cards, which are not currently
++       * supported by this driver, by looking at the device ID of the
++       * bus the "card" is on.  If vendor is 0x3388 (Hint Corp) and the
++       * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
++       * device.
++       */
++
++      if ((pdev->device == 0x0525) && pdev->bus->self
++          && (pdev->bus->self->vendor == 0x3388)
++          && (pdev->bus->self->device == 0x0021)) {
++              return 0;
++      }
++
++      return 2;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_drv.h git-nokia/drivers/gpu/drm-tungsten/mga_drv.h
+--- git/drivers/gpu/drm-tungsten/mga_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,691 @@
++/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
++ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __MGA_DRV_H__
++#define __MGA_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Gareth Hughes, VA Linux Systems Inc."
++
++#define DRIVER_NAME           "mga"
++#define DRIVER_DESC           "Matrox G200/G400"
++#define DRIVER_DATE           "20060319"
++
++#define DRIVER_MAJOR          3
++#define DRIVER_MINOR          2
++#define DRIVER_PATCHLEVEL     2
++
++typedef struct drm_mga_primary_buffer {
++      u8 *start;
++      u8 *end;
++      int size;
++
++      u32 tail;
++      int space;
++      volatile long wrapped;
++
++      volatile u32 *status;
++
++      u32 last_flush;
++      u32 last_wrap;
++
++      u32 high_mark;
++} drm_mga_primary_buffer_t;
++
++typedef struct drm_mga_freelist {
++      struct drm_mga_freelist *next;
++      struct drm_mga_freelist *prev;
++      drm_mga_age_t age;
++      struct drm_buf *buf;
++} drm_mga_freelist_t;
++
++typedef struct {
++      drm_mga_freelist_t *list_entry;
++      int discard;
++      int dispatched;
++} drm_mga_buf_priv_t;
++
++typedef struct drm_mga_private {
++      drm_mga_primary_buffer_t prim;
++      drm_mga_sarea_t *sarea_priv;
++
++      drm_mga_freelist_t *head;
++      drm_mga_freelist_t *tail;
++
++      unsigned int warp_pipe;
++      unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
++
++      int chipset;
++      int usec_timeout;
++
++      /**
++       * If set, the new DMA initialization sequence was used.  This is
++       * primarilly used to select how the driver should uninitialized its
++       * internal DMA structures.
++       */
++      int used_new_dma_init;
++
++      /**
++       * If AGP memory is used for DMA buffers, this will be the value
++       * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
++       */
++      u32 dma_access;
++
++      /**
++       * If AGP memory is used for DMA buffers, this will be the value
++       * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
++       * transfer).
++       */
++      u32 wagp_enable;
++
++      /**
++       * \name MMIO region parameters.
++       *
++       * \sa drm_mga_private_t::mmio
++       */
++      /*@{*/
++      u32 mmio_base;                  /**< Bus address of base of MMIO. */
++      u32 mmio_size;                  /**< Size of the MMIO region. */
++      /*@}*/
++
++      u32 clear_cmd;
++      u32 maccess;
++
++      atomic_t vbl_received;          /**< Number of vblanks received. */
++      wait_queue_head_t fence_queue;
++      atomic_t last_fence_retired;
++      u32 next_fence_to_post;
++
++      unsigned int fb_cpp;
++      unsigned int front_offset;
++      unsigned int front_pitch;
++      unsigned int back_offset;
++      unsigned int back_pitch;
++
++      unsigned int depth_cpp;
++      unsigned int depth_offset;
++      unsigned int depth_pitch;
++
++      unsigned int texture_offset;
++      unsigned int texture_size;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *status;
++      drm_local_map_t *warp;
++      drm_local_map_t *primary;
++      drm_local_map_t *agp_textures;
++
++      unsigned long agp_handle;
++      unsigned int agp_size;
++} drm_mga_private_t;
++
++extern struct drm_ioctl_desc mga_ioctls[];
++extern int mga_max_ioctl;
++
++                              /* mga_dma.c */
++extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv);
++extern int mga_dma_init(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv);
++extern int mga_dma_flush(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int mga_dma_reset(struct drm_device *dev, void *data,
++                       struct drm_file *file_priv);
++extern int mga_dma_buffers(struct drm_device *dev, void *data,
++                         struct drm_file *file_priv);
++extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
++extern int mga_driver_unload(struct drm_device * dev);
++extern void mga_driver_lastclose(struct drm_device * dev);
++extern int mga_driver_dma_quiescent(struct drm_device * dev);
++
++extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
++
++extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
++extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
++extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
++
++extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
++
++                              /* mga_warp.c */
++extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
++extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
++extern int mga_warp_init(drm_mga_private_t * dev_priv);
++
++                              /* mga_irq.c */
++extern int mga_enable_vblank(struct drm_device *dev, int crtc);
++extern void mga_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
++extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
++extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
++extern void mga_driver_irq_preinstall(struct drm_device * dev);
++extern int mga_driver_irq_postinstall(struct drm_device * dev);
++extern void mga_driver_irq_uninstall(struct drm_device * dev);
++extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
++                           unsigned long arg);
++
++#define mga_flush_write_combine()     DRM_WRITEMEMORYBARRIER()
++
++#if defined(__linux__) && defined(__alpha__)
++#define MGA_BASE( reg )               ((unsigned long)(dev_priv->mmio->handle))
++#define MGA_ADDR( reg )               (MGA_BASE(reg) + reg)
++
++#define MGA_DEREF( reg )      *(volatile u32 *)MGA_ADDR( reg )
++#define MGA_DEREF8( reg )     *(volatile u8 *)MGA_ADDR( reg )
++
++#define MGA_READ( reg )               (_MGA_READ((u32 *)MGA_ADDR(reg)))
++#define MGA_READ8( reg )      (_MGA_READ((u8 *)MGA_ADDR(reg)))
++#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
++#define MGA_WRITE8( reg, val )  do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
++
++static inline u32 _MGA_READ(u32 * addr)
++{
++      DRM_MEMORYBARRIER();
++      return *(volatile u32 *)addr;
++}
++#else
++#define MGA_READ8( reg )      DRM_READ8(dev_priv->mmio, (reg))
++#define MGA_READ( reg )               DRM_READ32(dev_priv->mmio, (reg))
++#define MGA_WRITE8( reg, val )  DRM_WRITE8(dev_priv->mmio, (reg), (val))
++#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
++#endif
++
++#define DWGREG0               0x1c00
++#define DWGREG0_END   0x1dff
++#define DWGREG1               0x2c00
++#define DWGREG1_END   0x2dff
++
++#define ISREG0(r)     (r >= DWGREG0 && r <= DWGREG0_END)
++#define DMAREG0(r)    (u8)((r - DWGREG0) >> 2)
++#define DMAREG1(r)    (u8)(((r - DWGREG1) >> 2) | 0x80)
++#define DMAREG(r)     (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
++
++/* ================================================================
++ * Helper macross...
++ */
++
++#define MGA_EMIT_STATE( dev_priv, dirty )                             \
++do {                                                                  \
++      if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) {                        \
++              if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) {        \
++                      mga_g400_emit_state( dev_priv );                \
++              } else {                                                \
++                      mga_g200_emit_state( dev_priv );                \
++              }                                                       \
++      }                                                               \
++} while (0)
++
++#define WRAP_TEST_WITH_RETURN( dev_priv )                             \
++do {                                                                  \
++      if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {                 \
++              if ( mga_is_idle( dev_priv ) ) {                        \
++                      mga_do_dma_wrap_end( dev_priv );                \
++              } else if ( dev_priv->prim.space <                      \
++                          dev_priv->prim.high_mark ) {                \
++                      if ( MGA_DMA_DEBUG )                            \
++                              DRM_INFO( "wrap...\n");         \
++                      return -EBUSY;                  \
++              }                                                       \
++      }                                                               \
++} while (0)
++
++#define WRAP_WAIT_WITH_RETURN( dev_priv )                             \
++do {                                                                  \
++      if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {                 \
++              if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {           \
++                      if ( MGA_DMA_DEBUG )                            \
++                              DRM_INFO( "wrap...\n");         \
++                      return -EBUSY;                  \
++              }                                                       \
++              mga_do_dma_wrap_end( dev_priv );                        \
++      }                                                               \
++} while (0)
++
++/* ================================================================
++ * Primary DMA command stream
++ */
++
++#define MGA_VERBOSE   0
++
++#define DMA_LOCALS    unsigned int write; volatile u8 *prim;
++
++#define DMA_BLOCK_SIZE        (5 * sizeof(u32))
++
++#define BEGIN_DMA( n )                                                        \
++do {                                                                  \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "BEGIN_DMA( %d )\n", (n) );           \
++              DRM_INFO( "   space=0x%x req=0x%Zx\n",                  \
++                        dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
++      }                                                               \
++      prim = dev_priv->prim.start;                                    \
++      write = dev_priv->prim.tail;                                    \
++} while (0)
++
++#define BEGIN_DMA_WRAP()                                              \
++do {                                                                  \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "BEGIN_DMA()\n" );                            \
++              DRM_INFO( "   space=0x%x\n", dev_priv->prim.space );    \
++      }                                                               \
++      prim = dev_priv->prim.start;                                    \
++      write = dev_priv->prim.tail;                                    \
++} while (0)
++
++#define ADVANCE_DMA()                                                 \
++do {                                                                  \
++      dev_priv->prim.tail = write;                                    \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n",        \
++                        write, dev_priv->prim.space );                \
++      }                                                               \
++} while (0)
++
++#define FLUSH_DMA()                                                   \
++do {                                                                  \
++      if ( 0 ) {                                                      \
++              DRM_INFO( "\n" );                                       \
++              DRM_INFO( "   tail=0x%06x head=0x%06lx\n",              \
++                        dev_priv->prim.tail,                          \
++                        MGA_READ( MGA_PRIMADDRESS ) -                 \
++                        dev_priv->primary->offset );                  \
++      }                                                               \
++      if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) {                \
++              if ( dev_priv->prim.space <                             \
++                   dev_priv->prim.high_mark ) {                       \
++                      mga_do_dma_wrap_start( dev_priv );              \
++              } else {                                                \
++                      mga_do_dma_flush( dev_priv );                   \
++              }                                                       \
++      }                                                               \
++} while (0)
++
++/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
++ */
++#define DMA_WRITE( offset, val )                                      \
++do {                                                                  \
++      if ( MGA_VERBOSE ) {                                            \
++              DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04Zx\n",        \
++                        (u32)(val), write + (offset) * sizeof(u32) ); \
++      }                                                               \
++      *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
++} while (0)
++
++#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 )   \
++do {                                                                  \
++      DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) |                          \
++                     (DMAREG( reg1 ) << 8) |                          \
++                     (DMAREG( reg2 ) << 16) |                         \
++                     (DMAREG( reg3 ) << 24)) );                       \
++      DMA_WRITE( 1, val0 );                                           \
++      DMA_WRITE( 2, val1 );                                           \
++      DMA_WRITE( 3, val2 );                                           \
++      DMA_WRITE( 4, val3 );                                           \
++      write += DMA_BLOCK_SIZE;                                        \
++} while (0)
++
++/* Buffer aging via primary DMA stream head pointer.
++ */
++
++#define SET_AGE( age, h, w )                                          \
++do {                                                                  \
++      (age)->head = h;                                                \
++      (age)->wrap = w;                                                \
++} while (0)
++
++#define TEST_AGE( age, h, w )         ( (age)->wrap < w ||            \
++                                        ( (age)->wrap == w &&         \
++                                          (age)->head < h ) )
++
++#define AGE_BUFFER( buf_priv )                                                \
++do {                                                                  \
++      drm_mga_freelist_t *entry = (buf_priv)->list_entry;             \
++      if ( (buf_priv)->dispatched ) {                                 \
++              entry->age.head = (dev_priv->prim.tail +                \
++                                 dev_priv->primary->offset);          \
++              entry->age.wrap = dev_priv->sarea_priv->last_wrap;      \
++      } else {                                                        \
++              entry->age.head = 0;                                    \
++              entry->age.wrap = 0;                                    \
++      }                                                               \
++} while (0)
++
++#define MGA_ENGINE_IDLE_MASK          (MGA_SOFTRAPEN |                \
++                                       MGA_DWGENGSTS |                \
++                                       MGA_ENDPRDMASTS)
++#define MGA_DMA_IDLE_MASK             (MGA_SOFTRAPEN |                \
++                                       MGA_ENDPRDMASTS)
++
++#define MGA_DMA_DEBUG                 0
++
++/* A reduced set of the mga registers.
++ */
++#define MGA_CRTC_INDEX                        0x1fd4
++#define MGA_CRTC_DATA                 0x1fd5
++
++/* CRTC11 */
++#define MGA_VINTCLR                   (1 << 4)
++#define MGA_VINTEN                    (1 << 5)
++
++#define MGA_ALPHACTRL                 0x2c7c
++#define MGA_AR0                               0x1c60
++#define MGA_AR1                               0x1c64
++#define MGA_AR2                               0x1c68
++#define MGA_AR3                               0x1c6c
++#define MGA_AR4                               0x1c70
++#define MGA_AR5                               0x1c74
++#define MGA_AR6                               0x1c78
++
++#define MGA_CXBNDRY                   0x1c80
++#define MGA_CXLEFT                    0x1ca0
++#define MGA_CXRIGHT                   0x1ca4
++
++#define MGA_DMAPAD                    0x1c54
++#define MGA_DSTORG                    0x2cb8
++#define MGA_DWGCTL                    0x1c00
++#     define MGA_OPCOD_MASK                   (15 << 0)
++#     define MGA_OPCOD_TRAP                   (4 << 0)
++#     define MGA_OPCOD_TEXTURE_TRAP           (6 << 0)
++#     define MGA_OPCOD_BITBLT                 (8 << 0)
++#     define MGA_OPCOD_ILOAD                  (9 << 0)
++#     define MGA_ATYPE_MASK                   (7 << 4)
++#     define MGA_ATYPE_RPL                    (0 << 4)
++#     define MGA_ATYPE_RSTR                   (1 << 4)
++#     define MGA_ATYPE_ZI                     (3 << 4)
++#     define MGA_ATYPE_BLK                    (4 << 4)
++#     define MGA_ATYPE_I                      (7 << 4)
++#     define MGA_LINEAR                       (1 << 7)
++#     define MGA_ZMODE_MASK                   (7 << 8)
++#     define MGA_ZMODE_NOZCMP                 (0 << 8)
++#     define MGA_ZMODE_ZE                     (2 << 8)
++#     define MGA_ZMODE_ZNE                    (3 << 8)
++#     define MGA_ZMODE_ZLT                    (4 << 8)
++#     define MGA_ZMODE_ZLTE                   (5 << 8)
++#     define MGA_ZMODE_ZGT                    (6 << 8)
++#     define MGA_ZMODE_ZGTE                   (7 << 8)
++#     define MGA_SOLID                        (1 << 11)
++#     define MGA_ARZERO                       (1 << 12)
++#     define MGA_SGNZERO                      (1 << 13)
++#     define MGA_SHIFTZERO                    (1 << 14)
++#     define MGA_BOP_MASK                     (15 << 16)
++#     define MGA_BOP_ZERO                     (0 << 16)
++#     define MGA_BOP_DST                      (10 << 16)
++#     define MGA_BOP_SRC                      (12 << 16)
++#     define MGA_BOP_ONE                      (15 << 16)
++#     define MGA_TRANS_SHIFT                  20
++#     define MGA_TRANS_MASK                   (15 << 20)
++#     define MGA_BLTMOD_MASK                  (15 << 25)
++#     define MGA_BLTMOD_BMONOLEF              (0 << 25)
++#     define MGA_BLTMOD_BMONOWF               (4 << 25)
++#     define MGA_BLTMOD_PLAN                  (1 << 25)
++#     define MGA_BLTMOD_BFCOL                 (2 << 25)
++#     define MGA_BLTMOD_BU32BGR               (3 << 25)
++#     define MGA_BLTMOD_BU32RGB               (7 << 25)
++#     define MGA_BLTMOD_BU24BGR               (11 << 25)
++#     define MGA_BLTMOD_BU24RGB               (15 << 25)
++#     define MGA_PATTERN                      (1 << 29)
++#     define MGA_TRANSC                       (1 << 30)
++#     define MGA_CLIPDIS                      (1 << 31)
++#define MGA_DWGSYNC                   0x2c4c
++
++#define MGA_FCOL                      0x1c24
++#define MGA_FIFOSTATUS                        0x1e10
++#define MGA_FOGCOL                    0x1cf4
++#define MGA_FXBNDRY                   0x1c84
++#define MGA_FXLEFT                    0x1ca8
++#define MGA_FXRIGHT                   0x1cac
++
++#define MGA_ICLEAR                    0x1e18
++#     define MGA_SOFTRAPICLR                  (1 << 0)
++#     define MGA_VLINEICLR                    (1 << 5)
++#define MGA_IEN                               0x1e1c
++#     define MGA_SOFTRAPIEN                   (1 << 0)
++#     define MGA_VLINEIEN                     (1 << 5)
++
++#define MGA_LEN                               0x1c5c
++
++#define MGA_MACCESS                   0x1c04
++
++#define MGA_PITCH                     0x1c8c
++#define MGA_PLNWT                     0x1c1c
++#define MGA_PRIMADDRESS                       0x1e58
++#     define MGA_DMA_GENERAL                  (0 << 0)
++#     define MGA_DMA_BLIT                     (1 << 0)
++#     define MGA_DMA_VECTOR                   (2 << 0)
++#     define MGA_DMA_VERTEX                   (3 << 0)
++#define MGA_PRIMEND                   0x1e5c
++#     define MGA_PRIMNOSTART                  (1 << 0)
++#     define MGA_PAGPXFER                     (1 << 1)
++#define MGA_PRIMPTR                   0x1e50
++#     define MGA_PRIMPTREN0                   (1 << 0)
++#     define MGA_PRIMPTREN1                   (1 << 1)
++
++#define MGA_RST                               0x1e40
++#     define MGA_SOFTRESET                    (1 << 0)
++#     define MGA_SOFTEXTRST                   (1 << 1)
++
++#define MGA_SECADDRESS                        0x2c40
++#define MGA_SECEND                    0x2c44
++#define MGA_SETUPADDRESS              0x2cd0
++#define MGA_SETUPEND                  0x2cd4
++#define MGA_SGN                               0x1c58
++#define MGA_SOFTRAP                   0x2c48
++#define MGA_SRCORG                    0x2cb4
++#     define MGA_SRMMAP_MASK                  (1 << 0)
++#     define MGA_SRCMAP_FB                    (0 << 0)
++#     define MGA_SRCMAP_SYSMEM                (1 << 0)
++#     define MGA_SRCACC_MASK                  (1 << 1)
++#     define MGA_SRCACC_PCI                   (0 << 1)
++#     define MGA_SRCACC_AGP                   (1 << 1)
++#define MGA_STATUS                    0x1e14
++#     define MGA_SOFTRAPEN                    (1 << 0)
++#     define MGA_VSYNCPEN                     (1 << 4)
++#     define MGA_VLINEPEN                     (1 << 5)
++#     define MGA_DWGENGSTS                    (1 << 16)
++#     define MGA_ENDPRDMASTS                  (1 << 17)
++#define MGA_STENCIL                   0x2cc8
++#define MGA_STENCILCTL                        0x2ccc
++
++#define MGA_TDUALSTAGE0                       0x2cf8
++#define MGA_TDUALSTAGE1                       0x2cfc
++#define MGA_TEXBORDERCOL              0x2c5c
++#define MGA_TEXCTL                    0x2c30
++#define MGA_TEXCTL2                   0x2c3c
++#     define MGA_DUALTEX                      (1 << 7)
++#     define MGA_G400_TC2_MAGIC               (1 << 15)
++#     define MGA_MAP1_ENABLE                  (1 << 31)
++#define MGA_TEXFILTER                 0x2c58
++#define MGA_TEXHEIGHT                 0x2c2c
++#define MGA_TEXORG                    0x2c24
++#     define MGA_TEXORGMAP_MASK               (1 << 0)
++#     define MGA_TEXORGMAP_FB                 (0 << 0)
++#     define MGA_TEXORGMAP_SYSMEM             (1 << 0)
++#     define MGA_TEXORGACC_MASK               (1 << 1)
++#     define MGA_TEXORGACC_PCI                (0 << 1)
++#     define MGA_TEXORGACC_AGP                (1 << 1)
++#define MGA_TEXORG1                   0x2ca4
++#define MGA_TEXORG2                   0x2ca8
++#define MGA_TEXORG3                   0x2cac
++#define MGA_TEXORG4                   0x2cb0
++#define MGA_TEXTRANS                  0x2c34
++#define MGA_TEXTRANSHIGH              0x2c38
++#define MGA_TEXWIDTH                  0x2c28
++
++#define MGA_WACCEPTSEQ                        0x1dd4
++#define MGA_WCODEADDR                 0x1e6c
++#define MGA_WFLAG                     0x1dc4
++#define MGA_WFLAG1                    0x1de0
++#define MGA_WFLAGNB                   0x1e64
++#define MGA_WFLAGNB1                  0x1e08
++#define MGA_WGETMSB                   0x1dc8
++#define MGA_WIADDR                    0x1dc0
++#define MGA_WIADDR2                   0x1dd8
++#     define MGA_WMODE_SUSPEND                (0 << 0)
++#     define MGA_WMODE_RESUME                 (1 << 0)
++#     define MGA_WMODE_JUMP                   (2 << 0)
++#     define MGA_WMODE_START                  (3 << 0)
++#     define MGA_WAGP_ENABLE                  (1 << 2)
++#define MGA_WMISC                     0x1e70
++#     define MGA_WUCODECACHE_ENABLE           (1 << 0)
++#     define MGA_WMASTER_ENABLE               (1 << 1)
++#     define MGA_WCACHEFLUSH_ENABLE           (1 << 3)
++#define MGA_WVRTXSZ                   0x1dcc
++
++#define MGA_YBOT                      0x1c9c
++#define MGA_YDST                      0x1c90
++#define MGA_YDSTLEN                   0x1c88
++#define MGA_YDSTORG                   0x1c94
++#define MGA_YTOP                      0x1c98
++
++#define MGA_ZORG                      0x1c0c
++
++/* This finishes the current batch of commands
++ */
++#define MGA_EXEC                      0x0100
++
++/* AGP PLL encoding (for G200 only).
++ */
++#define MGA_AGP_PLL                   0x1e4c
++#     define MGA_AGP2XPLL_DISABLE             (0 << 0)
++#     define MGA_AGP2XPLL_ENABLE              (1 << 0)
++
++/* Warp registers
++ */
++#define MGA_WR0                               0x2d00
++#define MGA_WR1                               0x2d04
++#define MGA_WR2                               0x2d08
++#define MGA_WR3                               0x2d0c
++#define MGA_WR4                               0x2d10
++#define MGA_WR5                               0x2d14
++#define MGA_WR6                               0x2d18
++#define MGA_WR7                               0x2d1c
++#define MGA_WR8                               0x2d20
++#define MGA_WR9                               0x2d24
++#define MGA_WR10                      0x2d28
++#define MGA_WR11                      0x2d2c
++#define MGA_WR12                      0x2d30
++#define MGA_WR13                      0x2d34
++#define MGA_WR14                      0x2d38
++#define MGA_WR15                      0x2d3c
++#define MGA_WR16                      0x2d40
++#define MGA_WR17                      0x2d44
++#define MGA_WR18                      0x2d48
++#define MGA_WR19                      0x2d4c
++#define MGA_WR20                      0x2d50
++#define MGA_WR21                      0x2d54
++#define MGA_WR22                      0x2d58
++#define MGA_WR23                      0x2d5c
++#define MGA_WR24                      0x2d60
++#define MGA_WR25                      0x2d64
++#define MGA_WR26                      0x2d68
++#define MGA_WR27                      0x2d6c
++#define MGA_WR28                      0x2d70
++#define MGA_WR29                      0x2d74
++#define MGA_WR30                      0x2d78
++#define MGA_WR31                      0x2d7c
++#define MGA_WR32                      0x2d80
++#define MGA_WR33                      0x2d84
++#define MGA_WR34                      0x2d88
++#define MGA_WR35                      0x2d8c
++#define MGA_WR36                      0x2d90
++#define MGA_WR37                      0x2d94
++#define MGA_WR38                      0x2d98
++#define MGA_WR39                      0x2d9c
++#define MGA_WR40                      0x2da0
++#define MGA_WR41                      0x2da4
++#define MGA_WR42                      0x2da8
++#define MGA_WR43                      0x2dac
++#define MGA_WR44                      0x2db0
++#define MGA_WR45                      0x2db4
++#define MGA_WR46                      0x2db8
++#define MGA_WR47                      0x2dbc
++#define MGA_WR48                      0x2dc0
++#define MGA_WR49                      0x2dc4
++#define MGA_WR50                      0x2dc8
++#define MGA_WR51                      0x2dcc
++#define MGA_WR52                      0x2dd0
++#define MGA_WR53                      0x2dd4
++#define MGA_WR54                      0x2dd8
++#define MGA_WR55                      0x2ddc
++#define MGA_WR56                      0x2de0
++#define MGA_WR57                      0x2de4
++#define MGA_WR58                      0x2de8
++#define MGA_WR59                      0x2dec
++#define MGA_WR60                      0x2df0
++#define MGA_WR61                      0x2df4
++#define MGA_WR62                      0x2df8
++#define MGA_WR63                      0x2dfc
++#     define MGA_G400_WR_MAGIC                (1 << 6)
++#     define MGA_G400_WR56_MAGIC              0x46480000      /* 12800.0f */
++
++#define MGA_ILOAD_ALIGN               64
++#define MGA_ILOAD_MASK                (MGA_ILOAD_ALIGN - 1)
++
++#define MGA_DWGCTL_FLUSH      (MGA_OPCOD_TEXTURE_TRAP |               \
++                               MGA_ATYPE_I |                          \
++                               MGA_ZMODE_NOZCMP |                     \
++                               MGA_ARZERO |                           \
++                               MGA_SGNZERO |                          \
++                               MGA_BOP_SRC |                          \
++                               (15 << MGA_TRANS_SHIFT))
++
++#define MGA_DWGCTL_CLEAR      (MGA_OPCOD_TRAP |                       \
++                               MGA_ZMODE_NOZCMP |                     \
++                               MGA_SOLID |                            \
++                               MGA_ARZERO |                           \
++                               MGA_SGNZERO |                          \
++                               MGA_SHIFTZERO |                        \
++                               MGA_BOP_SRC |                          \
++                               (0 << MGA_TRANS_SHIFT) |               \
++                               MGA_BLTMOD_BMONOLEF |                  \
++                               MGA_TRANSC |                           \
++                               MGA_CLIPDIS)
++
++#define MGA_DWGCTL_COPY               (MGA_OPCOD_BITBLT |                     \
++                               MGA_ATYPE_RPL |                        \
++                               MGA_SGNZERO |                          \
++                               MGA_SHIFTZERO |                        \
++                               MGA_BOP_SRC |                          \
++                               (0 << MGA_TRANS_SHIFT) |               \
++                               MGA_BLTMOD_BFCOL |                     \
++                               MGA_CLIPDIS)
++
++/* Simple idle test.
++ */
++static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
++{
++      u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
++      return (status == MGA_ENDPRDMASTS);
++}
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_ioc32.c git-nokia/drivers/gpu/drm-tungsten/mga_ioc32.c
+--- git/drivers/gpu/drm-tungsten/mga_ioc32.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_ioc32.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,234 @@
++
++/**
++ * \file mga_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the MGA DRM.
++ *
++ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++
++typedef struct drm32_mga_init {
++      int func;
++      u32 sarea_priv_offset;
++      int chipset;
++      int sgram;
++      unsigned int maccess;
++      unsigned int fb_cpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_cpp;
++      unsigned int depth_offset, depth_pitch;
++      unsigned int texture_offset[MGA_NR_TEX_HEAPS];
++      unsigned int texture_size[MGA_NR_TEX_HEAPS];
++      u32 fb_offset;
++      u32 mmio_offset;
++      u32 status_offset;
++      u32 warp_offset;
++      u32 primary_offset;
++      u32 buffers_offset;
++} drm_mga_init32_t;
++
++static int compat_mga_init(struct file *file, unsigned int cmd,
++                         unsigned long arg)
++{
++      drm_mga_init32_t init32;
++      drm_mga_init_t __user *init;
++      int err = 0, i;
++
++      if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++              return -EFAULT;
++
++      init = compat_alloc_user_space(sizeof(*init));
++      if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++          || __put_user(init32.func, &init->func)
++          || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++          || __put_user(init32.chipset, &init->chipset)
++          || __put_user(init32.sgram, &init->sgram)
++          || __put_user(init32.maccess, &init->maccess)
++          || __put_user(init32.fb_cpp, &init->fb_cpp)
++          || __put_user(init32.front_offset, &init->front_offset)
++          || __put_user(init32.front_pitch, &init->front_pitch)
++          || __put_user(init32.back_offset, &init->back_offset)
++          || __put_user(init32.back_pitch, &init->back_pitch)
++          || __put_user(init32.depth_cpp, &init->depth_cpp)
++          || __put_user(init32.depth_offset, &init->depth_offset)
++          || __put_user(init32.depth_pitch, &init->depth_pitch)
++          || __put_user(init32.fb_offset, &init->fb_offset)
++          || __put_user(init32.mmio_offset, &init->mmio_offset)
++          || __put_user(init32.status_offset, &init->status_offset)
++          || __put_user(init32.warp_offset, &init->warp_offset)
++          || __put_user(init32.primary_offset, &init->primary_offset)
++          || __put_user(init32.buffers_offset, &init->buffers_offset))
++              return -EFAULT;
++
++      for (i=0; i<MGA_NR_TEX_HEAPS; i++)
++      {
++              err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]);
++              err |= __put_user(init32.texture_size[i], &init->texture_size[i]);
++      }
++      if (err)
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_MGA_INIT, (unsigned long) init);
++}
++
++
++typedef struct drm_mga_getparam32 {
++      int param;
++      u32 value;
++} drm_mga_getparam32_t;
++
++
++static int compat_mga_getparam(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_mga_getparam32_t getparam32;
++      drm_mga_getparam_t __user *getparam;
++
++      if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
++              return -EFAULT;
++
++      getparam = compat_alloc_user_space(sizeof(*getparam));
++      if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
++          || __put_user(getparam32.param, &getparam->param)
++          || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
++}
++
++typedef struct drm_mga_drm_bootstrap32 {
++      u32 texture_handle;
++      u32 texture_size;
++      u32 primary_size;
++      u32 secondary_bin_count;
++      u32 secondary_bin_size;
++      u32 agp_mode;
++      u8 agp_size;
++} drm_mga_dma_bootstrap32_t;
++
++static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_mga_dma_bootstrap32_t dma_bootstrap32;
++      drm_mga_dma_bootstrap_t __user *dma_bootstrap;
++      int err;
++
++      if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
++                         sizeof(dma_bootstrap32)))
++              return -EFAULT;
++
++      dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
++      if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
++          || __put_user(dma_bootstrap32.texture_handle,
++                        &dma_bootstrap->texture_handle)
++          || __put_user(dma_bootstrap32.texture_size,
++                        &dma_bootstrap->texture_size)
++          || __put_user(dma_bootstrap32.primary_size,
++                        &dma_bootstrap->primary_size)
++          || __put_user(dma_bootstrap32.secondary_bin_count,
++                        &dma_bootstrap->secondary_bin_count)
++          || __put_user(dma_bootstrap32.secondary_bin_size,
++                        &dma_bootstrap->secondary_bin_size)
++          || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
++          || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
++              return -EFAULT;
++
++      err = drm_ioctl(file->f_dentry->d_inode, file,
++                      DRM_IOCTL_MGA_DMA_BOOTSTRAP,
++                      (unsigned long)dma_bootstrap);
++      if (err)
++              return err;
++
++      if (__get_user(dma_bootstrap32.texture_handle,
++                     &dma_bootstrap->texture_handle)
++          || __get_user(dma_bootstrap32.texture_size,
++                        &dma_bootstrap->texture_size)
++          || __get_user(dma_bootstrap32.primary_size,
++                        &dma_bootstrap->primary_size)
++          || __get_user(dma_bootstrap32.secondary_bin_count,
++                        &dma_bootstrap->secondary_bin_count)
++          || __get_user(dma_bootstrap32.secondary_bin_size,
++                        &dma_bootstrap->secondary_bin_size)
++          || __get_user(dma_bootstrap32.agp_mode,
++                        &dma_bootstrap->agp_mode)
++          || __get_user(dma_bootstrap32.agp_size,
++                        &dma_bootstrap->agp_size))
++              return -EFAULT;
++
++      if (copy_to_user((void __user *)arg, &dma_bootstrap32,
++                       sizeof(dma_bootstrap32)))
++              return -EFAULT;
++
++      return 0;
++}
++
++drm_ioctl_compat_t *mga_compat_ioctls[] = {
++      [DRM_MGA_INIT] = compat_mga_init,
++      [DRM_MGA_GETPARAM] = compat_mga_getparam,
++      [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long mga_compat_ioctl(struct file *filp, unsigned int cmd,
++                       unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
++              fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_irq.c git-nokia/drivers/gpu/drm-tungsten/mga_irq.c
+--- git/drivers/gpu/drm-tungsten/mga_irq.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_irq.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,182 @@
++/* mga_irq.c -- IRQ handling for the MGA -*- linux-c -*-
++ */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Eric Anholt <anholt@FreeBSD.org>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      const drm_mga_private_t *const dev_priv = 
++              (drm_mga_private_t *) dev->dev_private;
++
++      if (crtc != 0) {
++              return 0;
++      }
++
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++
++irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      int status;
++      int handled = 0;
++
++      status = MGA_READ(MGA_STATUS);
++
++      /* VBLANK interrupt */
++      if (status & MGA_VLINEPEN) {
++              MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
++              atomic_inc(&dev_priv->vbl_received);
++              drm_handle_vblank(dev, 0);
++              handled = 1;
++      }
++
++      /* SOFTRAP interrupt */
++      if (status & MGA_SOFTRAPEN) {
++              const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
++              const u32 prim_end = MGA_READ(MGA_PRIMEND);
++
++
++              MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
++
++              /* In addition to clearing the interrupt-pending bit, we
++               * have to write to MGA_PRIMEND to re-start the DMA operation.
++               */
++              if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
++                      MGA_WRITE(MGA_PRIMEND, prim_end);
++              }
++
++              atomic_inc(&dev_priv->last_fence_retired);
++              DRM_WAKEUP(&dev_priv->fence_queue);
++              handled = 1;
++      }
++
++      if (handled)
++              return IRQ_HANDLED;
++      return IRQ_NONE;
++}
++
++int mga_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++      if (crtc != 0) {
++              DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                        crtc);
++              return 0;
++      }
++
++      MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
++      return 0;
++}
++
++
++void mga_disable_vblank(struct drm_device *dev, int crtc)
++{
++      if (crtc != 0) {
++              DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                        crtc);
++      }
++
++      /* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
++       * a nice hardware counter that tracks the number of refreshes when
++       * the interrupt is disabled, and the kernel doesn't know the refresh
++       * rate to calculate an estimate.
++       */
++      /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
++}
++
++int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      unsigned int cur_fence;
++      int ret = 0;
++
++      /* Assume that the user has missed the current sequence number
++       * by about a day rather than she wants to wait for years
++       * using fences.
++       */
++      DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
++                  (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
++                    - *sequence) <= (1 << 23)));
++
++      *sequence = cur_fence;
++
++      return ret;
++}
++
++void mga_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++
++      /* Disable *all* interrupts */
++      MGA_WRITE(MGA_IEN, 0);
++      /* Clear bits if they're already high */
++      MGA_WRITE(MGA_ICLEAR, ~0);
++}
++
++int mga_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      int ret;
++
++      ret = drm_vblank_init(dev, 1);
++      if (ret)
++              return ret;
++
++      DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
++
++      /* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
++       * in mga_enable_vblank.
++       */
++      MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
++      return 0;
++}
++
++void mga_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      /* Disable *all* interrupts */
++      MGA_WRITE(MGA_IEN, 0);
++
++      dev->irq_enabled = 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_state.c git-nokia/drivers/gpu/drm-tungsten/mga_state.c
+--- git/drivers/gpu/drm-tungsten/mga_state.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_state.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1139 @@
++/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
++ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
++ */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Jeff Hartmann <jhartmann@valinux.com>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *
++ * Rewritten by:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++
++/* ================================================================
++ * DMA hardware state programming functions
++ */
++
++static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
++                             struct drm_clip_rect * box)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      unsigned int pitch = dev_priv->front_pitch;
++      DMA_LOCALS;
++
++      BEGIN_DMA(2);
++
++      /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
++       */
++      if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
++              DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
++                        MGA_LEN + MGA_EXEC, 0x80000000,
++                        MGA_DWGCTL, ctx->dwgctl,
++                        MGA_LEN + MGA_EXEC, 0x80000000);
++      }
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
++                MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      DMA_LOCALS;
++
++      BEGIN_DMA(3);
++
++      DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
++                MGA_MACCESS, ctx->maccess,
++                MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
++
++      DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
++                MGA_FOGCOL, ctx->fogcolor,
++                MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
++
++      DMA_BLOCK(MGA_FCOL, ctx->fcol,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      DMA_LOCALS;
++
++      BEGIN_DMA(4);
++
++      DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
++                MGA_MACCESS, ctx->maccess,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
++                MGA_FOGCOL, ctx->fogcolor,
++                MGA_WFLAG, ctx->wflag,
++                MGA_ZORG, dev_priv->depth_offset);
++
++      DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
++                MGA_TDUALSTAGE0, ctx->tdualstage0,
++                MGA_TDUALSTAGE1, ctx->tdualstage1,
++                MGA_FCOL, ctx->fcol);
++
++      DMA_BLOCK(MGA_STENCIL, ctx->stencil,
++                MGA_STENCILCTL, ctx->stencilctl,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
++      DMA_LOCALS;
++
++      BEGIN_DMA(4);
++
++      DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
++                MGA_TEXCTL, tex->texctl,
++                MGA_TEXFILTER, tex->texfilter,
++                MGA_TEXBORDERCOL, tex->texbordercol);
++
++      DMA_BLOCK(MGA_TEXORG, tex->texorg,
++                MGA_TEXORG1, tex->texorg1,
++                MGA_TEXORG2, tex->texorg2,
++                MGA_TEXORG3, tex->texorg3);
++
++      DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++                MGA_TEXWIDTH, tex->texwidth,
++                MGA_TEXHEIGHT, tex->texheight,
++                MGA_WR24, tex->texwidth);
++
++      DMA_BLOCK(MGA_WR34, tex->texheight,
++                MGA_TEXTRANS, 0x0000ffff,
++                MGA_TEXTRANSHIGH, 0x0000ffff,
++                MGA_DMAPAD, 0x00000000);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
++      DMA_LOCALS;
++
++/*    printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
++/*           tex->texctl, tex->texctl2); */
++
++      BEGIN_DMA(6);
++
++      DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
++                MGA_TEXCTL, tex->texctl,
++                MGA_TEXFILTER, tex->texfilter,
++                MGA_TEXBORDERCOL, tex->texbordercol);
++
++      DMA_BLOCK(MGA_TEXORG, tex->texorg,
++                MGA_TEXORG1, tex->texorg1,
++                MGA_TEXORG2, tex->texorg2,
++                MGA_TEXORG3, tex->texorg3);
++
++      DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++                MGA_TEXWIDTH, tex->texwidth,
++                MGA_TEXHEIGHT, tex->texheight,
++                MGA_WR49, 0x00000000);
++
++      DMA_BLOCK(MGA_WR57, 0x00000000,
++                MGA_WR53, 0x00000000,
++                MGA_WR61, 0x00000000,
++                MGA_WR52, MGA_G400_WR_MAGIC);
++
++      DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
++                MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
++                MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
++                MGA_DMAPAD, 0x00000000);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_TEXTRANS, 0x0000ffff,
++                MGA_TEXTRANSHIGH, 0x0000ffff);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
++      DMA_LOCALS;
++
++/*    printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg,  */
++/*           tex->texctl, tex->texctl2); */
++
++      BEGIN_DMA(5);
++
++      DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
++                              MGA_MAP1_ENABLE |
++                              MGA_G400_TC2_MAGIC),
++                MGA_TEXCTL, tex->texctl,
++                MGA_TEXFILTER, tex->texfilter,
++                MGA_TEXBORDERCOL, tex->texbordercol);
++
++      DMA_BLOCK(MGA_TEXORG, tex->texorg,
++                MGA_TEXORG1, tex->texorg1,
++                MGA_TEXORG2, tex->texorg2,
++                MGA_TEXORG3, tex->texorg3);
++
++      DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
++                MGA_TEXWIDTH, tex->texwidth,
++                MGA_TEXHEIGHT, tex->texheight,
++                MGA_WR49, 0x00000000);
++
++      DMA_BLOCK(MGA_WR57, 0x00000000,
++                MGA_WR53, 0x00000000,
++                MGA_WR61, 0x00000000,
++                MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);
++
++      DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
++                MGA_TEXTRANS, 0x0000ffff,
++                MGA_TEXTRANSHIGH, 0x0000ffff,
++                MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int pipe = sarea_priv->warp_pipe;
++      DMA_LOCALS;
++
++      BEGIN_DMA(3);
++
++      DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
++                MGA_WVRTXSZ, 0x00000007,
++                MGA_WFLAG, 0x00000000,
++                MGA_WR24, 0x00000000);
++
++      DMA_BLOCK(MGA_WR25, 0x00000100,
++                MGA_WR34, 0x00000000,
++                MGA_WR42, 0x0000ffff,
++                MGA_WR60, 0x0000ffff);
++
++      /* Padding required due to hardware bug.
++       */
++      DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
++                             MGA_WMODE_START | dev_priv->wagp_enable));
++
++      ADVANCE_DMA();
++}
++
++static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int pipe = sarea_priv->warp_pipe;
++      DMA_LOCALS;
++
++/*    printk("mga_g400_emit_pipe %x\n", pipe); */
++
++      BEGIN_DMA(10);
++
++      DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000);
++
++      if (pipe & MGA_T2) {
++              DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000);
++
++              DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x1e000000);
++      } else {
++              if (dev_priv->warp_pipe & MGA_T2) {
++                      /* Flush the WARP pipe */
++                      DMA_BLOCK(MGA_YDST, 0x00000000,
++                                MGA_FXLEFT, 0x00000000,
++                                MGA_FXRIGHT, 0x00000001,
++                                MGA_DWGCTL, MGA_DWGCTL_FLUSH);
++
++                      DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
++                                MGA_DWGSYNC, 0x00007000,
++                                MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
++                                MGA_LEN + MGA_EXEC, 0x00000000);
++
++                      DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
++                                              MGA_G400_TC2_MAGIC),
++                                MGA_LEN + MGA_EXEC, 0x00000000,
++                                MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
++                                MGA_DMAPAD, 0x00000000);
++              }
++
++              DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000,
++                        MGA_DMAPAD, 0x00000000);
++
++              DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x00000000,
++                        MGA_WACCEPTSEQ, 0x18000000);
++      }
++
++      DMA_BLOCK(MGA_WFLAG, 0x00000000,
++                MGA_WFLAG1, 0x00000000,
++                MGA_WR56, MGA_G400_WR56_MAGIC,
++                MGA_DMAPAD, 0x00000000);
++
++      DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0              */
++                MGA_WR57, 0x00000000, /* tex0              */
++                MGA_WR53, 0x00000000, /* tex1              */
++                MGA_WR61, 0x00000000);        /* tex1              */
++
++      DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,  /* tex0 width        */
++                MGA_WR62, MGA_G400_WR_MAGIC,  /* tex0 height       */
++                MGA_WR52, MGA_G400_WR_MAGIC,  /* tex1 width        */
++                MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height       */
++
++      /* Padding required to to hardware bug */
++      DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_DMAPAD, 0xffffffff,
++                MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
++                              MGA_WMODE_START | dev_priv->wagp_enable));
++
++      ADVANCE_DMA();
++}
++
++static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++
++      if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
++              mga_g200_emit_pipe(dev_priv);
++              dev_priv->warp_pipe = sarea_priv->warp_pipe;
++      }
++
++      if (dirty & MGA_UPLOAD_CONTEXT) {
++              mga_g200_emit_context(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
++      }
++
++      if (dirty & MGA_UPLOAD_TEX0) {
++              mga_g200_emit_tex0(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
++      }
++}
++
++static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++      int multitex = sarea_priv->warp_pipe & MGA_T2;
++
++      if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
++              mga_g400_emit_pipe(dev_priv);
++              dev_priv->warp_pipe = sarea_priv->warp_pipe;
++      }
++
++      if (dirty & MGA_UPLOAD_CONTEXT) {
++              mga_g400_emit_context(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
++      }
++
++      if (dirty & MGA_UPLOAD_TEX0) {
++              mga_g400_emit_tex0(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
++      }
++
++      if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
++              mga_g400_emit_tex1(dev_priv);
++              sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
++      }
++}
++
++/* ================================================================
++ * SAREA state verification
++ */
++
++/* Disallow all write destinations except the front and backbuffer.
++ */
++static int mga_verify_context(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++
++      if (ctx->dstorg != dev_priv->front_offset &&
++          ctx->dstorg != dev_priv->back_offset) {
++              DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
++                        ctx->dstorg, dev_priv->front_offset,
++                        dev_priv->back_offset);
++              ctx->dstorg = 0;
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* Disallow texture reads from PCI space.
++ */
++static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
++      unsigned int org;
++
++      org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
++
++      if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
++              DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
++              tex->texorg = 0;
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int mga_verify_state(drm_mga_private_t * dev_priv)
++{
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++      int ret = 0;
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      if (dirty & MGA_UPLOAD_CONTEXT)
++              ret |= mga_verify_context(dev_priv);
++
++      if (dirty & MGA_UPLOAD_TEX0)
++              ret |= mga_verify_tex(dev_priv, 0);
++
++      if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
++              if (dirty & MGA_UPLOAD_TEX1)
++                      ret |= mga_verify_tex(dev_priv, 1);
++
++              if (dirty & MGA_UPLOAD_PIPE)
++                      ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES);
++      } else {
++              if (dirty & MGA_UPLOAD_PIPE)
++                      ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES);
++      }
++
++      return (ret == 0);
++}
++
++static int mga_verify_iload(drm_mga_private_t * dev_priv,
++                          unsigned int dstorg, unsigned int length)
++{
++      if (dstorg < dev_priv->texture_offset ||
++          dstorg + length > (dev_priv->texture_offset +
++                             dev_priv->texture_size)) {
++              DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
++              return -EINVAL;
++      }
++
++      if (length & MGA_ILOAD_MASK) {
++              DRM_ERROR("*** bad iload length: 0x%x\n",
++                        length & MGA_ILOAD_MASK);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int mga_verify_blit(drm_mga_private_t * dev_priv,
++                         unsigned int srcorg, unsigned int dstorg)
++{
++      if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
++          (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
++              DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++/* ================================================================
++ *
++ */
++
++static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      int i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_DMA(1);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      ADVANCE_DMA();
++
++      for (i = 0; i < nbox; i++) {
++              struct drm_clip_rect *box = &pbox[i];
++              u32 height = box->y2 - box->y1;
++
++              DRM_DEBUG("   from=%d,%d to=%d,%d\n",
++                        box->x1, box->y1, box->x2, box->y2);
++
++              if (clear->flags & MGA_FRONT) {
++                      BEGIN_DMA(2);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_PLNWT, clear->color_mask,
++                                MGA_YDSTLEN, (box->y1 << 16) | height,
++                                MGA_FXBNDRY, (box->x2 << 16) | box->x1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_FCOL, clear->clear_color,
++                                MGA_DSTORG, dev_priv->front_offset,
++                                MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
++
++                      ADVANCE_DMA();
++              }
++
++              if (clear->flags & MGA_BACK) {
++                      BEGIN_DMA(2);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_PLNWT, clear->color_mask,
++                                MGA_YDSTLEN, (box->y1 << 16) | height,
++                                MGA_FXBNDRY, (box->x2 << 16) | box->x1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_FCOL, clear->clear_color,
++                                MGA_DSTORG, dev_priv->back_offset,
++                                MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
++
++                      ADVANCE_DMA();
++              }
++
++              if (clear->flags & MGA_DEPTH) {
++                      BEGIN_DMA(2);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_PLNWT, clear->depth_mask,
++                                MGA_YDSTLEN, (box->y1 << 16) | height,
++                                MGA_FXBNDRY, (box->x2 << 16) | box->x1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_FCOL, clear->clear_depth,
++                                MGA_DSTORG, dev_priv->depth_offset,
++                                MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
++
++                      ADVANCE_DMA();
++              }
++
++      }
++
++      BEGIN_DMA(1);
++
++      /* Force reset of DWGCTL */
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      ADVANCE_DMA();
++
++      FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_swap(struct drm_device * dev)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      int i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      sarea_priv->last_frame.head = dev_priv->prim.tail;
++      sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;
++
++      BEGIN_DMA(4 + nbox);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
++                MGA_MACCESS, dev_priv->maccess,
++                MGA_SRCORG, dev_priv->back_offset,
++                MGA_AR5, dev_priv->front_pitch);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, 0xffffffff,
++                MGA_DWGCTL, MGA_DWGCTL_COPY);
++
++      for (i = 0; i < nbox; i++) {
++              struct drm_clip_rect *box = &pbox[i];
++              u32 height = box->y2 - box->y1;
++              u32 start = box->y1 * dev_priv->front_pitch;
++
++              DRM_DEBUG("   from=%d,%d to=%d,%d\n",
++                        box->x1, box->y1, box->x2, box->y2);
++
++              DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
++                        MGA_AR3, start + box->x1,
++                        MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
++                        MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
++      }
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_SRCORG, dev_priv->front_offset,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      ADVANCE_DMA();
++
++      FLUSH_DMA();
++
++      DRM_DEBUG("... done.\n");
++}
++
++static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 address = (u32) buf->bus_address;
++      u32 length = (u32) buf->used;
++      int i = 0;
++      DMA_LOCALS;
++      DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
++
++      if (buf->used) {
++              buf_priv->dispatched = 1;
++
++              MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
++
++              do {
++                      if (i < sarea_priv->nbox) {
++                              mga_emit_clip_rect(dev_priv,
++                                                 &sarea_priv->boxes[i]);
++                      }
++
++                      BEGIN_DMA(1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_DMAPAD, 0x00000000,
++                                MGA_SECADDRESS, (address |
++                                                 MGA_DMA_VERTEX),
++                                MGA_SECEND, ((address + length) |
++                                             dev_priv->dma_access));
++
++                      ADVANCE_DMA();
++              } while (++i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              AGE_BUFFER(buf_priv);
++              buf->pending = 0;
++              buf->used = 0;
++              buf_priv->dispatched = 0;
++
++              mga_freelist_put(dev, buf);
++      }
++
++      FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
++                                   unsigned int start, unsigned int end)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      u32 address = (u32) buf->bus_address;
++      int i = 0;
++      DMA_LOCALS;
++      DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
++
++      if (start != end) {
++              buf_priv->dispatched = 1;
++
++              MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
++
++              do {
++                      if (i < sarea_priv->nbox) {
++                              mga_emit_clip_rect(dev_priv,
++                                                 &sarea_priv->boxes[i]);
++                      }
++
++                      BEGIN_DMA(1);
++
++                      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                                MGA_DMAPAD, 0x00000000,
++                                MGA_SETUPADDRESS, address + start,
++                                MGA_SETUPEND, ((address + end) |
++                                               dev_priv->dma_access));
++
++                      ADVANCE_DMA();
++              } while (++i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              AGE_BUFFER(buf_priv);
++              buf->pending = 0;
++              buf->used = 0;
++              buf_priv->dispatched = 0;
++
++              mga_freelist_put(dev, buf);
++      }
++
++      FLUSH_DMA();
++}
++
++/* This copies a 64 byte aligned agp region to the frambuffer with a
++ * standard blit, the ioctl needs to do checking.
++ */
++static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
++                                 unsigned int dstorg, unsigned int length)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_buf_priv_t *buf_priv = buf->dev_private;
++      drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
++      u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
++      u32 y2;
++      DMA_LOCALS;
++      DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
++
++      y2 = length / 64;
++
++      BEGIN_DMA(5);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      DMA_BLOCK(MGA_DSTORG, dstorg,
++                MGA_MACCESS, 0x00000000,
++                MGA_SRCORG, srcorg,
++                MGA_AR5, 64);
++
++      DMA_BLOCK(MGA_PITCH, 64,
++                MGA_PLNWT, 0xffffffff,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGCTL, MGA_DWGCTL_COPY);
++
++      DMA_BLOCK(MGA_AR0, 63,
++                MGA_AR3, 0,
++                MGA_FXBNDRY, (63 << 16) | 0,
++                MGA_YDSTLEN + MGA_EXEC, y2);
++
++      DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
++                MGA_SRCORG, dev_priv->front_offset,
++                MGA_PITCH, dev_priv->front_pitch,
++                MGA_DWGSYNC, 0x00007000);
++
++      ADVANCE_DMA();
++
++      AGE_BUFFER(buf_priv);
++
++      buf->pending = 0;
++      buf->used = 0;
++      buf_priv->dispatched = 0;
++
++      mga_freelist_put(dev, buf);
++
++      FLUSH_DMA();
++}
++
++static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int nbox = sarea_priv->nbox;
++      u32 scandir = 0, i;
++      DMA_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_DMA(4 + nbox);
++
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DWGSYNC, 0x00007100,
++                MGA_DWGSYNC, 0x00007000);
++
++      DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
++                MGA_PLNWT, blit->planemask,
++                MGA_SRCORG, blit->srcorg,
++                MGA_DSTORG, blit->dstorg);
++
++      DMA_BLOCK(MGA_SGN, scandir,
++                MGA_MACCESS, dev_priv->maccess,
++                MGA_AR5, blit->ydir * blit->src_pitch,
++                MGA_PITCH, blit->dst_pitch);
++
++      for (i = 0; i < nbox; i++) {
++              int srcx = pbox[i].x1 + blit->delta_sx;
++              int srcy = pbox[i].y1 + blit->delta_sy;
++              int dstx = pbox[i].x1 + blit->delta_dx;
++              int dsty = pbox[i].y1 + blit->delta_dy;
++              int h = pbox[i].y2 - pbox[i].y1;
++              int w = pbox[i].x2 - pbox[i].x1 - 1;
++              int start;
++
++              if (blit->ydir == -1) {
++                      srcy = blit->height - srcy - 1;
++              }
++
++              start = srcy * blit->src_pitch + srcx;
++
++              DMA_BLOCK(MGA_AR0, start + w,
++                        MGA_AR3, start,
++                        MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
++                        MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
++      }
++
++      /* Do something to flush AGP?
++       */
++
++      /* Force reset of DWGCTL */
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_PLNWT, ctx->plnwt,
++                MGA_PITCH, dev_priv->front_pitch,
++                MGA_DWGCTL, ctx->dwgctl);
++
++      ADVANCE_DMA();
++}
++
++/* ================================================================
++ *
++ */
++
++static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_clear_t *clear = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_clear(dev, clear);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_swap(dev);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (vertex->idx < 0 || vertex->idx > dma->buf_count)
++              return -EINVAL;
++      buf = dma->buflist[vertex->idx];
++      buf_priv = buf->dev_private;
++
++      buf->used = vertex->used;
++      buf_priv->discard = vertex->discard;
++
++      if (!mga_verify_state(dev_priv)) {
++              if (vertex->discard) {
++                      if (buf_priv->dispatched == 1)
++                              AGE_BUFFER(buf_priv);
++                      buf_priv->dispatched = 0;
++                      mga_freelist_put(dev, buf);
++              }
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_vertex(dev, buf);
++
++      return 0;
++}
++
++static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_indices_t *indices = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (indices->idx < 0 || indices->idx > dma->buf_count)
++              return -EINVAL;
++
++      buf = dma->buflist[indices->idx];
++      buf_priv = buf->dev_private;
++
++      buf_priv->discard = indices->discard;
++
++      if (!mga_verify_state(dev_priv)) {
++              if (indices->discard) {
++                      if (buf_priv->dispatched == 1)
++                              AGE_BUFFER(buf_priv);
++                      buf_priv->dispatched = 0;
++                      mga_freelist_put(dev, buf);
++              }
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
++
++      return 0;
++}
++
++static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      struct drm_buf *buf;
++      drm_mga_buf_priv_t *buf_priv;
++      drm_mga_iload_t *iload = data;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++#if 0
++      if (mga_do_wait_for_idle(dev_priv) < 0) {
++              if (MGA_DMA_DEBUG)
++                      DRM_INFO("-EBUSY\n");
++              return -EBUSY;
++      }
++#endif
++      if (iload->idx < 0 || iload->idx > dma->buf_count)
++              return -EINVAL;
++
++      buf = dma->buflist[iload->idx];
++      buf_priv = buf->dev_private;
++
++      if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
++              mga_freelist_put(dev, buf);
++              return -EINVAL;
++      }
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_mga_blit_t *blit = data;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
++
++      if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
++              return -EINVAL;
++
++      WRAP_TEST_WITH_RETURN(dev_priv);
++
++      mga_dma_dispatch_blit(dev, blit);
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
++
++      return 0;
++}
++
++static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      drm_mga_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      switch (param->param) {
++      case MGA_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      case MGA_PARAM_CARD_TYPE:
++              value = dev_priv->chipset;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      u32 *fence = data;
++      DMA_LOCALS;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      /* I would normal do this assignment in the declaration of fence,
++       * but dev_priv may be NULL.
++       */
++
++      *fence = dev_priv->next_fence_to_post;
++      dev_priv->next_fence_to_post++;
++
++      BEGIN_DMA(1);
++      DMA_BLOCK(MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_DMAPAD, 0x00000000,
++                MGA_SOFTRAP, 0x00000000);
++      ADVANCE_DMA();
++
++      return 0;
++}
++
++static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_mga_private_t *dev_priv = dev->dev_private;
++      u32 *fence = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      mga_driver_fence_wait(dev, fence);
++
++      return 0;
++}
++
++struct drm_ioctl_desc mga_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++
++};
++
++int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_ucode.h git-nokia/drivers/gpu/drm-tungsten/mga_ucode.h
+--- git/drivers/gpu/drm-tungsten/mga_ucode.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_ucode.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,11645 @@
++/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*-
++ * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com
++ *
++ * Copyright 1999 Matrox Graphics Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
++ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Kernel-based WARP engine management:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * WARP pipes are named according to the functions they perform, where:
++ *
++ *   - T stands for computation of texture stage 0
++ *   - T2 stands for computation of both texture stage 0 and texture stage 1
++ *   - G stands for computation of triangle intensity (Gouraud interpolation)
++ *   - Z stands for computation of Z buffer interpolation
++ *   - S stands for computation of specular highlight
++ *   - A stands for computation of the alpha channel
++ *   - F stands for computation of vertex fog interpolation
++ */
++
++static unsigned char warp_g200_tgz[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x72, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x60, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x03, 0x80, 0x0A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x57, 0x39, 0x20, 0xE9,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x2B, 0x32, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x16, 0x28, 0x20, 0xE9,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x2B, 0x20, 0xE9,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x85, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x84, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x82, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x7F, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgza[] = {
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x7D, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x6B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x44, 0x4C, 0xB6,
++      0x25, 0x44, 0x54, 0xB6,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x07, 0xC0, 0x44, 0xC6,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1F, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x3F, 0x3D, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x07, 0x20,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0x26, 0x1F, 0xDF,
++      0x9D, 0x1F, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x9E, 0x3F, 0x4F, 0xE9,
++
++      0x07, 0x07, 0x1F, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x9C, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x7A, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x79, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x77, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x74, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzaf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x83, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x6F, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x2D, 0x44, 0x4C, 0xB6,
++      0x25, 0x44, 0x54, 0xB6,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x07, 0xC0, 0x44, 0xC6,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x2D, 0x20,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x1F, 0x62, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x07, 0x20,
++
++      0x3F, 0x3D, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x35, 0x17, 0x4F, 0xE9,
++
++      0x1F, 0x26, 0x1F, 0xDF,
++      0x9D, 0x1F, 0x4F, 0xE9,
++
++      0x9E, 0x3F, 0x4F, 0xE9,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x07, 0x07, 0x1F, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x9C, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x74, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x73, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x71, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6E, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x7F, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x6B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0xB3, 0x05,
++      0x00, 0xE0,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0x35, 0x17, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x78, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x77, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x75, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x72, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzs[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x8B, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x77, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x8F, 0x20,
++
++      0xA5, 0x37, 0x4F, 0xE9,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0xA3, 0x80, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0xA1, 0x1F, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x6C, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6B, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x69, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzsa[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x8F, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x7B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x0D, 0x44, 0x4C, 0xB6,
++      0x05, 0x44, 0x54, 0xB6,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x0F, 0x20,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0xA5, 0x37, 0x4F, 0xE9,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2F, 0xC0, 0x44, 0xC6,
++      0xA3, 0x80, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0x9D, 0x17, 0x4F, 0xE9,
++
++      0xA1, 0x1F, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x9E, 0x37, 0x4F, 0xE9,
++      0x2F, 0x17, 0x2F, 0xAF,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x9C, 0x80, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x68, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x67, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x65, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x62, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzsaf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x94, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x80, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0x2D, 0x44, 0x4C, 0xB6,
++      0x25, 0x44, 0x54, 0xB6,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x0F, 0x20,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x07, 0xC0, 0x44, 0xC6,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0xA5, 0x37, 0x4F, 0xE9,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x3E, 0x3D, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x07, 0x20,
++
++      0x2F, 0x20,
++      0x00, 0xE0,
++      0xA3, 0x0F, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0xA1, 0x1F, 0x4F, 0xE9,
++
++      0x1E, 0x26, 0x1E, 0xDF,
++      0x9D, 0x1E, 0x4F, 0xE9,
++
++      0x35, 0x17, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x07, 0x07, 0x1E, 0xAF,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x9E, 0x3E, 0x4F, 0xE9,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x9C, 0x80, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x63, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x62, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x60, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x5D, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g200_tgzsf[] = {
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x98, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x81, 0x04,
++      0x89, 0x04,
++      0x01, 0x04,
++      0x09, 0x04,
++
++      0xC9, 0x41, 0xC0, 0xEC,
++      0x11, 0x04,
++      0x00, 0xE0,
++
++      0x41, 0xCC, 0x41, 0xCD,
++      0x49, 0xCC, 0x49, 0xCD,
++
++      0xD1, 0x41, 0xC0, 0xEC,
++      0x51, 0xCC, 0x51, 0xCD,
++
++      0x80, 0x04,
++      0x10, 0x04,
++      0x08, 0x04,
++      0x00, 0xE0,
++
++      0x00, 0xCC, 0xC0, 0xCD,
++      0xD1, 0x49, 0xC0, 0xEC,
++
++      0x8A, 0x1F, 0x20, 0xE9,
++      0x8B, 0x3F, 0x20, 0xE9,
++
++      0x41, 0x3C, 0x41, 0xAD,
++      0x49, 0x3C, 0x49, 0xAD,
++
++      0x10, 0xCC, 0x10, 0xCD,
++      0x08, 0xCC, 0x08, 0xCD,
++
++      0xB9, 0x41, 0x49, 0xBB,
++      0x1F, 0xF0, 0x41, 0xCD,
++
++      0x51, 0x3C, 0x51, 0xAD,
++      0x00, 0x98, 0x80, 0xE9,
++
++      0x8F, 0x80, 0x07, 0xEA,
++      0x24, 0x1F, 0x20, 0xE9,
++
++      0x21, 0x45, 0x80, 0xE8,
++      0x1A, 0x4D, 0x80, 0xE8,
++
++      0x31, 0x55, 0x80, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0x41, 0x49, 0xBD,
++      0x1D, 0x41, 0x51, 0xBD,
++
++      0x2E, 0x41, 0x2A, 0xB8,
++      0x34, 0x53, 0xA0, 0xE8,
++
++      0x15, 0x30,
++      0x1D, 0x30,
++      0x58, 0xE3,
++      0x00, 0xE0,
++
++      0xB5, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x24, 0x43, 0xA0, 0xE8,
++      0x2C, 0x4B, 0xA0, 0xE8,
++
++      0x15, 0x72,
++      0x09, 0xE3,
++      0x00, 0xE0,
++      0x1D, 0x72,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0x97, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x6C, 0x64, 0xC8, 0xEC,
++      0x98, 0xE1,
++      0xB5, 0x05,
++
++      0xBD, 0x05,
++      0x2E, 0x30,
++      0x32, 0xC0, 0xA0, 0xE8,
++
++      0x33, 0xC0, 0xA0, 0xE8,
++      0x74, 0x64, 0xC8, 0xEC,
++
++      0x40, 0x3C, 0x40, 0xAD,
++      0x32, 0x6A,
++      0x2A, 0x30,
++
++      0x20, 0x73,
++      0x33, 0x6A,
++      0x00, 0xE0,
++      0x28, 0x73,
++
++      0x1C, 0x72,
++      0x83, 0xE2,
++      0x7B, 0x80, 0x15, 0xEA,
++
++      0xB8, 0x3D, 0x28, 0xDF,
++      0x30, 0x35, 0x20, 0xDF,
++
++      0x40, 0x30,
++      0x00, 0xE0,
++      0xCC, 0xE2,
++      0x64, 0x72,
++
++      0x25, 0x42, 0x52, 0xBF,
++      0x2D, 0x42, 0x4A, 0xBF,
++
++      0x30, 0x2E, 0x30, 0xDF,
++      0x38, 0x2E, 0x38, 0xDF,
++
++      0x18, 0x1D, 0x45, 0xE9,
++      0x1E, 0x15, 0x45, 0xE9,
++
++      0x2B, 0x49, 0x51, 0xBD,
++      0x00, 0xE0,
++      0x1F, 0x73,
++
++      0x38, 0x38, 0x40, 0xAF,
++      0x30, 0x30, 0x40, 0xAF,
++
++      0x24, 0x1F, 0x24, 0xDF,
++      0x1D, 0x32, 0x20, 0xE9,
++
++      0x2C, 0x1F, 0x2C, 0xDF,
++      0x1A, 0x33, 0x20, 0xE9,
++
++      0xB0, 0x10,
++      0x08, 0xE3,
++      0x40, 0x10,
++      0xB8, 0x10,
++
++      0x26, 0xF0, 0x30, 0xCD,
++      0x2F, 0xF0, 0x38, 0xCD,
++
++      0x2B, 0x80, 0x20, 0xE9,
++      0x2A, 0x80, 0x20, 0xE9,
++
++      0xA6, 0x20,
++      0x88, 0xE2,
++      0x00, 0xE0,
++      0xAF, 0x20,
++
++      0x28, 0x2A, 0x26, 0xAF,
++      0x20, 0x2A, 0xC0, 0xAF,
++
++      0x34, 0x1F, 0x34, 0xDF,
++      0x46, 0x24, 0x46, 0xDF,
++
++      0x28, 0x30, 0x80, 0xBF,
++      0x20, 0x38, 0x80, 0xBF,
++
++      0x47, 0x24, 0x47, 0xDF,
++      0x4E, 0x2C, 0x4E, 0xDF,
++
++      0x4F, 0x2C, 0x4F, 0xDF,
++      0x56, 0x34, 0x56, 0xDF,
++
++      0x28, 0x15, 0x28, 0xDF,
++      0x20, 0x1D, 0x20, 0xDF,
++
++      0x57, 0x34, 0x57, 0xDF,
++      0x00, 0xE0,
++      0x1D, 0x05,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x89, 0xE2,
++      0x2B, 0x30,
++
++      0x3F, 0xC1, 0x1D, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x68,
++      0xBF, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x20, 0xC0, 0x20, 0xAF,
++      0x28, 0x05,
++      0x97, 0x74,
++
++      0x00, 0xE0,
++      0x2A, 0x10,
++      0x16, 0xC0, 0x20, 0xE9,
++
++      0x04, 0x80, 0x10, 0xEA,
++      0x8C, 0xE2,
++      0x95, 0x05,
++
++      0x28, 0xC1, 0x28, 0xAD,
++      0x1F, 0xC1, 0x15, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA8, 0x67,
++      0x9F, 0x6B,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x28, 0xC0, 0x28, 0xAD,
++      0x1D, 0x25,
++      0x20, 0x05,
++
++      0x28, 0x32, 0x80, 0xAD,
++      0x40, 0x2A, 0x40, 0xBD,
++
++      0x1C, 0x80, 0x20, 0xE9,
++      0x20, 0x33, 0x20, 0xAD,
++
++      0x20, 0x73,
++      0x00, 0xE0,
++      0xB6, 0x49, 0x51, 0xBB,
++
++      0x26, 0x2F, 0xB0, 0xE8,
++      0x19, 0x20, 0x20, 0xE9,
++
++      0x35, 0x20, 0x35, 0xDF,
++      0x3D, 0x20, 0x3D, 0xDF,
++
++      0x15, 0x20, 0x15, 0xDF,
++      0x1D, 0x20, 0x1D, 0xDF,
++
++      0x26, 0xD0, 0x26, 0xCD,
++      0x29, 0x49, 0x2A, 0xB8,
++
++      0x26, 0x40, 0x80, 0xBD,
++      0x3B, 0x48, 0x50, 0xBD,
++
++      0x3E, 0x54, 0x57, 0x9F,
++      0x00, 0xE0,
++      0x82, 0xE1,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x26, 0x30,
++      0x29, 0x30,
++      0x48, 0x3C, 0x48, 0xAD,
++
++      0x2B, 0x72,
++      0xC2, 0xE1,
++      0x2C, 0xC0, 0x44, 0xC2,
++
++      0x05, 0x24, 0x34, 0xBF,
++      0x0D, 0x24, 0x2C, 0xBF,
++
++      0x2D, 0x46, 0x4E, 0xBF,
++      0x25, 0x46, 0x56, 0xBF,
++
++      0x20, 0x1D, 0x6F, 0x8F,
++      0x32, 0x3E, 0x5F, 0xE9,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x30,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x33, 0x1E, 0x5F, 0xE9,
++
++      0x05, 0x44, 0x54, 0xB2,
++      0x0D, 0x44, 0x4C, 0xB2,
++
++      0x19, 0xC0, 0xB0, 0xE8,
++      0x34, 0xC0, 0x44, 0xC4,
++
++      0x33, 0x73,
++      0x00, 0xE0,
++      0x3E, 0x62, 0x57, 0x9F,
++
++      0x1E, 0xAF, 0x59, 0x9F,
++      0x00, 0xE0,
++      0x0D, 0x20,
++
++      0x84, 0x3E, 0x58, 0xE9,
++      0x28, 0x1D, 0x6F, 0x8F,
++
++      0x05, 0x20,
++      0x00, 0xE0,
++      0x85, 0x1E, 0x58, 0xE9,
++
++      0x9B, 0x3B, 0x33, 0xDF,
++      0x20, 0x20, 0x42, 0xAF,
++
++      0x30, 0x42, 0x56, 0x9F,
++      0x80, 0x3E, 0x57, 0xE9,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x30, 0x80, 0x5F, 0xE9,
++
++      0x28, 0x28, 0x24, 0xAF,
++      0x81, 0x1E, 0x57, 0xE9,
++
++      0x05, 0x47, 0x57, 0xBF,
++      0x0D, 0x47, 0x4F, 0xBF,
++
++      0x88, 0x80, 0x58, 0xE9,
++      0x1B, 0x29, 0x1B, 0xDF,
++
++      0x30, 0x1D, 0x6F, 0x8F,
++      0x3A, 0x30, 0x4F, 0xE9,
++
++      0x1C, 0x30, 0x26, 0xDF,
++      0x09, 0xE3,
++      0x3B, 0x05,
++
++      0x3E, 0x50, 0x56, 0x9F,
++      0x3B, 0x3F, 0x4F, 0xE9,
++
++      0x1E, 0x8F, 0x51, 0x9F,
++      0x00, 0xE0,
++      0xAC, 0x20,
++
++      0x2D, 0x44, 0x4C, 0xB4,
++      0x2C, 0x1C, 0xC0, 0xAF,
++
++      0x25, 0x44, 0x54, 0xB4,
++      0x00, 0xE0,
++      0xC8, 0x30,
++
++      0x30, 0x46, 0x30, 0xAF,
++      0x1B, 0x1B, 0x48, 0xAF,
++
++      0x00, 0xE0,
++      0x25, 0x20,
++      0x38, 0x2C, 0x4F, 0xE9,
++
++      0x86, 0x80, 0x57, 0xE9,
++      0x38, 0x1D, 0x6F, 0x8F,
++
++      0x28, 0x74,
++      0x00, 0xE0,
++      0x0D, 0x44, 0x4C, 0xB0,
++
++      0x05, 0x44, 0x54, 0xB0,
++      0x2D, 0x20,
++      0x9B, 0x10,
++
++      0x82, 0x3E, 0x57, 0xE9,
++      0x32, 0xF0, 0x1B, 0xCD,
++
++      0x1E, 0xBD, 0x59, 0x9F,
++      0x83, 0x1E, 0x57, 0xE9,
++
++      0x38, 0x47, 0x38, 0xAF,
++      0x34, 0x20,
++      0x2A, 0x30,
++
++      0x00, 0xE0,
++      0x0D, 0x20,
++      0x32, 0x20,
++      0x05, 0x20,
++
++      0x87, 0x80, 0x57, 0xE9,
++      0x1F, 0x54, 0x57, 0x9F,
++
++      0x17, 0x42, 0x56, 0x9F,
++      0x00, 0xE0,
++      0x3B, 0x6A,
++
++      0x3F, 0x8F, 0x51, 0x9F,
++      0x37, 0x1E, 0x4F, 0xE9,
++
++      0x37, 0x32, 0x2A, 0xAF,
++      0x00, 0xE0,
++      0x32, 0x00,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x27, 0xC0, 0x44, 0xC0,
++
++      0x36, 0x1F, 0x4F, 0xE9,
++      0x1F, 0x1F, 0x26, 0xDF,
++
++      0x37, 0x1B, 0x37, 0xBF,
++      0x17, 0x26, 0x17, 0xDF,
++
++      0x3E, 0x17, 0x4F, 0xE9,
++      0x3F, 0x3F, 0x4F, 0xE9,
++
++      0x34, 0x1F, 0x34, 0xAF,
++      0x2B, 0x05,
++      0xA7, 0x20,
++
++      0x33, 0x2B, 0x37, 0xDF,
++      0x27, 0x17, 0xC0, 0xAF,
++
++      0x34, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2D, 0x21, 0x1A, 0xB0,
++      0x25, 0x21, 0x31, 0xB0,
++
++      0x0D, 0x21, 0x1A, 0xB2,
++      0x05, 0x21, 0x31, 0xB2,
++
++      0x03, 0x80, 0x2A, 0xEA,
++      0x17, 0xC1, 0x2B, 0xBD,
++
++      0x2D, 0x20,
++      0x25, 0x20,
++      0x05, 0x20,
++      0x0D, 0x20,
++
++      0xB3, 0x68,
++      0x97, 0x25,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0xC0, 0x33, 0xAF,
++      0x2F, 0xC0, 0x21, 0xC0,
++
++      0x16, 0x42, 0x56, 0x9F,
++      0x3C, 0x27, 0x4F, 0xE9,
++
++      0x1E, 0x62, 0x57, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x21, 0x31, 0xB4,
++      0x2D, 0x21, 0x1A, 0xB4,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x33, 0x05,
++      0x00, 0xE0,
++      0x28, 0x19, 0x60, 0xEC,
++
++      0x0D, 0x21, 0x1A, 0xB6,
++      0x05, 0x21, 0x31, 0xB6,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0xE0,
++      0x2F, 0x20,
++
++      0x23, 0x3B, 0x33, 0xAD,
++      0x1E, 0x26, 0x1E, 0xDF,
++
++      0xA7, 0x1E, 0x4F, 0xE9,
++      0x17, 0x26, 0x16, 0xDF,
++
++      0x2D, 0x20,
++      0x00, 0xE0,
++      0xA8, 0x3F, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x1E, 0xAF,
++      0x25, 0x20,
++      0x00, 0xE0,
++
++      0xA4, 0x16, 0x4F, 0xE9,
++      0x0F, 0xC0, 0x21, 0xC2,
++
++      0xA6, 0x80, 0x4F, 0xE9,
++      0x1F, 0x62, 0x57, 0x9F,
++
++      0x0D, 0x20,
++      0x05, 0x20,
++      0x2F, 0xC0, 0x21, 0xC6,
++
++      0x3F, 0x2F, 0x5D, 0x9F,
++      0x00, 0xE0,
++      0x0F, 0x20,
++
++      0x17, 0x50, 0x56, 0x9F,
++      0xA5, 0x37, 0x4F, 0xE9,
++
++      0x06, 0xC0, 0x21, 0xC4,
++      0x0F, 0x17, 0x0F, 0xAF,
++
++      0x37, 0x0F, 0x5C, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2F, 0x20,
++      0x00, 0xE0,
++      0xA3, 0x80, 0x4F, 0xE9,
++
++      0x06, 0x20,
++      0x00, 0xE0,
++      0x1F, 0x26, 0x1F, 0xDF,
++
++      0x17, 0x26, 0x17, 0xDF,
++      0x35, 0x17, 0x4F, 0xE9,
++
++      0xA1, 0x1F, 0x4F, 0xE9,
++      0xA2, 0x3F, 0x4F, 0xE9,
++
++      0x06, 0x06, 0x1F, 0xAF,
++      0x39, 0x37, 0x4F, 0xE9,
++
++      0x2F, 0x2F, 0x17, 0xAF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xA0, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x31, 0x80, 0x4F, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x57, 0x39, 0x20, 0xE9,
++
++      0x16, 0x28, 0x20, 0xE9,
++      0x1D, 0x3B, 0x20, 0xE9,
++
++      0x1E, 0x2B, 0x20, 0xE9,
++      0x2B, 0x32, 0x20, 0xE9,
++
++      0x1C, 0x23, 0x20, 0xE9,
++      0x57, 0x36, 0x20, 0xE9,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x40, 0x40, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x90, 0xE2,
++      0x00, 0xE0,
++
++      0x68, 0xFF, 0x20, 0xEA,
++      0x19, 0xC8, 0xC1, 0xCD,
++
++      0x1F, 0xD7, 0x18, 0xBD,
++      0x3F, 0xD7, 0x22, 0xBD,
++
++      0x9F, 0x41, 0x49, 0xBD,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x25, 0x41, 0x49, 0xBD,
++      0x2D, 0x41, 0x51, 0xBD,
++
++      0x0D, 0x80, 0x07, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x35, 0x40, 0x48, 0xBD,
++      0x3D, 0x40, 0x50, 0xBD,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x25, 0x30,
++      0x2D, 0x30,
++
++      0x35, 0x30,
++      0xB5, 0x30,
++      0xBD, 0x30,
++      0x3D, 0x30,
++
++      0x9C, 0xA7, 0x5B, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x67, 0xFF, 0x0A, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC9, 0x41, 0xC8, 0xEC,
++      0x42, 0xE1,
++      0x00, 0xE0,
++
++      0x65, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xC8, 0x40, 0xC0, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x62, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++};
++
++static unsigned char warp_g400_t2gz[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x78, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x69, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x25, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x9F, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xBE, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x7D, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gza[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x7C, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x6D, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x29, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x74, 0xC6,
++      0x3D, 0xCF, 0x74, 0xC2,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB4,
++      0x02, 0x44, 0x64, 0xB4,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x9B, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xBA, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x79, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzaf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x81, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x72, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x2E, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x0F, 0xCF, 0x74, 0xC6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB4,
++      0x02, 0x44, 0x64, 0xB4,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x55, 0xB6,
++      0x02, 0x45, 0x65, 0xB6,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x96, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xB5, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x74, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x7D, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x6E, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0F, 0xCF, 0x75, 0xC6,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x28, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x0F, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x54, 0xB4,
++      0x02, 0x44, 0x64, 0xB4,
++
++      0x2A, 0x45, 0x55, 0xB6,
++      0x1A, 0x45, 0x65, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x9A, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xBB, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x78, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzs[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x85, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x76, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x31, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x20,
++      0x1A, 0x20,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA7, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x92, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xB2, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x70, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzsa[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x8A, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x7B, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x36, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x0F, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x8D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xAD, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x6B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzsaf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x8E, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x7F, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x3A, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x0F, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB6,
++      0x1A, 0x44, 0x64, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x55, 0xB6,
++      0x02, 0x45, 0x65, 0xB6,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x89, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xA9, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x67, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_t2gzsf[] = {
++
++      0x00, 0x8A, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x0A, 0x40, 0x50, 0xBF,
++      0x2A, 0x40, 0x60, 0xBF,
++
++      0x32, 0x41, 0x51, 0xBF,
++      0x3A, 0x41, 0x61, 0xBF,
++
++      0xC3, 0x6B,
++      0xD3, 0x6B,
++      0x00, 0x8A, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x23, 0x9F,
++      0x00, 0xE0,
++      0x51, 0x04,
++
++      0x90, 0xE2,
++      0x61, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x51, 0x41, 0xE0, 0xEC,
++      0x39, 0x67, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x63, 0xA0, 0xE8,
++
++      0x61, 0x41, 0xE0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x8A, 0x80, 0x15, 0xEA,
++      0x10, 0x04,
++      0x20, 0x04,
++
++      0x61, 0x51, 0xE0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x52, 0xBF,
++      0x0F, 0x52, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x62, 0xBF,
++      0x1E, 0x51, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x0E, 0x61, 0x60, 0xEA,
++
++      0x32, 0x40, 0x50, 0xBD,
++      0x22, 0x40, 0x60, 0xBD,
++
++      0x12, 0x41, 0x51, 0xBD,
++      0x3A, 0x41, 0x61, 0xBD,
++
++      0xBF, 0x2F, 0x0E, 0xBD,
++      0x97, 0xE2,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x35, 0x48, 0xB1, 0xE8,
++      0x3D, 0x59, 0xB1, 0xE8,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x56, 0x31, 0x56, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x66, 0x31, 0x66, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x57, 0x39, 0x57, 0xBF,
++      0x67, 0x39, 0x67, 0xBF,
++
++      0x7B, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x35, 0x00,
++      0x3D, 0x00,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0x8D, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x75, 0xF8, 0xEC,
++      0x35, 0x20,
++      0x3D, 0x20,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x53, 0x53, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x0E, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x48, 0x35, 0x48, 0xBF,
++      0x58, 0x35, 0x58, 0xBF,
++
++      0x68, 0x35, 0x68, 0xBF,
++      0x49, 0x3D, 0x49, 0xBF,
++
++      0x59, 0x3D, 0x59, 0xBF,
++      0x69, 0x3D, 0x69, 0xBF,
++
++      0x63, 0x63, 0x2D, 0xDF,
++      0x4D, 0x7D, 0xF8, 0xEC,
++
++      0x59, 0xE3,
++      0x00, 0xE0,
++      0xB8, 0x38, 0x33, 0xBF,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x18, 0x3A, 0x41, 0xE9,
++
++      0x3F, 0x53, 0xA0, 0xE8,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x63, 0xA0, 0xE8,
++
++      0x50, 0x70, 0xF8, 0xEC,
++      0x2B, 0x50, 0x3C, 0xE9,
++
++      0x1F, 0x0F, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x59, 0x78, 0xF8, 0xEC,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x56, 0x3F, 0x56, 0xDF,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x66, 0x3D, 0x66, 0xDF,
++
++      0x1D, 0x32, 0x41, 0xE9,
++      0x67, 0x3D, 0x67, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3F, 0x57, 0xDF,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x59, 0x3F, 0x59, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x69, 0x3D, 0x69, 0xDF,
++
++      0x48, 0x37, 0x48, 0xDF,
++      0x58, 0x3F, 0x58, 0xDF,
++
++      0x68, 0x3D, 0x68, 0xDF,
++      0x49, 0x37, 0x49, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x0F, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x54, 0xB0,
++      0x02, 0x44, 0x64, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB2,
++      0x1A, 0x44, 0x64, 0xB2,
++
++      0x36, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0F, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x54, 0xB4,
++      0x1A, 0x44, 0x64, 0xB4,
++
++      0x0A, 0x45, 0x55, 0xB0,
++      0x02, 0x45, 0x65, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB2,
++      0x1A, 0x45, 0x65, 0xB2,
++
++      0x0A, 0x45, 0x55, 0xB4,
++      0x02, 0x45, 0x65, 0xB4,
++
++      0x0F, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x0F, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x55, 0xB6,
++      0x1A, 0x45, 0x65, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x56, 0xBF,
++      0x1A, 0x46, 0x66, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x57, 0xBF,
++      0x02, 0x47, 0x67, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x53, 0xBF,
++      0x1A, 0x43, 0x63, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x48, 0x58, 0xBF,
++      0x02, 0x48, 0x68, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x2A, 0x49, 0x59, 0xBF,
++      0x1A, 0x49, 0x69, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x82, 0x30, 0x57, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x83, 0x38, 0x57, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x84, 0x31, 0x5E, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x85, 0x39, 0x5E, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x87, 0x77, 0x57, 0xE9,
++      0x8B, 0x3E, 0xBF, 0xEA,
++
++      0x80, 0x30, 0x57, 0xE9,
++      0x81, 0x38, 0x57, 0xE9,
++
++      0x82, 0x31, 0x57, 0xE9,
++      0x86, 0x78, 0x57, 0xE9,
++
++      0x83, 0x39, 0x57, 0xE9,
++      0x87, 0x79, 0x57, 0xE9,
++
++      0x30, 0x1F, 0x5F, 0xE9,
++      0x8A, 0x34, 0x20, 0xE9,
++
++      0x8B, 0x3C, 0x20, 0xE9,
++      0x37, 0x50, 0x60, 0xBD,
++
++      0x57, 0x0D, 0x20, 0xE9,
++      0x35, 0x51, 0x61, 0xBD,
++
++      0x2B, 0x50, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x0E, 0x77,
++
++      0x24, 0x51, 0x20, 0xE9,
++      0x8D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x0E, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x0B, 0x46, 0xA0, 0xE8,
++      0x1B, 0x56, 0xA0, 0xE8,
++
++      0x2B, 0x66, 0xA0, 0xE8,
++      0x0C, 0x47, 0xA0, 0xE8,
++
++      0x1C, 0x57, 0xA0, 0xE8,
++      0x2C, 0x67, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x57, 0x80, 0x57, 0xCF,
++
++      0x66, 0x33, 0x66, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x67, 0x3B, 0x67, 0xCF,
++
++      0x0B, 0x48, 0xA0, 0xE8,
++      0x1B, 0x58, 0xA0, 0xE8,
++
++      0x2B, 0x68, 0xA0, 0xE8,
++      0x0C, 0x49, 0xA0, 0xE8,
++
++      0x1C, 0x59, 0xA0, 0xE8,
++      0x2C, 0x69, 0xA0, 0xE8,
++
++      0x0B, 0x00,
++      0x1B, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x0C, 0x00,
++      0x1C, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x0B, 0x65,
++      0x1B, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x0C, 0x65,
++      0x1C, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x0B, 0x1B, 0x60, 0xEC,
++      0x34, 0xD7, 0x34, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x0C, 0x1C, 0x60, 0xEC,
++
++      0x3C, 0xD7, 0x3C, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x0B, 0x2B, 0xDE, 0xE8,
++      0x1B, 0x80, 0xDE, 0xE8,
++
++      0x34, 0x80, 0x34, 0xBD,
++      0x3C, 0x80, 0x3C, 0xBD,
++
++      0x33, 0xD7, 0x0B, 0xBD,
++      0x3B, 0xD7, 0x1B, 0xBD,
++
++      0x48, 0x80, 0x48, 0xCF,
++      0x59, 0x80, 0x59, 0xCF,
++
++      0x68, 0x33, 0x68, 0xCF,
++      0x49, 0x3B, 0x49, 0xCF,
++
++      0xAD, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x58, 0x33, 0x58, 0xCF,
++      0x69, 0x3B, 0x69, 0xCF,
++
++      0x6B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgz[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x58, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x4A, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x1D, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xAF, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xD6, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x9D, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgza[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x5C, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x4E, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x27, 0xCF, 0x74, 0xC6,
++      0x3D, 0xCF, 0x74, 0xC2,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x20, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB4,
++      0x02, 0x44, 0x54, 0xB4,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xAB, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xD3, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x99, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzaf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x61, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x53, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x26, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x27, 0xCF, 0x74, 0xC6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB4,
++      0x02, 0x44, 0x54, 0xB4,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x4D, 0xB6,
++      0x02, 0x45, 0x55, 0xB6,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xA6, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xCD, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x94, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x5D, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x4F, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x34, 0x80, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x27, 0xCF, 0x75, 0xC6,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x20, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x3D, 0xCF, 0x74, 0xC2,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x27, 0x20, 0xE9,
++
++      0x0A, 0x44, 0x4C, 0xB4,
++      0x02, 0x44, 0x54, 0xB4,
++
++      0x2A, 0x45, 0x4D, 0xB6,
++      0x1A, 0x45, 0x55, 0xB6,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x38, 0x3D, 0x20, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xAA, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xD3, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x98, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzs[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x65, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x57, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x29, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x20,
++      0x02, 0x20,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA7, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0xA2, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xCA, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x90, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzsa[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x6A, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x5C, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x2E, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x27, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0x9D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xC5, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x8B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzsaf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x6E, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x60, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x32, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x27, 0xCF, 0x74, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9C, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB6,
++      0x1A, 0x44, 0x54, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x45, 0x4D, 0xB6,
++      0x02, 0x45, 0x55, 0xB6,
++
++      0x3D, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x3D, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x9D, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x9E, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x30, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x38, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0x99, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xC1, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x87, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
++
++static unsigned char warp_g400_tgzsf[] = {
++
++      0x00, 0x88, 0x98, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++      0xFF, 0x80, 0xC0, 0xE9,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x22, 0x40, 0x48, 0xBF,
++      0x2A, 0x40, 0x50, 0xBF,
++
++      0x32, 0x41, 0x49, 0xBF,
++      0x3A, 0x41, 0x51, 0xBF,
++
++      0xC3, 0x6B,
++      0xCB, 0x6B,
++      0x00, 0x88, 0x98, 0xE9,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x96, 0xE2,
++      0x41, 0x04,
++
++      0x7B, 0x43, 0xA0, 0xE8,
++      0x73, 0x4B, 0xA0, 0xE8,
++
++      0xAD, 0xEE, 0x29, 0x9F,
++      0x00, 0xE0,
++      0x49, 0x04,
++
++      0x90, 0xE2,
++      0x51, 0x04,
++      0x31, 0x46, 0xB1, 0xE8,
++
++      0x49, 0x41, 0xC0, 0xEC,
++      0x39, 0x57, 0xB1, 0xE8,
++
++      0x00, 0x04,
++      0x46, 0xE2,
++      0x73, 0x53, 0xA0, 0xE8,
++
++      0x51, 0x41, 0xC0, 0xEC,
++      0x31, 0x00,
++      0x39, 0x00,
++
++      0x6A, 0x80, 0x15, 0xEA,
++      0x08, 0x04,
++      0x10, 0x04,
++
++      0x51, 0x49, 0xC0, 0xEC,
++      0x2F, 0x41, 0x60, 0xEA,
++
++      0x31, 0x20,
++      0x39, 0x20,
++      0x1F, 0x42, 0xA0, 0xE8,
++
++      0x2A, 0x42, 0x4A, 0xBF,
++      0x27, 0x4A, 0xA0, 0xE8,
++
++      0x1A, 0x42, 0x52, 0xBF,
++      0x1E, 0x49, 0x60, 0xEA,
++
++      0x73, 0x7B, 0xC8, 0xEC,
++      0x26, 0x51, 0x60, 0xEA,
++
++      0x32, 0x40, 0x48, 0xBD,
++      0x22, 0x40, 0x50, 0xBD,
++
++      0x12, 0x41, 0x49, 0xBD,
++      0x3A, 0x41, 0x51, 0xBD,
++
++      0xBF, 0x2F, 0x26, 0xBD,
++      0x00, 0xE0,
++      0x7B, 0x72,
++
++      0x32, 0x20,
++      0x22, 0x20,
++      0x12, 0x20,
++      0x3A, 0x20,
++
++      0x46, 0x31, 0x46, 0xBF,
++      0x4E, 0x31, 0x4E, 0xBF,
++
++      0xB3, 0xE2, 0x2D, 0x9F,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x56, 0x31, 0x56, 0xBF,
++      0x47, 0x39, 0x47, 0xBF,
++
++      0x4F, 0x39, 0x4F, 0xBF,
++      0x57, 0x39, 0x57, 0xBF,
++
++      0x5C, 0x80, 0x07, 0xEA,
++      0x24, 0x41, 0x20, 0xE9,
++
++      0x42, 0x73, 0xF8, 0xEC,
++      0x00, 0xE0,
++      0x2D, 0x73,
++
++      0x33, 0x72,
++      0x0C, 0xE3,
++      0xA5, 0x2F, 0x1E, 0xBD,
++
++      0x43, 0x43, 0x2D, 0xDF,
++      0x4B, 0x4B, 0x2D, 0xDF,
++
++      0xAE, 0x1E, 0x26, 0xBD,
++      0x58, 0xE3,
++      0x33, 0x66,
++
++      0x53, 0x53, 0x2D, 0xDF,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0xB8, 0x38, 0x33, 0xBF,
++      0x00, 0xE0,
++      0x59, 0xE3,
++
++      0x1E, 0x12, 0x41, 0xE9,
++      0x1A, 0x22, 0x41, 0xE9,
++
++      0x2B, 0x40, 0x3D, 0xE9,
++      0x3F, 0x4B, 0xA0, 0xE8,
++
++      0x2D, 0x73,
++      0x30, 0x76,
++      0x05, 0x80, 0x3D, 0xEA,
++
++      0x37, 0x43, 0xA0, 0xE8,
++      0x3D, 0x53, 0xA0, 0xE8,
++
++      0x48, 0x70, 0xF8, 0xEC,
++      0x2B, 0x48, 0x3C, 0xE9,
++
++      0x1F, 0x27, 0xBC, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x15, 0xC0, 0x20, 0xE9,
++      0x15, 0xC0, 0x20, 0xE9,
++
++      0x18, 0x3A, 0x41, 0xE9,
++      0x1D, 0x32, 0x41, 0xE9,
++
++      0x2A, 0x40, 0x20, 0xE9,
++      0x56, 0x3D, 0x56, 0xDF,
++
++      0x46, 0x37, 0x46, 0xDF,
++      0x4E, 0x3F, 0x4E, 0xDF,
++
++      0x16, 0x30, 0x20, 0xE9,
++      0x4F, 0x3F, 0x4F, 0xDF,
++
++      0x47, 0x37, 0x47, 0xDF,
++      0x57, 0x3D, 0x57, 0xDF,
++
++      0x32, 0x32, 0x2D, 0xDF,
++      0x22, 0x22, 0x2D, 0xDF,
++
++      0x12, 0x12, 0x2D, 0xDF,
++      0x3A, 0x3A, 0x2D, 0xDF,
++
++      0x27, 0xCF, 0x74, 0xC2,
++      0x37, 0xCF, 0x74, 0xC4,
++
++      0x0A, 0x44, 0x4C, 0xB0,
++      0x02, 0x44, 0x54, 0xB0,
++
++      0x3D, 0xCF, 0x74, 0xC0,
++      0x34, 0x37, 0x20, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x38, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3C, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB2,
++      0x1A, 0x44, 0x54, 0xB2,
++
++      0x2E, 0x80, 0x3A, 0xEA,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x27, 0xCF, 0x75, 0xC0,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x32, 0x31, 0x5F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x33, 0x39, 0x5F, 0xE9,
++
++      0x3D, 0xCF, 0x75, 0xC2,
++      0x37, 0xCF, 0x75, 0xC4,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA6, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA3, 0x3D, 0x20, 0xE9,
++
++      0x2A, 0x44, 0x4C, 0xB4,
++      0x1A, 0x44, 0x54, 0xB4,
++
++      0x0A, 0x45, 0x4D, 0xB0,
++      0x02, 0x45, 0x55, 0xB0,
++
++      0x88, 0x73, 0x5E, 0xE9,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA0, 0x37, 0x20, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x3E, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x3F, 0x38, 0x4F, 0xE9,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x3A, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x3B, 0x39, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB2,
++      0x1A, 0x45, 0x55, 0xB2,
++
++      0x0A, 0x45, 0x4D, 0xB4,
++      0x02, 0x45, 0x55, 0xB4,
++
++      0x27, 0xCF, 0x75, 0xC6,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0xA7, 0x30, 0x4F, 0xE9,
++      0x0A, 0x20,
++      0x02, 0x20,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x31, 0x27, 0x20, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA8, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x45, 0x4D, 0xB6,
++      0x1A, 0x45, 0x55, 0xB6,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x36, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x37, 0x39, 0x4F, 0xE9,
++
++      0x00, 0x80, 0x00, 0xE8,
++      0x2A, 0x20,
++      0x1A, 0x20,
++
++      0x2A, 0x46, 0x4E, 0xBF,
++      0x1A, 0x46, 0x56, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA4, 0x31, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA5, 0x39, 0x4F, 0xE9,
++
++      0x0A, 0x47, 0x4F, 0xBF,
++      0x02, 0x47, 0x57, 0xBF,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0xA1, 0x30, 0x4F, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0xA2, 0x38, 0x4F, 0xE9,
++
++      0x2A, 0x43, 0x4B, 0xBF,
++      0x1A, 0x43, 0x53, 0xBF,
++
++      0x30, 0x50, 0x2E, 0x9F,
++      0x35, 0x31, 0x4F, 0xE9,
++
++      0x38, 0x21, 0x2C, 0x9F,
++      0x39, 0x39, 0x4F, 0xE9,
++
++      0x31, 0x53, 0x2F, 0x9F,
++      0x80, 0x31, 0x57, 0xE9,
++
++      0x39, 0xE5, 0x2C, 0x9F,
++      0x81, 0x39, 0x57, 0xE9,
++
++      0x37, 0x48, 0x50, 0xBD,
++      0x8A, 0x36, 0x20, 0xE9,
++
++      0x86, 0x76, 0x57, 0xE9,
++      0x8B, 0x3E, 0x20, 0xE9,
++
++      0x82, 0x30, 0x57, 0xE9,
++      0x87, 0x77, 0x57, 0xE9,
++
++      0x83, 0x38, 0x57, 0xE9,
++      0x35, 0x49, 0x51, 0xBD,
++
++      0x84, 0x31, 0x5E, 0xE9,
++      0x30, 0x1F, 0x5F, 0xE9,
++
++      0x85, 0x39, 0x5E, 0xE9,
++      0x57, 0x25, 0x20, 0xE9,
++
++      0x2B, 0x48, 0x20, 0xE9,
++      0x1D, 0x37, 0xE1, 0xEA,
++
++      0x1E, 0x35, 0xE1, 0xEA,
++      0x00, 0xE0,
++      0x26, 0x77,
++
++      0x24, 0x49, 0x20, 0xE9,
++      0x9D, 0xFF, 0x20, 0xEA,
++
++      0x16, 0x26, 0x20, 0xE9,
++      0x57, 0x2E, 0xBF, 0xEA,
++
++      0x1C, 0x46, 0xA0, 0xE8,
++      0x23, 0x4E, 0xA0, 0xE8,
++
++      0x2B, 0x56, 0xA0, 0xE8,
++      0x1D, 0x47, 0xA0, 0xE8,
++
++      0x24, 0x4F, 0xA0, 0xE8,
++      0x2C, 0x57, 0xA0, 0xE8,
++
++      0x1C, 0x00,
++      0x23, 0x00,
++      0x2B, 0x00,
++      0x00, 0xE0,
++
++      0x1D, 0x00,
++      0x24, 0x00,
++      0x2C, 0x00,
++      0x00, 0xE0,
++
++      0x1C, 0x65,
++      0x23, 0x65,
++      0x2B, 0x65,
++      0x00, 0xE0,
++
++      0x1D, 0x65,
++      0x24, 0x65,
++      0x2C, 0x65,
++      0x00, 0xE0,
++
++      0x1C, 0x23, 0x60, 0xEC,
++      0x36, 0xD7, 0x36, 0xAD,
++
++      0x2B, 0x80, 0x60, 0xEC,
++      0x1D, 0x24, 0x60, 0xEC,
++
++      0x3E, 0xD7, 0x3E, 0xAD,
++      0x2C, 0x80, 0x60, 0xEC,
++
++      0x1C, 0x2B, 0xDE, 0xE8,
++      0x23, 0x80, 0xDE, 0xE8,
++
++      0x36, 0x80, 0x36, 0xBD,
++      0x3E, 0x80, 0x3E, 0xBD,
++
++      0x33, 0xD7, 0x1C, 0xBD,
++      0x3B, 0xD7, 0x23, 0xBD,
++
++      0x46, 0x80, 0x46, 0xCF,
++      0x4F, 0x80, 0x4F, 0xCF,
++
++      0x56, 0x33, 0x56, 0xCF,
++      0x47, 0x3B, 0x47, 0xCF,
++
++      0xC5, 0xFF, 0x20, 0xEA,
++      0x00, 0x80, 0x00, 0xE8,
++
++      0x4E, 0x33, 0x4E, 0xCF,
++      0x57, 0x3B, 0x57, 0xCF,
++
++      0x8B, 0xFF, 0x20, 0xEA,
++      0x57, 0xC0, 0xBF, 0xEA,
++
++      0x00, 0x80, 0xA0, 0xE9,
++      0x00, 0x00, 0xD8, 0xEC,
++
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/mga_warp.c git-nokia/drivers/gpu/drm-tungsten/mga_warp.c
+--- git/drivers/gpu/drm-tungsten/mga_warp.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/mga_warp.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,198 @@
++/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
++ * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "mga_drm.h"
++#include "mga_drv.h"
++#include "mga_ucode.h"
++
++#define MGA_WARP_CODE_ALIGN           256     /* in bytes */
++
++#define WARP_UCODE_SIZE( which )                                      \
++      ((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN)
++
++#define WARP_UCODE_INSTALL( which, where )                            \
++do {                                                                  \
++      DRM_DEBUG( " pcbase = 0x%08lx  vcbase = %p\n", pcbase, vcbase );\
++      dev_priv->warp_pipe_phys[where] = pcbase;                       \
++      memcpy( vcbase, which, sizeof(which) );                         \
++      pcbase += WARP_UCODE_SIZE( which );                             \
++      vcbase += WARP_UCODE_SIZE( which );                             \
++} while (0)
++
++static const unsigned int mga_warp_g400_microcode_size =
++             (WARP_UCODE_SIZE(warp_g400_tgz) +
++              WARP_UCODE_SIZE(warp_g400_tgza) +
++              WARP_UCODE_SIZE(warp_g400_tgzaf) +
++              WARP_UCODE_SIZE(warp_g400_tgzf) +
++              WARP_UCODE_SIZE(warp_g400_tgzs) +
++              WARP_UCODE_SIZE(warp_g400_tgzsa) +
++              WARP_UCODE_SIZE(warp_g400_tgzsaf) +
++              WARP_UCODE_SIZE(warp_g400_tgzsf) +
++              WARP_UCODE_SIZE(warp_g400_t2gz) +
++              WARP_UCODE_SIZE(warp_g400_t2gza) +
++              WARP_UCODE_SIZE(warp_g400_t2gzaf) +
++              WARP_UCODE_SIZE(warp_g400_t2gzf) +
++              WARP_UCODE_SIZE(warp_g400_t2gzs) +
++              WARP_UCODE_SIZE(warp_g400_t2gzsa) +
++              WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
++              WARP_UCODE_SIZE(warp_g400_t2gzsf));
++
++static const unsigned int mga_warp_g200_microcode_size =
++             (WARP_UCODE_SIZE(warp_g200_tgz) +
++              WARP_UCODE_SIZE(warp_g200_tgza) +
++              WARP_UCODE_SIZE(warp_g200_tgzaf) +
++              WARP_UCODE_SIZE(warp_g200_tgzf) +
++              WARP_UCODE_SIZE(warp_g200_tgzs) +
++              WARP_UCODE_SIZE(warp_g200_tgzsa) +
++              WARP_UCODE_SIZE(warp_g200_tgzsaf) +
++              WARP_UCODE_SIZE(warp_g200_tgzsf));
++
++
++unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
++{
++      switch (dev_priv->chipset) {
++      case MGA_CARD_TYPE_G400:
++      case MGA_CARD_TYPE_G550:
++              return PAGE_ALIGN(mga_warp_g400_microcode_size);
++      case MGA_CARD_TYPE_G200:
++              return PAGE_ALIGN(mga_warp_g200_microcode_size);
++      default:
++              DRM_ERROR("Unknown chipset value: 0x%x\n", dev_priv->chipset);
++              return 0;
++      }
++}
++
++static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
++{
++      unsigned char *vcbase = dev_priv->warp->handle;
++      unsigned long pcbase = dev_priv->warp->offset;
++
++      memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
++
++      WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
++      WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
++      WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA);
++      WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF);
++      WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS);
++      WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF);
++      WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA);
++      WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF);
++
++      WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ);
++      WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF);
++      WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA);
++      WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF);
++      WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS);
++      WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF);
++      WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA);
++      WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF);
++
++      return 0;
++}
++
++static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
++{
++      unsigned char *vcbase = dev_priv->warp->handle;
++      unsigned long pcbase = dev_priv->warp->offset;
++
++      memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
++
++      WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
++      WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
++      WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
++      WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
++      WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
++      WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
++      WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
++      WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
++
++      return 0;
++}
++
++int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
++{
++      const unsigned int size = mga_warp_microcode_size(dev_priv);
++
++      DRM_DEBUG("MGA ucode size = %d bytes\n", size);
++      if (size > dev_priv->warp->size) {
++              DRM_ERROR("microcode too large! (%u > %lu)\n",
++                        size, dev_priv->warp->size);
++              return -ENOMEM;
++      }
++
++      switch (dev_priv->chipset) {
++      case MGA_CARD_TYPE_G400:
++      case MGA_CARD_TYPE_G550:
++              return mga_warp_install_g400_microcode(dev_priv);
++      case MGA_CARD_TYPE_G200:
++              return mga_warp_install_g200_microcode(dev_priv);
++      default:
++              return -EINVAL;
++      }
++}
++
++#define WMISC_EXPECTED                (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
++
++int mga_warp_init(drm_mga_private_t * dev_priv)
++{
++      u32 wmisc;
++
++      /* FIXME: Get rid of these damned magic numbers...
++       */
++      switch (dev_priv->chipset) {
++      case MGA_CARD_TYPE_G400:
++      case MGA_CARD_TYPE_G550:
++              MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
++              MGA_WRITE(MGA_WGETMSB, 0x00000E00);
++              MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
++              MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
++              break;
++      case MGA_CARD_TYPE_G200:
++              MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
++              MGA_WRITE(MGA_WGETMSB, 0x1606);
++              MGA_WRITE(MGA_WVRTXSZ, 7);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
++                            MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
++      wmisc = MGA_READ(MGA_WMISC);
++      if (wmisc != WMISC_EXPECTED) {
++              DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
++                        wmisc, WMISC_EXPECTED);
++              return -EINVAL;
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_bo.c git-nokia/drivers/gpu/drm-tungsten/nouveau_bo.c
+--- git/drivers/gpu/drm-tungsten/nouveau_bo.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_bo.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,296 @@
++/*
++ * Copyright 2007 Dave Airlied
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++/*
++ * Authors: Dave Airlied <airlied@linux.ie>
++ *        Ben Skeggs   <darktama@iinet.net.au>
++ *        Jeremy Kolb  <jkolb@brandeis.edu>
++ */
++
++#include "drmP.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++static struct drm_ttm_backend *
++nouveau_bo_create_ttm_backend_entry(struct drm_device * dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      switch (dev_priv->gart_info.type) {
++      case NOUVEAU_GART_AGP:
++              return drm_agp_init_ttm(dev);
++      case NOUVEAU_GART_SGDMA:
++              return nouveau_sgdma_init_ttm(dev);
++      default:
++              DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
++              break;
++      }
++
++      return NULL;
++}
++
++static int
++nouveau_bo_fence_type(struct drm_buffer_object *bo,
++                    uint32_t *fclass, uint32_t *type)
++{
++      /* When we get called, *fclass is set to the requested fence class */
++
++      if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
++              *type = 3;
++      else
++              *type = 1;
++      return 0;
++
++}
++
++static int
++nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
++{
++      /* We'll do this from user space. */
++      return 0;
++}
++
++static int
++nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
++                       struct drm_mem_type_manager *man)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      switch (type) {
++      case DRM_BO_MEM_LOCAL:
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                           _DRM_FLAG_MEMTYPE_CACHED;
++              man->drm_bus_maptype = 0;
++              break;
++      case DRM_BO_MEM_VRAM:
++              man->flags = _DRM_FLAG_MEMTYPE_FIXED |
++                           _DRM_FLAG_MEMTYPE_MAPPABLE |
++                           _DRM_FLAG_NEEDS_IOREMAP;
++              man->io_addr = NULL;
++              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
++              man->io_offset = drm_get_resource_start(dev, 1);
++              man->io_size = drm_get_resource_len(dev, 1);
++              if (man->io_size > nouveau_mem_fb_amount(dev))
++                      man->io_size = nouveau_mem_fb_amount(dev);
++              break;
++      case DRM_BO_MEM_PRIV0:
++              /* Unmappable VRAM */
++              man->flags = _DRM_FLAG_MEMTYPE_CMA;
++              man->drm_bus_maptype = 0;
++              break;
++      case DRM_BO_MEM_TT:
++              switch (dev_priv->gart_info.type) {
++              case NOUVEAU_GART_AGP:
++                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                                   _DRM_FLAG_MEMTYPE_CSELECT |
++                                   _DRM_FLAG_NEEDS_IOREMAP;
++                      man->drm_bus_maptype = _DRM_AGP;
++                      break;
++              case NOUVEAU_GART_SGDMA:
++                      man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                                   _DRM_FLAG_MEMTYPE_CSELECT |
++                                   _DRM_FLAG_MEMTYPE_CMA;
++                      man->drm_bus_maptype = _DRM_SCATTER_GATHER;
++                      break;
++              default:
++                      DRM_ERROR("Unknown GART type: %d\n",
++                                dev_priv->gart_info.type);
++                      return -EINVAL;
++              }
++
++              man->io_offset  = dev_priv->gart_info.aper_base;
++              man->io_size    = dev_priv->gart_info.aper_size;
++              man->io_addr   = NULL;
++              break;
++      default:
++              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++static uint64_t
++nouveau_bo_evict_flags(struct drm_buffer_object *bo)
++{
++      switch (bo->mem.mem_type) {
++      case DRM_BO_MEM_LOCAL:
++      case DRM_BO_MEM_TT:
++              return DRM_BO_FLAG_MEM_LOCAL;
++      default:
++              return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
++      }
++      return 0;
++}
++
++
++/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
++ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
++ */
++static int
++nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
++                   struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_device *dev = bo->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++      uint32_t srch, dsth, page_count;
++
++      /* Can happen during init/takedown */
++      if (!dchan->chan)
++              return -EINVAL;
++
++      srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
++      dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
++      if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
++              dchan->m2mf_dma_source = srch;
++              dchan->m2mf_dma_destin = dsth;
++
++              BEGIN_RING(NvSubM2MF,
++                         NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
++              OUT_RING  (dchan->m2mf_dma_source);
++              OUT_RING  (dchan->m2mf_dma_destin);
++      }
++
++      page_count = new_mem->num_pages;
++      while (page_count) {
++              int line_count = (page_count > 2047) ? 2047 : page_count;
++
++              BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
++              OUT_RING  (old_mem->mm_node->start << PAGE_SHIFT);
++              OUT_RING  (new_mem->mm_node->start << PAGE_SHIFT);
++              OUT_RING  (PAGE_SIZE); /* src_pitch */
++              OUT_RING  (PAGE_SIZE); /* dst_pitch */
++              OUT_RING  (PAGE_SIZE); /* line_length */
++              OUT_RING  (line_count);
++              OUT_RING  ((1<<8)|(1<<0));
++              OUT_RING  (0);
++              BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
++              OUT_RING  (0);
++
++              page_count -= line_count;
++      }
++
++      return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
++                                       DRM_FENCE_TYPE_EXE, 0, new_mem);
++}
++
++/* Flip pages into the GART and move if we can. */
++static int
++nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
++                    struct drm_bo_mem_reg *new_mem)
++{
++        struct drm_device *dev = bo->dev;
++        struct drm_bo_mem_reg tmp_mem;
++        int ret;
++
++        tmp_mem = *new_mem;
++        tmp_mem.mm_node = NULL;
++        tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
++                                DRM_BO_FLAG_CACHED |
++                                DRM_BO_FLAG_FORCE_CACHING);
++
++        ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
++        if (ret)
++                return ret;
++
++        ret = drm_ttm_bind(bo->ttm, &tmp_mem);
++        if (ret)
++                goto out_cleanup;
++
++        ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
++        if (ret)
++                goto out_cleanup;
++
++        ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
++
++out_cleanup:
++        if (tmp_mem.mm_node) {
++                mutex_lock(&dev->struct_mutex);
++                if (tmp_mem.mm_node != bo->pinned_node)
++                        drm_mm_put_block(tmp_mem.mm_node);
++                tmp_mem.mm_node = NULL;
++                mutex_unlock(&dev->struct_mutex);
++        }
++
++        return ret;
++}
++
++static int
++nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
++              struct drm_bo_mem_reg *new_mem)
++{
++      struct drm_bo_mem_reg *old_mem = &bo->mem;
++
++      if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++              if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++      else
++      if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
++              if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++      else {
++              if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
++                      return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++      }
++
++      return 0;
++}
++
++static void
++nouveau_bo_flush_ttm(struct drm_ttm *ttm)
++{
++}
++
++static uint32_t nouveau_mem_prios[]  = {
++      DRM_BO_MEM_PRIV0,
++      DRM_BO_MEM_VRAM,
++      DRM_BO_MEM_TT,
++      DRM_BO_MEM_LOCAL
++};
++static uint32_t nouveau_busy_prios[] = {
++      DRM_BO_MEM_TT,
++      DRM_BO_MEM_PRIV0,
++      DRM_BO_MEM_VRAM,
++      DRM_BO_MEM_LOCAL
++};
++
++struct drm_bo_driver nouveau_bo_driver = {
++      .mem_type_prio = nouveau_mem_prios,
++      .mem_busy_prio = nouveau_busy_prios,
++      .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
++      .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
++      .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
++      .fence_type = nouveau_bo_fence_type,
++      .invalidate_caches = nouveau_bo_invalidate_caches,
++      .init_mem_type = nouveau_bo_init_mem_type,
++      .evict_flags = nouveau_bo_evict_flags,
++      .move = nouveau_bo_move,
++      .ttm_cache_flush= nouveau_bo_flush_ttm,
++      .command_stream_barrier = NULL
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_dma.c git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.c
+--- git/drivers/gpu/drm-tungsten/nouveau_dma.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,172 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++int
++nouveau_dma_channel_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      struct mem_block *pushbuf;
++      int grclass, ret, i;
++
++      DRM_DEBUG("\n");
++
++      pushbuf = nouveau_mem_alloc(dev, 0, 0x8000,
++                                  NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
++                                  (struct drm_file *)-2);
++      if (!pushbuf) {
++              DRM_ERROR("Failed to allocate DMA push buffer\n");
++              return -ENOMEM;
++      }
++
++      /* Allocate channel */
++      ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2,
++                               pushbuf, NvDmaFB, NvDmaTT);
++      if (ret) {
++              DRM_ERROR("Error allocating GPU channel: %d\n", ret);
++              return ret;
++      }
++      DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id);
++
++      /* Map push buffer */
++      drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev);
++      if (!dchan->chan->pushbuf_mem->map->handle) {
++              DRM_ERROR("Failed to ioremap push buffer\n");
++              return -EINVAL;
++      }
++      dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle;
++
++      /* Initialise DMA vars */
++      dchan->max  = (dchan->chan->pushbuf_mem->size >> 2) - 2;
++      dchan->put  = dchan->chan->pushbuf_base >> 2;
++      dchan->cur  = dchan->put;
++      dchan->free = dchan->max - dchan->cur;
++
++      /* Insert NOPS for NOUVEAU_DMA_SKIPS */
++      dchan->free -= NOUVEAU_DMA_SKIPS;
++      dchan->push_free = NOUVEAU_DMA_SKIPS;
++      for (i=0; i < NOUVEAU_DMA_SKIPS; i++)
++              OUT_RING(0);
++
++      /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
++      if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1,
++                                        &dchan->notify0_offset))) {
++              DRM_ERROR("Error allocating NvNotify0: %d\n", ret);
++              return ret;
++      }
++
++      /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
++      if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT;
++      else                             grclass = NV50_MEMORY_TO_MEMORY_FORMAT;
++      if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) {
++              DRM_ERROR("Error creating NvM2MF: %d\n", ret);
++              return ret;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF,
++                                        gpuobj, NULL))) {
++              DRM_ERROR("Error referencing NvM2MF: %d\n", ret);
++              return ret;
++      }
++      dchan->m2mf_dma_source = NvDmaFB;
++      dchan->m2mf_dma_destin = NvDmaFB;
++
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
++      OUT_RING  (NvM2MF);
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
++      OUT_RING  (NvNotify0);
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
++      OUT_RING  (dchan->m2mf_dma_source);
++      OUT_RING  (dchan->m2mf_dma_destin);
++      FIRE_RING();
++
++      return 0;
++}
++
++void
++nouveau_dma_channel_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++
++      DRM_DEBUG("\n");
++
++      if (dchan->chan) {
++              nouveau_fifo_free(dchan->chan);
++              dchan->chan = NULL;
++      }
++}
++
++#define READ_GET() ((NV_READ(dchan->chan->get) -                               \
++                  dchan->chan->pushbuf_base) >> 2)
++#define WRITE_PUT(val) do {                                                    \
++      NV_WRITE(dchan->chan->put,                                             \
++               ((val) << 2) + dchan->chan->pushbuf_base);                    \
++} while(0)
++
++int
++nouveau_dma_wait(struct drm_device *dev, int size)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++      uint32_t get;
++
++      while (dchan->free < size) {
++              get = READ_GET();
++
++              if (dchan->put >= get) {
++                      dchan->free = dchan->max - dchan->cur;
++
++                      if (dchan->free < size) {
++                              dchan->push_free = 1;
++                              OUT_RING(0x20000000|dchan->chan->pushbuf_base);
++                              if (get <= NOUVEAU_DMA_SKIPS) {
++                                      /*corner case - will be idle*/
++                                      if (dchan->put <= NOUVEAU_DMA_SKIPS)
++                                              WRITE_PUT(NOUVEAU_DMA_SKIPS + 1);
++
++                                      do {
++                                              get = READ_GET();
++                                      } while (get <= NOUVEAU_DMA_SKIPS);
++                              }
++
++                              WRITE_PUT(NOUVEAU_DMA_SKIPS);
++                              dchan->cur  = dchan->put = NOUVEAU_DMA_SKIPS;
++                              dchan->free = get - (NOUVEAU_DMA_SKIPS + 1);
++                      }
++              } else {
++                      dchan->free = get - dchan->cur - 1;
++              }
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_dma.h git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.h
+--- git/drivers/gpu/drm-tungsten/nouveau_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_dma.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,96 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_DMA_H__
++#define __NOUVEAU_DMA_H__
++
++typedef enum {
++      NvSubM2MF       = 0,
++} nouveau_subchannel_id_t;
++
++typedef enum {
++      NvM2MF          = 0x80039001,
++      NvDmaFB         = 0x8003d001,
++      NvDmaTT         = 0x8003d002,
++      NvNotify0       = 0x8003d003
++} nouveau_object_handle_t;
++
++#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
++#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY                     0x00000180
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE                     0x00000184
++#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c
++
++#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
++
++#define BEGIN_RING(subc, mthd, cnt) do {                                       \
++      int push_size = (cnt) + 1;                                             \
++      if (dchan->push_free) {                                                \
++              DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free);  \
++              break;                                                         \
++      }                                                                      \
++      if (dchan->free < push_size) {                                         \
++              if (nouveau_dma_wait(dev, push_size)) {                        \
++                      DRM_ERROR("FIFO timeout\n");                           \
++                      break;                                                 \
++              }                                                              \
++      }                                                                      \
++      dchan->free -= push_size;                                              \
++      dchan->push_free = push_size;                                          \
++      OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd);                           \
++} while(0)
++
++#define OUT_RING(data) do {                                                    \
++      if (dchan->push_free == 0) {                                           \
++              DRM_ERROR("no space left in packet\n");                        \
++              break;                                                         \
++      }                                                                      \
++      dchan->pushbuf[dchan->cur++] = (data);                                 \
++      dchan->push_free--;                                                    \
++} while(0)
++
++#define FIRE_RING() do {                                                       \
++      if (dchan->push_free) {                                                \
++              DRM_ERROR("packet incomplete: %d\n", dchan->push_free);        \
++              break;                                                         \
++      }                                                                      \
++      if (dchan->cur != dchan->put) {                                        \
++              DRM_MEMORYBARRIER();                                           \
++              dchan->put = dchan->cur;                                       \
++              NV_WRITE(dchan->chan->put, dchan->put << 2);                   \
++      }                                                                      \
++} while(0)
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drm.h git-nokia/drivers/gpu/drm-tungsten/nouveau_drm.h
+--- git/drivers/gpu/drm-tungsten/nouveau_drm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drm.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,184 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_DRM_H__
++#define __NOUVEAU_DRM_H__
++
++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11
++
++struct drm_nouveau_channel_alloc {
++      uint32_t     fb_ctxdma_handle;
++      uint32_t     tt_ctxdma_handle;
++
++      int          channel;
++      uint32_t     put_base;
++      /* FIFO control regs */
++      drm_handle_t ctrl;
++      int          ctrl_size;
++      /* DMA command buffer */
++      drm_handle_t cmdbuf;
++      int          cmdbuf_size;
++      /* Notifier memory */
++      drm_handle_t notifier;
++      int          notifier_size;
++};
++
++struct drm_nouveau_channel_free {
++      int channel;
++};
++
++struct drm_nouveau_grobj_alloc {
++      int      channel;
++      uint32_t handle;
++      int      class;
++};
++
++#define NOUVEAU_MEM_ACCESS_RO 1
++#define NOUVEAU_MEM_ACCESS_WO 2
++#define NOUVEAU_MEM_ACCESS_RW 3
++struct drm_nouveau_notifierobj_alloc {
++      int      channel;
++      uint32_t handle;
++      int      count;
++
++      uint32_t offset;
++};
++
++struct drm_nouveau_gpuobj_free {
++      int      channel;
++      uint32_t handle;
++};
++
++/* This is needed to avoid a race condition.
++ * Otherwise you may be writing in the fetch area.
++ * Is this large enough, as it's only 32 bytes, and the maximum fetch size is 256 bytes?
++ */
++#define NOUVEAU_DMA_SKIPS 8
++
++#define NOUVEAU_MEM_FB                        0x00000001
++#define NOUVEAU_MEM_AGP                       0x00000002
++#define NOUVEAU_MEM_FB_ACCEPTABLE     0x00000004
++#define NOUVEAU_MEM_AGP_ACCEPTABLE    0x00000008
++#define NOUVEAU_MEM_PCI                       0x00000010
++#define NOUVEAU_MEM_PCI_ACCEPTABLE    0x00000020
++#define NOUVEAU_MEM_PINNED            0x00000040
++#define NOUVEAU_MEM_USER_BACKED               0x00000080
++#define NOUVEAU_MEM_MAPPED            0x00000100
++#define NOUVEAU_MEM_TILE              0x00000200
++#define NOUVEAU_MEM_TILE_ZETA         0x00000400
++#define NOUVEAU_MEM_INSTANCE          0x01000000 /* internal */
++#define NOUVEAU_MEM_NOTIFIER            0x02000000 /* internal */
++#define NOUVEAU_MEM_NOVM              0x04000000 /* internal */
++#define NOUVEAU_MEM_USER              0x08000000 /* internal */
++#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \
++                            NOUVEAU_MEM_NOTIFIER | \
++                            NOUVEAU_MEM_NOVM | \
++                            NOUVEAU_MEM_USER)
++
++struct drm_nouveau_mem_alloc {
++      int flags;
++      int alignment;
++      uint64_t size;  // in bytes
++      uint64_t offset;
++      drm_handle_t map_handle;
++};
++
++struct drm_nouveau_mem_free {
++      uint64_t offset;
++      int flags;
++};
++
++struct drm_nouveau_mem_tile {
++      uint64_t offset;
++      uint64_t delta;
++      uint64_t size;
++      int flags;
++};
++
++/* FIXME : maybe unify {GET,SET}PARAMs */
++#define NOUVEAU_GETPARAM_PCI_VENDOR      3
++#define NOUVEAU_GETPARAM_PCI_DEVICE      4
++#define NOUVEAU_GETPARAM_BUS_TYPE        5
++#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
++#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
++#define NOUVEAU_GETPARAM_FB_SIZE         8
++#define NOUVEAU_GETPARAM_AGP_SIZE        9
++#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
++#define NOUVEAU_GETPARAM_CHIPSET_ID      11
++struct drm_nouveau_getparam {
++      uint64_t param;
++      uint64_t value;
++};
++
++#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1
++#define NOUVEAU_SETPARAM_CMDBUF_SIZE     2
++struct drm_nouveau_setparam {
++      uint64_t param;
++      uint64_t value;
++};
++
++enum nouveau_card_type {
++      NV_UNKNOWN =0,
++      NV_04      =4,
++      NV_05      =5,
++      NV_10      =10,
++      NV_11      =11,
++      NV_17      =17,
++      NV_20      =20,
++      NV_30      =30,
++      NV_40      =40,
++      NV_44      =44,
++      NV_50      =50,
++      NV_LAST    =0xffff,
++};
++
++enum nouveau_bus_type {
++      NV_AGP     =0,
++      NV_PCI     =1,
++      NV_PCIE    =2,
++};
++
++#define NOUVEAU_MAX_SAREA_CLIPRECTS 16
++
++struct drm_nouveau_sarea {
++      /* the cliprects */
++      struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS];
++      unsigned int nbox;
++};
++
++#define DRM_NOUVEAU_CARD_INIT          0x00
++#define DRM_NOUVEAU_GETPARAM           0x01
++#define DRM_NOUVEAU_SETPARAM           0x02
++#define DRM_NOUVEAU_CHANNEL_ALLOC      0x03
++#define DRM_NOUVEAU_CHANNEL_FREE       0x04
++#define DRM_NOUVEAU_GROBJ_ALLOC        0x05
++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x06
++#define DRM_NOUVEAU_GPUOBJ_FREE        0x07
++#define DRM_NOUVEAU_MEM_ALLOC          0x08
++#define DRM_NOUVEAU_MEM_FREE           0x09
++#define DRM_NOUVEAU_MEM_TILE           0x0a
++#define DRM_NOUVEAU_SUSPEND            0x0b
++#define DRM_NOUVEAU_RESUME             0x0c
++
++#endif /* __NOUVEAU_DRM_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drv.c git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.c
+--- git/drivers/gpu/drm-tungsten/nouveau_drv.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,120 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      {
++              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
++              .class = PCI_BASE_CLASS_DISPLAY << 16,
++              .class_mask  = 0xff << 16,
++      },
++      {
++              PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
++              .class = PCI_BASE_CLASS_DISPLAY << 16,
++              .class_mask  = 0xff << 16,
++      }
++};
++
++extern struct drm_ioctl_desc nouveau_ioctls[];
++extern int nouveau_max_ioctl;
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++              DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
++              DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .load = nouveau_load,
++      .firstopen = nouveau_firstopen,
++      .lastclose = nouveau_lastclose,
++      .unload = nouveau_unload,
++      .preclose = nouveau_preclose,
++      .irq_preinstall = nouveau_irq_preinstall,
++      .irq_postinstall = nouveau_irq_postinstall,
++      .irq_uninstall = nouveau_irq_uninstall,
++      .irq_handler = nouveau_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = nouveau_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = nouveau_compat_ioctl,
++#endif
++      },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++        .bo_driver = &nouveau_bo_driver,
++        .fence_driver = &nouveau_fence_driver,
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++#ifdef GIT_REVISION
++      .date = GIT_REVISION,
++#else
++      .date = DRIVER_DATE,
++#endif
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init nouveau_init(void)
++{
++      driver.num_ioctls = nouveau_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit nouveau_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(nouveau_init);
++module_exit(nouveau_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_drv.h git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.h
+--- git/drivers/gpu/drm-tungsten/nouveau_drv.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_drv.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,621 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_DRV_H__
++#define __NOUVEAU_DRV_H__
++
++#define DRIVER_AUTHOR         "Stephane Marchesin"
++#define DRIVER_EMAIL          "dri-devel@lists.sourceforge.net"
++
++#define DRIVER_NAME           "nouveau"
++#define DRIVER_DESC           "nVidia Riva/TNT/GeForce"
++#define DRIVER_DATE           "20060213"
++
++#define DRIVER_MAJOR          0
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     11
++
++#define NOUVEAU_FAMILY   0x0000FFFF
++#define NOUVEAU_FLAGS    0xFFFF0000
++
++#include "nouveau_drm.h"
++#include "nouveau_reg.h"
++
++struct mem_block {
++      struct mem_block *next;
++      struct mem_block *prev;
++      uint64_t start;
++      uint64_t size;
++      struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++      int flags;
++      drm_local_map_t *map;
++      drm_handle_t map_handle;
++};
++
++enum nouveau_flags {
++      NV_NFORCE   =0x10000000,
++      NV_NFORCE2  =0x20000000
++};
++
++#define NVOBJ_ENGINE_SW               0
++#define NVOBJ_ENGINE_GR               1
++#define NVOBJ_ENGINE_INT      0xdeadbeef
++
++#define NVOBJ_FLAG_ALLOW_NO_REFS      (1 << 0)
++#define NVOBJ_FLAG_ZERO_ALLOC         (1 << 1)
++#define NVOBJ_FLAG_ZERO_FREE          (1 << 2)
++#define NVOBJ_FLAG_FAKE                       (1 << 3)
++struct nouveau_gpuobj {
++      struct list_head list;
++
++      int im_channel;
++      struct mem_block *im_pramin;
++      struct mem_block *im_backing;
++      int im_bound;
++
++      uint32_t flags;
++      int refcount;
++
++      uint32_t engine;
++      uint32_t class;
++
++      void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
++      void *priv;
++};
++
++struct nouveau_gpuobj_ref {
++      struct list_head list;
++
++      struct nouveau_gpuobj *gpuobj;
++      uint32_t instance;
++
++      int channel;
++      int handle;
++};
++
++struct nouveau_channel
++{
++      struct drm_device *dev;
++      int id;
++
++      /* owner of this fifo */
++      struct drm_file *file_priv;
++      /* mapping of the fifo itself */
++      drm_local_map_t *map;
++      /* mapping of the regs controling the fifo */
++      drm_local_map_t *regs;
++
++      /* Fencing */
++      uint32_t next_sequence;
++
++      /* DMA push buffer */
++      struct nouveau_gpuobj_ref *pushbuf;
++      struct mem_block          *pushbuf_mem;
++      uint32_t                   pushbuf_base;
++
++      /* FIFO user control regs */
++      uint32_t user, user_size;
++      uint32_t put;
++      uint32_t get;
++      uint32_t ref_cnt;
++
++      /* Notifier memory */
++      struct mem_block *notifier_block;
++      struct mem_block *notifier_heap;
++      drm_local_map_t  *notifier_map;
++
++      /* PFIFO context */
++      struct nouveau_gpuobj_ref *ramfc;
++
++      /* PGRAPH context */
++      /* XXX may be merge 2 pointers as private data ??? */
++      struct nouveau_gpuobj_ref *ramin_grctx;
++      void *pgraph_ctx;
++
++      /* NV50 VM */
++      struct nouveau_gpuobj     *vm_pd;
++      struct nouveau_gpuobj_ref *vm_gart_pt;
++      struct nouveau_gpuobj_ref *vm_vram_pt;
++
++      /* Objects */
++      struct nouveau_gpuobj_ref *ramin; /* Private instmem */
++      struct mem_block          *ramin_heap; /* Private PRAMIN heap */
++      struct nouveau_gpuobj_ref *ramht; /* Hash table */
++      struct list_head           ramht_refs; /* Objects referenced by RAMHT */
++};
++
++struct nouveau_drm_channel {
++      struct nouveau_channel *chan;
++
++      /* DMA state */
++      int max, put, cur, free;
++      int push_free;
++      volatile uint32_t *pushbuf;
++
++      /* Notifiers */
++      uint32_t notify0_offset;
++
++      /* Buffer moves */
++      uint32_t m2mf_dma_source;
++      uint32_t m2mf_dma_destin;
++};
++
++struct nouveau_config {
++      struct {
++              int location;
++              int size;
++      } cmdbuf;
++};
++
++struct nouveau_instmem_engine {
++      void    *priv;
++
++      int     (*init)(struct drm_device *dev);
++      void    (*takedown)(struct drm_device *dev);
++
++      int     (*populate)(struct drm_device *, struct nouveau_gpuobj *,
++                          uint32_t *size);
++      void    (*clear)(struct drm_device *, struct nouveau_gpuobj *);
++      int     (*bind)(struct drm_device *, struct nouveau_gpuobj *);
++      int     (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
++};
++
++struct nouveau_mc_engine {
++      int  (*init)(struct drm_device *dev);
++      void (*takedown)(struct drm_device *dev);
++};
++
++struct nouveau_timer_engine {
++      int      (*init)(struct drm_device *dev);
++      void     (*takedown)(struct drm_device *dev);
++      uint64_t (*read)(struct drm_device *dev);
++};
++
++struct nouveau_fb_engine {
++      int  (*init)(struct drm_device *dev);
++      void (*takedown)(struct drm_device *dev);
++};
++
++struct nouveau_fifo_engine {
++      void *priv;
++
++      int  channels;
++
++      int  (*init)(struct drm_device *);
++      void (*takedown)(struct drm_device *);
++
++      int  (*channel_id)(struct drm_device *);
++
++      int  (*create_context)(struct nouveau_channel *);
++      void (*destroy_context)(struct nouveau_channel *);
++      int  (*load_context)(struct nouveau_channel *);
++      int  (*save_context)(struct nouveau_channel *);
++};
++
++struct nouveau_pgraph_engine {
++      int  (*init)(struct drm_device *);
++      void (*takedown)(struct drm_device *);
++
++      int  (*create_context)(struct nouveau_channel *);
++      void (*destroy_context)(struct nouveau_channel *);
++      int  (*load_context)(struct nouveau_channel *);
++      int  (*save_context)(struct nouveau_channel *);
++};
++
++struct nouveau_engine {
++      struct nouveau_instmem_engine instmem;
++      struct nouveau_mc_engine      mc;
++      struct nouveau_timer_engine   timer;
++      struct nouveau_fb_engine      fb;
++      struct nouveau_pgraph_engine  graph;
++      struct nouveau_fifo_engine    fifo;
++};
++
++#define NOUVEAU_MAX_CHANNEL_NR 128
++struct drm_nouveau_private {
++      enum {
++              NOUVEAU_CARD_INIT_DOWN,
++              NOUVEAU_CARD_INIT_DONE,
++              NOUVEAU_CARD_INIT_FAILED
++      } init_state;
++
++      int ttm;
++
++      /* the card type, takes NV_* as values */
++      int card_type;
++      /* exact chipset, derived from NV_PMC_BOOT_0 */
++      int chipset;
++      int flags;
++
++      drm_local_map_t *mmio;
++      drm_local_map_t *fb;
++      drm_local_map_t *ramin; /* NV40 onwards */
++
++      int fifo_alloc_count;
++      struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
++
++      struct nouveau_engine Engine;
++      struct nouveau_drm_channel channel;
++
++      /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
++      struct nouveau_gpuobj *ramht;
++      uint32_t ramin_rsvd_vram;
++      uint32_t ramht_offset;
++      uint32_t ramht_size;
++      uint32_t ramht_bits;
++      uint32_t ramfc_offset;
++      uint32_t ramfc_size;
++      uint32_t ramro_offset;
++      uint32_t ramro_size;
++
++      /* base physical adresses */
++      uint64_t fb_phys;
++      uint64_t fb_available_size;
++
++      struct {
++              enum {
++                      NOUVEAU_GART_NONE = 0,
++                      NOUVEAU_GART_AGP,
++                      NOUVEAU_GART_SGDMA
++              } type;
++              uint64_t aper_base;
++              uint64_t aper_size;
++
++              struct nouveau_gpuobj *sg_ctxdma;
++              struct page *sg_dummy_page;
++              dma_addr_t sg_dummy_bus;
++
++              /* nottm hack */
++              struct drm_ttm_backend *sg_be;
++              unsigned long sg_handle;
++      } gart_info;
++
++      /* G8x global VRAM page table */
++      struct nouveau_gpuobj *vm_vram_pt;
++
++      /* the mtrr covering the FB */
++      int fb_mtrr;
++
++      struct mem_block *agp_heap;
++      struct mem_block *fb_heap;
++      struct mem_block *fb_nomap_heap;
++      struct mem_block *ramin_heap;
++      struct mem_block *pci_heap;
++
++        /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
++        uint32_t ctx_table_size;
++      struct nouveau_gpuobj_ref *ctx_table;
++
++      struct nouveau_config config;
++
++      struct list_head gpuobj_list;
++
++      struct nouveau_suspend_resume {
++              uint32_t fifo_mode;
++              uint32_t graph_ctx_control;
++              uint32_t graph_state;
++              uint32_t *ramin_copy;
++              uint64_t ramin_size;
++      } susres;
++};
++
++#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do {         \
++      struct drm_nouveau_private *nv = dev->dev_private; \
++      if (nv->init_state != NOUVEAU_CARD_INIT_DONE) {    \
++              DRM_ERROR("called without init\n");        \
++              return -EINVAL;                            \
++      }                                                  \
++} while(0)
++
++#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do {  \
++      struct drm_nouveau_private *nv = dev->dev_private;   \
++      if (!nouveau_fifo_owner(dev, (cl), (id))) {          \
++              DRM_ERROR("pid %d doesn't own channel %d\n", \
++                        DRM_CURRENTPID, (id));             \
++              return -EPERM;                               \
++      }                                                    \
++      (ch) = nv->fifos[(id)];                              \
++} while(0)
++
++/* nouveau_state.c */
++extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
++extern int  nouveau_load(struct drm_device *, unsigned long flags);
++extern int  nouveau_firstopen(struct drm_device *);
++extern void nouveau_lastclose(struct drm_device *);
++extern int  nouveau_unload(struct drm_device *);
++extern int  nouveau_ioctl_getparam(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern void nouveau_wait_for_idle(struct drm_device *);
++extern int  nouveau_card_init(struct drm_device *);
++extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
++                                  struct drm_file *);
++extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
++                                struct drm_file *);
++extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
++                               struct drm_file *);
++
++/* nouveau_mem.c */
++extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
++                               uint64_t size);
++extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
++                                               uint64_t size, int align2,
++                                               struct drm_file *, int tail);
++extern void nouveau_mem_takedown(struct mem_block **heap);
++extern void nouveau_mem_free_block(struct mem_block *);
++extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
++extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
++extern int  nouveau_ioctl_mem_alloc(struct drm_device *, void *data,
++                                  struct drm_file *);
++extern int  nouveau_ioctl_mem_free(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern int  nouveau_ioctl_mem_tile(struct drm_device *, void *data,
++                                 struct drm_file *);
++extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
++                                         int alignment, uint64_t size,
++                                         int flags, struct drm_file *);
++extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
++extern int  nouveau_mem_init(struct drm_device *);
++extern int  nouveau_mem_init_ttm(struct drm_device *);
++extern void nouveau_mem_close(struct drm_device *);
++
++/* nouveau_notifier.c */
++extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
++extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
++extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
++                                 int cout, uint32_t *offset);
++extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
++                                       struct drm_file *);
++extern int  nouveau_ioctl_notifier_free(struct drm_device *, void *data,
++                                      struct drm_file *);
++
++/* nouveau_fifo.c */
++extern int  nouveau_fifo_init(struct drm_device *);
++extern int  nouveau_fifo_ctx_size(struct drm_device *);
++extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *);
++extern int  nouveau_fifo_owner(struct drm_device *, struct drm_file *,
++                             int channel);
++extern int  nouveau_fifo_alloc(struct drm_device *dev,
++                             struct nouveau_channel **chan,
++                             struct drm_file *file_priv,
++                             struct mem_block *pushbuf,
++                             uint32_t fb_ctxdma, uint32_t tt_ctxdma);
++extern void nouveau_fifo_free(struct nouveau_channel *);
++extern int  nouveau_channel_idle(struct nouveau_channel *chan);
++
++/* nouveau_object.c */
++extern int  nouveau_gpuobj_early_init(struct drm_device *);
++extern int  nouveau_gpuobj_init(struct drm_device *);
++extern void nouveau_gpuobj_takedown(struct drm_device *);
++extern void nouveau_gpuobj_late_takedown(struct drm_device *);
++extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
++                                     uint32_t vram_h, uint32_t tt_h);
++extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
++extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
++                            int size, int align, uint32_t flags,
++                            struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
++                                uint32_t handle, struct nouveau_gpuobj *,
++                                struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_ref_del(struct drm_device *,
++                                struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
++                                 struct nouveau_gpuobj_ref **ref_ret);
++extern int nouveau_gpuobj_new_ref(struct drm_device *,
++                                struct nouveau_channel *alloc_chan,
++                                struct nouveau_channel *ref_chan,
++                                uint32_t handle, int size, int align,
++                                uint32_t flags, struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_new_fake(struct drm_device *,
++                                 uint32_t p_offset, uint32_t b_offset,
++                                 uint32_t size, uint32_t flags,
++                                 struct nouveau_gpuobj **,
++                                 struct nouveau_gpuobj_ref**);
++extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
++                                uint64_t offset, uint64_t size, int access,
++                                int target, struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
++                                     uint64_t offset, uint64_t size,
++                                     int access, struct nouveau_gpuobj **,
++                                     uint32_t *o_ret);
++extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
++                               struct nouveau_gpuobj **);
++extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
++                                   struct drm_file *);
++extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
++                                   struct drm_file *);
++
++/* nouveau_irq.c */
++extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
++extern void        nouveau_irq_preinstall(struct drm_device *);
++extern int         nouveau_irq_postinstall(struct drm_device *);
++extern void        nouveau_irq_uninstall(struct drm_device *);
++
++/* nouveau_sgdma.c */
++extern int nouveau_sgdma_init(struct drm_device *);
++extern void nouveau_sgdma_takedown(struct drm_device *);
++extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
++                                uint32_t *page);
++extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
++extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
++extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);
++
++/* nouveau_dma.c */
++extern int  nouveau_dma_channel_init(struct drm_device *);
++extern void nouveau_dma_channel_takedown(struct drm_device *);
++extern int  nouveau_dma_wait(struct drm_device *, int size);
++
++/* nv04_fb.c */
++extern int  nv04_fb_init(struct drm_device *);
++extern void nv04_fb_takedown(struct drm_device *);
++
++/* nv10_fb.c */
++extern int  nv10_fb_init(struct drm_device *);
++extern void nv10_fb_takedown(struct drm_device *);
++
++/* nv40_fb.c */
++extern int  nv40_fb_init(struct drm_device *);
++extern void nv40_fb_takedown(struct drm_device *);
++
++/* nv04_fifo.c */
++extern int  nv04_fifo_channel_id(struct drm_device *);
++extern int  nv04_fifo_create_context(struct nouveau_channel *);
++extern void nv04_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv04_fifo_load_context(struct nouveau_channel *);
++extern int  nv04_fifo_save_context(struct nouveau_channel *);
++
++/* nv10_fifo.c */
++extern int  nv10_fifo_channel_id(struct drm_device *);
++extern int  nv10_fifo_create_context(struct nouveau_channel *);
++extern void nv10_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv10_fifo_load_context(struct nouveau_channel *);
++extern int  nv10_fifo_save_context(struct nouveau_channel *);
++
++/* nv40_fifo.c */
++extern int  nv40_fifo_init(struct drm_device *);
++extern int  nv40_fifo_create_context(struct nouveau_channel *);
++extern void nv40_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv40_fifo_load_context(struct nouveau_channel *);
++extern int  nv40_fifo_save_context(struct nouveau_channel *);
++
++/* nv50_fifo.c */
++extern int  nv50_fifo_init(struct drm_device *);
++extern void nv50_fifo_takedown(struct drm_device *);
++extern int  nv50_fifo_channel_id(struct drm_device *);
++extern int  nv50_fifo_create_context(struct nouveau_channel *);
++extern void nv50_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv50_fifo_load_context(struct nouveau_channel *);
++extern int  nv50_fifo_save_context(struct nouveau_channel *);
++
++/* nv04_graph.c */
++extern void nouveau_nv04_context_switch(struct drm_device *);
++extern int  nv04_graph_init(struct drm_device *);
++extern void nv04_graph_takedown(struct drm_device *);
++extern int  nv04_graph_create_context(struct nouveau_channel *);
++extern void nv04_graph_destroy_context(struct nouveau_channel *);
++extern int  nv04_graph_load_context(struct nouveau_channel *);
++extern int  nv04_graph_save_context(struct nouveau_channel *);
++
++/* nv10_graph.c */
++extern void nouveau_nv10_context_switch(struct drm_device *);
++extern int  nv10_graph_init(struct drm_device *);
++extern void nv10_graph_takedown(struct drm_device *);
++extern int  nv10_graph_create_context(struct nouveau_channel *);
++extern void nv10_graph_destroy_context(struct nouveau_channel *);
++extern int  nv10_graph_load_context(struct nouveau_channel *);
++extern int  nv10_graph_save_context(struct nouveau_channel *);
++
++/* nv20_graph.c */
++extern int  nv20_graph_create_context(struct nouveau_channel *);
++extern void nv20_graph_destroy_context(struct nouveau_channel *);
++extern int  nv20_graph_load_context(struct nouveau_channel *);
++extern int  nv20_graph_save_context(struct nouveau_channel *);
++extern int  nv20_graph_init(struct drm_device *);
++extern void nv20_graph_takedown(struct drm_device *);
++extern int  nv30_graph_init(struct drm_device *);
++
++/* nv40_graph.c */
++extern int  nv40_graph_init(struct drm_device *);
++extern void nv40_graph_takedown(struct drm_device *);
++extern int  nv40_graph_create_context(struct nouveau_channel *);
++extern void nv40_graph_destroy_context(struct nouveau_channel *);
++extern int  nv40_graph_load_context(struct nouveau_channel *);
++extern int  nv40_graph_save_context(struct nouveau_channel *);
++
++/* nv50_graph.c */
++extern int  nv50_graph_init(struct drm_device *);
++extern void nv50_graph_takedown(struct drm_device *);
++extern int  nv50_graph_create_context(struct nouveau_channel *);
++extern void nv50_graph_destroy_context(struct nouveau_channel *);
++extern int  nv50_graph_load_context(struct nouveau_channel *);
++extern int  nv50_graph_save_context(struct nouveau_channel *);
++
++/* nv04_instmem.c */
++extern int  nv04_instmem_init(struct drm_device *);
++extern void nv04_instmem_takedown(struct drm_device *);
++extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
++                                uint32_t *size);
++extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
++
++/* nv50_instmem.c */
++extern int  nv50_instmem_init(struct drm_device *);
++extern void nv50_instmem_takedown(struct drm_device *);
++extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
++                                uint32_t *size);
++extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
++
++/* nv04_mc.c */
++extern int  nv04_mc_init(struct drm_device *);
++extern void nv04_mc_takedown(struct drm_device *);
++
++/* nv40_mc.c */
++extern int  nv40_mc_init(struct drm_device *);
++extern void nv40_mc_takedown(struct drm_device *);
++
++/* nv50_mc.c */
++extern int  nv50_mc_init(struct drm_device *);
++extern void nv50_mc_takedown(struct drm_device *);
++
++/* nv04_timer.c */
++extern int  nv04_timer_init(struct drm_device *);
++extern uint64_t nv04_timer_read(struct drm_device *);
++extern void nv04_timer_takedown(struct drm_device *);
++
++extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
++                               unsigned long arg);
++
++/* nouveau_buffer.c */
++extern struct drm_bo_driver nouveau_bo_driver;
++
++/* nouveau_fence.c */
++extern struct drm_fence_driver nouveau_fence_driver;
++extern void nouveau_fence_handler(struct drm_device *dev, int channel);
++
++#if defined(__powerpc__)
++#define NV_READ(reg)        in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
++#define NV_WRITE(reg,val)   out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) )
++#else
++#define NV_READ(reg)        DRM_READ32(  dev_priv->mmio, (reg) )
++#define NV_WRITE(reg,val)   DRM_WRITE32( dev_priv->mmio, (reg), (val) )
++#endif
++
++/* PRAMIN access */
++#if defined(__powerpc__)
++#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o))
++#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v))
++#else
++#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o))
++#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v))
++#endif
++
++#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2))
++#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))
++
++#endif /* __NOUVEAU_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_fence.c git-nokia/drivers/gpu/drm-tungsten/nouveau_fence.c
+--- git/drivers/gpu/drm-tungsten/nouveau_fence.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_fence.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,119 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++static int
++nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
++
++      /* DRM's channel always uses IRQs to signal fences */
++      if (class == dev_priv->channel.chan->id)
++              return 1;
++
++      /* Other channels don't use IRQs at all yet */
++      return 0;
++}
++
++static int
++nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
++                 uint32_t *breadcrumb, uint32_t *native_type)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[class];
++      struct nouveau_drm_channel *dchan = &dev_priv->channel;
++
++      DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
++
++      /* We can't emit fences on client channels, update sequence number
++       * and userspace will emit the fence
++       */
++      *breadcrumb  = ++chan->next_sequence;
++      *native_type = DRM_FENCE_TYPE_EXE;
++      if (chan != dchan->chan) {
++              DRM_DEBUG("user fence 0x%08x\n", *breadcrumb);
++              return 0;
++      }
++
++      DRM_DEBUG("emit 0x%08x\n", *breadcrumb);
++      BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
++      OUT_RING  (*breadcrumb);
++      BEGIN_RING(NvSubM2MF, 0x0150, 1);
++      OUT_RING  (0);
++      FIRE_RING ();
++
++      return 0;
++}
++
++static void
++nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
++      struct nouveau_channel *chan = dev_priv->fifos[class];
++
++      DRM_DEBUG("class=%d\n", class);
++      DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);
++
++      if (waiting_types & DRM_FENCE_TYPE_EXE) {
++              uint32_t sequence = NV_READ(chan->ref_cnt);
++
++              DRM_DEBUG("got 0x%08x\n", sequence);
++              drm_fence_handler(dev, class, sequence, waiting_types, 0);
++      }
++}
++
++void
++nouveau_fence_handler(struct drm_device *dev, int channel)
++{
++      struct drm_fence_manager *fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[channel];
++
++      DRM_DEBUG("class=%d\n", channel);
++
++      write_lock(&fm->lock);
++      nouveau_fence_poll(dev, channel, fc->waiting_types);
++      write_unlock(&fm->lock);
++}
++
++struct drm_fence_driver nouveau_fence_driver = {
++      .num_classes    = 8,
++      .wrap_diff      = (1 << 30),
++      .flush_diff     = (1 << 29),
++      .sequence_mask  = 0xffffffffU,
++      .has_irq        = nouveau_fence_has_irq,
++      .emit           = nouveau_fence_emit,
++      .flush          = NULL,
++      .poll           = nouveau_fence_poll,
++      .needed_flush   = NULL,
++      .wait           = NULL
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_fifo.c git-nokia/drivers/gpu/drm-tungsten/nouveau_fifo.c
+--- git/drivers/gpu/drm-tungsten/nouveau_fifo.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_fifo.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,601 @@
++/*
++ * Copyright 2005-2006 Stephane Marchesin
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++
++/* returns the size of fifo context */
++int nouveau_fifo_ctx_size(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++
++      if (dev_priv->card_type >= NV_40)
++              return 128;
++      else if (dev_priv->card_type >= NV_17)
++              return 64;
++      else
++              return 32;
++}
++
++/***********************************
++ * functions doing the actual work
++ ***********************************/
++
++static int nouveau_fifo_instmem_configure(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV03_PFIFO_RAMHT,
++                      (0x03 << 24) /* search 128 */ |
++                      ((dev_priv->ramht_bits - 9) << 16) |
++                      (dev_priv->ramht_offset >> 8)
++                      );
++
++      NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
++
++      switch(dev_priv->card_type)
++      {
++              case NV_40:
++                      switch (dev_priv->chipset) {
++                      case 0x47:
++                      case 0x49:
++                      case 0x4b:
++                              NV_WRITE(0x2230, 1);
++                              break;
++                      default:
++                              break;
++                      }
++                      NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
++                      break;
++              case NV_44:
++                      NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
++                                      (2 << 16));
++                      break;
++              case NV_30:
++              case NV_20:
++              case NV_17:
++                      NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
++                                      (1 << 16) /* 64 Bytes entry*/);
++                      /* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
++                      break;
++              case NV_11:
++              case NV_10:
++              case NV_04:
++                      NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
++                      break;
++      }
++
++      return 0;
++}
++
++int nouveau_fifo_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PFIFO);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PFIFO);
++
++      /* Enable PFIFO error reporting */
++      NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
++      NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
++
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++
++      ret = nouveau_fifo_instmem_configure(dev);
++      if (ret) {
++              DRM_ERROR("Failed to configure instance memory\n");
++              return ret;
++      }
++
++      /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */
++
++      DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");
++
++      /* All channels into PIO mode */
++      NV_WRITE(NV04_PFIFO_MODE, 0x00000000);
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++      /* Channel 0 active, PIO mode */
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
++      /* PUT and GET to 0 */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
++      /* No cmdbuf object */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
++      NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
++      NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
++                                    NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                                    NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
++#ifdef __BIG_ENDIAN
++                                    NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                                    0x00000000);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
++
++      /* FIXME on NV04 */
++      if (dev_priv->card_type >= NV_10) {
++              NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
++              NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
++              if (dev_priv->card_type >= NV_40)
++                      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
++              else
++                      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
++      } else {
++              NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
++              NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
++              NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
++      }
++
++      NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++      return 0;
++}
++
++static int
++nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct mem_block *pb = chan->pushbuf_mem;
++      struct nouveau_gpuobj *pushbuf = NULL;
++      int ret;
++
++      if (pb->flags & NOUVEAU_MEM_AGP) {
++              ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
++                                                NV_DMA_ACCESS_RO,
++                                                &pushbuf,
++                                                &chan->pushbuf_base);
++      } else
++      if (pb->flags & NOUVEAU_MEM_PCI) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           pb->start, pb->size,
++                                           NV_DMA_ACCESS_RO,
++                                           NV_DMA_TARGET_PCI_NONLINEAR,
++                                           &pushbuf);
++              chan->pushbuf_base = 0;
++      } else if (dev_priv->card_type != NV_04) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           pb->start, pb->size,
++                                           NV_DMA_ACCESS_RO,
++                                           NV_DMA_TARGET_VIDMEM, &pushbuf);
++              chan->pushbuf_base = 0;
++      } else {
++              /* NV04 cmdbuf hack, from original ddx.. not sure of it's
++               * exact reason for existing :)  PCI access to cmdbuf in
++               * VRAM.
++               */
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           pb->start +
++                                             drm_get_resource_start(dev, 1),
++                                           pb->size, NV_DMA_ACCESS_RO,
++                                           NV_DMA_TARGET_PCI, &pushbuf);
++              chan->pushbuf_base = 0;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
++                                        &chan->pushbuf))) {
++              DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
++              if (pushbuf != dev_priv->gart_info.sg_ctxdma)
++                      nouveau_gpuobj_del(dev, &pushbuf);
++              return ret;
++      }
++
++      return 0;
++}
++
++static struct mem_block *
++nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_config *config = &dev_priv->config;
++      struct mem_block *pb;
++      int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
++
++      /* Defaults for unconfigured values */
++      if (!config->cmdbuf.location)
++              config->cmdbuf.location = NOUVEAU_MEM_FB;
++      if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
++              config->cmdbuf.size = pb_min_size;
++
++      pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
++                             config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
++                             (struct drm_file *)-2);
++      if (!pb)
++              DRM_ERROR("Couldn't allocate DMA push buffer.\n");
++
++      return pb;
++}
++
++/* allocates and initializes a fifo for user space consumption */
++int
++nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
++                 struct drm_file *file_priv, struct mem_block *pushbuf,
++                 uint32_t vram_handle, uint32_t tt_handle)
++{
++      int ret;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_channel *chan;
++      int channel;
++
++      /*
++       * Alright, here is the full story
++       * Nvidia cards have multiple hw fifo contexts (praise them for that,
++       * no complicated crash-prone context switches)
++       * We allocate a new context for each app and let it write to it directly
++       * (woo, full userspace command submission !)
++       * When there are no more contexts, you lost
++       */
++      for (channel = 0; channel < engine->fifo.channels; channel++) {
++              if (dev_priv->fifos[channel] == NULL)
++                      break;
++      }
++
++      /* no more fifos. you lost. */
++      if (channel == engine->fifo.channels)
++              return -EINVAL;
++
++      dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
++                                            DRM_MEM_DRIVER);
++      if (!dev_priv->fifos[channel])
++              return -ENOMEM;
++      dev_priv->fifo_alloc_count++;
++      chan = dev_priv->fifos[channel];
++      chan->dev = dev;
++      chan->id = channel;
++      chan->file_priv = file_priv;
++      chan->pushbuf_mem = pushbuf;
++
++      DRM_INFO("Allocating FIFO number %d\n", channel);
++
++      /* Locate channel's user control regs */
++      if (dev_priv->card_type < NV_40) {
++              chan->user = NV03_USER(channel);
++              chan->user_size = NV03_USER_SIZE;
++              chan->put = NV03_USER_DMA_PUT(channel);
++              chan->get = NV03_USER_DMA_GET(channel);
++              chan->ref_cnt = NV03_USER_REF_CNT(channel);
++      } else
++      if (dev_priv->card_type < NV_50) {
++              chan->user = NV40_USER(channel);
++              chan->user_size = NV40_USER_SIZE;
++              chan->put = NV40_USER_DMA_PUT(channel);
++              chan->get = NV40_USER_DMA_GET(channel);
++              chan->ref_cnt = NV40_USER_REF_CNT(channel);
++      } else {
++              chan->user = NV50_USER(channel);
++              chan->user_size = NV50_USER_SIZE;
++              chan->put = NV50_USER_DMA_PUT(channel);
++              chan->get = NV50_USER_DMA_GET(channel);
++              chan->ref_cnt = NV50_USER_REF_CNT(channel);
++      }
++
++      /* Allocate space for per-channel fixed notifier memory */
++      ret = nouveau_notifier_init_channel(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* Setup channel's default objects */
++      ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* Create a dma object for the push buffer */
++      ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      nouveau_wait_for_idle(dev);
++
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      /* Create a graphics context for new channel */
++      ret = engine->graph.create_context(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* Construct inital RAMFC for new channel */
++      ret = engine->fifo.create_context(chan);
++      if (ret) {
++              nouveau_fifo_free(chan);
++              return ret;
++      }
++
++      /* setup channel's default get/put values
++       * XXX: quite possibly extremely pointless..
++       */
++      NV_WRITE(chan->get, chan->pushbuf_base);
++      NV_WRITE(chan->put, chan->pushbuf_base);
++
++      /* If this is the first channel, setup PFIFO ourselves.  For any
++       * other case, the GPU will handle this when it switches contexts.
++       */
++      if (dev_priv->fifo_alloc_count == 1) {
++              ret = engine->fifo.load_context(chan);
++              if (ret) {
++                      nouveau_fifo_free(chan);
++                      return ret;
++              }
++
++              ret = engine->graph.load_context(chan);
++              if (ret) {
++                      nouveau_fifo_free(chan);
++                      return ret;
++              }
++      }
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 1);
++
++      DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
++      *chan_ret = chan;
++      return 0;
++}
++
++int
++nouveau_channel_idle(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint32_t caches;
++      int idle;
++
++      caches = NV_READ(NV03_PFIFO_CACHES);
++      NV_WRITE(NV03_PFIFO_CACHES, caches & ~1);
++
++      if (engine->fifo.channel_id(dev) != chan->id) {
++              struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
++
++              if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1))
++                      idle = 0;
++              else
++                      idle = 1;
++      } else {
++              idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) ==
++                      NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      }
++
++      NV_WRITE(NV03_PFIFO_CACHES, caches);
++      return idle;
++}
++
++/* stops a fifo */
++void nouveau_fifo_free(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint64_t t_start;
++
++      DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
++
++      /* Give the channel a chance to idle, wait 2s (hopefully) */
++      t_start = engine->timer.read(dev);
++      while (!nouveau_channel_idle(chan)) {
++              if (engine->timer.read(dev) - t_start > 2000000000ULL) {
++                      DRM_ERROR("Failed to idle channel %d before destroy."
++                                "Prepare for strangeness..\n", chan->id);
++                      break;
++              }
++      }
++
++      /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
++       *     from CACHE1 too?
++       */
++
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      // FIXME XXX needs more code
++
++      engine->fifo.destroy_context(chan);
++
++      /* Cleanup PGRAPH state */
++      engine->graph.destroy_context(chan);
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++
++      /* Deallocate push buffer */
++      nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
++      if (chan->pushbuf_mem) {
++              nouveau_mem_free(dev, chan->pushbuf_mem);
++              chan->pushbuf_mem = NULL;
++      }
++
++      /* Destroy objects belonging to the channel */
++      nouveau_gpuobj_channel_takedown(chan);
++
++      nouveau_notifier_takedown_channel(chan);
++
++      dev_priv->fifos[chan->id] = NULL;
++      dev_priv->fifo_alloc_count--;
++      drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
++}
++
++/* cleanups all the fifos from file_priv */
++void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int i;
++
++      DRM_DEBUG("clearing FIFO enables from file_priv\n");
++      for(i = 0; i < engine->fifo.channels; i++) {
++              struct nouveau_channel *chan = dev_priv->fifos[i];
++
++              if (chan && chan->file_priv == file_priv)
++                      nouveau_fifo_free(chan);
++      }
++}
++
++int
++nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
++                 int channel)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      if (channel >= engine->fifo.channels)
++              return 0;
++      if (dev_priv->fifos[channel] == NULL)
++              return 0;
++      return (dev_priv->fifos[channel]->file_priv == file_priv);
++}
++
++/***********************************
++ * ioctls wrapping the functions
++ ***********************************/
++
++static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
++                                  struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_channel_alloc *init = data;
++      struct drm_map_list *entry;
++      struct nouveau_channel *chan;
++      struct mem_block *pushbuf;
++      int res;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
++              return -EINVAL;
++
++      pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
++      if (!pushbuf)
++              return -ENOMEM;
++
++      res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
++                               init->fb_ctxdma_handle,
++                               init->tt_ctxdma_handle);
++      if (res)
++              return res;
++      init->channel  = chan->id;
++      init->put_base = chan->pushbuf_base;
++
++      /* make the fifo available to user space */
++      /* first, the fifo control regs */
++      init->ctrl = dev_priv->mmio->offset + chan->user;
++      init->ctrl_size = chan->user_size;
++      res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
++                       0, &chan->regs);
++      if (res != 0)
++              return res;
++
++      entry = drm_find_matching_map(dev, chan->regs);
++      if (!entry)
++              return -EINVAL;
++      init->ctrl = entry->user_token;
++
++      /* pass back FIFO map info to the caller */
++      init->cmdbuf      = chan->pushbuf_mem->map_handle;
++      init->cmdbuf_size = chan->pushbuf_mem->size;
++
++      /* and the notifier block */
++      init->notifier      = chan->notifier_block->map_handle;
++      init->notifier_size = chan->notifier_block->size;
++
++      return 0;
++}
++
++static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
++                                 struct drm_file *file_priv)
++{
++      struct drm_nouveau_channel_free *cfree = data;
++      struct nouveau_channel *chan;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
++
++      nouveau_fifo_free(chan);
++      return 0;
++}
++
++/***********************************
++ * finally, the ioctl table
++ ***********************************/
++
++struct drm_ioctl_desc nouveau_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH),
++};
++
++int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_ioc32.c git-nokia/drivers/gpu/drm-tungsten/nouveau_ioc32.c
+--- git/drivers/gpu/drm-tungsten/nouveau_ioc32.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_ioc32.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,72 @@
++/**
++ * \file mga_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the MGA DRM.
++ *
++ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
++ *
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++
++#include "nouveau_drm.h"
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
++                       unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++#if 0
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
++              fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
++#endif
++      lock_kernel();    /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_irq.c git-nokia/drivers/gpu/drm-tungsten/nouveau_irq.c
+--- git/drivers/gpu/drm-tungsten/nouveau_irq.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_irq.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,568 @@
++/*
++ * Copyright (C) 2006 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Ben Skeggs <darktama@iinet.net.au>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_reg.h"
++#include "nouveau_swmthd.h"
++
++void
++nouveau_irq_preinstall(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Master disable */
++      NV_WRITE(NV03_PMC_INTR_EN_0, 0);
++}
++
++int
++nouveau_irq_postinstall(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Master enable */
++      NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
++
++      return 0;
++}
++
++void
++nouveau_irq_uninstall(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Master disable */
++      NV_WRITE(NV03_PMC_INTR_EN_0, 0);
++}
++
++static void
++nouveau_fifo_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint32_t status, reassign;
++
++      reassign = NV_READ(NV03_PFIFO_CACHES) & 1;
++      while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
++              uint32_t chid, get;
++
++              NV_WRITE(NV03_PFIFO_CACHES, 0);
++
++              chid = engine->fifo.channel_id(dev);
++              get  = NV_READ(NV03_PFIFO_CACHE1_GET);
++
++              if (status & NV_PFIFO_INTR_CACHE_ERROR) {
++                      uint32_t mthd, data;
++                      int ptr;
++
++                      ptr = get >> 2;
++                      if (dev_priv->card_type < NV_40) {
++                              mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr));
++                              data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr));
++                      } else {
++                              mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr));
++                              data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr));
++                      }
++
++                      DRM_INFO("PFIFO_CACHE_ERROR - "
++                               "Ch %d/%d Mthd 0x%04x Data 0x%08x\n",
++                               chid, (mthd >> 13) & 7, mthd & 0x1ffc, data);
++
++                      NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4);
++                      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1);
++
++                      status &= ~NV_PFIFO_INTR_CACHE_ERROR;
++                      NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
++              }
++
++              if (status & NV_PFIFO_INTR_DMA_PUSHER) {
++                      DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid);
++
++                      status &= ~NV_PFIFO_INTR_DMA_PUSHER;
++                      NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);
++
++                      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
++                      if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get)
++                              NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4);
++              }
++
++              if (status) {
++                      DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
++                      NV_WRITE(NV03_PFIFO_INTR_0, status);
++                      NV_WRITE(NV03_PMC_INTR_EN_0, 0);
++              }
++
++              NV_WRITE(NV03_PFIFO_CACHES, reassign);
++      }
++
++      NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
++}
++
++struct nouveau_bitfield_names {
++      uint32_t mask;
++      const char * name;
++};
++
++static struct nouveau_bitfield_names nouveau_nstatus_names[] =
++{
++      { NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
++      { NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
++      { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
++      { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
++};
++
++static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] =
++{
++      { NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
++      { NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
++      { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
++      { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
++};
++
++static struct nouveau_bitfield_names nouveau_nsource_names[] =
++{
++      { NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
++      { NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
++      { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
++      { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
++      { NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
++      { NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
++      { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
++      { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
++      { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
++      { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
++      { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
++      { NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
++      { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
++      { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
++      { NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
++      { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
++      { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
++      { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
++      { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
++};
++
++static void
++nouveau_print_bitfield_names(uint32_t value,
++                             const struct nouveau_bitfield_names *namelist,
++                             const int namelist_len)
++{
++      int i;
++      for(i=0; i<namelist_len; ++i) {
++              uint32_t mask = namelist[i].mask;
++              if(value & mask) {
++                      printk(" %s", namelist[i].name);
++                      value &= ~mask;
++              }
++      }
++      if(value)
++              printk(" (unknown bits 0x%08x)", value);
++}
++
++static int
++nouveau_graph_chid_from_grctx(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++      int i;
++
++      if (dev_priv->card_type < NV_40)
++              return dev_priv->Engine.fifo.channels;
++      else
++      if (dev_priv->card_type < NV_50)
++              inst = (NV_READ(0x40032c) & 0xfffff) << 4;
++      else
++              inst = NV_READ(0x40032c) & 0xfffff;
++
++      for (i = 0; i < dev_priv->Engine.fifo.channels; i++) {
++              struct nouveau_channel *chan = dev_priv->fifos[i];
++
++              if (!chan || !chan->ramin_grctx)
++                      continue;
++
++              if (dev_priv->card_type < NV_50) {
++                      if (inst == chan->ramin_grctx->instance)
++                              break;
++              } else {
++                      if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0))
++                              break;
++              }
++      }
++
++      return i;
++}
++
++static int
++nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int channel;
++
++      if (dev_priv->card_type < NV_10)
++              channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
++      else
++      if (dev_priv->card_type < NV_40)
++              channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
++      else
++              channel = nouveau_graph_chid_from_grctx(dev);
++
++      if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
++              DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);
++              return -EINVAL;
++      }
++
++      *channel_ret = channel;
++      return 0;
++}
++
++struct nouveau_pgraph_trap {
++      int channel;
++      int class;
++      int subc, mthd, size;
++      uint32_t data, data2;
++      uint32_t nsource, nstatus;
++};
++
++static void
++nouveau_graph_trap_info(struct drm_device *dev,
++                      struct nouveau_pgraph_trap *trap)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t address;
++
++      trap->nsource = trap->nstatus = 0;
++      if (dev_priv->card_type < NV_50) {
++              trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE);
++              trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
++      }
++
++      if (nouveau_graph_trapped_channel(dev, &trap->channel))
++              trap->channel = -1;
++      address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
++
++      trap->mthd = address & 0x1FFC;
++      trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
++      if (dev_priv->card_type < NV_10) {
++              trap->subc  = (address >> 13) & 0x7;
++      } else {
++              trap->subc  = (address >> 16) & 0x7;
++              trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
++      }
++
++      if (dev_priv->card_type < NV_10) {
++              trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF;
++      } else if (dev_priv->card_type < NV_40) {
++              trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF;
++      } else if (dev_priv->card_type < NV_50) {
++              trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF;
++      } else {
++              trap->class = NV_READ(0x400814);
++      }
++}
++
++static void
++nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
++                           struct nouveau_pgraph_trap *trap)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
++
++      DRM_INFO("%s - nSource:", id);
++      nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
++                                   ARRAY_SIZE(nouveau_nsource_names));
++      printk(", nStatus:");
++      if (dev_priv->card_type < NV_10)
++              nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names,
++                                   ARRAY_SIZE(nouveau_nstatus_names));
++      else
++              nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10,
++                                   ARRAY_SIZE(nouveau_nstatus_names_nv10));
++      printk("\n");
++
++      DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n",
++               id, trap->channel, trap->subc, trap->class, trap->mthd,
++               trap->data2, trap->data);
++}
++
++static inline void
++nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
++{
++      struct nouveau_pgraph_trap trap;
++      int unhandled = 0;
++
++      nouveau_graph_trap_info(dev, &trap);
++
++      if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
++              /* NV4 (nvidia TNT 1) reports software methods with
++               * PGRAPH NOTIFY ILLEGAL_MTHD
++               */
++              DRM_DEBUG("Got NV04 software method method %x for class %#x\n",
++                        trap.mthd, trap.class);
++
++              if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
++                      DRM_ERROR("Unable to execute NV04 software method %x "
++                                "for object class %x. Please report.\n",
++                                trap.mthd, trap.class);
++                      unhandled = 1;
++              }
++      } else {
++              unhandled = 1;
++      }
++
++      if (unhandled)
++              nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
++}
++
++static inline void
++nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
++{
++      struct nouveau_pgraph_trap trap;
++      int unhandled = 0;
++
++      nouveau_graph_trap_info(dev, &trap);
++      trap.nsource = nsource;
++
++      if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
++              if (trap.channel >= 0 && trap.mthd == 0x0150) {
++                      nouveau_fence_handler(dev, trap.channel);
++              } else
++              if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
++                      unhandled = 1;
++              }
++      } else {
++              unhandled = 1;
++      }
++
++      if (unhandled)
++              nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
++}
++
++static inline void
++nouveau_pgraph_intr_context_switch(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      uint32_t chid;
++
++      chid = engine->fifo.channel_id(dev);
++      DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid);
++
++      switch(dev_priv->card_type) {
++      case NV_04:
++      case NV_05:
++              nouveau_nv04_context_switch(dev);
++              break;
++      case NV_10:
++      case NV_11:
++      case NV_17:
++              nouveau_nv10_context_switch(dev);
++              break;
++      default:
++              DRM_ERROR("Context switch not implemented\n");
++              break;
++      }
++}
++
++static void
++nouveau_pgraph_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t status;
++
++      while ((status = NV_READ(NV03_PGRAPH_INTR))) {
++              uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE);
++
++              if (status & NV_PGRAPH_INTR_NOTIFY) {
++                      nouveau_pgraph_intr_notify(dev, nsource);
++
++                      status &= ~NV_PGRAPH_INTR_NOTIFY;
++                      NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
++              }
++
++              if (status & NV_PGRAPH_INTR_ERROR) {
++                      nouveau_pgraph_intr_error(dev, nsource);
++
++                      status &= ~NV_PGRAPH_INTR_ERROR;
++                      NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
++              }
++
++              if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
++                      nouveau_pgraph_intr_context_switch(dev);
++
++                      status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
++                      NV_WRITE(NV03_PGRAPH_INTR,
++                               NV_PGRAPH_INTR_CONTEXT_SWITCH);
++              }
++
++              if (status) {
++                      DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
++                      NV_WRITE(NV03_PGRAPH_INTR, status);
++              }
++
++              if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
++                      NV_WRITE(NV04_PGRAPH_FIFO, 1);
++      }
++
++      NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
++}
++
++static void
++nv50_pgraph_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t status;
++
++      status = NV_READ(NV03_PGRAPH_INTR);
++
++      if (status & 0x00000020) {
++              nouveau_pgraph_intr_error(dev,
++                                        NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
++
++              status &= ~0x00000020;
++              NV_WRITE(NV03_PGRAPH_INTR, 0x00000020);
++      }
++
++      if (status & 0x00100000) {
++              nouveau_pgraph_intr_error(dev,
++                                        NV03_PGRAPH_NSOURCE_DATA_ERROR);
++
++              status &= ~0x00100000;
++              NV_WRITE(NV03_PGRAPH_INTR, 0x00100000);
++      }
++
++      if (status & 0x00200000) {
++              nouveau_pgraph_intr_error(dev,
++                                        NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
++
++              status &= ~0x00200000;
++              NV_WRITE(NV03_PGRAPH_INTR, 0x00200000);
++      }
++
++      if (status) {
++              DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
++              NV_WRITE(NV03_PGRAPH_INTR, status);
++      }
++
++      {
++              const int isb = (1 << 16) | (1 << 0);
++
++              if ((NV_READ(0x400500) & isb) != isb)
++                      NV_WRITE(0x400500, NV_READ(0x400500) | isb);
++      }
++
++      NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
++}
++
++static void
++nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (crtc&1) {
++              NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
++      }
++
++      if (crtc&2) {
++              NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
++      }
++}
++
++static void
++nouveau_nv50_display_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR);
++
++      DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val);
++
++      NV_WRITE(NV50_DISPLAY_SUPERVISOR, val);
++}
++
++static void
++nouveau_nv50_i2c_irq_handler(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER));
++
++      /* This seems to be the way to acknowledge an interrupt. */
++      NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF);
++}
++
++irqreturn_t
++nouveau_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device*)arg;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t status;
++
++      status = NV_READ(NV03_PMC_INTR_0);
++      if (!status)
++              return IRQ_NONE;
++
++      if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
++              nouveau_fifo_irq_handler(dev);
++              status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
++              if (dev_priv->card_type >= NV_50)
++                      nv50_pgraph_irq_handler(dev);
++              else
++                      nouveau_pgraph_irq_handler(dev);
++
++              status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
++              nouveau_crtc_irq_handler(dev, (status>>24)&3);
++              status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) {
++              nouveau_nv50_display_irq_handler(dev);
++              status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING;
++      }
++
++      if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) {
++              nouveau_nv50_i2c_irq_handler(dev);
++              status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING;
++      }
++
++      if (status)
++              DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status);
++
++      return IRQ_HANDLED;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_mem.c git-nokia/drivers/gpu/drm-tungsten/nouveau_mem.c
+--- git/drivers/gpu/drm-tungsten/nouveau_mem.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_mem.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,872 @@
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ * Copyright 2005 Stephane Marchesin
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "nouveau_drv.h"
++
++static struct mem_block *
++split_block(struct mem_block *p, uint64_t start, uint64_t size,
++          struct drm_file *file_priv)
++{
++      /* Maybe cut off the start of an existing block */
++      if (start > p->start) {
++              struct mem_block *newblock =
++                      drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start;
++              newblock->size = p->size - (start - p->start);
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size -= newblock->size;
++              p = newblock;
++      }
++
++      /* Maybe cut off the end of an existing block */
++      if (size < p->size) {
++              struct mem_block *newblock =
++                      drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start + size;
++              newblock->size = p->size - size;
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size = size;
++      }
++
++out:
++      /* Our block is in the middle */
++      p->file_priv = file_priv;
++      return p;
++}
++
++struct mem_block *
++nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
++                      int align2, struct drm_file *file_priv, int tail)
++{
++      struct mem_block *p;
++      uint64_t mask = (1 << align2) - 1;
++
++      if (!heap)
++              return NULL;
++
++      if (tail) {
++              list_for_each_prev(p, heap) {
++                      uint64_t start = ((p->start + p->size) - size) & ~mask;
++
++                      if (p->file_priv == 0 && start >= p->start &&
++                          start + size <= p->start + p->size)
++                              return split_block(p, start, size, file_priv);
++              }
++      } else {
++              list_for_each(p, heap) {
++                      uint64_t start = (p->start + mask) & ~mask;
++
++                      if (p->file_priv == 0 &&
++                          start + size <= p->start + p->size)
++                              return split_block(p, start, size, file_priv);
++              }
++      }
++
++      return NULL;
++}
++
++static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
++{
++      struct mem_block *p;
++
++      list_for_each(p, heap)
++              if (p->start == start)
++                      return p;
++
++      return NULL;
++}
++
++void nouveau_mem_free_block(struct mem_block *p)
++{
++      p->file_priv = NULL;
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      if (p->next->file_priv == 0) {
++              struct mem_block *q = p->next;
++              p->size += q->size;
++              p->next = q->next;
++              p->next->prev = p;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFS);
++      }
++
++      if (p->prev->file_priv == 0) {
++              struct mem_block *q = p->prev;
++              q->size += p->size;
++              q->next = p->next;
++              q->next->prev = q;
++              drm_free(p, sizeof(*q), DRM_MEM_BUFS);
++      }
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
++                        uint64_t size)
++{
++      struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
++
++      if (!blocks)
++              return -ENOMEM;
++
++      *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
++      if (!*heap) {
++              drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++
++      blocks->start = start;
++      blocks->size = size;
++      blocks->file_priv = NULL;
++      blocks->next = blocks->prev = *heap;
++
++      memset(*heap, 0, sizeof(**heap));
++      (*heap)->file_priv = (struct drm_file *) - 1;
++      (*heap)->next = (*heap)->prev = blocks;
++      return 0;
++}
++
++/*
++ * Free all blocks associated with the releasing file_priv
++ */
++void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
++{
++      struct mem_block *p;
++
++      if (!heap || !heap->next)
++              return;
++
++      list_for_each(p, heap) {
++              if (p->file_priv == file_priv)
++                      p->file_priv = NULL;
++      }
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      list_for_each(p, heap) {
++              while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
++                     (p->next!=heap)) {
++                      struct mem_block *q = p->next;
++                      p->size += q->size;
++                      p->next = q->next;
++                      p->next->prev = p;
++                      drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++              }
++      }
++}
++
++/*
++ * Cleanup everything
++ */
++void nouveau_mem_takedown(struct mem_block **heap)
++{
++      struct mem_block *p;
++
++      if (!*heap)
++              return;
++
++      for (p = (*heap)->next; p != *heap;) {
++              struct mem_block *q = p;
++              p = p->next;
++              drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++      }
++
++      drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
++      *heap = NULL;
++}
++
++void nouveau_mem_close(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      nouveau_mem_takedown(&dev_priv->agp_heap);
++      nouveau_mem_takedown(&dev_priv->fb_heap);
++      if (dev_priv->pci_heap)
++              nouveau_mem_takedown(&dev_priv->pci_heap);
++}
++
++/*XXX won't work on BSD because of pci_read_config_dword */
++static uint32_t
++nouveau_mem_fb_amount_igp(struct drm_device *dev)
++{
++#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct pci_dev *bridge;
++      uint32_t mem;
++
++      bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
++      if (!bridge) {
++              DRM_ERROR("no bridge device\n");
++              return 0;
++      }
++
++      if (dev_priv->flags&NV_NFORCE) {
++              pci_read_config_dword(bridge, 0x7C, &mem);
++              return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
++      } else
++      if(dev_priv->flags&NV_NFORCE2) {
++              pci_read_config_dword(bridge, 0x84, &mem);
++              return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
++      }
++
++      DRM_ERROR("impossible!\n");
++#else
++      DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
++#endif
++
++      return 0;
++}
++
++/* returns the amount of FB ram in bytes */
++uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      switch(dev_priv->card_type)
++      {
++              case NV_04:
++              case NV_05:
++                      if (NV_READ(NV03_BOOT_0) & 0x00000100) {
++                              return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
++                      } else
++                      switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
++                      {
++                              case NV04_BOOT_0_RAM_AMOUNT_32MB:
++                                      return 32*1024*1024;
++                              case NV04_BOOT_0_RAM_AMOUNT_16MB:
++                                      return 16*1024*1024;
++                              case NV04_BOOT_0_RAM_AMOUNT_8MB:
++                                      return 8*1024*1024;
++                              case NV04_BOOT_0_RAM_AMOUNT_4MB:
++                                      return 4*1024*1024;
++                      }
++                      break;
++              case NV_10:
++              case NV_11:
++              case NV_17:
++              case NV_20:
++              case NV_30:
++              case NV_40:
++              case NV_44:
++              case NV_50:
++              default:
++                      if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
++                              return nouveau_mem_fb_amount_igp(dev);
++                      } else {
++                              uint64_t mem;
++
++                              mem = (NV_READ(NV04_FIFO_DATA) &
++                                     NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
++                                    NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
++                              return mem*1024*1024;
++                      }
++                      break;
++      }
++
++      DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
++      return 0;
++}
++
++static void nouveau_mem_reset_agp(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
++
++      saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
++      saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);
++
++      /* clear busmaster bit */
++      NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
++      /* clear SBA and AGP bits */
++      NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
++
++      /* power cycle pgraph, if enabled */
++      pmc_enable = NV_READ(NV03_PMC_ENABLE);
++      if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
++              NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
++              NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                              NV_PMC_ENABLE_PGRAPH);
++      }
++
++      /* and restore (gives effect of resetting AGP) */
++      NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
++      NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
++}
++
++static int
++nouveau_mem_init_agp(struct drm_device *dev, int ttm)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_agp_info info;
++      struct drm_agp_mode mode;
++      int ret;
++
++      nouveau_mem_reset_agp(dev);
++
++      ret = drm_agp_acquire(dev);
++      if (ret) {
++              DRM_ERROR("Unable to acquire AGP: %d\n", ret);
++              return ret;
++      }
++
++      ret = drm_agp_info(dev, &info);
++      if (ret) {
++              DRM_ERROR("Unable to get AGP info: %d\n", ret);
++              return ret;
++      }
++
++      /* see agp.h for the AGPSTAT_* modes available */
++      mode.mode = info.mode;
++      ret = drm_agp_enable(dev, mode);
++      if (ret) {
++              DRM_ERROR("Unable to enable AGP: %d\n", ret);
++              return ret;
++      }
++
++      if (!ttm) {
++              struct drm_agp_buffer agp_req;
++              struct drm_agp_binding bind_req;
++
++              agp_req.size = info.aperture_size;
++              agp_req.type = 0;
++              ret = drm_agp_alloc(dev, &agp_req);
++              if (ret) {
++                      DRM_ERROR("Unable to alloc AGP: %d\n", ret);
++                              return ret;
++              }
++
++              bind_req.handle = agp_req.handle;
++              bind_req.offset = 0;
++              ret = drm_agp_bind(dev, &bind_req);
++              if (ret) {
++                      DRM_ERROR("Unable to bind AGP: %d\n", ret);
++                      return ret;
++              }
++      }
++
++      dev_priv->gart_info.type        = NOUVEAU_GART_AGP;
++      dev_priv->gart_info.aper_base   = info.aperture_base;
++      dev_priv->gart_info.aper_size   = info.aperture_size;
++      return 0;
++}
++
++#define HACK_OLD_MM
++int
++nouveau_mem_init_ttm(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t vram_size, bar1_size;
++      int ret;
++
++      dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
++      dev_priv->fb_phys = drm_get_resource_start(dev,1);
++      dev_priv->gart_info.type = NOUVEAU_GART_NONE;
++
++      drm_bo_driver_init(dev);
++
++      /* non-mappable vram */
++      dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
++      dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
++      vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
++      bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
++      if (bar1_size < vram_size) {
++              if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
++                                        bar1_size, vram_size - bar1_size, 1))) {
++                      DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
++                      return ret;
++              }
++              vram_size = bar1_size;
++      }
++
++      /* mappable vram */
++#ifdef HACK_OLD_MM
++      vram_size /= 4;
++#endif
++      if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
++              DRM_ERROR("Failed VRAM mm init: %d\n", ret);
++              return ret;
++      }
++
++      /* GART */
++#if !defined(__powerpc__) && !defined(__ia64__)
++      if (drm_device_is_agp(dev) && dev->agp) {
++              if ((ret = nouveau_mem_init_agp(dev, 1)))
++                      DRM_ERROR("Error initialising AGP: %d\n", ret);
++      }
++#endif
++
++      if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
++              if ((ret = nouveau_sgdma_init(dev)))
++                      DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
++      }
++
++      if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
++                                dev_priv->gart_info.aper_size >>
++                                PAGE_SHIFT, 1))) {
++              DRM_ERROR("Failed TT mm init: %d\n", ret);
++              return ret;
++      }
++
++#ifdef HACK_OLD_MM
++      vram_size <<= PAGE_SHIFT;
++      DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
++      if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
++              return -ENOMEM;
++#endif
++
++      return 0;
++}
++
++int nouveau_mem_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t fb_size;
++      int ret = 0;
++
++      dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
++      dev_priv->fb_phys = 0;
++      dev_priv->gart_info.type = NOUVEAU_GART_NONE;
++
++      /* setup a mtrr over the FB */
++      dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
++                                       nouveau_mem_fb_amount(dev),
++                                       DRM_MTRR_WC);
++
++      /* Init FB */
++      dev_priv->fb_phys=drm_get_resource_start(dev,1);
++      fb_size = nouveau_mem_fb_amount(dev);
++      /* On G80, limit VRAM to 512MiB temporarily due to limits in how
++       * we handle VRAM page tables.
++       */
++      if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
++              fb_size = (512 * 1024 * 1024);
++      /* On at least NV40, RAMIN is actually at the end of vram.
++       * We don't want to allocate this... */
++      if (dev_priv->card_type >= NV_40)
++              fb_size -= dev_priv->ramin_rsvd_vram;
++      dev_priv->fb_available_size = fb_size;
++      DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);
++
++      if (fb_size>256*1024*1024) {
++              /* On cards with > 256Mb, you can't map everything.
++               * So we create a second FB heap for that type of memory */
++              if (nouveau_mem_init_heap(&dev_priv->fb_heap,
++                                        0, 256*1024*1024))
++                      return -ENOMEM;
++              if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
++                                        256*1024*1024, fb_size-256*1024*1024))
++                      return -ENOMEM;
++      } else {
++              if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
++                      return -ENOMEM;
++              dev_priv->fb_nomap_heap=NULL;
++      }
++
++#if !defined(__powerpc__) && !defined(__ia64__)
++      /* Init AGP / NV50 PCIEGART */
++      if (drm_device_is_agp(dev) && dev->agp) {
++              if ((ret = nouveau_mem_init_agp(dev, 0)))
++                      DRM_ERROR("Error initialising AGP: %d\n", ret);
++      }
++#endif
++
++      /*Note: this is *not* just NV50 code, but only used on NV50 for now */
++      if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
++          dev_priv->card_type >= NV_50) {
++              ret = nouveau_sgdma_init(dev);
++              if (!ret) {
++                      ret = nouveau_sgdma_nottm_hack_init(dev);
++                      if (ret)
++                              nouveau_sgdma_takedown(dev);
++              }
++
++              if (ret)
++                      DRM_ERROR("Error initialising SG DMA: %d\n", ret);
++      }
++
++      if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
++              if (nouveau_mem_init_heap(&dev_priv->agp_heap,
++                                        0, dev_priv->gart_info.aper_size)) {
++                      if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
++                              nouveau_sgdma_nottm_hack_takedown(dev);
++                              nouveau_sgdma_takedown(dev);
++                      }
++              }
++      }
++
++      /* NV04-NV40 PCIEGART */
++      if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
++              struct drm_scatter_gather sgreq;
++
++              DRM_DEBUG("Allocating sg memory for PCI DMA\n");
++              sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone
++
++              if (drm_sg_alloc(dev, &sgreq)) {
++                      DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
++                                " pages for PCI DMA!",sgreq.size>>20);
++              } else {
++                      if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
++                                                dev->sg->pages * PAGE_SIZE)) {
++                              DRM_ERROR("Unable to initialize pci_heap!");
++                      }
++              }
++      }
++
++      /* G8x: Allocate shared page table to map real VRAM pages into */
++      if (dev_priv->card_type >= NV_50) {
++              unsigned size = ((512 * 1024 * 1024) / 65536) * 8;
++
++              ret = nouveau_gpuobj_new(dev, NULL, size, 0,
++                                       NVOBJ_FLAG_ZERO_ALLOC |
++                                       NVOBJ_FLAG_ALLOW_NO_REFS,
++                                       &dev_priv->vm_vram_pt);
++              if (ret) {
++                      DRM_ERROR("Error creating VRAM page table: %d\n", ret);
++                      return ret;
++              }
++      }
++
++
++      return 0;
++}
++
++struct mem_block *
++nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size,
++                int flags, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct mem_block *block;
++      int type, tail = !(flags & NOUVEAU_MEM_USER);
++
++      /*
++       * Make things easier on ourselves: all allocations are page-aligned.
++       * We need that to map allocated regions into the user space
++       */
++      if (alignment < PAGE_SHIFT)
++              alignment = PAGE_SHIFT;
++
++      /* Align allocation sizes to 64KiB blocks on G8x.  We use a 64KiB
++       * page size in the GPU VM.
++       */
++      if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
++              size = (size + 65535) & ~65535;
++              if (alignment < 16)
++                      alignment = 16;
++      }
++
++      /*
++       * Warn about 0 sized allocations, but let it go through. It'll return 1 page
++       */
++      if (size == 0)
++              DRM_INFO("warning : 0 byte allocation\n");
++
++      /*
++       * Keep alloc size a multiple of the page size to keep drm_addmap() happy
++       */
++      if (size & (~PAGE_MASK))
++              size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
++
++
++#define NOUVEAU_MEM_ALLOC_AGP {\
++              type=NOUVEAU_MEM_AGP;\
++                block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
++                                                alignment, file_priv, tail); \
++                if (block) goto alloc_ok;\
++              }
++
++#define NOUVEAU_MEM_ALLOC_PCI {\
++                type = NOUVEAU_MEM_PCI;\
++                block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
++                                              alignment, file_priv, tail); \
++                if ( block ) goto alloc_ok;\
++              }
++
++#define NOUVEAU_MEM_ALLOC_FB {\
++                type=NOUVEAU_MEM_FB;\
++                if (!(flags&NOUVEAU_MEM_MAPPED)) {\
++                        block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
++                                                        size, alignment, \
++                                                      file_priv, tail); \
++                        if (block) goto alloc_ok;\
++                }\
++                block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
++                                                alignment, file_priv, tail);\
++                if (block) goto alloc_ok;\
++              }
++
++
++      if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
++      if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
++      if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
++      if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
++      if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
++      if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI
++
++
++      return NULL;
++
++alloc_ok:
++      block->flags=type;
++
++      /* On G8x, map memory into VM */
++      if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
++          !(flags & NOUVEAU_MEM_NOVM)) {
++              struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
++              unsigned offset = block->start;
++              unsigned count = block->size / 65536;
++              unsigned tile = 0;
++
++              if (!pt) {
++                      DRM_ERROR("vm alloc without vm pt\n");
++                      nouveau_mem_free_block(block);
++                      return NULL;
++              }
++
++              /* The tiling stuff is *not* what NVIDIA does - but both the
++               * 2D and 3D engines seem happy with this simpler method.
++               * Should look into why NVIDIA do what they do at some point.
++               */
++              if (flags & NOUVEAU_MEM_TILE) {
++                      if (flags & NOUVEAU_MEM_TILE_ZETA)
++                              tile = 0x00002800;
++                      else
++                              tile = 0x00007000;
++              }
++
++              while (count--) {
++                      unsigned pte = offset / 65536;
++
++                      INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
++                      INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
++                      offset += 65536;
++              }
++      } else {
++              block->flags |= NOUVEAU_MEM_NOVM;
++      }       
++
++      if (flags&NOUVEAU_MEM_MAPPED)
++      {
++              struct drm_map_list *entry;
++              int ret = 0;
++              block->flags|=NOUVEAU_MEM_MAPPED;
++
++              if (type == NOUVEAU_MEM_AGP) {
++                      if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
++                      ret = drm_addmap(dev, block->start, block->size,
++                                       _DRM_AGP, 0, &block->map);
++                      else
++                      ret = drm_addmap(dev, block->start, block->size,
++                                       _DRM_SCATTER_GATHER, 0, &block->map);
++              }
++              else if (type == NOUVEAU_MEM_FB)
++                      ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
++                                       block->size, _DRM_FRAME_BUFFER,
++                                       0, &block->map);
++              else if (type == NOUVEAU_MEM_PCI)
++                      ret = drm_addmap(dev, block->start, block->size,
++                                       _DRM_SCATTER_GATHER, 0, &block->map);
++
++              if (ret) {
++                      nouveau_mem_free_block(block);
++                      return NULL;
++              }
++
++              entry = drm_find_matching_map(dev, block->map);
++              if (!entry) {
++                      nouveau_mem_free_block(block);
++                      return NULL;
++              }
++              block->map_handle = entry->user_token;
++      }
++
++      DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags);
++      return block;
++}
++
++void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
++
++      if (block->flags&NOUVEAU_MEM_MAPPED)
++              drm_rmmap(dev, block->map);
++
++      /* G8x: Remove pages from vm */
++      if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
++          !(block->flags & NOUVEAU_MEM_NOVM)) {
++              struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
++              unsigned offset = block->start;
++              unsigned count = block->size / 65536;
++
++              if (!pt) {
++                      DRM_ERROR("vm free without vm pt\n");
++                      goto out_free;
++              }
++
++              while (count--) {
++                      unsigned pte = offset / 65536;
++                      INSTANCE_WR(pt, (pte * 2) + 0, 0);
++                      INSTANCE_WR(pt, (pte * 2) + 1, 0);
++                      offset += 65536;
++              }
++      }
++
++out_free:
++      nouveau_mem_free_block(block);
++}
++
++/*
++ * Ioctls
++ */
++
++int
++nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_mem_alloc *alloc = data;
++      struct mem_block *block;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (alloc->flags & NOUVEAU_MEM_INTERNAL)
++              return -EINVAL;
++
++      block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
++                                alloc->flags | NOUVEAU_MEM_USER, file_priv);
++      if (!block)
++              return -ENOMEM;
++      alloc->map_handle=block->map_handle;
++      alloc->offset=block->start;
++      alloc->flags=block->flags;
++
++      if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
++              alloc->offset += 512*1024*1024;
++
++      return 0;
++}
++
++int
++nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_mem_free *memfree = data;
++      struct mem_block *block;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
++              memfree->offset -= 512*1024*1024;
++
++      block=NULL;
++      if (memfree->flags & NOUVEAU_MEM_FB)
++              block = find_block(dev_priv->fb_heap, memfree->offset);
++      else if (memfree->flags & NOUVEAU_MEM_AGP)
++              block = find_block(dev_priv->agp_heap, memfree->offset);
++      else if (memfree->flags & NOUVEAU_MEM_PCI)
++              block = find_block(dev_priv->pci_heap, memfree->offset);
++      if (!block)
++              return -EFAULT;
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      nouveau_mem_free(dev, block);
++      return 0;
++}
++
++int
++nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
++                     struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_mem_tile *memtile = data;
++      struct mem_block *block = NULL;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      if (dev_priv->card_type < NV_50)
++              return -EINVAL;
++      
++      if (memtile->flags & NOUVEAU_MEM_FB) {
++              memtile->offset -= 512*1024*1024;
++              block = find_block(dev_priv->fb_heap, memtile->offset);
++      }
++
++      if (!block)
++              return -EINVAL;
++
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      {
++              struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
++              unsigned offset = block->start + memtile->delta;
++              unsigned count = memtile->size / 65536;
++              unsigned tile = 0;
++
++              if (memtile->flags & NOUVEAU_MEM_TILE) {
++                      if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
++                              tile = 0x00002800;
++                      else
++                              tile = 0x00007000;
++              }
++
++              while (count--) {
++                      unsigned pte = offset / 65536;
++
++                      INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
++                      INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
++                      offset += 65536;
++              }
++      }
++
++      return 0;
++}
++
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_notifier.c git-nokia/drivers/gpu/drm-tungsten/nouveau_notifier.c
+--- git/drivers/gpu/drm-tungsten/nouveau_notifier.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_notifier.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,165 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++int
++nouveau_notifier_init_channel(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      int flags, ret;
++
++      flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED |
++               NOUVEAU_MEM_FB_ACCEPTABLE);
++
++      chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
++                                               (struct drm_file *)-2);
++      if (!chan->notifier_block)
++              return -ENOMEM;
++      DRM_DEBUG("Allocated notifier block in 0x%08x\n",
++                chan->notifier_block->flags);
++
++      ret = nouveau_mem_init_heap(&chan->notifier_heap,
++                                  0, chan->notifier_block->size);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++void
++nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++
++      if (chan->notifier_block) {
++              nouveau_mem_free(dev, chan->notifier_block);
++              chan->notifier_block = NULL;
++      }
++
++      nouveau_mem_takedown(&chan->notifier_heap);
++}
++
++static void
++nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
++                           struct nouveau_gpuobj *gpuobj)
++{
++      DRM_DEBUG("\n");
++
++      if (gpuobj->priv)
++              nouveau_mem_free_block(gpuobj->priv);
++}
++
++int
++nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
++                     int count, uint32_t *b_offset)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *nobj = NULL;
++      struct mem_block *mem;
++      uint32_t offset;
++      int target, ret;
++
++      if (!chan->notifier_heap) {
++              DRM_ERROR("Channel %d doesn't have a notifier heap!\n",
++                        chan->id);
++              return -EINVAL;
++      }
++
++      mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
++                                    (struct drm_file *)-2, 0);
++      if (!mem) {
++              DRM_ERROR("Channel %d notifier block full\n", chan->id);
++              return -ENOMEM;
++      }
++      mem->flags = NOUVEAU_MEM_NOTIFIER;
++
++      offset = chan->notifier_block->start;
++      if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
++              target = NV_DMA_TARGET_VIDMEM;
++      } else
++      if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
++              if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
++                  dev_priv->card_type < NV_50) {
++                      ret = nouveau_sgdma_get_page(dev, offset, &offset);
++                      if (ret)
++                              return ret;
++                      target = NV_DMA_TARGET_PCI;
++              } else {
++                      target = NV_DMA_TARGET_AGP;
++              }
++      } else
++      if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
++              target = NV_DMA_TARGET_PCI_NONLINEAR;
++      } else {
++              DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
++                        chan->notifier_block->flags);
++              return -EINVAL;
++      }
++      offset += mem->start;
++
++      if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                        offset, mem->size,
++                                        NV_DMA_ACCESS_RW, target, &nobj))) {
++              nouveau_mem_free_block(mem);
++              DRM_ERROR("Error creating notifier ctxdma: %d\n", ret);
++              return ret;
++      }
++      nobj->dtor   = nouveau_notifier_gpuobj_dtor;
++      nobj->priv   = mem;
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) {
++              nouveau_gpuobj_del(dev, &nobj);
++              nouveau_mem_free_block(mem);
++              DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      *b_offset = mem->start;
++      return 0;
++}
++
++int
++nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
++                           struct drm_file *file_priv)
++{
++      struct drm_nouveau_notifierobj_alloc *na = data;
++      struct nouveau_channel *chan;
++      int ret;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
++
++      ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset);
++      if (ret)
++              return ret;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_object.c git-nokia/drivers/gpu/drm-tungsten/nouveau_object.c
+--- git/drivers/gpu/drm-tungsten/nouveau_object.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_object.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1178 @@
++/*
++ * Copyright (C) 2006 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Ben Skeggs <darktama@iinet.net.au>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++/* NVidia uses context objects to drive drawing operations.
++
++   Context objects can be selected into 8 subchannels in the FIFO,
++   and then used via DMA command buffers.
++
++   A context object is referenced by a user defined handle (CARD32). The HW
++   looks up graphics objects in a hash table in the instance RAM.
++
++   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
++   the handle, the second one a bitfield, that contains the address of the
++   object in instance RAM.
++
++   The format of the second CARD32 seems to be:
++
++   NV4 to NV30:
++
++   15: 0  instance_addr >> 4
++   17:16  engine (here uses 1 = graphics)
++   28:24  channel id (here uses 0)
++   31   valid (use 1)
++
++   NV40:
++
++   15: 0  instance_addr >> 4   (maybe 19-0)
++   21:20  engine (here uses 1 = graphics)
++   I'm unsure about the other bits, but using 0 seems to work.
++
++   The key into the hash table depends on the object handle and channel id and
++   is given as:
++*/
++static uint32_t
++nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      uint32_t hash = 0;
++      int i;
++
++      DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle);
++
++      for (i=32;i>0;i-=dev_priv->ramht_bits) {
++              hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
++              handle >>= dev_priv->ramht_bits;
++      }
++      if (dev_priv->card_type < NV_50)
++              hash ^= channel << (dev_priv->ramht_bits - 4);
++      hash <<= 3;
++
++      DRM_DEBUG("hash=0x%08x\n", hash);
++      return hash;
++}
++
++static int
++nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
++                        uint32_t offset)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4);
++
++      if (dev_priv->card_type < NV_40)
++              return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
++      return (ctx != 0);
++}
++
++static int
++nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
++      struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
++      struct nouveau_gpuobj *gpuobj = ref->gpuobj;
++      uint32_t ctx, co, ho;
++
++      if (!ramht) {
++              DRM_ERROR("No hash table!\n");
++              return -EINVAL;
++      }
++
++      if (dev_priv->card_type < NV_40) {
++              ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
++                    (ref->channel   << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++                    (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
++      } else
++      if (dev_priv->card_type < NV_50) {
++              ctx = (ref->instance >> 4) |
++                    (ref->channel   << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++                    (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
++      } else {
++              ctx = (ref->instance  >> 4) |
++                    (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
++      }
++
++      co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
++      do {
++              if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
++                      DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++                                ref->channel, co, ref->handle, ctx);
++                      INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
++                      INSTANCE_WR(ramht, (co + 4)/4, ctx);
++
++                      list_add_tail(&ref->list, &chan->ramht_refs);
++                      return 0;
++              }
++              DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
++                        ref->channel, co, INSTANCE_RD(ramht, co/4));
++
++              co += 8;
++              if (co >= dev_priv->ramht_size) {
++                      DRM_INFO("no space left after collision\n");
++                      co = 0;
++                      /* exit as it seems to cause crash with nouveau_demo and
++                       * 0xdead0001 object */
++                      break;
++              }
++      } while (co != ho);
++
++      DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
++      return -ENOMEM;
++}
++
++static void
++nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
++      struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
++      uint32_t co, ho;
++
++      if (!ramht) {
++              DRM_ERROR("No hash table!\n");
++              return;
++      }
++
++      co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
++      do {
++              if (nouveau_ramht_entry_valid(dev, ramht, co) &&
++                  (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
++                      DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++                                ref->channel, co, ref->handle,
++                                INSTANCE_RD(ramht, (co + 4)));
++                      INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
++                      INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
++
++                      list_del(&ref->list);
++                      return;
++              }
++
++              co += 8;
++              if (co >= dev_priv->ramht_size)
++                      co = 0;
++      } while (co != ho);
++
++      DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
++                ref->channel, ref->handle);
++}
++
++int
++nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
++                 int size, int align, uint32_t flags,
++                 struct nouveau_gpuobj **gpuobj_ret)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_gpuobj *gpuobj;
++      struct mem_block *pramin = NULL;
++      int ret;
++
++      DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
++                chan ? chan->id : -1, size, align, flags);
++
++      if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
++              return -EINVAL;
++
++      gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
++      if (!gpuobj)
++              return -ENOMEM;
++      DRM_DEBUG("gpuobj %p\n", gpuobj);
++      gpuobj->flags = flags;
++      gpuobj->im_channel = chan ? chan->id : -1;
++
++      list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++      /* Choose between global instmem heap, and per-channel private
++       * instmem heap.  On <NV50 allow requests for private instmem
++       * to be satisfied from global heap if no per-channel area
++       * available.
++       */
++      if (chan) {
++              if (chan->ramin_heap) {
++                      DRM_DEBUG("private heap\n");
++                      pramin = chan->ramin_heap;
++              } else
++              if (dev_priv->card_type < NV_50) {
++                      DRM_DEBUG("global heap fallback\n");
++                      pramin = dev_priv->ramin_heap;
++              }
++      } else {
++              DRM_DEBUG("global heap\n");
++              pramin = dev_priv->ramin_heap;
++      }
++
++      if (!pramin) {
++              DRM_ERROR("No PRAMIN heap!\n");
++              return -EINVAL;
++      }
++
++      if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return ret;
++      }
++
++      /* Allocate a chunk of the PRAMIN aperture */
++      gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
++                                                  drm_order(align),
++                                                  (struct drm_file *)-2, 0);
++      if (!gpuobj->im_pramin) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return -ENOMEM;
++      }
++      gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
++
++      if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return ret;
++      }
++
++      if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++              int i;
++
++              for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++                      INSTANCE_WR(gpuobj, i/4, 0);
++      }
++
++      *gpuobj_ret = gpuobj;
++      return 0;
++}
++
++int
++nouveau_gpuobj_early_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      INIT_LIST_HEAD(&dev_priv->gpuobj_list);
++
++      return 0;
++}
++
++int
++nouveau_gpuobj_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      if (dev_priv->card_type < NV_50) {
++              if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
++                                                 ~0, dev_priv->ramht_size,
++                                                 NVOBJ_FLAG_ZERO_ALLOC |
++                                                 NVOBJ_FLAG_ALLOW_NO_REFS,
++                                                 &dev_priv->ramht, NULL)))
++                      return ret;
++      }
++
++      return 0;
++}
++
++void
++nouveau_gpuobj_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      nouveau_gpuobj_del(dev, &dev_priv->ramht);
++}
++
++void
++nouveau_gpuobj_late_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      struct list_head *entry, *tmp;
++
++      DRM_DEBUG("\n");
++
++      list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
++              gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
++
++              DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
++                        gpuobj, gpuobj->refcount);
++              gpuobj->refcount = 0;
++              nouveau_gpuobj_del(dev, &gpuobj);
++      }
++}
++
++int
++nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_gpuobj *gpuobj;
++
++      DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
++
++      if (!dev_priv || !pgpuobj || !(*pgpuobj))
++              return -EINVAL;
++      gpuobj = *pgpuobj;
++
++      if (gpuobj->refcount != 0) {
++              DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
++              return -EINVAL;
++      }
++
++      if (gpuobj->dtor)
++              gpuobj->dtor(dev, gpuobj);
++
++      if (gpuobj->im_backing) {
++              if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++                      drm_free(gpuobj->im_backing,
++                               sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
++              else
++                      engine->instmem.clear(dev, gpuobj);
++      }
++
++      if (gpuobj->im_pramin) {
++              if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++                      drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
++                               DRM_MEM_DRIVER);
++              else
++                      nouveau_mem_free_block(gpuobj->im_pramin);
++      }
++
++      list_del(&gpuobj->list);
++
++      *pgpuobj = NULL;
++      drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
++      return 0;
++}
++
++static int
++nouveau_gpuobj_instance_get(struct drm_device *dev,
++                          struct nouveau_channel *chan,
++                          struct nouveau_gpuobj *gpuobj, uint32_t *inst)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *cpramin;
++
++      /* <NV50 use PRAMIN address everywhere */
++      if (dev_priv->card_type < NV_50) {
++              *inst = gpuobj->im_pramin->start;
++              return 0;
++      }
++
++      if (chan && gpuobj->im_channel != chan->id) {
++              DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
++                        gpuobj->im_channel, chan->id);
++              return -EINVAL;
++      }
++
++      /* NV50 channel-local instance */
++      if (chan > 0) {
++              cpramin = chan->ramin->gpuobj;
++              *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
++              return 0;
++      }
++
++      /* NV50 global (VRAM) instance */
++      if (gpuobj->im_channel < 0) {
++              /* ...from global heap */
++              if (!gpuobj->im_backing) {
++                      DRM_ERROR("AII, no VRAM backing gpuobj\n");
++                      return -EINVAL;
++              }
++              *inst = gpuobj->im_backing->start;
++              return 0;
++      } else {
++              /* ...from local heap */
++              cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
++              *inst = cpramin->im_backing->start +
++                      (gpuobj->im_pramin->start - cpramin->im_pramin->start);
++              return 0;
++      }
++
++      return -EINVAL;
++}
++
++int
++nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
++                     uint32_t handle, struct nouveau_gpuobj *gpuobj,
++                     struct nouveau_gpuobj_ref **ref_ret)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj_ref *ref;
++      uint32_t instance;
++      int ret;
++
++      DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
++                chan ? chan->id : -1, handle, gpuobj);
++
++      if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
++              return -EINVAL;
++
++      if (!chan && !ref_ret)
++              return -EINVAL;
++
++      ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
++      if (ret)
++              return ret;
++
++      ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
++      if (!ref)
++              return -ENOMEM;
++      ref->gpuobj   = gpuobj;
++      ref->channel  = chan ? chan->id : -1;
++      ref->instance = instance;
++
++      if (!ref_ret) {
++              ref->handle = handle;
++
++              ret = nouveau_ramht_insert(dev, ref);
++              if (ret) {
++                      drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
++                      return ret;
++              }
++      } else {
++              ref->handle = ~0;
++              *ref_ret = ref;
++      }
++
++      ref->gpuobj->refcount++;
++      return 0;
++}
++
++int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
++{
++      struct nouveau_gpuobj_ref *ref;
++
++      DRM_DEBUG("ref %p\n", pref ? *pref : NULL);
++
++      if (!dev || !pref || *pref == NULL)
++              return -EINVAL;
++      ref = *pref;
++
++      if (ref->handle != ~0)
++              nouveau_ramht_remove(dev, ref);
++
++      if (ref->gpuobj) {
++              ref->gpuobj->refcount--;
++
++              if (ref->gpuobj->refcount == 0) {
++                      if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
++                              nouveau_gpuobj_del(dev, &ref->gpuobj);
++              }
++      }
++
++      *pref = NULL;
++      drm_free(ref, sizeof(ref), DRM_MEM_DRIVER);
++      return 0;
++}
++
++int
++nouveau_gpuobj_new_ref(struct drm_device *dev,
++                     struct nouveau_channel *oc, struct nouveau_channel *rc,
++                     uint32_t handle, int size, int align, uint32_t flags,
++                     struct nouveau_gpuobj_ref **ref)
++{
++      struct nouveau_gpuobj *gpuobj = NULL;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
++              return ret;
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
++              nouveau_gpuobj_del(dev, &gpuobj);
++              return ret;
++      }
++
++      return 0;
++}
++
++int
++nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
++                      struct nouveau_gpuobj_ref **ref_ret)
++{
++      struct nouveau_gpuobj_ref *ref;
++      struct list_head *entry, *tmp;
++
++      list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++              ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++              if (ref->handle == handle) {
++                      if (ref_ret)
++                              *ref_ret = ref;
++                      return 0;
++              }
++      }
++
++      return -EINVAL;
++}
++
++int
++nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
++                      uint32_t b_offset, uint32_t size,
++                      uint32_t flags, struct nouveau_gpuobj **pgpuobj,
++                      struct nouveau_gpuobj_ref **pref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      int i;
++
++      DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
++                p_offset, b_offset, size, flags);
++
++      gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
++      if (!gpuobj)
++              return -ENOMEM;
++      DRM_DEBUG("gpuobj %p\n", gpuobj);
++      gpuobj->im_channel = -1;
++      gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;
++
++      list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++      if (p_offset != ~0) {
++              gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
++                                             DRM_MEM_DRIVER);
++              if (!gpuobj->im_pramin) {
++                      nouveau_gpuobj_del(dev, &gpuobj);
++                      return -ENOMEM;
++              }
++              gpuobj->im_pramin->start = p_offset;
++              gpuobj->im_pramin->size  = size;
++      }
++
++      if (b_offset != ~0) {
++              gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
++                                             DRM_MEM_DRIVER);
++              if (!gpuobj->im_backing) {
++                      nouveau_gpuobj_del(dev, &gpuobj);
++                      return -ENOMEM;
++              }
++              gpuobj->im_backing->start = b_offset;
++              gpuobj->im_backing->size  = size;
++      }
++
++      if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++              for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++                      INSTANCE_WR(gpuobj, i/4, 0);
++      }
++
++      if (pref) {
++              if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) {
++                      nouveau_gpuobj_del(dev, &gpuobj);
++                      return i;
++              }
++      }
++
++      if (pgpuobj)
++              *pgpuobj = gpuobj;
++      return 0;
++}
++
++
++static int
++nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /*XXX: dodgy hack for now */
++      if (dev_priv->card_type >= NV_50)
++              return 24;
++      if (dev_priv->card_type >= NV_40)
++              return 32;
++      return 16;
++}
++
++/*
++   DMA objects are used to reference a piece of memory in the
++   framebuffer, PCI or AGP address space. Each object is 16 bytes big
++   and looks as follows:
++
++   entry[0]
++   11:0  class (seems like I can always use 0 here)
++   12    page table present?
++   13    page entry linear?
++   15:14 access: 0 rw, 1 ro, 2 wo
++   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
++   31:20 dma adjust (bits 0-11 of the address)
++   entry[1]
++   dma limit (size of transfer)
++   entry[X]
++   1     0 readonly, 1 readwrite
++   31:12 dma frame address of the page (bits 12-31 of the address)
++   entry[N]
++   page table terminator, same value as the first pte, as does nvidia
++   rivatv uses 0xffffffff
++
++   Non linear page tables need a list of frame addresses afterwards,
++   the rivatv project has some info on this.
++
++   The method below creates a DMA object in instance RAM and returns a handle
++   to it that can be used to set up context objects.
++*/
++int
++nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
++                     uint64_t offset, uint64_t size, int access,
++                     int target, struct nouveau_gpuobj **gpuobj)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++      uint32_t is_scatter_gather = 0;
++
++      /* Total number of pages covered by the request.
++       */
++      const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
++
++
++      DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
++                chan->id, class, offset, size);
++      DRM_DEBUG("access=%d target=%d\n", access, target);
++
++      switch (target) {
++        case NV_DMA_TARGET_AGP:
++                 offset += dev_priv->gart_info.aper_base;
++                 break;
++        case NV_DMA_TARGET_PCI_NONLINEAR:
++                /*assume the "offset" is a virtual memory address*/
++                is_scatter_gather = 1;
++                /*put back the right value*/
++                target = NV_DMA_TARGET_PCI;
++                break;
++        default:
++                break;
++        }
++
++      ret = nouveau_gpuobj_new(dev, chan,
++                               is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),
++                               16,
++                               NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
++                               gpuobj);
++      if (ret) {
++              DRM_ERROR("Error creating gpuobj: %d\n", ret);
++              return ret;
++      }
++
++      if (dev_priv->card_type < NV_50) {
++              uint32_t frame, adjust, pte_flags = 0;
++              adjust = offset &  0x00000fff;
++              if (access != NV_DMA_ACCESS_RO)
++                              pte_flags |= (1<<1);
++
++              if ( ! is_scatter_gather )
++                      {
++                      frame  = offset & ~0x00000fff;
++
++                      INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
++                                      (adjust << 20) |
++                                       (access << 14) |
++                                       (target << 16) |
++                                        class));
++                      INSTANCE_WR(*gpuobj, 1, size - 1);
++                      INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
++                      INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
++                      }
++              else
++                      {
++                      /* Intial page entry in the scatter-gather area that
++                       * corresponds to the base offset
++                       */
++                      unsigned int idx = offset / PAGE_SIZE;
++
++                      uint32_t instance_offset;
++                      unsigned int i;
++
++                      if ((idx + page_count) > dev->sg->pages) {
++                              DRM_ERROR("Requested page range exceedes "
++                                        "allocated scatter-gather range!");
++                              return -E2BIG;
++                      }
++
++                      DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size);
++                      INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
++                                (adjust << 20) |
++                                (access << 14) |
++                                (target << 16) |
++                                class));
++                      INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1);
++
++
++                      /*write starting at the third dword*/
++                      instance_offset = 2;
++
++                      /*for each PAGE, get its bus address, fill in the page table entry, and advance*/
++                      for (i = 0; i < page_count; i++) {
++                              if (dev->sg->busaddr[idx] == 0) {
++                                      dev->sg->busaddr[idx] =
++                                              pci_map_page(dev->pdev,
++                                                           dev->sg->pagelist[idx],
++                                                           0,
++                                                           PAGE_SIZE,
++                                                           DMA_BIDIRECTIONAL);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++                                      /* Not a 100% sure this is the right kdev in all cases. */
++                                      if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) {
++#else
++                                      if (dma_mapping_error(dev->sg->busaddr[idx])) {
++#endif
++                                              return -ENOMEM;
++                                      }
++                              }
++
++                              frame = (uint32_t) dev->sg->busaddr[idx];
++                              INSTANCE_WR(*gpuobj, instance_offset,
++                                          frame | pte_flags);
++
++                              idx++;
++                              instance_offset ++;
++                      }
++                      }
++      } else {
++              uint32_t flags0, flags5;
++
++              if (target == NV_DMA_TARGET_VIDMEM) {
++                      flags0 = 0x00190000;
++                      flags5 = 0x00010000;
++              } else {
++                      flags0 = 0x7fc00000;
++                      flags5 = 0x00080000;
++              }
++
++              INSTANCE_WR(*gpuobj, 0, flags0 | class);
++              INSTANCE_WR(*gpuobj, 1, offset + size - 1);
++              INSTANCE_WR(*gpuobj, 2, offset);
++              INSTANCE_WR(*gpuobj, 5, flags5);
++      }
++
++      (*gpuobj)->engine = NVOBJ_ENGINE_SW;
++      (*gpuobj)->class  = class;
++      return 0;
++}
++
++int
++nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
++                          uint64_t offset, uint64_t size, int access,
++                          struct nouveau_gpuobj **gpuobj,
++                          uint32_t *o_ret)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
++          (dev_priv->card_type >= NV_50 &&
++           dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           offset, size, access,
++                                           NV_DMA_TARGET_AGP, gpuobj);
++              if (o_ret)
++                      *o_ret = 0;
++      } else
++      if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
++              *gpuobj = dev_priv->gart_info.sg_ctxdma;
++              if (offset & ~0xffffffffULL) {
++                      DRM_ERROR("obj offset exceeds 32-bits\n");
++                      return -EINVAL;
++              }
++              if (o_ret)
++                      *o_ret = (uint32_t)offset;
++              ret = (*gpuobj != NULL) ? 0 : -EINVAL;
++      } else {
++              DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
++              return -EINVAL;
++      }
++
++      return ret;
++}
++
++/* Context objects in the instance RAM have the following structure.
++ * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
++
++   NV4 - NV30:
++
++   entry[0]
++   11:0 class
++   12   chroma key enable
++   13   user clip enable
++   14   swizzle enable
++   17:15 patch config:
++       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
++   18   synchronize enable
++   19   endian: 1 big, 0 little
++   21:20 dither mode
++   23    single step enable
++   24    patch status: 0 invalid, 1 valid
++   25    context_surface 0: 1 valid
++   26    context surface 1: 1 valid
++   27    context pattern: 1 valid
++   28    context rop: 1 valid
++   29,30 context beta, beta4
++   entry[1]
++   7:0   mono format
++   15:8  color format
++   31:16 notify instance address
++   entry[2]
++   15:0  dma 0 instance address
++   31:16 dma 1 instance address
++   entry[3]
++   dma method traps
++
++   NV40:
++   No idea what the exact format is. Here's what can be deducted:
++
++   entry[0]:
++   11:0  class  (maybe uses more bits here?)
++   17    user clip enable
++   21:19 patch config
++   25    patch status valid ?
++   entry[1]:
++   15:0  DMA notifier  (maybe 20:0)
++   entry[2]:
++   15:0  DMA 0 instance (maybe 20:0)
++   24    big endian
++   entry[3]:
++   15:0  DMA 1 instance (maybe 20:0)
++   entry[4]:
++   entry[5]:
++   set to 0?
++*/
++int
++nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
++                    struct nouveau_gpuobj **gpuobj)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class);
++
++      ret = nouveau_gpuobj_new(dev, chan,
++                               nouveau_gpuobj_class_instmem_size(dev, class),
++                               16,
++                               NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
++                               gpuobj);
++      if (ret) {
++              DRM_ERROR("Error creating gpuobj: %d\n", ret);
++              return ret;
++      }
++
++      if (dev_priv->card_type >= NV_50) {
++              INSTANCE_WR(*gpuobj, 0, class);
++              INSTANCE_WR(*gpuobj, 5, 0x00010000);
++      } else {
++      switch (class) {
++      case NV_CLASS_NULL:
++              INSTANCE_WR(*gpuobj, 0, 0x00001030);
++              INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF);
++              break;
++      default:
++              if (dev_priv->card_type >= NV_40) {
++                      INSTANCE_WR(*gpuobj, 0, class);
++#ifdef __BIG_ENDIAN
++                      INSTANCE_WR(*gpuobj, 2, 0x01000000);
++#endif
++              } else {
++#ifdef __BIG_ENDIAN
++                      INSTANCE_WR(*gpuobj, 0, class | 0x00080000);
++#else
++                      INSTANCE_WR(*gpuobj, 0, class);
++#endif
++              }
++      }
++      }
++
++      (*gpuobj)->engine = NVOBJ_ENGINE_GR;
++      (*gpuobj)->class  = class;
++      return 0;
++}
++
++static int
++nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *pramin = NULL;
++      int size, base, ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      /* Base amount for object storage (4KiB enough?) */
++      size = 0x1000;
++      base = 0;
++
++      /* PGRAPH context */
++
++      if (dev_priv->card_type == NV_50) {
++              /* Various fixed table thingos */
++              size += 0x1400; /* mostly unknown stuff */
++              size += 0x4000; /* vm pd */
++              base  = 0x6000;
++              /* RAMHT, not sure about setting size yet, 32KiB to be safe */
++              size += 0x8000;
++              /* RAMFC */
++              size += 0x1000;
++              /* PGRAPH context */
++              size += 0x70000;
++      }
++
++      DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
++                chan->id, size, base);
++      ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
++                                   &chan->ramin);
++      if (ret) {
++              DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
++              return ret;
++      }
++      pramin = chan->ramin->gpuobj;
++
++      ret = nouveau_mem_init_heap(&chan->ramin_heap,
++                                  pramin->im_pramin->start + base, size);
++      if (ret) {
++              DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
++              nouveau_gpuobj_ref_del(dev, &chan->ramin);
++              return ret;
++      }
++
++      return 0;
++}
++
++int
++nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
++                          uint32_t vram_h, uint32_t tt_h)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *vram = NULL, *tt = NULL;
++      int ret, i;
++
++      INIT_LIST_HEAD(&chan->ramht_refs);
++
++      DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
++
++      /* Reserve a block of PRAMIN for the channel
++       *XXX: maybe on <NV50 too at some point
++       */
++      if (0 || dev_priv->card_type == NV_50) {
++              ret = nouveau_gpuobj_channel_init_pramin(chan);
++              if (ret)
++                      return ret;
++      }
++
++      /* NV50 VM
++       *  - Allocate per-channel page-directory
++       *  - Point offset 0-512MiB at shared PCIEGART table
++       *  - Point offset 512-1024MiB at shared VRAM table
++       */
++      if (dev_priv->card_type >= NV_50) {
++              uint32_t vm_offset;
++
++              vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
++              vm_offset += chan->ramin->gpuobj->im_pramin->start;
++              if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
++                                                 0, &chan->vm_pd, NULL)))
++                      return ret;
++              for (i=0; i<0x4000; i+=8) {
++                      INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000);
++                      INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
++              }
++
++              if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
++                                                dev_priv->gart_info.sg_ctxdma,
++                                                &chan->vm_gart_pt)))
++                      return ret;
++              INSTANCE_WR(chan->vm_pd, (0+0)/4,
++                          chan->vm_gart_pt->instance | 0x03);
++              INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);
++
++              if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
++                                                dev_priv->vm_vram_pt,
++                                                &chan->vm_vram_pt)))
++                      return ret;
++              INSTANCE_WR(chan->vm_pd, (8+0)/4,
++                          chan->vm_vram_pt->instance | 0x61);
++              INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000);
++      }
++
++      /* RAMHT */
++      if (dev_priv->card_type < NV_50) {
++              ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
++                                           &chan->ramht);
++              if (ret)
++                      return ret;
++      } else {
++              ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
++                                           0x8000, 16,
++                                           NVOBJ_FLAG_ZERO_ALLOC,
++                                           &chan->ramht);
++              if (ret)
++                      return ret;
++      }
++
++      /* VRAM ctxdma */
++      if (dev_priv->card_type >= NV_50) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           0, 0x100000000ULL,
++                                           NV_DMA_ACCESS_RW,
++                                           NV_DMA_TARGET_AGP, &vram);
++              if (ret) {
++                      DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
++                      return ret;
++              }
++      } else
++      if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                        0, dev_priv->fb_available_size,
++                                        NV_DMA_ACCESS_RW,
++                                        NV_DMA_TARGET_VIDMEM, &vram))) {
++              DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) {
++              DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      /* TT memory ctxdma */
++      if (dev_priv->card_type >= NV_50) {
++              tt = vram;
++      } else
++      if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
++              ret = nouveau_gpuobj_gart_dma_new(chan, 0,
++                                                dev_priv->gart_info.aper_size,
++                                                NV_DMA_ACCESS_RW, &tt, NULL);
++      } else
++      if (dev_priv->pci_heap) {
++              ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++                                           0, dev->sg->pages * PAGE_SIZE,
++                                           NV_DMA_ACCESS_RW,
++                                           NV_DMA_TARGET_PCI_NONLINEAR, &tt);
++      } else {
++              DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
++              ret = -EINVAL;
++      }
++
++      if (ret) {
++              DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
++      if (ret) {
++              DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
++              return ret;
++      }
++
++      return 0;
++}
++
++void
++nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct list_head *entry, *tmp;
++      struct nouveau_gpuobj_ref *ref;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++              ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++              nouveau_gpuobj_ref_del(dev, &ref);
++      }
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramht);
++
++      nouveau_gpuobj_del(dev, &chan->vm_pd);
++      nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
++      nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt);
++
++      if (chan->ramin_heap)
++              nouveau_mem_takedown(&chan->ramin_heap);
++      if (chan->ramin)
++              nouveau_gpuobj_ref_del(dev, &chan->ramin);
++
++}
++
++int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++      struct nouveau_channel *chan;
++      struct drm_nouveau_grobj_alloc *init = data;
++      struct nouveau_gpuobj *gr = NULL;
++      int ret;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
++
++      //FIXME: check args, only allow trusted objects to be created
++
++      if (init->handle == ~0)
++              return -EINVAL;
++
++      if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
++              return -EEXIST;
++
++      ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
++      if (ret) {
++              DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
++                        ret, init->channel, init->handle);
++              return ret;
++      }
++
++      if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
++              DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)",
++                        ret, init->channel, init->handle);
++              nouveau_gpuobj_del(dev, &gr);
++              return ret;
++      }
++
++      return 0;
++}
++
++int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++      struct drm_nouveau_gpuobj_free *objfree = data;
++      struct nouveau_gpuobj_ref *ref;
++      struct nouveau_channel *chan;
++      int ret;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++      NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
++
++      if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
++              return ret;
++      nouveau_gpuobj_ref_del(dev, &ref);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_reg.h git-nokia/drivers/gpu/drm-tungsten/nouveau_reg.h
+--- git/drivers/gpu/drm-tungsten/nouveau_reg.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_reg.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,593 @@
++
++
++#define NV03_BOOT_0                                        0x00100000
++#    define NV03_BOOT_0_RAM_AMOUNT                         0x00000003
++#    define NV03_BOOT_0_RAM_AMOUNT_8MB                     0x00000000
++#    define NV03_BOOT_0_RAM_AMOUNT_2MB                     0x00000001
++#    define NV03_BOOT_0_RAM_AMOUNT_4MB                     0x00000002
++#    define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM               0x00000003
++#    define NV04_BOOT_0_RAM_AMOUNT_32MB                    0x00000000
++#    define NV04_BOOT_0_RAM_AMOUNT_4MB                     0x00000001
++#    define NV04_BOOT_0_RAM_AMOUNT_8MB                     0x00000002
++#    define NV04_BOOT_0_RAM_AMOUNT_16MB                    0x00000003
++
++#define NV04_FIFO_DATA                                     0x0010020c
++#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK              0xfff00000
++#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT             20
++
++#define NV_RAMIN                                           0x00700000
++
++#define NV_RAMHT_HANDLE_OFFSET                             0
++#define NV_RAMHT_CONTEXT_OFFSET                            4
++#    define NV_RAMHT_CONTEXT_VALID                         (1<<31)
++#    define NV_RAMHT_CONTEXT_CHANNEL_SHIFT                 24
++#    define NV_RAMHT_CONTEXT_ENGINE_SHIFT                  16
++#        define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE           0
++#        define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS           1
++#    define NV_RAMHT_CONTEXT_INSTANCE_SHIFT                0
++#    define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT               23
++#    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
++#    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
++
++/* DMA object defines */
++#define NV_DMA_ACCESS_RW 0
++#define NV_DMA_ACCESS_RO 1
++#define NV_DMA_ACCESS_WO 2
++#define NV_DMA_TARGET_VIDMEM 0
++#define NV_DMA_TARGET_PCI    2
++#define NV_DMA_TARGET_AGP    3
++/*The following is not a real value used by nvidia cards, it's changed by nouveau_object_dma_create*/
++#define NV_DMA_TARGET_PCI_NONLINEAR   8
++
++/* Some object classes we care about in the drm */
++#define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
++#define NV_CLASS_DMA_TO_MEMORY                             0x00000003
++#define NV_CLASS_NULL                                      0x00000030
++#define NV_CLASS_DMA_IN_MEMORY                             0x0000003D
++
++#define NV03_USER(i)                             (0x00800000+(i*NV03_USER_SIZE))
++#define NV03_USER__SIZE                                                       16
++#define NV10_USER__SIZE                                                       32
++#define NV03_USER_SIZE                                                0x00010000
++#define NV03_USER_DMA_PUT(i)                     (0x00800040+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_PUT__SIZE                                               16
++#define NV10_USER_DMA_PUT__SIZE                                               32
++#define NV03_USER_DMA_GET(i)                     (0x00800044+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_GET__SIZE                                               16
++#define NV10_USER_DMA_GET__SIZE                                               32
++#define NV03_USER_REF_CNT(i)                     (0x00800048+(i*NV03_USER_SIZE))
++#define NV03_USER_REF_CNT__SIZE                                               16
++#define NV10_USER_REF_CNT__SIZE                                               32
++
++#define NV40_USER(i)                             (0x00c00000+(i*NV40_USER_SIZE))
++#define NV40_USER_SIZE                                                0x00001000
++#define NV40_USER_DMA_PUT(i)                     (0x00c00040+(i*NV40_USER_SIZE))
++#define NV40_USER_DMA_PUT__SIZE                                               32
++#define NV40_USER_DMA_GET(i)                     (0x00c00044+(i*NV40_USER_SIZE))
++#define NV40_USER_DMA_GET__SIZE                                               32
++#define NV40_USER_REF_CNT(i)                     (0x00c00048+(i*NV40_USER_SIZE))
++#define NV40_USER_REF_CNT__SIZE                                               32
++
++#define NV50_USER(i)                             (0x00c00000+(i*NV50_USER_SIZE))
++#define NV50_USER_SIZE                                                0x00002000
++#define NV50_USER_DMA_PUT(i)                     (0x00c00040+(i*NV50_USER_SIZE))
++#define NV50_USER_DMA_PUT__SIZE                                              128
++#define NV50_USER_DMA_GET(i)                     (0x00c00044+(i*NV50_USER_SIZE))
++#define NV50_USER_DMA_GET__SIZE                                              128
++/*XXX: I don't think this actually exists.. */
++#define NV50_USER_REF_CNT(i)                     (0x00c00048+(i*NV50_USER_SIZE))
++#define NV50_USER_REF_CNT__SIZE                                              128
++
++#define NV03_FIFO_SIZE                                     0x8000UL
++
++#define NV03_PMC_BOOT_0                                    0x00000000
++#define NV03_PMC_BOOT_1                                    0x00000004
++#define NV03_PMC_INTR_0                                    0x00000100
++#    define NV_PMC_INTR_0_PFIFO_PENDING                       (1<< 8)
++#    define NV_PMC_INTR_0_PGRAPH_PENDING                      (1<<12)
++#    define NV_PMC_INTR_0_NV50_I2C_PENDING                  (1<<21)
++#    define NV_PMC_INTR_0_CRTC0_PENDING                       (1<<24)
++#    define NV_PMC_INTR_0_CRTC1_PENDING                       (1<<25)
++#    define NV_PMC_INTR_0_NV50_DISPLAY_PENDING           (1<<26)
++#    define NV_PMC_INTR_0_CRTCn_PENDING                       (3<<24)
++#define NV03_PMC_INTR_EN_0                                 0x00000140
++#    define NV_PMC_INTR_EN_0_MASTER_ENABLE                    (1<< 0)
++#define NV03_PMC_ENABLE                                    0x00000200
++#    define NV_PMC_ENABLE_PFIFO                               (1<< 8)
++#    define NV_PMC_ENABLE_PGRAPH                              (1<<12)
++/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
++ * the card will hang early on in the X init process.
++ */
++#    define NV_PMC_ENABLE_UNK13                               (1<<13)
++#define NV40_PMC_1700                                      0x00001700
++#define NV40_PMC_1704                                      0x00001704
++#define NV40_PMC_1708                                      0x00001708
++#define NV40_PMC_170C                                      0x0000170C
++
++/* probably PMC ? */
++#define NV50_PUNK_BAR0_PRAMIN                              0x00001700
++#define NV50_PUNK_BAR_CFG_BASE                             0x00001704
++#define NV50_PUNK_BAR_CFG_BASE_VALID                          (1<<30)
++#define NV50_PUNK_BAR1_CTXDMA                              0x00001708
++#define NV50_PUNK_BAR1_CTXDMA_VALID                           (1<<31)
++#define NV50_PUNK_BAR3_CTXDMA                              0x0000170C
++#define NV50_PUNK_BAR3_CTXDMA_VALID                           (1<<31)
++#define NV50_PUNK_UNK1710                                  0x00001710
++
++#define NV04_PBUS_PCI_NV_1                                 0x00001804
++#define NV04_PBUS_PCI_NV_19                                0x0000184C
++
++#define NV04_PTIMER_INTR_0                                 0x00009100
++#define NV04_PTIMER_INTR_EN_0                              0x00009140
++#define NV04_PTIMER_NUMERATOR                              0x00009200
++#define NV04_PTIMER_DENOMINATOR                            0x00009210
++#define NV04_PTIMER_TIME_0                                 0x00009400
++#define NV04_PTIMER_TIME_1                                 0x00009410
++#define NV04_PTIMER_ALARM_0                                0x00009420
++
++#define NV50_I2C_CONTROLLER                           0x0000E054
++
++#define NV04_PFB_CFG0                                      0x00100200
++#define NV04_PFB_CFG1                                      0x00100204
++#define NV40_PFB_020C                                      0x0010020C
++#define NV10_PFB_TILE(i)                                   (0x00100240 + (i*16))
++#define NV10_PFB_TILE__SIZE                                8
++#define NV10_PFB_TLIMIT(i)                                 (0x00100244 + (i*16))
++#define NV10_PFB_TSIZE(i)                                  (0x00100248 + (i*16))
++#define NV10_PFB_TSTATUS(i)                                (0x0010024C + (i*16))
++#define NV10_PFB_CLOSE_PAGE2                               0x0010033C
++#define NV40_PFB_TILE(i)                                   (0x00100600 + (i*16))
++#define NV40_PFB_TILE__SIZE_0                              12
++#define NV40_PFB_TILE__SIZE_1                              15
++#define NV40_PFB_TLIMIT(i)                                 (0x00100604 + (i*16))
++#define NV40_PFB_TSIZE(i)                                  (0x00100608 + (i*16))
++#define NV40_PFB_TSTATUS(i)                                (0x0010060C + (i*16))
++#define NV40_PFB_UNK_800                                      0x00100800
++
++#define NV04_PGRAPH_DEBUG_0                                0x00400080
++#define NV04_PGRAPH_DEBUG_1                                0x00400084
++#define NV04_PGRAPH_DEBUG_2                                0x00400088
++#define NV04_PGRAPH_DEBUG_3                                0x0040008c
++#define NV10_PGRAPH_DEBUG_4                                0x00400090
++#define NV03_PGRAPH_INTR                                   0x00400100
++#define NV03_PGRAPH_NSTATUS                                0x00400104
++#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
++#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
++#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
++#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
++#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
++#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
++#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
++#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
++#define NV03_PGRAPH_NSOURCE                                0x00400108
++#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                  (1<< 0)
++#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                    (1<< 1)
++#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR              (1<< 2)
++#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION               (1<< 3)
++#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                   (1<< 4)
++#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                    (1<< 5)
++#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                  (1<< 6)
++#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION              (1<< 7)
++#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION              (1<< 8)
++#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION              (1<< 9)
++#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
++#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
++#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
++#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
++#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
++#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
++#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
++#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
++#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
++#define NV03_PGRAPH_INTR_EN                                0x00400140
++#define NV40_PGRAPH_INTR_EN                                0x0040013C
++#    define NV_PGRAPH_INTR_NOTIFY                             (1<< 0)
++#    define NV_PGRAPH_INTR_MISSING_HW                         (1<< 4)
++#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
++#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
++#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
++#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
++#define NV10_PGRAPH_CTX_USER                               0x00400148
++#define NV10_PGRAPH_CTX_SWITCH1                            0x0040014C
++#define NV10_PGRAPH_CTX_SWITCH2                            0x00400150
++#define NV10_PGRAPH_CTX_SWITCH3                            0x00400154
++#define NV10_PGRAPH_CTX_SWITCH4                            0x00400158
++#define NV10_PGRAPH_CTX_SWITCH5                            0x0040015C
++#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
++#define NV10_PGRAPH_CTX_CACHE1                             0x00400160
++#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
++#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
++#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
++#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
++#define NV04_PGRAPH_CTX_USER                               0x00400174
++#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
++#define NV10_PGRAPH_CTX_CACHE2                             0x00400180
++#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
++#define NV03_PGRAPH_CTX_USER                               0x00400194
++#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
++#define NV10_PGRAPH_CTX_CACHE3                             0x004001A0
++#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
++#define NV10_PGRAPH_CTX_CACHE4                             0x004001C0
++#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
++#define NV10_PGRAPH_CTX_CACHE5                             0x004001E0
++#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
++#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
++#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
++#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
++#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
++#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
++#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
++#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
++#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
++#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
++#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK                   0x000FFFFF
++#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
++#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
++#define NV03_PGRAPH_X_MISC                                 0x00400500
++#define NV03_PGRAPH_Y_MISC                                 0x00400504
++#define NV04_PGRAPH_VALID1                                 0x00400508
++#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
++#define NV04_PGRAPH_MISC24_0                               0x00400510
++#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
++#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
++#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
++#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
++#define NV03_PGRAPH_CLIPX_0                                0x00400524
++#define NV03_PGRAPH_CLIPX_1                                0x00400528
++#define NV03_PGRAPH_CLIPY_0                                0x0040052C
++#define NV03_PGRAPH_CLIPY_1                                0x00400530
++#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
++#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
++#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
++#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
++#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
++#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
++#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
++#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
++#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
++#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
++#define NV04_PGRAPH_MISC24_1                               0x00400570
++#define NV04_PGRAPH_MISC24_2                               0x00400574
++#define NV04_PGRAPH_VALID2                                 0x00400578
++#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
++#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
++#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
++#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
++#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
++#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
++#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
++#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
++#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
++#define NV04_PGRAPH_FORMAT_0                               0x004005A8
++#define NV04_PGRAPH_FORMAT_1                               0x004005AC
++#define NV04_PGRAPH_FILTER_0                               0x004005B0
++#define NV04_PGRAPH_FILTER_1                               0x004005B4
++#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
++#define NV04_PGRAPH_ROP3                                   0x00400604
++#define NV04_PGRAPH_BETA_AND                               0x00400608
++#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
++#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
++#define NV04_PGRAPH_FORMATS                                0x00400618
++#define NV10_PGRAPH_DEBUG_2                                0x00400620
++#define NV04_PGRAPH_BOFFSET0                               0x00400640
++#define NV04_PGRAPH_BOFFSET1                               0x00400644
++#define NV04_PGRAPH_BOFFSET2                               0x00400648
++#define NV04_PGRAPH_BOFFSET3                               0x0040064C
++#define NV04_PGRAPH_BOFFSET4                               0x00400650
++#define NV04_PGRAPH_BOFFSET5                               0x00400654
++#define NV04_PGRAPH_BBASE0                                 0x00400658
++#define NV04_PGRAPH_BBASE1                                 0x0040065C
++#define NV04_PGRAPH_BBASE2                                 0x00400660
++#define NV04_PGRAPH_BBASE3                                 0x00400664
++#define NV04_PGRAPH_BBASE4                                 0x00400668
++#define NV04_PGRAPH_BBASE5                                 0x0040066C
++#define NV04_PGRAPH_BPITCH0                                0x00400670
++#define NV04_PGRAPH_BPITCH1                                0x00400674
++#define NV04_PGRAPH_BPITCH2                                0x00400678
++#define NV04_PGRAPH_BPITCH3                                0x0040067C
++#define NV04_PGRAPH_BPITCH4                                0x00400680
++#define NV04_PGRAPH_BLIMIT0                                0x00400684
++#define NV04_PGRAPH_BLIMIT1                                0x00400688
++#define NV04_PGRAPH_BLIMIT2                                0x0040068C
++#define NV04_PGRAPH_BLIMIT3                                0x00400690
++#define NV04_PGRAPH_BLIMIT4                                0x00400694
++#define NV04_PGRAPH_BLIMIT5                                0x00400698
++#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
++#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
++#define NV03_PGRAPH_STATUS                                 0x004006B0
++#define NV04_PGRAPH_STATUS                                 0x00400700
++#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
++#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
++#define NV04_PGRAPH_SURFACE                                0x0040070C
++#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
++#define NV04_PGRAPH_STATE                                  0x00400710
++#define NV10_PGRAPH_SURFACE                                0x00400710
++#define NV04_PGRAPH_NOTIFY                                 0x00400714
++#define NV10_PGRAPH_STATE                                  0x00400714
++#define NV10_PGRAPH_NOTIFY                                 0x00400718
++
++#define NV04_PGRAPH_FIFO                                   0x00400720
++
++#define NV04_PGRAPH_BPIXEL                                 0x00400724
++#define NV10_PGRAPH_RDI_INDEX                              0x00400750
++#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
++#define NV10_PGRAPH_RDI_DATA                               0x00400754
++#define NV04_PGRAPH_DMA_PITCH                              0x00400760
++#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
++#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
++#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
++#define NV10_PGRAPH_DMA_PITCH                              0x00400770
++#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
++#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
++#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
++#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
++#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
++#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
++#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
++#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
++#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
++#define NV04_PGRAPH_PATTERN                                0x00400808
++#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
++#define NV04_PGRAPH_CHROMA                                 0x00400814
++#define NV04_PGRAPH_CONTROL0                               0x00400818
++#define NV04_PGRAPH_CONTROL1                               0x0040081C
++#define NV04_PGRAPH_CONTROL2                               0x00400820
++#define NV04_PGRAPH_BLEND                                  0x00400824
++#define NV04_PGRAPH_STORED_FMT                             0x00400830
++#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
++#define NV40_PGRAPH_TILE0(i)                               (0x00400900 + (i*16))
++#define NV40_PGRAPH_TLIMIT0(i)                             (0x00400904 + (i*16))
++#define NV40_PGRAPH_TSIZE0(i)                              (0x00400908 + (i*16))
++#define NV40_PGRAPH_TSTATUS0(i)                            (0x0040090C + (i*16))
++#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
++#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
++#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
++#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
++#define NV04_PGRAPH_U_RAM                                  0x00400D00
++#define NV47_PGRAPH_TILE0(i)                               (0x00400D00 + (i*16))
++#define NV47_PGRAPH_TLIMIT0(i)                             (0x00400D04 + (i*16))
++#define NV47_PGRAPH_TSIZE0(i)                              (0x00400D08 + (i*16))
++#define NV47_PGRAPH_TSTATUS0(i)                            (0x00400D0C + (i*16))
++#define NV04_PGRAPH_V_RAM                                  0x00400D40
++#define NV04_PGRAPH_W_RAM                                  0x00400D80
++#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
++#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
++#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
++#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
++#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
++#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
++#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
++#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
++#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
++#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
++#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
++#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
++#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
++#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
++#define NV10_PGRAPH_XFMODE0                                0x00400F40
++#define NV10_PGRAPH_XFMODE1                                0x00400F44
++#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
++#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
++#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
++#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
++#define NV04_PGRAPH_DMA_START_0                            0x00401000
++#define NV04_PGRAPH_DMA_START_1                            0x00401004
++#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
++#define NV04_PGRAPH_DMA_MISC                               0x0040100C
++#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
++#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
++#define NV04_PGRAPH_DMA_RM                                 0x00401030
++#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
++#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
++#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
++#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
++#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
++#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
++#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
++#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
++#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
++#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
++#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
++#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
++#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
++#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
++#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
++#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
++#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
++#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
++#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
++#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
++#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
++#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
++
++
++/* It's a guess that this works on NV03. Confirmed on NV04, though */
++#define NV04_PFIFO_DELAY_0                                 0x00002040
++#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
++#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
++#define NV03_PFIFO_INTR_0                                  0x00002100
++#define NV03_PFIFO_INTR_EN_0                               0x00002140
++#    define NV_PFIFO_INTR_CACHE_ERROR                         (1<< 0)
++#    define NV_PFIFO_INTR_RUNOUT                              (1<< 4)
++#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                     (1<< 8)
++#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
++#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
++#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
++#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
++#define NV03_PFIFO_RAMHT                                   0x00002210
++#define NV03_PFIFO_RAMFC                                   0x00002214
++#define NV03_PFIFO_RAMRO                                   0x00002218
++#define NV40_PFIFO_RAMFC                                   0x00002220
++#define NV03_PFIFO_CACHES                                  0x00002500
++#define NV04_PFIFO_MODE                                    0x00002504
++#define NV04_PFIFO_DMA                                     0x00002508
++#define NV04_PFIFO_SIZE                                    0x0000250c
++#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
++#define NV50_PFIFO_CTX_TABLE__SIZE                                128
++#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
++#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
++#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
++#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
++#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
++#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
++#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
++#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
++#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
++#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
++#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
++#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
++#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
++#define NV03_PFIFO_CACHE1_PUT                              0x00003210
++#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
++#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
++#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
++#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
++#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
++#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
++#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
++#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
++#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
++#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
++#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
++#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
++#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
++#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
++#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
++#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
++#define NV04_PFIFO_CACHE1_HASH                             0x00003258
++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
++#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
++#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
++#define NV03_PFIFO_CACHE1_GET                              0x00003270
++#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
++#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
++#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
++#define NV40_PFIFO_UNK32E4                                 0x000032E4
++#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
++#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
++#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
++#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
++
++#define NV_CRTC0_INTSTAT                                   0x00600100
++#define NV_CRTC0_INTEN                                     0x00600140
++#define NV_CRTC1_INTSTAT                                   0x00602100
++#define NV_CRTC1_INTEN                                     0x00602140
++#    define NV_CRTC_INTR_VBLANK                                (1<<0)
++
++/* This name is a partial guess. */
++#define NV50_DISPLAY_SUPERVISOR                     0x00610024
++
++/* Fifo commands. These are not regs, neither masks */
++#define NV03_FIFO_CMD_JUMP                                 0x20000000
++#define NV03_FIFO_CMD_JUMP_OFFSET_MASK                     0x1ffffffc
++#define NV03_FIFO_CMD_REWIND                               (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
++
++/* RAMFC offsets */
++#define NV04_RAMFC_DMA_PUT                                       0x00
++#define NV04_RAMFC_DMA_GET                                       0x04
++#define NV04_RAMFC_DMA_INSTANCE                                  0x08
++#define NV04_RAMFC_DMA_STATE                                     0x0C
++#define NV04_RAMFC_DMA_FETCH                                     0x10
++#define NV04_RAMFC_ENGINE                                        0x14
++#define NV04_RAMFC_PULL1_ENGINE                                  0x18
++
++#define NV10_RAMFC_DMA_PUT                                       0x00
++#define NV10_RAMFC_DMA_GET                                       0x04
++#define NV10_RAMFC_REF_CNT                                       0x08
++#define NV10_RAMFC_DMA_INSTANCE                                  0x0C
++#define NV10_RAMFC_DMA_STATE                                     0x10
++#define NV10_RAMFC_DMA_FETCH                                     0x14
++#define NV10_RAMFC_ENGINE                                        0x18
++#define NV10_RAMFC_PULL1_ENGINE                                  0x1C
++#define NV10_RAMFC_ACQUIRE_VALUE                                 0x20
++#define NV10_RAMFC_ACQUIRE_TIMESTAMP                             0x24
++#define NV10_RAMFC_ACQUIRE_TIMEOUT                               0x28
++#define NV10_RAMFC_SEMAPHORE                                     0x2C
++#define NV10_RAMFC_DMA_SUBROUTINE                                0x30
++
++#define NV40_RAMFC_DMA_PUT                                       0x00
++#define NV40_RAMFC_DMA_GET                                       0x04
++#define NV40_RAMFC_REF_CNT                                       0x08
++#define NV40_RAMFC_DMA_INSTANCE                                  0x0C
++#define NV40_RAMFC_DMA_DCOUNT /* ? */                            0x10
++#define NV40_RAMFC_DMA_STATE                                     0x14
++#define NV40_RAMFC_DMA_FETCH                                     0x18
++#define NV40_RAMFC_ENGINE                                        0x1C
++#define NV40_RAMFC_PULL1_ENGINE                                  0x20
++#define NV40_RAMFC_ACQUIRE_VALUE                                 0x24
++#define NV40_RAMFC_ACQUIRE_TIMESTAMP                             0x28
++#define NV40_RAMFC_ACQUIRE_TIMEOUT                               0x2C
++#define NV40_RAMFC_SEMAPHORE                                     0x30
++#define NV40_RAMFC_DMA_SUBROUTINE                                0x34
++#define NV40_RAMFC_GRCTX_INSTANCE /* guess */                    0x38
++#define NV40_RAMFC_DMA_TIMESLICE                                 0x3C
++#define NV40_RAMFC_UNK_40                                        0x40
++#define NV40_RAMFC_UNK_44                                        0x44
++#define NV40_RAMFC_UNK_48                                        0x48
++#define NV40_RAMFC_UNK_4C                                        0x4C
++#define NV40_RAMFC_UNK_50                                        0x50
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_sgdma.c git-nokia/drivers/gpu/drm-tungsten/nouveau_sgdma.c
+--- git/drivers/gpu/drm-tungsten/nouveau_sgdma.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_sgdma.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,349 @@
++#include "drmP.h"
++#include "nouveau_drv.h"
++
++#define NV_CTXDMA_PAGE_SHIFT 12
++#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
++#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
++
++struct nouveau_sgdma_be {
++      struct drm_ttm_backend backend;
++      struct drm_device *dev;
++
++      int         pages;
++      int         pages_populated;
++      dma_addr_t *pagelist;
++      int         is_bound;
++
++      unsigned int pte_start;
++};
++
++static int
++nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
++{
++      return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
++}
++
++static int
++nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
++                     struct page **pages, struct page *dummy_read_page)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      int p, d, o;
++
++      DRM_DEBUG("num_pages = %ld\n", num_pages);
++
++      if (nvbe->pagelist)
++              return -EINVAL;
++      nvbe->pages    = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
++      nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
++                                 DRM_MEM_PAGES);
++
++      nvbe->pages_populated = d = 0;
++      for (p = 0; p < num_pages; p++) {
++              for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
++                      struct page *page = pages[p];
++                      if (!page)
++                              page = dummy_read_page;
++                      nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
++                                                       page, o,
++                                                       NV_CTXDMA_PAGE_SIZE,
++                                                       PCI_DMA_BIDIRECTIONAL);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++                      if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) {
++#else
++                      if (pci_dma_mapping_error(nvbe->pagelist[d])) {
++#endif
++                              be->func->clear(be);
++                              DRM_ERROR("pci_map_page failed\n");
++                              return -EINVAL;
++                      }
++                      nvbe->pages_populated = ++d;
++              }
++      }
++
++      return 0;
++}
++
++static void
++nouveau_sgdma_clear(struct drm_ttm_backend *be)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      int d;
++
++      DRM_DEBUG("\n");
++
++      if (nvbe && nvbe->pagelist) {
++              if (nvbe->is_bound)
++                      be->func->unbind(be);
++
++              for (d = 0; d < nvbe->pages_populated; d++) {
++                      pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
++                                     NV_CTXDMA_PAGE_SIZE,
++                                     PCI_DMA_BIDIRECTIONAL);
++              }
++              drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
++                       DRM_MEM_PAGES);
++      }
++}
++
++static int
++nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++      uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
++      uint32_t i;
++
++      DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
++                offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1);
++
++      if (offset & NV_CTXDMA_PAGE_MASK)
++              return -EINVAL;
++      nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
++      if (dev_priv->card_type < NV_50)
++              nvbe->pte_start += 2; /* skip ctxdma header */
++
++      for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
++              uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];
++
++              if (pteval & NV_CTXDMA_PAGE_MASK) {
++                      DRM_ERROR("Bad pteval 0x%llx\n", pteval);
++                      return -EINVAL;
++              }
++
++              if (dev_priv->card_type < NV_50) {
++                      INSTANCE_WR(gpuobj, i, pteval | 3);
++              } else {
++                      INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
++                      INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
++              }
++      }
++
++      nvbe->is_bound  = 1;
++      return 0;
++}
++
++static int
++nouveau_sgdma_unbind(struct drm_ttm_backend *be)
++{
++      struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++      struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      if (nvbe->is_bound) {
++              struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++              unsigned int pte;
++
++              pte = nvbe->pte_start;
++              while (pte < (nvbe->pte_start + nvbe->pages)) {
++                      uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
++
++                      if (dev_priv->card_type < NV_50) {
++                              INSTANCE_WR(gpuobj, pte, pteval | 3);
++                      } else {
++                              INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
++                              INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
++                      }
++
++                      pte++;
++              }
++
++              nvbe->is_bound = 0;
++      }
++
++      return 0;
++}
++
++static void
++nouveau_sgdma_destroy(struct drm_ttm_backend *be)
++{
++      DRM_DEBUG("\n");
++      if (be) {
++              struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++              if (nvbe) {
++                      if (nvbe->pagelist)
++                              be->func->clear(be);
++                      drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
++              }
++      }
++}
++
++static struct drm_ttm_backend_func nouveau_sgdma_backend = {
++      .needs_ub_cache_adjust  = nouveau_sgdma_needs_ub_cache_adjust,
++      .populate               = nouveau_sgdma_populate,
++      .clear                  = nouveau_sgdma_clear,
++      .bind                   = nouveau_sgdma_bind,
++      .unbind                 = nouveau_sgdma_unbind,
++      .destroy                = nouveau_sgdma_destroy
++};
++
++struct drm_ttm_backend *
++nouveau_sgdma_init_ttm(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_sgdma_be *nvbe;
++
++      if (!dev_priv->gart_info.sg_ctxdma)
++              return NULL;
++
++      nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
++      if (!nvbe)
++              return NULL;
++
++      nvbe->dev = dev;
++
++      nvbe->backend.func      = &nouveau_sgdma_backend;
++
++      return &nvbe->backend;
++}
++
++int
++nouveau_sgdma_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = NULL;
++      uint32_t aper_size, obj_size;
++      int i, ret;
++
++      if (dev_priv->card_type < NV_50) {
++              aper_size = (64 * 1024 * 1024);
++              obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
++              obj_size += 8; /* ctxdma header */
++      } else {
++              /* 1 entire VM page table */
++              aper_size = (512 * 1024 * 1024);
++              obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
++      }
++
++      if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
++                                    NVOBJ_FLAG_ALLOW_NO_REFS |
++                                    NVOBJ_FLAG_ZERO_ALLOC |
++                                    NVOBJ_FLAG_ZERO_FREE, &gpuobj)))  {
++              DRM_ERROR("Error creating sgdma object: %d\n", ret);
++              return ret;
++      }
++
++      dev_priv->gart_info.sg_dummy_page =
++              alloc_page(GFP_KERNEL|__GFP_DMA32);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
++      set_page_locked(dev_priv->gart_info.sg_dummy_page);
++#else
++      SetPageLocked(dev_priv->gart_info.sg_dummy_page);
++#endif
++      dev_priv->gart_info.sg_dummy_bus =
++              pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
++                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++
++      if (dev_priv->card_type < NV_50) {
++              /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
++               * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
++               * on those cards? */
++              INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
++                                     (1 << 12) /* PT present */ |
++                                     (0 << 13) /* PT *not* linear */ |
++                                     (NV_DMA_ACCESS_RW  << 14) |
++                                     (NV_DMA_TARGET_PCI << 16));
++              INSTANCE_WR(gpuobj, 1, aper_size - 1);
++              for (i=2; i<2+(aper_size>>12); i++) {
++                      INSTANCE_WR(gpuobj, i,
++                                  dev_priv->gart_info.sg_dummy_bus | 3);
++              }
++      } else {
++              for (i=0; i<obj_size; i+=8) {
++                      INSTANCE_WR(gpuobj, (i+0)/4,
++                                  dev_priv->gart_info.sg_dummy_bus | 0x21);
++                      INSTANCE_WR(gpuobj, (i+4)/4, 0);
++              }
++      }
++
++      dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
++      dev_priv->gart_info.aper_base = 0;
++      dev_priv->gart_info.aper_size = aper_size;
++      dev_priv->gart_info.sg_ctxdma = gpuobj;
++      return 0;
++}
++
++void
++nouveau_sgdma_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (dev_priv->gart_info.sg_dummy_page) {
++              pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
++                             NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++              unlock_page(dev_priv->gart_info.sg_dummy_page);
++              __free_page(dev_priv->gart_info.sg_dummy_page);
++              dev_priv->gart_info.sg_dummy_page = NULL;
++              dev_priv->gart_info.sg_dummy_bus = 0;
++      }
++
++      nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
++}
++
++int
++nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_ttm_backend *be;
++      struct drm_scatter_gather sgreq;
++      struct drm_mm_node mm_node;
++      struct drm_bo_mem_reg mem;
++      int ret;
++
++      dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
++      if (!dev_priv->gart_info.sg_be)
++              return -ENOMEM;
++      be = dev_priv->gart_info.sg_be;
++
++      /* Hack the aperture size down to the amount of system memory
++       * we're going to bind into it.
++       */
++      if (dev_priv->gart_info.aper_size > 32*1024*1024)
++              dev_priv->gart_info.aper_size = 32*1024*1024;
++
++      sgreq.size = dev_priv->gart_info.aper_size;
++      if ((ret = drm_sg_alloc(dev, &sgreq))) {
++              DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
++              return ret;
++      }
++      dev_priv->gart_info.sg_handle = sgreq.handle;
++
++      if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
++              DRM_ERROR("failed populate: %d\n", ret);
++              return ret;
++      }
++
++      mm_node.start = 0;
++      mem.mm_node = &mm_node;
++
++      if ((ret = be->func->bind(be, &mem))) {
++              DRM_ERROR("failed bind: %d\n", ret);
++              return ret;
++      }
++
++      return 0;
++}
++
++void
++nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
++{
++}
++
++int
++nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++      int pte;
++
++      pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
++      if (dev_priv->card_type < NV_50) {
++              *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
++              return 0;
++      }
++
++      DRM_ERROR("Unimplemented on NV50\n");
++      return -EINVAL;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_state.c git-nokia/drivers/gpu/drm-tungsten/nouveau_state.c
+--- git/drivers/gpu/drm-tungsten/nouveau_state.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_state.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,871 @@
++/*
++ * Copyright 2005 Stephane Marchesin
++ * Copyright 2008 Stuart Bennett
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++static int nouveau_init_card_mappings(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      /* resource 0 is mmio regs */
++      /* resource 1 is linear FB */
++      /* resource 2 is RAMIN (mmio regs + 0x1000000) */
++      /* resource 6 is bios */
++
++      /* map the mmio regs */
++      ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
++                            drm_get_resource_len(dev, 0),
++                            _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
++      if (ret) {
++              DRM_ERROR("Unable to initialize the mmio mapping (%d). "
++                        "Please report your setup to " DRIVER_EMAIL "\n",
++                        ret);
++              return -EINVAL;
++      }
++      DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);
++
++      /* map larger RAMIN aperture on NV40 cards */
++      dev_priv->ramin = NULL;
++      if (dev_priv->card_type >= NV_40) {
++              int ramin_resource = 2;
++              if (drm_get_resource_len(dev, ramin_resource) == 0)
++                      ramin_resource = 3;
++
++              ret = drm_addmap(dev,
++                               drm_get_resource_start(dev, ramin_resource),
++                               drm_get_resource_len(dev, ramin_resource),
++                               _DRM_REGISTERS, _DRM_READ_ONLY,
++                               &dev_priv->ramin);
++              if (ret) {
++                      DRM_ERROR("Failed to init RAMIN mapping, "
++                                "limited instance memory available\n");
++                      dev_priv->ramin = NULL;
++              }
++      }
++
++      /* On older cards (or if the above failed), create a map covering
++       * the BAR0 PRAMIN aperture */
++      if (!dev_priv->ramin) {
++              ret = drm_addmap(dev,
++                               drm_get_resource_start(dev, 0) + NV_RAMIN,
++                               (1*1024*1024),
++                               _DRM_REGISTERS, _DRM_READ_ONLY,
++                               &dev_priv->ramin);
++              if (ret) {
++                      DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret);
++                      return ret;
++              }
++      }
++
++      return 0;
++}
++
++static int nouveau_stub_init(struct drm_device *dev) { return 0; }
++static void nouveau_stub_takedown(struct drm_device *dev) {}
++
++static int nouveau_init_engine_ptrs(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      switch (dev_priv->chipset & 0xf0) {
++      case 0x00:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv04_fb_init;
++              engine->fb.takedown     = nv04_fb_takedown;
++              engine->graph.init      = nv04_graph_init;
++              engine->graph.takedown  = nv04_graph_takedown;
++              engine->graph.create_context    = nv04_graph_create_context;
++              engine->graph.destroy_context   = nv04_graph_destroy_context;
++              engine->graph.load_context      = nv04_graph_load_context;
++              engine->graph.save_context      = nv04_graph_save_context;
++              engine->fifo.channels   = 16;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv04_fifo_channel_id;
++              engine->fifo.create_context     = nv04_fifo_create_context;
++              engine->fifo.destroy_context    = nv04_fifo_destroy_context;
++              engine->fifo.load_context       = nv04_fifo_load_context;
++              engine->fifo.save_context       = nv04_fifo_save_context;
++              break;
++      case 0x10:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv10_fb_init;
++              engine->fb.takedown     = nv10_fb_takedown;
++              engine->graph.init      = nv10_graph_init;
++              engine->graph.takedown  = nv10_graph_takedown;
++              engine->graph.create_context    = nv10_graph_create_context;
++              engine->graph.destroy_context   = nv10_graph_destroy_context;
++              engine->graph.load_context      = nv10_graph_load_context;
++              engine->graph.save_context      = nv10_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv10_fifo_create_context;
++              engine->fifo.destroy_context    = nv10_fifo_destroy_context;
++              engine->fifo.load_context       = nv10_fifo_load_context;
++              engine->fifo.save_context       = nv10_fifo_save_context;
++              break;
++      case 0x20:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv10_fb_init;
++              engine->fb.takedown     = nv10_fb_takedown;
++              engine->graph.init      = nv20_graph_init;
++              engine->graph.takedown  = nv20_graph_takedown;
++              engine->graph.create_context    = nv20_graph_create_context;
++              engine->graph.destroy_context   = nv20_graph_destroy_context;
++              engine->graph.load_context      = nv20_graph_load_context;
++              engine->graph.save_context      = nv20_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv10_fifo_create_context;
++              engine->fifo.destroy_context    = nv10_fifo_destroy_context;
++              engine->fifo.load_context       = nv10_fifo_load_context;
++              engine->fifo.save_context       = nv10_fifo_save_context;
++              break;
++      case 0x30:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv04_mc_init;
++              engine->mc.takedown     = nv04_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv10_fb_init;
++              engine->fb.takedown     = nv10_fb_takedown;
++              engine->graph.init      = nv30_graph_init;
++              engine->graph.takedown  = nv20_graph_takedown;
++              engine->graph.create_context    = nv20_graph_create_context;
++              engine->graph.destroy_context   = nv20_graph_destroy_context;
++              engine->graph.load_context      = nv20_graph_load_context;
++              engine->graph.save_context      = nv20_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nouveau_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv10_fifo_create_context;
++              engine->fifo.destroy_context    = nv10_fifo_destroy_context;
++              engine->fifo.load_context       = nv10_fifo_load_context;
++              engine->fifo.save_context       = nv10_fifo_save_context;
++              break;
++      case 0x40:
++      case 0x60:
++              engine->instmem.init    = nv04_instmem_init;
++              engine->instmem.takedown= nv04_instmem_takedown;
++              engine->instmem.populate        = nv04_instmem_populate;
++              engine->instmem.clear           = nv04_instmem_clear;
++              engine->instmem.bind            = nv04_instmem_bind;
++              engine->instmem.unbind          = nv04_instmem_unbind;
++              engine->mc.init         = nv40_mc_init;
++              engine->mc.takedown     = nv40_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nv40_fb_init;
++              engine->fb.takedown     = nv40_fb_takedown;
++              engine->graph.init      = nv40_graph_init;
++              engine->graph.takedown  = nv40_graph_takedown;
++              engine->graph.create_context    = nv40_graph_create_context;
++              engine->graph.destroy_context   = nv40_graph_destroy_context;
++              engine->graph.load_context      = nv40_graph_load_context;
++              engine->graph.save_context      = nv40_graph_save_context;
++              engine->fifo.channels   = 32;
++              engine->fifo.init       = nv40_fifo_init;
++              engine->fifo.takedown   = nouveau_stub_takedown;
++              engine->fifo.channel_id         = nv10_fifo_channel_id;
++              engine->fifo.create_context     = nv40_fifo_create_context;
++              engine->fifo.destroy_context    = nv40_fifo_destroy_context;
++              engine->fifo.load_context       = nv40_fifo_load_context;
++              engine->fifo.save_context       = nv40_fifo_save_context;
++              break;
++      case 0x50:
++      case 0x80: /* gotta love NVIDIA's consistency.. */
++      case 0x90:
++      case 0xA0:
++              engine->instmem.init    = nv50_instmem_init;
++              engine->instmem.takedown= nv50_instmem_takedown;
++              engine->instmem.populate        = nv50_instmem_populate;
++              engine->instmem.clear           = nv50_instmem_clear;
++              engine->instmem.bind            = nv50_instmem_bind;
++              engine->instmem.unbind          = nv50_instmem_unbind;
++              engine->mc.init         = nv50_mc_init;
++              engine->mc.takedown     = nv50_mc_takedown;
++              engine->timer.init      = nv04_timer_init;
++              engine->timer.read      = nv04_timer_read;
++              engine->timer.takedown  = nv04_timer_takedown;
++              engine->fb.init         = nouveau_stub_init;
++              engine->fb.takedown     = nouveau_stub_takedown;
++              engine->graph.init      = nv50_graph_init;
++              engine->graph.takedown  = nv50_graph_takedown;
++              engine->graph.create_context    = nv50_graph_create_context;
++              engine->graph.destroy_context   = nv50_graph_destroy_context;
++              engine->graph.load_context      = nv50_graph_load_context;
++              engine->graph.save_context      = nv50_graph_save_context;
++              engine->fifo.channels   = 128;
++              engine->fifo.init       = nv50_fifo_init;
++              engine->fifo.takedown   = nv50_fifo_takedown;
++              engine->fifo.channel_id         = nv50_fifo_channel_id;
++              engine->fifo.create_context     = nv50_fifo_create_context;
++              engine->fifo.destroy_context    = nv50_fifo_destroy_context;
++              engine->fifo.load_context       = nv50_fifo_load_context;
++              engine->fifo.save_context       = nv50_fifo_save_context;
++              break;
++      default:
++              DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset);
++              return 1;
++      }
++
++      return 0;
++}
++
++int
++nouveau_card_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine;
++      int ret;
++
++      DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
++
++      if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
++              return 0;
++      dev_priv->ttm = 0;
++
++      /* Determine exact chipset we're running on */
++      if (dev_priv->card_type < NV_10)
++              dev_priv->chipset = dev_priv->card_type;
++      else
++              dev_priv->chipset =
++                      (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20;
++
++      /* Initialise internal driver API hooks */
++      ret = nouveau_init_engine_ptrs(dev);
++      if (ret) return ret;
++      engine = &dev_priv->Engine;
++      dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
++
++      ret = nouveau_gpuobj_early_init(dev);
++      if (ret) return ret;
++
++      /* Initialise instance memory, must happen before mem_init so we
++       * know exactly how much VRAM we're able to use for "normal"
++       * purposes.
++       */
++      ret = engine->instmem.init(dev);
++      if (ret) return ret;
++
++      /* Setup the memory manager */
++      if (dev_priv->ttm) {
++              ret = nouveau_mem_init_ttm(dev);
++              if (ret) return ret;
++      } else {
++              ret = nouveau_mem_init(dev);
++              if (ret) return ret;
++      }
++
++      ret = nouveau_gpuobj_init(dev);
++      if (ret) return ret;
++
++      /* Parse BIOS tables / Run init tables? */
++
++      /* PMC */
++      ret = engine->mc.init(dev);
++      if (ret) return ret;
++
++      /* PTIMER */
++      ret = engine->timer.init(dev);
++      if (ret) return ret;
++
++      /* PFB */
++      ret = engine->fb.init(dev);
++      if (ret) return ret;
++
++      /* PGRAPH */
++      ret = engine->graph.init(dev);
++      if (ret) return ret;
++
++      /* PFIFO */
++      ret = engine->fifo.init(dev);
++      if (ret) return ret;
++
++      /* this call irq_preinstall, register irq handler and
++       * call irq_postinstall
++       */
++      ret = drm_irq_install(dev);
++      if (ret) return ret;
++
++      /* what about PVIDEO/PCRTC/PRAMDAC etc? */
++
++      ret = nouveau_dma_channel_init(dev);
++      if (ret) return ret;
++
++      dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
++      return 0;
++}
++
++static void nouveau_card_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
++
++      if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
++              nouveau_dma_channel_takedown(dev);
++
++              engine->fifo.takedown(dev);
++              engine->graph.takedown(dev);
++              engine->fb.takedown(dev);
++              engine->timer.takedown(dev);
++              engine->mc.takedown(dev);
++
++              nouveau_sgdma_nottm_hack_takedown(dev);
++              nouveau_sgdma_takedown(dev);
++
++              nouveau_gpuobj_takedown(dev);
++              nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt);
++
++              nouveau_mem_close(dev);
++              engine->instmem.takedown(dev);
++
++              drm_irq_uninstall(dev);
++
++              nouveau_gpuobj_late_takedown(dev);
++
++              dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
++      }
++}
++
++/* here a client dies, release the stuff that was allocated for its
++ * file_priv */
++void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      nouveau_fifo_cleanup(dev, file_priv);
++      nouveau_mem_release(file_priv,dev_priv->fb_heap);
++      nouveau_mem_release(file_priv,dev_priv->agp_heap);
++      nouveau_mem_release(file_priv,dev_priv->pci_heap);
++}
++
++/* first module load, setup the mmio/fb mapping */
++int nouveau_firstopen(struct drm_device *dev)
++{
++#if defined(__powerpc__)
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct device_node *dn;
++#endif
++      int ret;
++      /* Map any PCI resources we need on the card */
++      ret = nouveau_init_card_mappings(dev);
++      if (ret) return ret;
++
++#if defined(__powerpc__)
++      /* Put the card in BE mode if it's not */
++      if (NV_READ(NV03_PMC_BOOT_1))
++              NV_WRITE(NV03_PMC_BOOT_1,0x00000001);
++
++      DRM_MEMORYBARRIER();
++#endif
++
++#if defined(__linux__) && defined(__powerpc__)
++      /* if we have an OF card, copy vbios to RAMIN */
++      dn = pci_device_to_OF_node(dev->pdev);
++      if (dn)
++      {
++              int size;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++              const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size);
++#else
++              const uint32_t *bios = get_property(dn, "NVDA,BMP", &size);
++#endif
++              if (bios)
++              {
++                      int i;
++                      for(i=0;i<size;i+=4)
++                              NV_WI32(i, bios[i/4]);
++                      DRM_INFO("OF bios successfully copied (%d bytes)\n",size);
++              }
++              else
++                      DRM_INFO("Unable to get the OF bios\n");
++      }
++      else
++              DRM_INFO("Unable to get the OF node\n");
++#endif
++      return 0;
++}
++
++#define NV40_CHIPSET_MASK 0x00000baf
++#define NV44_CHIPSET_MASK 0x00005450
++
++int nouveau_load(struct drm_device *dev, unsigned long flags)
++{
++      struct drm_nouveau_private *dev_priv;
++      void __iomem *regs;
++      uint32_t reg0,reg1;
++      uint8_t architecture = 0;
++
++      dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
++      if (!dev_priv)
++              return -ENOMEM;
++
++      dev_priv->flags = flags & NOUVEAU_FLAGS;
++      dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
++
++      DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class);
++
++      /* Time to determine the card architecture */
++      regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);
++      if (!regs) {
++              DRM_ERROR("Could not ioremap to determine register\n");
++              return -ENOMEM;
++      }
++
++      reg0 = readl(regs+NV03_PMC_BOOT_0);
++      reg1 = readl(regs+NV03_PMC_BOOT_1);
++#if defined(__powerpc__)
++      if (reg1)
++              reg0=___swab32(reg0);
++#endif
++
++      /* We're dealing with >=NV10 */
++      if ((reg0 & 0x0f000000) > 0 ) {
++              /* Bit 27-20 contain the architecture in hex */
++              architecture = (reg0 & 0xff00000) >> 20;
++      /* NV04 or NV05 */
++      } else if ((reg0 & 0xff00fff0) == 0x20004000) {
++              architecture = 0x04;
++      }
++
++      iounmap(regs);
++
++      if (architecture >= 0x80) {
++              dev_priv->card_type = NV_50;
++      } else if (architecture >= 0x60) {
++              /* FIXME we need to figure out who's who for NV6x */
++              dev_priv->card_type = NV_44;
++      } else if (architecture >= 0x50) {
++              dev_priv->card_type = NV_50;
++      } else if (architecture >= 0x40) {
++              uint8_t subarch = architecture & 0xf;
++              /* Selection criteria borrowed from NV40EXA */
++              if (NV40_CHIPSET_MASK & (1 << subarch)) {
++                      dev_priv->card_type = NV_40;
++              } else if (NV44_CHIPSET_MASK & (1 << subarch)) {
++                      dev_priv->card_type = NV_44;
++              } else {
++                      dev_priv->card_type = NV_UNKNOWN;
++              }
++      } else if (architecture >= 0x30) {
++              dev_priv->card_type = NV_30;
++      } else if (architecture >= 0x20) {
++              dev_priv->card_type = NV_20;
++      } else if (architecture >= 0x17) {
++              dev_priv->card_type = NV_17;
++      } else if (architecture >= 0x11) {
++              dev_priv->card_type = NV_11;
++      } else if (architecture >= 0x10) {
++              dev_priv->card_type = NV_10;
++      } else if (architecture >= 0x04) {
++              dev_priv->card_type = NV_04;
++      } else {
++              dev_priv->card_type = NV_UNKNOWN;
++      }
++
++      DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0);
++
++      if (dev_priv->card_type == NV_UNKNOWN) {
++              return -EINVAL;
++      }
++
++      /* Special flags */
++      if (dev->pci_device == 0x01a0) {
++              dev_priv->flags |= NV_NFORCE;
++      } else if (dev->pci_device == 0x01f0) {
++              dev_priv->flags |= NV_NFORCE2;
++      }
++
++      dev->dev_private = (void *)dev_priv;
++
++      return 0;
++}
++
++void nouveau_lastclose(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* In the case of an error dev_priv may not be be allocated yet */
++      if (dev_priv && dev_priv->card_type) {
++              nouveau_card_takedown(dev);
++
++              if(dev_priv->fb_mtrr>0)
++              {
++                      drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC);
++                      dev_priv->fb_mtrr=0;
++              }
++      }
++}
++
++int nouveau_unload(struct drm_device *dev)
++{
++      drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++      return 0;
++}
++
++int
++nouveau_ioctl_card_init(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      return nouveau_card_init(dev);
++}
++
++int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_getparam *getparam = data;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      switch (getparam->param) {
++      case NOUVEAU_GETPARAM_CHIPSET_ID:
++              getparam->value = dev_priv->chipset;
++              break;
++      case NOUVEAU_GETPARAM_PCI_VENDOR:
++              getparam->value=dev->pci_vendor;
++              break;
++      case NOUVEAU_GETPARAM_PCI_DEVICE:
++              getparam->value=dev->pci_device;
++              break;
++      case NOUVEAU_GETPARAM_BUS_TYPE:
++              if (drm_device_is_agp(dev))
++                      getparam->value=NV_AGP;
++              else if (drm_device_is_pcie(dev))
++                      getparam->value=NV_PCIE;
++              else
++                      getparam->value=NV_PCI;
++              break;
++      case NOUVEAU_GETPARAM_FB_PHYSICAL:
++              getparam->value=dev_priv->fb_phys;
++              break;
++      case NOUVEAU_GETPARAM_AGP_PHYSICAL:
++              getparam->value=dev_priv->gart_info.aper_base;
++              break;
++      case NOUVEAU_GETPARAM_PCI_PHYSICAL:
++              if ( dev -> sg )
++                      getparam->value=(unsigned long)dev->sg->virtual;
++              else
++                   {
++                   DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n");
++                   return -EINVAL;
++                   }
++              break;
++      case NOUVEAU_GETPARAM_FB_SIZE:
++              getparam->value=dev_priv->fb_available_size;
++              break;
++      case NOUVEAU_GETPARAM_AGP_SIZE:
++              getparam->value=dev_priv->gart_info.aper_size;
++              break;
++      default:
++              DRM_ERROR("unknown parameter %lld\n", getparam->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct drm_nouveau_setparam *setparam = data;
++
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      switch (setparam->param) {
++      case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
++              switch (setparam->value) {
++              case NOUVEAU_MEM_AGP:
++              case NOUVEAU_MEM_FB:
++              case NOUVEAU_MEM_PCI:
++              case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE:
++                      break;
++              default:
++                      DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n",
++                                      setparam->value);
++                      return -EINVAL;
++              }
++              dev_priv->config.cmdbuf.location = setparam->value;
++              break;
++      case NOUVEAU_SETPARAM_CMDBUF_SIZE:
++              dev_priv->config.cmdbuf.size = setparam->value;
++              break;
++      default:
++              DRM_ERROR("unknown parameter %lld\n", setparam->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* waits for idle */
++void nouveau_wait_for_idle(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv=dev->dev_private;
++      switch(dev_priv->card_type) {
++      case NV_50:
++              break;
++      default: {
++              /* This stuff is more or less a copy of what is seen
++               * in nv28 kmmio dump.
++               */
++              uint64_t started = dev_priv->Engine.timer.read(dev);
++              uint64_t stopped = started;
++              uint32_t status;
++              do {
++                      uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE);
++                      (void)pmc_e;
++                      status = NV_READ(NV04_PGRAPH_STATUS);
++                      if (!status)
++                              break;
++                      stopped = dev_priv->Engine.timer.read(dev);
++              /* It'll never wrap anyway... */
++              } while (stopped - started < 1000000000ULL);
++              if (status)
++                      DRM_ERROR("timed out with status 0x%08x\n",
++                                status);
++      }
++      }
++}
++
++static int nouveau_suspend(struct drm_device *dev)
++{
++      struct mem_block *p;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_suspend_resume *susres = &dev_priv->susres;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int i;
++
++      drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
++      susres->ramin_size = 0;
++      list_for_each(p, dev_priv->ramin_heap)
++              if (p->file_priv && (p->start + p->size) > susres->ramin_size)
++                      susres->ramin_size = p->start + p->size;
++      if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) {
++              DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n");
++              return -ENOMEM;
++      }
++
++      for (i = 0; i < engine->fifo.channels; i++) {
++              uint64_t t_start = engine->timer.read(dev);
++
++              if (dev_priv->fifos[i] == NULL)
++                      continue;
++
++              /* Give the channel a chance to idle, wait 2s (hopefully) */
++              while (!nouveau_channel_idle(dev_priv->fifos[i]))
++                      if (engine->timer.read(dev) - t_start > 2000000000ULL) {
++                              DRM_ERROR("Failed to idle channel %d before"
++                                        "suspend.", dev_priv->fifos[i]->id);
++                              return -EBUSY;
++                      }
++      }
++      nouveau_wait_for_idle(dev);
++
++      NV_WRITE(NV04_PGRAPH_FIFO, 0);
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      susres->fifo_mode = NV_READ(NV04_PFIFO_MODE);
++
++      if (dev_priv->card_type >= NV_10) {
++              susres->graph_state = NV_READ(NV10_PGRAPH_STATE);
++              susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL);
++      } else {
++              susres->graph_state = NV_READ(NV04_PGRAPH_STATE);
++              susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL);
++      }
++
++      engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
++      engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
++      nouveau_wait_for_idle(dev);
++
++      for (i = 0; i < susres->ramin_size / 4; i++)
++              susres->ramin_copy[i] = NV_RI32(i << 2);
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++      NV_WRITE(NV04_PGRAPH_FIFO, 1);
++
++      return 0;
++}
++
++static int nouveau_resume(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_suspend_resume *susres = &dev_priv->susres;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int i;
++
++      if (!susres->ramin_copy)
++              return -EINVAL;
++
++      DRM_DEBUG("Doing resume\n");
++
++      if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
++              struct drm_agp_info info;
++              struct drm_agp_mode mode;
++
++              /* agp bridge drivers don't re-enable agp on resume. lame. */
++              if ((i = drm_agp_info(dev, &info))) {
++                      DRM_ERROR("Unable to get AGP info: %d\n", i);
++                      return i;
++              }
++              mode.mode = info.mode;
++              if ((i = drm_agp_enable(dev, mode))) {
++                      DRM_ERROR("Unable to enable AGP: %d\n", i);
++                      return i;
++              }
++      }
++
++      for (i = 0; i < susres->ramin_size / 4; i++)
++              NV_WI32(i << 2, susres->ramin_copy[i]);
++
++      engine->mc.init(dev);
++      engine->timer.init(dev);
++      engine->fb.init(dev);
++      engine->graph.init(dev);
++      engine->fifo.init(dev);
++
++      NV_WRITE(NV04_PGRAPH_FIFO, 0);
++      /* disable the fifo caches */
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
++
++      /* PMC power cycling PFIFO in init clobbers some of the stuff stored in
++       * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful
++       */
++      for (i = 0; i < susres->ramin_size / 4; i++)
++              NV_WI32(i << 2, susres->ramin_copy[i]);
++
++      engine->fifo.load_context(dev_priv->fifos[0]);
++      NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode);
++
++      engine->graph.load_context(dev_priv->fifos[0]);
++      nouveau_wait_for_idle(dev);
++
++      if (dev_priv->card_type >= NV_10) {
++              NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state);
++              NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
++      } else {
++              NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state);
++              NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
++      }
++
++      /* reenable the fifo caches */
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
++               NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
++      NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
++
++      if (dev->irq_enabled)
++              nouveau_irq_postinstall(dev);
++
++      drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
++      susres->ramin_copy = NULL;
++      susres->ramin_size = 0;
++
++      return 0;
++}
++
++int nouveau_ioctl_suspend(struct drm_device *dev, void *data,
++                               struct drm_file *file_priv)
++{
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      return nouveau_suspend(dev);
++}
++
++int nouveau_ioctl_resume(struct drm_device *dev, void *data,
++                              struct drm_file *file_priv)
++{
++      NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++      return nouveau_resume(dev);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_swmthd.c git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.c
+--- git/drivers/gpu/drm-tungsten/nouveau_swmthd.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,191 @@
++/*
++ * Copyright (C) 2007 Arthur Huillet.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Arthur Huillet <arthur.huillet AT free DOT fr>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_reg.h"
++
++/*TODO: add a "card_type" attribute*/
++typedef struct{
++      uint32_t oclass; /* object class for this software method */
++      uint32_t mthd; /* method number */
++      void (*method_code)(struct drm_device *dev, uint32_t oclass, uint32_t mthd); /* pointer to the function that does the work */
++ } nouveau_software_method_t;
++
++
++ /* This function handles the NV04 setcontext software methods.
++One function for all because they are very similar.*/
++static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF;
++      uint32_t value_to_set = 0, bit_to_set = 0;
++
++      switch ( oclass ) {
++              case 0x4a:
++                      switch ( mthd ) {
++                              case 0x188 :
++                              case 0x18c :
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x198 :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                              default : ;
++                              };
++                      break;
++              case 0x5c:
++                      switch ( mthd ) {
++                              case 0x184:
++                                      bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
++                                      break;
++                              case 0x188:
++                              case 0x18c:
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x198:
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                      };
++                      break;
++              case 0x5f:
++                      switch ( mthd ) {
++                              case 0x184 :
++                                      bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/
++                                      break;
++                              case 0x188 :
++                                      bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
++                                      break;
++                              case 0x18c :
++                              case 0x190 :
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x19c :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                      };
++                      break;
++              case 0x61:
++                      switch ( mthd ) {
++                              case 0x188 :
++                                      bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
++                                      break;
++                              case 0x18c :
++                              case 0x190 :
++                                      bit_to_set = 0;
++                                      break;
++                              case 0x19c :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x2fc :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
++                                      break;
++                      };
++                      break;
++              case 0x77:
++                      switch ( mthd ) {
++                              case 0x198 :
++                                      bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
++                                      break;
++                              case 0x304 :
++                                      bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG
++                                      break;
++                      };
++                      break;
++              default :;
++              };
++
++      value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set;
++
++      /*RAMIN*/
++      nouveau_wait_for_idle(dev);
++      NV_WRITE(0x00700000 | inst_loc << 4, value_to_set);
++
++      /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/
++      NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, value_to_set);
++
++      /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/
++      NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set);
++}
++
++ nouveau_software_method_t nouveau_sw_methods[] = {
++      /*NV04 context software methods*/
++      { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method },
++      { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method },
++      { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method },
++      { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x188, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x190, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method },
++      { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method },
++      { 0x77, 0x198, nouveau_NV04_setcontext_sw_method },
++      { 0x77, 0x304, nouveau_NV04_setcontext_sw_method },
++      /*terminator*/
++      { 0x0, 0x0, NULL, },
++ };
++
++ int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method) {
++      int i = 0;
++      while ( nouveau_sw_methods[ i ] . method_code != NULL )
++              {
++              if ( nouveau_sw_methods[ i ] . oclass == oclass && nouveau_sw_methods[ i ] . mthd == method )
++                      {
++                      nouveau_sw_methods[ i ] . method_code(dev, oclass, method);
++                      return 0;
++                      }
++              i ++;
++              }
++
++       return 1;
++ }
+diff -Nurd git/drivers/gpu/drm-tungsten/nouveau_swmthd.h git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.h
+--- git/drivers/gpu/drm-tungsten/nouveau_swmthd.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nouveau_swmthd.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,33 @@
++/*
++ * Copyright (C) 2007 Arthur Huillet.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Arthur Huillet <arthur.huillet AT free DOT fr>
++ */
++
++int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_fb.c git-nokia/drivers/gpu/drm-tungsten/nv04_fb.c
+--- git/drivers/gpu/drm-tungsten/nv04_fb.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_fb.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,23 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_fb_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
++       * nvidia reading PFB_CFG_0, then writing back its original value.
++       * (which was 0x701114 in this case)
++       */
++      NV_WRITE(NV04_PFB_CFG0, 0x1114);
++
++      return 0;
++}
++
++void
++nv04_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv04_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv04_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,138 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
++                                       NV04_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
++                                       NV04_RAMFC_##offset/4)
++#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
++#define NV04_RAMFC__SIZE 32
++
++int
++nv04_fifo_channel_id(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++                      NV03_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv04_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
++                                              NV04_RAMFC__SIZE,
++                                              NVOBJ_FLAG_ZERO_ALLOC |
++                                              NVOBJ_FLAG_ZERO_FREE,
++                                              NULL, &chan->ramfc)))
++              return ret;
++
++      /* Setup initial state */
++      RAMFC_WR(DMA_PUT, chan->pushbuf_base);
++      RAMFC_WR(DMA_GET, chan->pushbuf_base);
++      RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
++      RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++                           NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                           NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++                           NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                           0));
++
++      /* enable the fifo dma operation */
++      NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id));
++      return 0;
++}
++
++void
++nv04_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv04_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++               NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
++
++      tmp = RAMFC_RD(DMA_INSTANCE);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE));
++
++      /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
++      tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++      return 0;
++}
++
++int
++nv04_fifo_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
++      tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE);
++      RAMFC_WR(DMA_INSTANCE, tmp);
++
++      RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
++      RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
++      RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));
++      RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1));
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_graph.c git-nokia/drivers/gpu/drm-tungsten/nv04_graph.c
+--- git/drivers/gpu/drm-tungsten/nv04_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,516 @@
++/*
++ * Copyright 2007 Stephane Marchesin
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++
++static uint32_t nv04_graph_ctx_regs [] = {
++      NV04_PGRAPH_CTX_SWITCH1,
++      NV04_PGRAPH_CTX_SWITCH2,
++      NV04_PGRAPH_CTX_SWITCH3,
++      NV04_PGRAPH_CTX_SWITCH4,
++      NV04_PGRAPH_CTX_CACHE1,
++      NV04_PGRAPH_CTX_CACHE2,
++      NV04_PGRAPH_CTX_CACHE3,
++      NV04_PGRAPH_CTX_CACHE4,
++      0x00400184,
++      0x004001a4,
++      0x004001c4,
++      0x004001e4,
++      0x00400188,
++      0x004001a8,
++      0x004001c8,
++      0x004001e8,
++      0x0040018c,
++      0x004001ac,
++      0x004001cc,
++      0x004001ec,
++      0x00400190,
++      0x004001b0,
++      0x004001d0,
++      0x004001f0,
++      0x00400194,
++      0x004001b4,
++      0x004001d4,
++      0x004001f4,
++      0x00400198,
++      0x004001b8,
++      0x004001d8,
++      0x004001f8,
++      0x0040019c,
++      0x004001bc,
++      0x004001dc,
++      0x004001fc,
++      0x00400174,
++      NV04_PGRAPH_DMA_START_0,
++      NV04_PGRAPH_DMA_START_1,
++      NV04_PGRAPH_DMA_LENGTH,
++      NV04_PGRAPH_DMA_MISC,
++      NV04_PGRAPH_DMA_PITCH,
++      NV04_PGRAPH_BOFFSET0,
++      NV04_PGRAPH_BBASE0,
++      NV04_PGRAPH_BLIMIT0,
++      NV04_PGRAPH_BOFFSET1,
++      NV04_PGRAPH_BBASE1,
++      NV04_PGRAPH_BLIMIT1,
++      NV04_PGRAPH_BOFFSET2,
++      NV04_PGRAPH_BBASE2,
++      NV04_PGRAPH_BLIMIT2,
++      NV04_PGRAPH_BOFFSET3,
++      NV04_PGRAPH_BBASE3,
++      NV04_PGRAPH_BLIMIT3,
++      NV04_PGRAPH_BOFFSET4,
++      NV04_PGRAPH_BBASE4,
++      NV04_PGRAPH_BLIMIT4,
++      NV04_PGRAPH_BOFFSET5,
++      NV04_PGRAPH_BBASE5,
++      NV04_PGRAPH_BLIMIT5,
++      NV04_PGRAPH_BPITCH0,
++      NV04_PGRAPH_BPITCH1,
++      NV04_PGRAPH_BPITCH2,
++      NV04_PGRAPH_BPITCH3,
++      NV04_PGRAPH_BPITCH4,
++      NV04_PGRAPH_SURFACE,
++      NV04_PGRAPH_STATE,
++      NV04_PGRAPH_BSWIZZLE2,
++      NV04_PGRAPH_BSWIZZLE5,
++      NV04_PGRAPH_BPIXEL,
++      NV04_PGRAPH_NOTIFY,
++      NV04_PGRAPH_PATT_COLOR0,
++      NV04_PGRAPH_PATT_COLOR1,
++      NV04_PGRAPH_PATT_COLORRAM+0x00,
++      NV04_PGRAPH_PATT_COLORRAM+0x01,
++      NV04_PGRAPH_PATT_COLORRAM+0x02,
++      NV04_PGRAPH_PATT_COLORRAM+0x03,
++      NV04_PGRAPH_PATT_COLORRAM+0x04,
++      NV04_PGRAPH_PATT_COLORRAM+0x05,
++      NV04_PGRAPH_PATT_COLORRAM+0x06,
++      NV04_PGRAPH_PATT_COLORRAM+0x07,
++      NV04_PGRAPH_PATT_COLORRAM+0x08,
++      NV04_PGRAPH_PATT_COLORRAM+0x09,
++      NV04_PGRAPH_PATT_COLORRAM+0x0A,
++      NV04_PGRAPH_PATT_COLORRAM+0x0B,
++      NV04_PGRAPH_PATT_COLORRAM+0x0C,
++      NV04_PGRAPH_PATT_COLORRAM+0x0D,
++      NV04_PGRAPH_PATT_COLORRAM+0x0E,
++      NV04_PGRAPH_PATT_COLORRAM+0x0F,
++      NV04_PGRAPH_PATT_COLORRAM+0x10,
++      NV04_PGRAPH_PATT_COLORRAM+0x11,
++      NV04_PGRAPH_PATT_COLORRAM+0x12,
++      NV04_PGRAPH_PATT_COLORRAM+0x13,
++      NV04_PGRAPH_PATT_COLORRAM+0x14,
++      NV04_PGRAPH_PATT_COLORRAM+0x15,
++      NV04_PGRAPH_PATT_COLORRAM+0x16,
++      NV04_PGRAPH_PATT_COLORRAM+0x17,
++      NV04_PGRAPH_PATT_COLORRAM+0x18,
++      NV04_PGRAPH_PATT_COLORRAM+0x19,
++      NV04_PGRAPH_PATT_COLORRAM+0x1A,
++      NV04_PGRAPH_PATT_COLORRAM+0x1B,
++      NV04_PGRAPH_PATT_COLORRAM+0x1C,
++      NV04_PGRAPH_PATT_COLORRAM+0x1D,
++      NV04_PGRAPH_PATT_COLORRAM+0x1E,
++      NV04_PGRAPH_PATT_COLORRAM+0x1F,
++      NV04_PGRAPH_PATT_COLORRAM+0x20,
++      NV04_PGRAPH_PATT_COLORRAM+0x21,
++      NV04_PGRAPH_PATT_COLORRAM+0x22,
++      NV04_PGRAPH_PATT_COLORRAM+0x23,
++      NV04_PGRAPH_PATT_COLORRAM+0x24,
++      NV04_PGRAPH_PATT_COLORRAM+0x25,
++      NV04_PGRAPH_PATT_COLORRAM+0x26,
++      NV04_PGRAPH_PATT_COLORRAM+0x27,
++      NV04_PGRAPH_PATT_COLORRAM+0x28,
++      NV04_PGRAPH_PATT_COLORRAM+0x29,
++      NV04_PGRAPH_PATT_COLORRAM+0x2A,
++      NV04_PGRAPH_PATT_COLORRAM+0x2B,
++      NV04_PGRAPH_PATT_COLORRAM+0x2C,
++      NV04_PGRAPH_PATT_COLORRAM+0x2D,
++      NV04_PGRAPH_PATT_COLORRAM+0x2E,
++      NV04_PGRAPH_PATT_COLORRAM+0x2F,
++      NV04_PGRAPH_PATT_COLORRAM+0x30,
++      NV04_PGRAPH_PATT_COLORRAM+0x31,
++      NV04_PGRAPH_PATT_COLORRAM+0x32,
++      NV04_PGRAPH_PATT_COLORRAM+0x33,
++      NV04_PGRAPH_PATT_COLORRAM+0x34,
++      NV04_PGRAPH_PATT_COLORRAM+0x35,
++      NV04_PGRAPH_PATT_COLORRAM+0x36,
++      NV04_PGRAPH_PATT_COLORRAM+0x37,
++      NV04_PGRAPH_PATT_COLORRAM+0x38,
++      NV04_PGRAPH_PATT_COLORRAM+0x39,
++      NV04_PGRAPH_PATT_COLORRAM+0x3A,
++      NV04_PGRAPH_PATT_COLORRAM+0x3B,
++      NV04_PGRAPH_PATT_COLORRAM+0x3C,
++      NV04_PGRAPH_PATT_COLORRAM+0x3D,
++      NV04_PGRAPH_PATT_COLORRAM+0x3E,
++      NV04_PGRAPH_PATT_COLORRAM+0x3F,
++      NV04_PGRAPH_PATTERN,
++      0x0040080c,
++      NV04_PGRAPH_PATTERN_SHAPE,
++      0x00400600,
++      NV04_PGRAPH_ROP3,
++      NV04_PGRAPH_CHROMA,
++      NV04_PGRAPH_BETA_AND,
++      NV04_PGRAPH_BETA_PREMULT,
++      NV04_PGRAPH_CONTROL0,
++      NV04_PGRAPH_CONTROL1,
++      NV04_PGRAPH_CONTROL2,
++      NV04_PGRAPH_BLEND,
++      NV04_PGRAPH_STORED_FMT,
++      NV04_PGRAPH_SOURCE_COLOR,
++      0x00400560,
++      0x00400568,
++      0x00400564,
++      0x0040056c,
++      0x00400400,
++      0x00400480,
++      0x00400404,
++      0x00400484,
++      0x00400408,
++      0x00400488,
++      0x0040040c,
++      0x0040048c,
++      0x00400410,
++      0x00400490,
++      0x00400414,
++      0x00400494,
++      0x00400418,
++      0x00400498,
++      0x0040041c,
++      0x0040049c,
++      0x00400420,
++      0x004004a0,
++      0x00400424,
++      0x004004a4,
++      0x00400428,
++      0x004004a8,
++      0x0040042c,
++      0x004004ac,
++      0x00400430,
++      0x004004b0,
++      0x00400434,
++      0x004004b4,
++      0x00400438,
++      0x004004b8,
++      0x0040043c,
++      0x004004bc,
++      0x00400440,
++      0x004004c0,
++      0x00400444,
++      0x004004c4,
++      0x00400448,
++      0x004004c8,
++      0x0040044c,
++      0x004004cc,
++      0x00400450,
++      0x004004d0,
++      0x00400454,
++      0x004004d4,
++      0x00400458,
++      0x004004d8,
++      0x0040045c,
++      0x004004dc,
++      0x00400460,
++      0x004004e0,
++      0x00400464,
++      0x004004e4,
++      0x00400468,
++      0x004004e8,
++      0x0040046c,
++      0x004004ec,
++      0x00400470,
++      0x004004f0,
++      0x00400474,
++      0x004004f4,
++      0x00400478,
++      0x004004f8,
++      0x0040047c,
++      0x004004fc,
++      0x0040053c,
++      0x00400544,
++      0x00400540,
++      0x00400548,
++      0x00400560,
++      0x00400568,
++      0x00400564,
++      0x0040056c,
++      0x00400534,
++      0x00400538,
++      0x00400514,
++      0x00400518,
++      0x0040051c,
++      0x00400520,
++      0x00400524,
++      0x00400528,
++      0x0040052c,
++      0x00400530,
++      0x00400d00,
++      0x00400d40,
++      0x00400d80,
++      0x00400d04,
++      0x00400d44,
++      0x00400d84,
++      0x00400d08,
++      0x00400d48,
++      0x00400d88,
++      0x00400d0c,
++      0x00400d4c,
++      0x00400d8c,
++      0x00400d10,
++      0x00400d50,
++      0x00400d90,
++      0x00400d14,
++      0x00400d54,
++      0x00400d94,
++      0x00400d18,
++      0x00400d58,
++      0x00400d98,
++      0x00400d1c,
++      0x00400d5c,
++      0x00400d9c,
++      0x00400d20,
++      0x00400d60,
++      0x00400da0,
++      0x00400d24,
++      0x00400d64,
++      0x00400da4,
++      0x00400d28,
++      0x00400d68,
++      0x00400da8,
++      0x00400d2c,
++      0x00400d6c,
++      0x00400dac,
++      0x00400d30,
++      0x00400d70,
++      0x00400db0,
++      0x00400d34,
++      0x00400d74,
++      0x00400db4,
++      0x00400d38,
++      0x00400d78,
++      0x00400db8,
++      0x00400d3c,
++      0x00400d7c,
++      0x00400dbc,
++      0x00400590,
++      0x00400594,
++      0x00400598,
++      0x0040059c,
++      0x004005a8,
++      0x004005ac,
++      0x004005b0,
++      0x004005b4,
++      0x004005c0,
++      0x004005c4,
++      0x004005c8,
++      0x004005cc,
++      0x004005d0,
++      0x004005d4,
++      0x004005d8,
++      0x004005dc,
++      0x004005e0,
++      NV04_PGRAPH_PASSTHRU_0,
++      NV04_PGRAPH_PASSTHRU_1,
++      NV04_PGRAPH_PASSTHRU_2,
++      NV04_PGRAPH_DVD_COLORFMT,
++      NV04_PGRAPH_SCALED_FORMAT,
++      NV04_PGRAPH_MISC24_0,
++      NV04_PGRAPH_MISC24_1,
++      NV04_PGRAPH_MISC24_2,
++      0x00400500,
++      0x00400504,
++      NV04_PGRAPH_VALID1,
++      NV04_PGRAPH_VALID2
++
++
++};
++
++struct graph_state {
++      int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])];
++};
++
++void nouveau_nv04_context_switch(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct nouveau_channel *next, *last;
++      int chid;
++
++      if (!dev) {
++              DRM_DEBUG("Invalid drm_device\n");
++              return;
++      }
++      dev_priv = dev->dev_private;
++      if (!dev_priv) {
++              DRM_DEBUG("Invalid drm_nouveau_private\n");
++              return;
++      }
++      if (!dev_priv->fifos) {
++              DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
++              return;
++      }
++
++      chid = engine->fifo.channel_id(dev);
++      next = dev_priv->fifos[chid];
++
++      if (!next) {
++              DRM_DEBUG("Invalid next channel\n");
++              return;
++      }
++
++      chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
++      last = dev_priv->fifos[chid];
++
++      if (!last) {
++              DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
++                        next->id);
++      } else {
++              DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",
++                       last->id, next->id);
++      }
++
++/*    NV_WRITE(NV03_PFIFO_CACHES, 0x0);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/
++      NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++
++      if (last)
++              nv04_graph_save_context(last);
++
++      nouveau_wait_for_idle(dev);
++
++      NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000);
++      NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24));
++
++      nouveau_wait_for_idle(dev);
++
++      nv04_graph_load_context(next);
++
++      NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24);
++      NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF);
++
++/*    NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++      NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1);
++      NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/
++      NV_WRITE(NV04_PGRAPH_FIFO,0x1);
++}
++
++int nv04_graph_create_context(struct nouveau_channel *chan) {
++      struct graph_state* pgraph_ctx;
++      DRM_DEBUG("nv04_graph_context_create %d\n", chan->id);
++
++      chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
++                                            DRM_MEM_DRIVER);
++
++      if (pgraph_ctx == NULL)
++              return -ENOMEM;
++
++      //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
++      pgraph_ctx->nv04[0] = 0x0001ffff;
++      /* is it really needed ??? */
++      //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
++      //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
++
++      return 0;
++}
++
++void nv04_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++
++      drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
++      chan->pgraph_ctx = NULL;
++}
++
++int nv04_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
++              NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
++
++      return 0;
++}
++
++int nv04_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
++              pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]);
++
++      return 0;
++}
++
++int nv04_graph_init(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      /* Enable PGRAPH interrupts */
++      NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_VALID1, 0);
++      NV_WRITE(NV04_PGRAPH_VALID2, 0);
++      /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
++      /*1231C000 blob, 001 haiku*/
++      //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100);
++      /*0x72111100 blob , 01 haiku*/
++      /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071);
++      /*haiku same*/
++
++      /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
++      /*haiku and blob 10d4*/
++
++      NV_WRITE(NV04_PGRAPH_STATE        , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_CTX_CONTROL  , 0x10010100);
++      NV_WRITE(NV04_PGRAPH_FIFO         , 0x00000001);
++
++      /* These don't belong here, they're part of a per-channel context */
++      NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
++
++      return 0;
++}
++
++void nv04_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_instmem.c git-nokia/drivers/gpu/drm-tungsten/nv04_instmem.c
+--- git/drivers/gpu/drm-tungsten/nv04_instmem.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_instmem.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,159 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++static void
++nv04_instmem_determine_amount(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      /* Figure out how much instance memory we need */
++      if (dev_priv->card_type >= NV_40) {
++              /* We'll want more instance memory than this on some NV4x cards.
++               * There's a 16MB aperture to play with that maps onto the end
++               * of vram.  For now, only reserve a small piece until we know
++               * more about what each chipset requires.
++               */
++              dev_priv->ramin_rsvd_vram = (1*1024* 1024);
++      } else {
++              /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
++               *     exist in vram on those cards as well?
++               */
++              dev_priv->ramin_rsvd_vram = (512*1024);
++      }
++      DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
++
++      /* Clear all of it, except the BIOS image that's in the first 64KiB */
++      for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4)
++              NV_WI32(i, 0x00000000);
++}
++
++static void
++nv04_instmem_configure_fixed_tables(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++
++      /* FIFO hash table (RAMHT)
++       *   use 4k hash table at RAMIN+0x10000
++       *   TODO: extend the hash table
++       */
++      dev_priv->ramht_offset = 0x10000;
++      dev_priv->ramht_bits   = 9;
++      dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
++      DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
++                                                dev_priv->ramht_size);
++
++      /* FIFO runout table (RAMRO) - 512k at 0x11200 */
++      dev_priv->ramro_offset = 0x11200;
++      dev_priv->ramro_size   = 512;
++      DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
++                                                dev_priv->ramro_size);
++
++      /* FIFO context table (RAMFC)
++       *   NV40  : Not sure exactly how to position RAMFC on some cards,
++       *           0x30002 seems to position it at RAMIN+0x20000 on these
++       *           cards.  RAMFC is 4kb (32 fifos, 128byte entries).
++       *   Others: Position RAMFC at RAMIN+0x11400
++       */
++      switch(dev_priv->card_type)
++      {
++              case NV_40:
++              case NV_44:
++                      dev_priv->ramfc_offset = 0x20000;
++                      dev_priv->ramfc_size   = engine->fifo.channels *
++                                               nouveau_fifo_ctx_size(dev);
++                      break;
++              case NV_30:
++              case NV_20:
++              case NV_17:
++              case NV_11:
++              case NV_10:
++              case NV_04:
++              default:
++                      dev_priv->ramfc_offset = 0x11400;
++                      dev_priv->ramfc_size   = engine->fifo.channels *
++                                               nouveau_fifo_ctx_size(dev);
++                      break;
++      }
++      DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
++                                                dev_priv->ramfc_size);
++}
++
++int nv04_instmem_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t offset;
++      int ret = 0;
++
++      nv04_instmem_determine_amount(dev);
++      nv04_instmem_configure_fixed_tables(dev);
++
++      /* Create a heap to manage RAMIN allocations, we don't allocate
++       * the space that was reserved for RAMHT/FC/RO.
++       */
++      offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
++
++      /* On my NV4E, there's *something* clobbering the 16KiB just after
++       * where we setup these fixed tables.  No idea what it is just yet,
++       * so reserve this space on all NV4X cards for now.
++       */
++      if (dev_priv->card_type >= NV_40)
++              offset += 16*1024;
++
++      ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
++                                  offset, dev_priv->ramin_rsvd_vram - offset);
++      if (ret) {
++              dev_priv->ramin_heap = NULL;
++              DRM_ERROR("Failed to init RAMIN heap\n");
++      }
++
++      return ret;
++}
++
++void
++nv04_instmem_takedown(struct drm_device *dev)
++{
++}
++
++int
++nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
++{
++      if (gpuobj->im_backing)
++              return -EINVAL;
++
++      return 0;
++}
++
++void
++nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (gpuobj && gpuobj->im_backing) {
++              if (gpuobj->im_bound)
++                      dev_priv->Engine.instmem.unbind(dev, gpuobj);
++              gpuobj->im_backing = NULL;
++      }
++}
++
++int
++nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      if (!gpuobj->im_pramin || gpuobj->im_bound)
++              return -EINVAL;
++
++      gpuobj->im_bound = 1;
++      return 0;
++}
++
++int
++nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      if (gpuobj->im_bound == 0)
++              return -EINVAL;
++
++      gpuobj->im_bound = 0;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_mc.c git-nokia/drivers/gpu/drm-tungsten/nv04_mc.c
+--- git/drivers/gpu/drm-tungsten/nv04_mc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_mc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,22 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_mc_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      /* Power up everything, resetting each individual unit will
++       * be done later if needed.
++       */
++      NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++      return 0;
++}
++
++void
++nv04_mc_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv04_timer.c git-nokia/drivers/gpu/drm-tungsten/nv04_timer.c
+--- git/drivers/gpu/drm-tungsten/nv04_timer.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv04_timer.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,53 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_timer_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000);
++      NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF);
++
++      /* Just use the pre-existing values when possible for now; these regs
++       * are not written in nv (driver writer missed a /4 on the address), and
++       * writing 8 and 3 to the correct regs breaks the timings on the LVDS
++       * hardware sequencing microcode.
++       * A correct solution (involving calculations with the GPU PLL) can
++       * be done when kernel modesetting lands
++       */
++      if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) {
++              NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008);
++              NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003);
++      }
++
++      return 0;
++}
++
++uint64_t
++nv04_timer_read(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t low;
++      /* From kmmio dumps on nv28 this looks like how the blob does this.
++       * It reads the high dword twice, before and after.
++       * The only explanation seems to be that the 64-bit timer counter
++       * advances between high and low dword reads and may corrupt the
++       * result. Not confirmed.
++       */
++      uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1);
++      uint32_t high1;
++      do {
++              high1 = high2;
++              low = NV_READ(NV04_PTIMER_TIME_0);
++              high2 = NV_READ(NV04_PTIMER_TIME_1);
++      } while(high1 != high2);
++      return (((uint64_t)high2) << 32) | (uint64_t)low;
++}
++
++void
++nv04_timer_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_fb.c git-nokia/drivers/gpu/drm-tungsten/nv10_fb.c
+--- git/drivers/gpu/drm-tungsten/nv10_fb.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_fb.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,25 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv10_fb_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t fb_bar_size;
++      int i;
++
++      fb_bar_size = drm_get_resource_len(dev, 0) - 1;
++      for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(NV10_PFB_TILE(i), 0);
++              NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
++      }
++
++      return 0;
++}
++
++void
++nv10_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv10_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv10_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,169 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++
++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
++                                       NV10_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
++                                       NV10_RAMFC_##offset/4)
++#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
++#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
++
++int
++nv10_fifo_channel_id(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++                      NV10_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv10_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
++                                              NV10_RAMFC__SIZE,
++                                              NVOBJ_FLAG_ZERO_ALLOC |
++                                              NVOBJ_FLAG_ZERO_FREE,
++                                              NULL, &chan->ramfc)))
++              return ret;
++
++      /* Fill entries that are seen filled in dumps of nvidia driver just
++       * after channel's is put into DMA mode
++       */
++      RAMFC_WR(DMA_PUT       , chan->pushbuf_base);
++      RAMFC_WR(DMA_GET       , chan->pushbuf_base);
++      RAMFC_WR(DMA_INSTANCE  , chan->pushbuf->instance >> 4);
++      RAMFC_WR(DMA_FETCH     , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++                               NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                               0);
++
++      /* enable the fifo dma operation */
++      NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
++      return 0;
++}
++
++void
++nv10_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv10_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++               NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET          , RAMFC_RD(DMA_GET));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT          , RAMFC_RD(DMA_PUT));
++      NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT          , RAMFC_RD(REF_CNT));
++
++      tmp = RAMFC_RD(DMA_INSTANCE);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE     , tmp & 0xFFFF);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT       , tmp >> 16);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE        , RAMFC_RD(DMA_STATE));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH        , RAMFC_RD(DMA_FETCH));
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE           , RAMFC_RD(ENGINE));
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1            , RAMFC_RD(PULL1_ENGINE));
++
++      if (dev_priv->chipset >= 0x17) {
++              NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE,
++                       RAMFC_RD(ACQUIRE_VALUE));
++              NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP,
++                       RAMFC_RD(ACQUIRE_TIMESTAMP));
++              NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT,
++                       RAMFC_RD(ACQUIRE_TIMEOUT));
++              NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE,
++                       RAMFC_RD(SEMAPHORE));
++              NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE,
++                       RAMFC_RD(DMA_SUBROUTINE));
++      }
++
++      /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
++      tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++      return 0;
++}
++
++int
++nv10_fifo_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      RAMFC_WR(DMA_PUT          , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      RAMFC_WR(DMA_GET          , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++      RAMFC_WR(REF_CNT          , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
++
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
++      tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
++      RAMFC_WR(DMA_INSTANCE     , tmp);
++
++      RAMFC_WR(DMA_STATE        , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
++      RAMFC_WR(DMA_FETCH        , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
++      RAMFC_WR(ENGINE           , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
++      RAMFC_WR(PULL1_ENGINE     , NV_READ(NV04_PFIFO_CACHE1_PULL1));
++
++      if (dev_priv->chipset >= 0x17) {
++              RAMFC_WR(ACQUIRE_VALUE,
++                       NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
++              RAMFC_WR(ACQUIRE_TIMESTAMP,
++                       NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
++              RAMFC_WR(ACQUIRE_TIMEOUT,
++                       NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
++              RAMFC_WR(SEMAPHORE,
++                       NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
++              RAMFC_WR(DMA_SUBROUTINE,
++                       NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++      }
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv10_graph.c git-nokia/drivers/gpu/drm-tungsten/nv10_graph.c
+--- git/drivers/gpu/drm-tungsten/nv10_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv10_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,914 @@
++/*
++ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++
++#define NV10_FIFO_NUMBER 32
++
++struct pipe_state {
++      uint32_t pipe_0x0000[0x040/4];
++      uint32_t pipe_0x0040[0x010/4];
++      uint32_t pipe_0x0200[0x0c0/4];
++      uint32_t pipe_0x4400[0x080/4];
++      uint32_t pipe_0x6400[0x3b0/4];
++      uint32_t pipe_0x6800[0x2f0/4];
++      uint32_t pipe_0x6c00[0x030/4];
++      uint32_t pipe_0x7000[0x130/4];
++      uint32_t pipe_0x7400[0x0c0/4];
++      uint32_t pipe_0x7800[0x0c0/4];
++};
++
++static int nv10_graph_ctx_regs [] = {
++NV10_PGRAPH_CTX_SWITCH1,
++NV10_PGRAPH_CTX_SWITCH2,
++NV10_PGRAPH_CTX_SWITCH3,
++NV10_PGRAPH_CTX_SWITCH4,
++NV10_PGRAPH_CTX_SWITCH5,
++NV10_PGRAPH_CTX_CACHE1,       /* 8 values from 0x400160 to 0x40017c */
++NV10_PGRAPH_CTX_CACHE2,       /* 8 values from 0x400180 to 0x40019c */
++NV10_PGRAPH_CTX_CACHE3,       /* 8 values from 0x4001a0 to 0x4001bc */
++NV10_PGRAPH_CTX_CACHE4,       /* 8 values from 0x4001c0 to 0x4001dc */
++NV10_PGRAPH_CTX_CACHE5,       /* 8 values from 0x4001e0 to 0x4001fc */
++0x00400164,
++0x00400184,
++0x004001a4,
++0x004001c4,
++0x004001e4,
++0x00400168,
++0x00400188,
++0x004001a8,
++0x004001c8,
++0x004001e8,
++0x0040016c,
++0x0040018c,
++0x004001ac,
++0x004001cc,
++0x004001ec,
++0x00400170,
++0x00400190,
++0x004001b0,
++0x004001d0,
++0x004001f0,
++0x00400174,
++0x00400194,
++0x004001b4,
++0x004001d4,
++0x004001f4,
++0x00400178,
++0x00400198,
++0x004001b8,
++0x004001d8,
++0x004001f8,
++0x0040017c,
++0x0040019c,
++0x004001bc,
++0x004001dc,
++0x004001fc,
++NV10_PGRAPH_CTX_USER,
++NV04_PGRAPH_DMA_START_0,
++NV04_PGRAPH_DMA_START_1,
++NV04_PGRAPH_DMA_LENGTH,
++NV04_PGRAPH_DMA_MISC,
++NV10_PGRAPH_DMA_PITCH,
++NV04_PGRAPH_BOFFSET0,
++NV04_PGRAPH_BBASE0,
++NV04_PGRAPH_BLIMIT0,
++NV04_PGRAPH_BOFFSET1,
++NV04_PGRAPH_BBASE1,
++NV04_PGRAPH_BLIMIT1,
++NV04_PGRAPH_BOFFSET2,
++NV04_PGRAPH_BBASE2,
++NV04_PGRAPH_BLIMIT2,
++NV04_PGRAPH_BOFFSET3,
++NV04_PGRAPH_BBASE3,
++NV04_PGRAPH_BLIMIT3,
++NV04_PGRAPH_BOFFSET4,
++NV04_PGRAPH_BBASE4,
++NV04_PGRAPH_BLIMIT4,
++NV04_PGRAPH_BOFFSET5,
++NV04_PGRAPH_BBASE5,
++NV04_PGRAPH_BLIMIT5,
++NV04_PGRAPH_BPITCH0,
++NV04_PGRAPH_BPITCH1,
++NV04_PGRAPH_BPITCH2,
++NV04_PGRAPH_BPITCH3,
++NV04_PGRAPH_BPITCH4,
++NV10_PGRAPH_SURFACE,
++NV10_PGRAPH_STATE,
++NV04_PGRAPH_BSWIZZLE2,
++NV04_PGRAPH_BSWIZZLE5,
++NV04_PGRAPH_BPIXEL,
++NV10_PGRAPH_NOTIFY,
++NV04_PGRAPH_PATT_COLOR0,
++NV04_PGRAPH_PATT_COLOR1,
++NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
++0x00400904,
++0x00400908,
++0x0040090c,
++0x00400910,
++0x00400914,
++0x00400918,
++0x0040091c,
++0x00400920,
++0x00400924,
++0x00400928,
++0x0040092c,
++0x00400930,
++0x00400934,
++0x00400938,
++0x0040093c,
++0x00400940,
++0x00400944,
++0x00400948,
++0x0040094c,
++0x00400950,
++0x00400954,
++0x00400958,
++0x0040095c,
++0x00400960,
++0x00400964,
++0x00400968,
++0x0040096c,
++0x00400970,
++0x00400974,
++0x00400978,
++0x0040097c,
++0x00400980,
++0x00400984,
++0x00400988,
++0x0040098c,
++0x00400990,
++0x00400994,
++0x00400998,
++0x0040099c,
++0x004009a0,
++0x004009a4,
++0x004009a8,
++0x004009ac,
++0x004009b0,
++0x004009b4,
++0x004009b8,
++0x004009bc,
++0x004009c0,
++0x004009c4,
++0x004009c8,
++0x004009cc,
++0x004009d0,
++0x004009d4,
++0x004009d8,
++0x004009dc,
++0x004009e0,
++0x004009e4,
++0x004009e8,
++0x004009ec,
++0x004009f0,
++0x004009f4,
++0x004009f8,
++0x004009fc,
++NV04_PGRAPH_PATTERN,  /* 2 values from 0x400808 to 0x40080c */
++0x0040080c,
++NV04_PGRAPH_PATTERN_SHAPE,
++NV03_PGRAPH_MONO_COLOR0,
++NV04_PGRAPH_ROP3,
++NV04_PGRAPH_CHROMA,
++NV04_PGRAPH_BETA_AND,
++NV04_PGRAPH_BETA_PREMULT,
++0x00400e70,
++0x00400e74,
++0x00400e78,
++0x00400e7c,
++0x00400e80,
++0x00400e84,
++0x00400e88,
++0x00400e8c,
++0x00400ea0,
++0x00400ea4,
++0x00400ea8,
++0x00400e90,
++0x00400e94,
++0x00400e98,
++0x00400e9c,
++NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00 to 0x400f1c */
++NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20 to 0x400f3c */
++0x00400f04,
++0x00400f24,
++0x00400f08,
++0x00400f28,
++0x00400f0c,
++0x00400f2c,
++0x00400f10,
++0x00400f30,
++0x00400f14,
++0x00400f34,
++0x00400f18,
++0x00400f38,
++0x00400f1c,
++0x00400f3c,
++NV10_PGRAPH_XFMODE0,
++NV10_PGRAPH_XFMODE1,
++NV10_PGRAPH_GLOBALSTATE0,
++NV10_PGRAPH_GLOBALSTATE1,
++NV04_PGRAPH_STORED_FMT,
++NV04_PGRAPH_SOURCE_COLOR,
++NV03_PGRAPH_ABS_X_RAM,        /* 32 values from 0x400400 to 0x40047c */
++NV03_PGRAPH_ABS_Y_RAM,        /* 32 values from 0x400480 to 0x4004fc */
++0x00400404,
++0x00400484,
++0x00400408,
++0x00400488,
++0x0040040c,
++0x0040048c,
++0x00400410,
++0x00400490,
++0x00400414,
++0x00400494,
++0x00400418,
++0x00400498,
++0x0040041c,
++0x0040049c,
++0x00400420,
++0x004004a0,
++0x00400424,
++0x004004a4,
++0x00400428,
++0x004004a8,
++0x0040042c,
++0x004004ac,
++0x00400430,
++0x004004b0,
++0x00400434,
++0x004004b4,
++0x00400438,
++0x004004b8,
++0x0040043c,
++0x004004bc,
++0x00400440,
++0x004004c0,
++0x00400444,
++0x004004c4,
++0x00400448,
++0x004004c8,
++0x0040044c,
++0x004004cc,
++0x00400450,
++0x004004d0,
++0x00400454,
++0x004004d4,
++0x00400458,
++0x004004d8,
++0x0040045c,
++0x004004dc,
++0x00400460,
++0x004004e0,
++0x00400464,
++0x004004e4,
++0x00400468,
++0x004004e8,
++0x0040046c,
++0x004004ec,
++0x00400470,
++0x004004f0,
++0x00400474,
++0x004004f4,
++0x00400478,
++0x004004f8,
++0x0040047c,
++0x004004fc,
++NV03_PGRAPH_ABS_UCLIP_XMIN,
++NV03_PGRAPH_ABS_UCLIP_XMAX,
++NV03_PGRAPH_ABS_UCLIP_YMIN,
++NV03_PGRAPH_ABS_UCLIP_YMAX,
++0x00400550,
++0x00400558,
++0x00400554,
++0x0040055c,
++NV03_PGRAPH_ABS_UCLIPA_XMIN,
++NV03_PGRAPH_ABS_UCLIPA_XMAX,
++NV03_PGRAPH_ABS_UCLIPA_YMIN,
++NV03_PGRAPH_ABS_UCLIPA_YMAX,
++NV03_PGRAPH_ABS_ICLIP_XMAX,
++NV03_PGRAPH_ABS_ICLIP_YMAX,
++NV03_PGRAPH_XY_LOGIC_MISC0,
++NV03_PGRAPH_XY_LOGIC_MISC1,
++NV03_PGRAPH_XY_LOGIC_MISC2,
++NV03_PGRAPH_XY_LOGIC_MISC3,
++NV03_PGRAPH_CLIPX_0,
++NV03_PGRAPH_CLIPX_1,
++NV03_PGRAPH_CLIPY_0,
++NV03_PGRAPH_CLIPY_1,
++NV10_PGRAPH_COMBINER0_IN_ALPHA,
++NV10_PGRAPH_COMBINER1_IN_ALPHA,
++NV10_PGRAPH_COMBINER0_IN_RGB,
++NV10_PGRAPH_COMBINER1_IN_RGB,
++NV10_PGRAPH_COMBINER_COLOR0,
++NV10_PGRAPH_COMBINER_COLOR1,
++NV10_PGRAPH_COMBINER0_OUT_ALPHA,
++NV10_PGRAPH_COMBINER1_OUT_ALPHA,
++NV10_PGRAPH_COMBINER0_OUT_RGB,
++NV10_PGRAPH_COMBINER1_OUT_RGB,
++NV10_PGRAPH_COMBINER_FINAL0,
++NV10_PGRAPH_COMBINER_FINAL1,
++0x00400e00,
++0x00400e04,
++0x00400e08,
++0x00400e0c,
++0x00400e10,
++0x00400e14,
++0x00400e18,
++0x00400e1c,
++0x00400e20,
++0x00400e24,
++0x00400e28,
++0x00400e2c,
++0x00400e30,
++0x00400e34,
++0x00400e38,
++0x00400e3c,
++NV04_PGRAPH_PASSTHRU_0,
++NV04_PGRAPH_PASSTHRU_1,
++NV04_PGRAPH_PASSTHRU_2,
++NV10_PGRAPH_DIMX_TEXTURE,
++NV10_PGRAPH_WDIMX_TEXTURE,
++NV10_PGRAPH_DVD_COLORFMT,
++NV10_PGRAPH_SCALED_FORMAT,
++NV04_PGRAPH_MISC24_0,
++NV04_PGRAPH_MISC24_1,
++NV04_PGRAPH_MISC24_2,
++NV03_PGRAPH_X_MISC,
++NV03_PGRAPH_Y_MISC,
++NV04_PGRAPH_VALID1,
++NV04_PGRAPH_VALID2,
++};
++
++static int nv17_graph_ctx_regs [] = {
++NV10_PGRAPH_DEBUG_4,
++0x004006b0,
++0x00400eac,
++0x00400eb0,
++0x00400eb4,
++0x00400eb8,
++0x00400ebc,
++0x00400ec0,
++0x00400ec4,
++0x00400ec8,
++0x00400ecc,
++0x00400ed0,
++0x00400ed4,
++0x00400ed8,
++0x00400edc,
++0x00400ee0,
++0x00400a00,
++0x00400a04,
++};
++
++struct graph_state {
++      int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])];
++      int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])];
++      struct pipe_state pipe_state;
++};
++
++static void nv10_graph_save_pipe(struct nouveau_channel *chan) {
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++      int i;
++#define PIPE_SAVE(addr) \
++      do { \
++              NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
++              for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
++                      fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \
++      } while (0)
++
++      PIPE_SAVE(0x4400);
++      PIPE_SAVE(0x0200);
++      PIPE_SAVE(0x6400);
++      PIPE_SAVE(0x6800);
++      PIPE_SAVE(0x6c00);
++      PIPE_SAVE(0x7000);
++      PIPE_SAVE(0x7400);
++      PIPE_SAVE(0x7800);
++      PIPE_SAVE(0x0040);
++      PIPE_SAVE(0x0000);
++
++#undef PIPE_SAVE
++}
++
++static void nv10_graph_load_pipe(struct nouveau_channel *chan) {
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++      int i;
++      uint32_t xfmode0, xfmode1;
++#define PIPE_RESTORE(addr) \
++      do { \
++              NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
++              for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
++                      NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
++      } while (0)
++
++
++      nouveau_wait_for_idle(dev);
++      /* XXX check haiku comments */
++      xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0);
++      xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1);
++      NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000);
++      NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
++      for (i = 0; i < 4; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++      for (i = 0; i < 4; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
++      for (i = 0; i < 3; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
++      for (i = 0; i < 3; i++)
++              NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++      NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
++      NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008);
++
++
++      PIPE_RESTORE(0x0200);
++      nouveau_wait_for_idle(dev);
++
++      /* restore XFMODE */
++      NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0);
++      NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1);
++      PIPE_RESTORE(0x6400);
++      PIPE_RESTORE(0x6800);
++      PIPE_RESTORE(0x6c00);
++      PIPE_RESTORE(0x7000);
++      PIPE_RESTORE(0x7400);
++      PIPE_RESTORE(0x7800);
++      PIPE_RESTORE(0x4400);
++      PIPE_RESTORE(0x0000);
++      PIPE_RESTORE(0x0040);
++      nouveau_wait_for_idle(dev);
++
++#undef PIPE_RESTORE
++}
++
++static void nv10_graph_create_pipe(struct nouveau_channel *chan) {
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++      uint32_t *fifo_pipe_state_addr;
++      int i;
++#define PIPE_INIT(addr) \
++      do { \
++              fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
++      } while (0)
++#define PIPE_INIT_END(addr) \
++      do { \
++              if (fifo_pipe_state_addr != \
++                              sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \
++                      DRM_ERROR("incomplete pipe init for 0x%x :  %p/%p\n", addr, fifo_pipe_state_addr, \
++                                      sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \
++      } while (0)
++#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
++
++      PIPE_INIT(0x0200);
++      for (i = 0; i < 48; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x0200);
++
++      PIPE_INIT(0x6400);
++      for (i = 0; i < 211; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x40000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f000000);
++      NV_WRITE_PIPE_INIT(0x3f000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      PIPE_INIT_END(0x6400);
++
++      PIPE_INIT(0x6800);
++      for (i = 0; i < 162; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x3f800000);
++      for (i = 0; i < 25; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x6800);
++
++      PIPE_INIT(0x6c00);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0xbf800000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x6c00);
++
++      PIPE_INIT(0x7000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x00000000);
++      NV_WRITE_PIPE_INIT(0x7149f2ca);
++      for (i = 0; i < 35; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x7000);
++
++      PIPE_INIT(0x7400);
++      for (i = 0; i < 48; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x7400);
++
++      PIPE_INIT(0x7800);
++      for (i = 0; i < 48; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x7800);
++
++      PIPE_INIT(0x4400);
++      for (i = 0; i < 32; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x4400);
++
++      PIPE_INIT(0x0000);
++      for (i = 0; i < 16; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x0000);
++
++      PIPE_INIT(0x0040);
++      for (i = 0; i < 4; i++)
++              NV_WRITE_PIPE_INIT(0x00000000);
++      PIPE_INIT_END(0x0040);
++
++#undef PIPE_INIT
++#undef PIPE_INIT_END
++#undef NV_WRITE_PIPE_INIT
++}
++
++static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
++{
++      int i;
++      for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) {
++              if (nv10_graph_ctx_regs[i] == reg)
++                      return i;
++      }
++      DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg);
++      return -1;
++}
++
++static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
++{
++      int i;
++      for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) {
++              if (nv17_graph_ctx_regs[i] == reg)
++                      return i;
++      }
++      DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg);
++      return -1;
++}
++
++int nv10_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
++              NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
++      if (dev_priv->chipset>=0x17) {
++              for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
++                      NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]);
++      }
++
++      nv10_graph_load_pipe(chan);
++
++      return 0;
++}
++
++int nv10_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int i;
++
++      for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
++              pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]);
++      if (dev_priv->chipset>=0x17) {
++              for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
++                      pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]);
++      }
++
++      nv10_graph_save_pipe(chan);
++
++      return 0;
++}
++
++void nouveau_nv10_context_switch(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv;
++      struct nouveau_engine *engine;
++      struct nouveau_channel *next, *last;
++      int chid;
++
++      if (!dev) {
++              DRM_DEBUG("Invalid drm_device\n");
++              return;
++      }
++      dev_priv = dev->dev_private;
++      if (!dev_priv) {
++              DRM_DEBUG("Invalid drm_nouveau_private\n");
++              return;
++      }
++      if (!dev_priv->fifos) {
++              DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
++              return;
++      }
++      engine = &dev_priv->Engine;
++
++      chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) &
++              (engine->fifo.channels - 1);
++      next = dev_priv->fifos[chid];
++
++      if (!next) {
++              DRM_ERROR("Invalid next channel\n");
++              return;
++      }
++
++      chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & 
++              (engine->fifo.channels - 1);
++      last = dev_priv->fifos[chid];
++
++      if (!last) {
++              DRM_INFO("WARNING: Invalid last channel, switch to %x\n",
++                        next->id);
++      } else {
++              DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
++                       last->id, next->id);
++      }
++
++      NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++      if (last) {
++              nouveau_wait_for_idle(dev);
++              nv10_graph_save_context(last);
++      }
++
++      nouveau_wait_for_idle(dev);
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
++
++      nouveau_wait_for_idle(dev);
++
++      nv10_graph_load_context(next);
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO,0x1);
++}
++
++#define NV_WRITE_CTX(reg, val) do { \
++      int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
++      if (offset > 0) \
++              pgraph_ctx->nv10[offset] = val; \
++      } while (0)
++
++#define NV17_WRITE_CTX(reg, val) do { \
++      int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
++      if (offset > 0) \
++              pgraph_ctx->nv17[offset] = val; \
++      } while (0)
++
++int nv10_graph_create_context(struct nouveau_channel *chan) {
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct graph_state* pgraph_ctx;
++
++      DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);
++
++      chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
++                                            DRM_MEM_DRIVER);
++
++      if (pgraph_ctx == NULL)
++              return -ENOMEM;
++
++      /* mmio trace suggest that should be done in ddx with methods/objects */
++#if 0
++      uint32_t tmp, vramsz;
++      /* per channel init from ddx */
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      /*XXX the original ddx code, does this in 2 steps :
++       * tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++       * NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++       * tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++       * NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++       */
++      tmp |= 0x00020100;
++      NV_WRITE_CTX(NV10_PGRAPH_SURFACE, tmp);
++
++      vramsz = drm_get_resource_len(dev, 0) - 1;
++      NV_WRITE_CTX(NV04_PGRAPH_BOFFSET0, 0);
++      NV_WRITE_CTX(NV04_PGRAPH_BOFFSET1, 0);
++      NV_WRITE_CTX(NV04_PGRAPH_BLIMIT0 , vramsz);
++      NV_WRITE_CTX(NV04_PGRAPH_BLIMIT1 , vramsz);
++
++      NV_WRITE_CTX(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
++      NV_WRITE_CTX(NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
++
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++#endif
++
++      NV_WRITE_CTX(0x00400e88, 0x08000000);
++      NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
++      NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
++      NV_WRITE_CTX(0x00400e10, 0x00001000);
++      NV_WRITE_CTX(0x00400e14, 0x00001000);
++      NV_WRITE_CTX(0x00400e30, 0x00080008);
++      NV_WRITE_CTX(0x00400e34, 0x00080008);
++      if (dev_priv->chipset>=0x17) {
++              /* is it really needed ??? */
++              NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
++              NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
++              NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
++              NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
++              NV17_WRITE_CTX(0x00400ec0, 0x00000080);
++              NV17_WRITE_CTX(0x00400ed0, 0x00000080);
++      }
++      NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
++
++      nv10_graph_create_pipe(chan);
++      return 0;
++}
++
++void nv10_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      struct graph_state* pgraph_ctx = chan->pgraph_ctx;
++      int chid;
++
++      drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
++      chan->pgraph_ctx = NULL;
++
++      chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
++
++      /* This code seems to corrupt the 3D pipe, but blob seems to do similar things ????
++       */
++#if 0
++      /* does this avoid a potential context switch while we are written graph
++       * reg, or we should mask graph interrupt ???
++       */
++      NV_WRITE(NV04_PGRAPH_FIFO,0x0);
++      if (chid == chan->id) {
++              DRM_INFO("cleanning a channel with graph in current context\n");
++              nouveau_wait_for_idle(dev);
++              DRM_INFO("reseting current graph context\n");
++              /* can't be call here because of dynamic mem alloc */
++              //nv10_graph_create_context(chan);
++              nv10_graph_load_context(chan);
++      }
++      NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
++#else
++      if (chid == chan->id) {
++              DRM_INFO("cleanning a channel with graph in current context\n");
++      }
++#endif
++}
++
++int nv10_graph_init(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
++      //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
++      NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
++                                    (1<<29) |
++                                    (1<<31));
++      if (dev_priv->chipset>=0x17) {
++              NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
++              NV_WRITE(0x004006b0, 0x40000020);
++      }
++      else
++              NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
++
++      /* copy tile info from PFB */
++      for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
++              NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
++              NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
++              NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
++      }
++
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      return 0;
++}
++
++void nv10_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv20_graph.c git-nokia/drivers/gpu/drm-tungsten/nv20_graph.c
+--- git/drivers/gpu/drm-tungsten/nv20_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv20_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,913 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++/*
++ * NV20
++ * -----
++ * There are 3 families :
++ * NV20 is 0x10de:0x020*
++ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
++ * NV2A is 0x10de:0x02A0
++ *
++ * NV30
++ * -----
++ * There are 3 families :
++ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
++ * NV34 is 0x10de:0x032*
++ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
++ *
++ * Not seen in the wild, no dumps (probably NV35) :
++ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
++ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
++ *
++ */
++
++#define NV20_GRCTX_SIZE (3580*4)
++#define NV25_GRCTX_SIZE (3529*4)
++#define NV2A_GRCTX_SIZE (3500*4)
++
++#define NV30_31_GRCTX_SIZE (24392)
++#define NV34_GRCTX_SIZE    (18140)
++#define NV35_36_GRCTX_SIZE (22396)
++
++static void nv20_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++/*
++write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements:
+++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
+++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++
+++0x00740c1c: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000
+++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000
+++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303
+++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000
+++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000
+++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008
+++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
+++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
+++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000
++
+++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000
+++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++...
++*/
++      INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101);
++      INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111);
++      INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8);
++      for (i = 0; i < 4; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008);
++      for (i = 0; i < 16; ++i)
++              INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000);
++      INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff);
++      INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001);
++      INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000);
++      INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001);
++      INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000);
++      INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000);
++
++/*
++...
+++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++...
+++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++...
++*/
++      for (i = 0; i < 0x880; i += 0x10) {
++              INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9);
++              INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c);
++              INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b);
++      }
++
++/*
++write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements:
+++0x00742fbc: 3f800000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x281c/4), 0x3f800000);
++
++/*
++write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements:
+++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000
+++0x0074301c: 00000000 bf800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000);
++      INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000);
++      INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000);
++      INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000);
++      INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000);
++      INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000);
++      INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000);
++
++/*
++write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements:
+++0x00742fcc: 00000000 3f800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000);
++
++/*
++write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements:
+++0x0074302c: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements:
+++0x00743c9c: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements:
+++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000);
++
++/*
++write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements:
+++0x00743c6c: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements:
+++0x00743ccc: 00000000 000003f8 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8);
++
++/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */
++      INSTANCE_WR(ctx, 0x3540/4, 0x002fe000);
++
++/*
++write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements:
+++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c
++*/
++      for (i = 0; i < 8; ++i)
++              INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c);
++}
++
++static void nv2a_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x33c/4, 0xffff0000);
++      for(i = 0x3a0; i< 0x3a8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x47c/4, 0x00000101);
++      INSTANCE_WR(ctx, 0x490/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x4a8/4, 0x44400000);
++      for(i = 0x4d4; i< 0x4e4; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x4f4; i< 0x504; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080000);
++      for(i = 0x50c; i< 0x51c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x51c; i< 0x52c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x000105b8);
++      for(i = 0x52c; i< 0x53c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for(i = 0x55c; i< 0x59c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x5fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x604/4, 0x00004000);
++      INSTANCE_WR(ctx, 0x610/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x618/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x61c/4, 0x00010000);
++
++      for (i=0x1a9c; i <= 0x22fc/4; i += 32) {
++              INSTANCE_WR(ctx, i/4    , 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++
++      INSTANCE_WR(ctx, 0x269c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26dc/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x26ec/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x2700/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x3024/4, 0x000fe000);
++      INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8);
++      INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000);
++      for(i = 0x341c; i< 0x343c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x001c527c);
++}
++
++static void nv25_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++/*
++write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements:
+++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
+++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++
+++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000
+++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000
+++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000
+++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303
+++0x00740c3c: 00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000
+++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8
++
+++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000
+++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000
+++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
+++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000
+++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d1c: 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101);
++      INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111);
++      INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080);
++      INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001);
++      INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000);
++      INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000);
++      INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303);
++      INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000);
++      INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000);
++      INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8);
++      INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008);
++      INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008);
++      INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008);
++      INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008);
++      for (i=0; i<16; ++i)
++              INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000);
++      INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff);
++
++/*
++write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements:
+++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0
+++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000
+++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000
+++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++...
+++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++...
+++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000
+++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080);
++      INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000);
++      INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040);
++      INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080);
++      INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0);
++      INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001);
++      INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000);
++      INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001);
++      INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000);
++      INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000);
++      for (i=0; i < 0x880/4; i+=4) {
++              INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9);
++              INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c);
++              INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b);
++      }
++
++/*
++write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements:
+++0x00742e24: 3f800000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x2704/4), 0x3f800000);
++
++/*
++write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements:
+++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000
+++0x00742e84: 00000000 bf800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000);
++      INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000);
++      INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000);
++      INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000);
++      INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000);
++      INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000);
++      INSTANCE_WR(ctx, (0x2744/4)+9, 0xbf800000);
++
++/*
++write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 (0x4) elements:
+++0x00742e34: 00000000 3f800000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000);
++
++/*
++write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements:
+++0x00742e94: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements:
+++0x00743804: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements:
+++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000);
++
++/*
++write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements:
+++0x007437d4: 00000000 00000000 00000000 00000000
++write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements:
+++0x00743824: 00000000 000003f8 00000000 00000000
++*/
++      INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8);
++
++/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */
++      INSTANCE_WR(ctx, 0x3468/4, 0x002fe000);
++
++/*
++write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements:
+++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c
++*/
++      for (i=0; i<8; ++i)
++              INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c);
++}
++
++static void nv30_31_graph_context_init(struct drm_device *dev,
++                                       struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x410/4, 0x00000101);
++      INSTANCE_WR(ctx, 0x424/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x428/4, 0x00000060);
++      INSTANCE_WR(ctx, 0x444/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x448/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x44c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x460/4, 0x44400000);
++      INSTANCE_WR(ctx, 0x48c/4, 0xffff0000);
++      for(i = 0x4e0; i< 0x4e8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x4ec/4, 0x00011100);
++      for(i = 0x508; i< 0x548; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x58c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x590/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x594/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x598/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000);
++      for(i = 0x600; i< 0x640; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00010588);
++      for(i = 0x640; i< 0x680; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x6c0; i< 0x700; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0008aae4);
++      for(i = 0x700; i< 0x740; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x740; i< 0x780; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x85c/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x860/4, 0x00010000);
++      for(i = 0x864; i< 0x874; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00040004);
++      for(i = 0x1f18; i<= 0x3088 ; i+= 16) {
++              INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++      for(i = 0x30b8; i< 0x30c8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x344c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3808/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x381c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3848/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x384c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3850/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x3858/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x385c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3864/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);
++}
++
++static void nv34_graph_context_init(struct drm_device *dev,
++                                    struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x40c/4, 0x01000101);
++      INSTANCE_WR(ctx, 0x420/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x424/4, 0x00000060);
++      INSTANCE_WR(ctx, 0x440/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x448/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
++      INSTANCE_WR(ctx, 0x480/4, 0xffff0000);
++      for(i = 0x4d4; i< 0x4dc; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x4e0/4, 0x00011100);
++      for(i = 0x4fc; i< 0x53c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x57c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x580/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x584/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x588/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000);
++      for(i = 0x5f0; i< 0x630; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00010588);
++      for(i = 0x630; i< 0x670; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x6b0; i< 0x6f0; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0008aae4);
++      for(i = 0x6f0; i< 0x730; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x730; i< 0x770; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x850/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x854/4, 0x00010000);
++      for(i = 0x858; i< 0x868; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00040004);
++      for(i = 0x15ac; i<= 0x271c ; i+= 16) {
++              INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++      for(i = 0x274c; i< 0x275c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2edc/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x2eec/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000);
++}
++
++static void nv35_36_graph_context_init(struct drm_device *dev,
++                                       struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x40c/4, 0x00000101);
++      INSTANCE_WR(ctx, 0x420/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x424/4, 0x00000060);
++      INSTANCE_WR(ctx, 0x440/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x448/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
++      INSTANCE_WR(ctx, 0x488/4, 0xffff0000);
++      for(i = 0x4dc; i< 0x4e4; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x4e8/4, 0x00011100);
++      for(i = 0x504; i< 0x544; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x588/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x58c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x590/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x594/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000);
++      for(i = 0x604; i< 0x644; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00010588);
++      for(i = 0x644; i< 0x684; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00030303);
++      for(i = 0x6c4; i< 0x704; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0008aae4);
++      for(i = 0x704; i< 0x744; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for(i = 0x744; i< 0x784; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x860/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x864/4, 0x00010000);
++      for(i = 0x868; i< 0x878; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00040004);
++      for(i = 0x1f1c; i<= 0x308c ; i+= 16) {
++              INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
++              INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
++              INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
++      }
++      for(i = 0x30bc; i< 0x30cc; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x3450/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x380c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3820/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x384c/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x3850/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3854/4, 0x3f000000);
++      INSTANCE_WR(ctx, 0x385c/4, 0x40000000);
++      INSTANCE_WR(ctx, 0x3860/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x3868/4, 0xbf800000);
++      INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);
++}
++
++int nv20_graph_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
++      unsigned int ctx_size;
++      unsigned int idoffs = 0x28/4;
++      int ret;
++
++      switch (dev_priv->chipset) {
++      case 0x20:
++              ctx_size = NV20_GRCTX_SIZE;
++              ctx_init = nv20_graph_context_init;
++              idoffs = 0;
++              break;
++      case 0x25:
++      case 0x28:
++              ctx_size = NV25_GRCTX_SIZE;
++              ctx_init = nv25_graph_context_init;
++              break;
++      case 0x2a:
++              ctx_size = NV2A_GRCTX_SIZE;
++              ctx_init = nv2a_graph_context_init;
++              idoffs = 0;
++              break;
++      case 0x30:
++      case 0x31:
++              ctx_size = NV30_31_GRCTX_SIZE;
++              ctx_init = nv30_31_graph_context_init;
++              break;
++      case 0x34:
++              ctx_size = NV34_GRCTX_SIZE;
++              ctx_init = nv34_graph_context_init;
++              break;
++      case 0x35:
++      case 0x36:
++              ctx_size = NV35_36_GRCTX_SIZE;
++              ctx_init = nv35_36_graph_context_init;
++              break;
++      default:
++              ctx_size = 0;
++              ctx_init = nv35_36_graph_context_init;
++              DRM_ERROR("Please contact the devs if you want your NV%x"
++                        " card to work\n", dev_priv->chipset);
++              return -ENOSYS;
++              break;
++      }
++
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
++                                        NVOBJ_FLAG_ZERO_ALLOC,
++                                        &chan->ramin_grctx)))
++              return ret;
++
++      /* Initialise default context values */
++      ctx_init(dev, chan->ramin_grctx->gpuobj);
++
++      /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
++      INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1);
++                                                           /* CTX_USER */
++
++      INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id,
++                      chan->ramin_grctx->instance >> 4);
++
++      return 0;
++}
++
++void nv20_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (chan->ramin_grctx)
++              nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
++
++      INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0);
++}
++
++int nv20_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
++               NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++
++      nouveau_wait_for_idle(dev);
++      return 0;
++}
++
++int nv20_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
++               NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
++
++      nouveau_wait_for_idle(dev);
++      return 0;
++}
++
++static void nv20_graph_rdi(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i, writecount = 32;
++      uint32_t rdi_index = 0x2c80000;
++
++      if (dev_priv->chipset == 0x20) {
++              rdi_index = 0x3d0000;
++              writecount = 15;
++      }
++
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index);
++      for (i = 0; i < writecount; i++)
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);
++
++      nouveau_wait_for_idle(dev);
++}
++
++int nv20_graph_init(struct drm_device *dev) {
++      struct drm_nouveau_private *dev_priv =
++              (struct drm_nouveau_private *)dev->dev_private;
++      uint32_t tmp, vramsz;
++      int ret, i;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      if (!dev_priv->ctx_table) {
++              /* Create Context Pointer Table */
++              dev_priv->ctx_table_size = 32 * 4;
++              if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
++                                                dev_priv->ctx_table_size, 16,
++                                                NVOBJ_FLAG_ZERO_ALLOC,
++                                                &dev_priv->ctx_table)))
++                      return ret;
++      }
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
++               dev_priv->ctx_table->instance >> 4);
++
++      nv20_graph_rdi(dev);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
++      NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
++      NV_WRITE(0x40009C           , 0x00000040);
++
++      if (dev_priv->chipset >= 0x25) {
++              NV_WRITE(0x400890, 0x00080000);
++              NV_WRITE(0x400610, 0x304B1FB6);
++              NV_WRITE(0x400B80, 0x18B82880);
++              NV_WRITE(0x400B84, 0x44000000);
++              NV_WRITE(0x400098, 0x40000080);
++              NV_WRITE(0x400B88, 0x000000ff);
++      } else {
++              NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
++              NV_WRITE(0x400094, 0x00000005);
++              NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
++              NV_WRITE(0x400B84, 0x24000000);
++              NV_WRITE(0x400098, 0x00000040);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
++      }
++
++      /* copy tile info from PFB */
++      for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
++                      /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
++              NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
++                      /* which is NV40_PGRAPH_TSIZE0(i) ?? */
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
++              NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
++                      /* which is NV40_PGRAPH_TILE0(i) ?? */
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
++      }
++      for (i = 0; i < 8; i++) {
++              NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4));
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));
++      }
++      NV_WRITE(0x4009a0, NV_READ(0x100324));
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
++      NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++      /* begin RAM config */
++      vramsz = drm_get_resource_len(dev, 0) - 1;
++      NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++      NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
++      NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0));
++      NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
++      NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1));
++      NV_WRITE(0x400820, 0);
++      NV_WRITE(0x400824, 0);
++      NV_WRITE(0x400864, vramsz-1);
++      NV_WRITE(0x400868, vramsz-1);
++
++      /* interesting.. the below overwrites some of the tile setup above.. */
++      NV_WRITE(0x400B20, 0x00000000);
++      NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++
++      return 0;
++}
++
++void nv20_graph_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
++}
++
++int nv30_graph_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++//    uint32_t vramsz, tmp;
++      int ret, i;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      if (!dev_priv->ctx_table) {
++              /* Create Context Pointer Table */
++              dev_priv->ctx_table_size = 32 * 4;
++              if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
++                                                dev_priv->ctx_table_size, 16,
++                                                NVOBJ_FLAG_ZERO_ALLOC,
++                                                &dev_priv->ctx_table)))
++                      return ret;
++      }
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
++                      dev_priv->ctx_table->instance >> 4);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
++      NV_WRITE(0x400890, 0x01b463ff);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475);
++      NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
++      NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
++      NV_WRITE(0x400B80, 0x1003d888);
++      NV_WRITE(0x400B84, 0x0c000000);
++      NV_WRITE(0x400098, 0x00000000);
++      NV_WRITE(0x40009C, 0x0005ad00);
++      NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2
++      NV_WRITE(0x4000a0, 0x00000000);
++      NV_WRITE(0x4000a4, 0x00000008);
++      NV_WRITE(0x4008a8, 0xb784a400);
++      NV_WRITE(0x400ba0, 0x002f8685);
++      NV_WRITE(0x400ba4, 0x00231f3f);
++      NV_WRITE(0x4008a4, 0x40000020);
++
++      if (dev_priv->chipset == 0x34) {
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032);
++              NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004);
++              NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002);
++      }
++
++      NV_WRITE(0x4000c0, 0x00000016);
++
++      /* copy tile info from PFB */
++      for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
++              NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
++                      /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
++              NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
++                      /* which is NV40_PGRAPH_TSIZE0(i) ?? */
++              NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
++                      /* which is NV40_PGRAPH_TILE0(i) ?? */
++      }
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(0x0040075c             , 0x00000001);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      /* begin RAM config */
++//    vramsz = drm_get_resource_len(dev, 0) - 1;
++      NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++      NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++      if (dev_priv->chipset != 0x34) {
++              NV_WRITE(0x400750, 0x00EA0000);
++              NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x400750, 0x00EA0004);
++              NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1));
++      }
++
++#if 0
++      NV_WRITE(0x400820, 0);
++      NV_WRITE(0x400824, 0);
++      NV_WRITE(0x400864, vramsz-1);
++      NV_WRITE(0x400868, vramsz-1);
++
++      NV_WRITE(0x400B20, 0x00000000);
++      NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++      /* per-context state, doesn't belong here */
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++#endif
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_fb.c git-nokia/drivers/gpu/drm-tungsten/nv40_fb.c
+--- git/drivers/gpu/drm-tungsten/nv40_fb.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_fb.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,62 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv40_fb_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t fb_bar_size, tmp;
++      int num_tiles;
++      int i;
++
++      /* This is strictly a NV4x register (don't know about NV5x). */
++      /* The blob sets these to all kinds of values, and they mess up our setup. */
++      /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
++      /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
++      /* Any idea what this is? */
++      NV_WRITE(NV40_PFB_UNK_800, 0x1);
++
++      switch (dev_priv->chipset) {
++      case 0x40:
++      case 0x45:
++              tmp = NV_READ(NV10_PFB_CLOSE_PAGE2);
++              NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15));
++              num_tiles = NV10_PFB_TILE__SIZE;
++              break;
++      case 0x46: /* G72 */
++      case 0x47: /* G70 */
++      case 0x49: /* G71 */
++      case 0x4b: /* G73 */
++      case 0x4c: /* C51 (G7X version) */
++              num_tiles = NV40_PFB_TILE__SIZE_1;
++              break;
++      default:
++              num_tiles = NV40_PFB_TILE__SIZE_0;
++              break;
++      }
++
++      fb_bar_size = drm_get_resource_len(dev, 0) - 1;
++      switch (dev_priv->chipset) {
++      case 0x40:
++              for (i=0; i<num_tiles; i++) {
++                      NV_WRITE(NV10_PFB_TILE(i), 0);
++                      NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
++              }
++              break;
++      default:
++              for (i=0; i<num_tiles; i++) {
++                      NV_WRITE(NV40_PFB_TILE(i), 0);
++                      NV_WRITE(NV40_PFB_TLIMIT(i), fb_bar_size);
++              }
++              break;
++      }
++
++      return 0;
++}
++
++void
++nv40_fb_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv40_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv40_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,209 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++
++#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
++                                       NV40_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)     INSTANCE_RD(chan->ramfc->gpuobj, \
++                                       NV40_RAMFC_##offset/4)
++#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE))
++#define NV40_RAMFC__SIZE 128
++
++int
++nv40_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
++                                              NV40_RAMFC__SIZE,
++                                              NVOBJ_FLAG_ZERO_ALLOC |
++                                              NVOBJ_FLAG_ZERO_FREE,
++                                              NULL, &chan->ramfc)))
++              return ret;
++
++      /* Fill entries that are seen filled in dumps of nvidia driver just
++       * after channel's is put into DMA mode
++       */
++      RAMFC_WR(DMA_PUT       , chan->pushbuf_base);
++      RAMFC_WR(DMA_GET       , chan->pushbuf_base);
++      RAMFC_WR(DMA_INSTANCE  , chan->pushbuf->instance >> 4);
++      RAMFC_WR(DMA_FETCH     , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++                               NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++                               NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++                               0x30000000 /* no idea.. */);
++      RAMFC_WR(DMA_SUBROUTINE, 0);
++      RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4);
++      RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
++
++      /* enable the fifo dma operation */
++      NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
++      return 0;
++}
++
++void
++nv40_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
++
++      if (chan->ramfc)
++              nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv40_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp, tmp2;
++
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET          , RAMFC_RD(DMA_GET));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT          , RAMFC_RD(DMA_PUT));
++      NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT          , RAMFC_RD(REF_CNT));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE     , RAMFC_RD(DMA_INSTANCE));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT       , RAMFC_RD(DMA_DCOUNT));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE        , RAMFC_RD(DMA_STATE));
++
++      /* No idea what 0x2058 is.. */
++      tmp   = RAMFC_RD(DMA_FETCH);
++      tmp2  = NV_READ(0x2058) & 0xFFF;
++      tmp2 |= (tmp & 0x30000000);
++      NV_WRITE(0x2058, tmp2);
++      tmp  &= ~0x30000000;
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH        , tmp);
++
++      NV_WRITE(NV04_PFIFO_CACHE1_ENGINE           , RAMFC_RD(ENGINE));
++      NV_WRITE(NV04_PFIFO_CACHE1_PULL1            , RAMFC_RD(PULL1_ENGINE));
++      NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE    , RAMFC_RD(ACQUIRE_VALUE));
++      NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP));
++      NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT  , RAMFC_RD(ACQUIRE_TIMEOUT));
++      NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE        , RAMFC_RD(SEMAPHORE));
++      NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE   , RAMFC_RD(DMA_SUBROUTINE));
++      NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE          , RAMFC_RD(GRCTX_INSTANCE));
++      NV_WRITE(0x32e4, RAMFC_RD(UNK_40));
++      /* NVIDIA does this next line twice... */
++      NV_WRITE(0x32e8, RAMFC_RD(UNK_44));
++      NV_WRITE(0x2088, RAMFC_RD(UNK_4C));
++      NV_WRITE(0x3300, RAMFC_RD(UNK_50));
++
++      /* not sure what part is PUT, and which is GET.. never seen a non-zero
++       * value appear in a mmio-trace yet..
++       */
++#if 0
++      tmp = NV_READ(UNK_84);
++      NV_WRITE(NV_PFIFO_CACHE1_GET, tmp ???);
++      NV_WRITE(NV_PFIFO_CACHE1_PUT, tmp ???);
++#endif
++
++      /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
++      tmp  = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
++      tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF;
++      NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);
++
++      /* Set channel active, and in DMA mode */
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
++               NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++
++      /* Reset DMA_CTL_AT_INFO to INVALID */
++      tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++      return 0;
++}
++
++int
++nv40_fifo_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      RAMFC_WR(DMA_PUT          , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
++      RAMFC_WR(DMA_GET          , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++      RAMFC_WR(REF_CNT          , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
++      RAMFC_WR(DMA_INSTANCE     , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE));
++      RAMFC_WR(DMA_DCOUNT       , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT));
++      RAMFC_WR(DMA_STATE        , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
++
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH);
++      tmp |= NV_READ(0x2058) & 0x30000000;
++      RAMFC_WR(DMA_FETCH        , tmp);
++
++      RAMFC_WR(ENGINE           , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
++      RAMFC_WR(PULL1_ENGINE     , NV_READ(NV04_PFIFO_CACHE1_PULL1));
++      RAMFC_WR(ACQUIRE_VALUE    , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
++      tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
++      RAMFC_WR(ACQUIRE_TIMESTAMP, tmp);
++      RAMFC_WR(ACQUIRE_TIMEOUT  , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
++      RAMFC_WR(SEMAPHORE        , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
++
++      /* NVIDIA read 0x3228 first, then write DMA_GET here.. maybe something
++       * more involved depending on the value of 0x3228?
++       */
++      RAMFC_WR(DMA_SUBROUTINE   , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
++
++      RAMFC_WR(GRCTX_INSTANCE   , NV_READ(NV40_PFIFO_GRCTX_INSTANCE));
++
++      /* No idea what the below is for exactly, ripped from a mmio-trace */
++      RAMFC_WR(UNK_40           , NV_READ(NV40_PFIFO_UNK32E4));
++
++      /* NVIDIA do this next line twice.. bug? */
++      RAMFC_WR(UNK_44           , NV_READ(0x32e8));
++      RAMFC_WR(UNK_4C           , NV_READ(0x2088));
++      RAMFC_WR(UNK_50           , NV_READ(0x3300));
++
++#if 0 /* no real idea which is PUT/GET in UNK_48.. */
++      tmp  = NV_READ(NV04_PFIFO_CACHE1_GET);
++      tmp |= (NV_READ(NV04_PFIFO_CACHE1_PUT) << 16);
++      RAMFC_WR(UNK_48           , tmp);
++#endif
++
++      return 0;
++}
++
++int
++nv40_fifo_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int ret;
++
++      if ((ret = nouveau_fifo_init(dev)))
++              return ret;
++
++      NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_graph.c git-nokia/drivers/gpu/drm-tungsten/nv40_graph.c
+--- git/drivers/gpu/drm-tungsten/nv40_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2193 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++/*TODO: deciper what each offset in the context represents. The below
++ *      contexts are taken from dumps just after the 3D object is
++ *      created.
++ */
++static void
++nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      /* Always has the "instance address" of itself at offset 0 */
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      /* unknown */
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00180/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00184/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00188/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0018c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001c0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00480/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00498/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x004b4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004d0/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00504/4, 0x00011100);
++      for (i=0x00520; i<=0x0055c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x00594/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x005b4/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0060c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00610/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00618/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00628/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00);
++      /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */
++      /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */
++      for (i=0x006C0; i<=0x006fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */
++      for (i=0x00700; i<=0x0073c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */
++      /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */
++      for (i=0x00780; i<=0x007bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */
++      for (i=0x007c0; i<=0x007fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */
++      for (i=0x00800; i<=0x0083c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */
++      /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */
++      for (i=0x00880; i<=0x008bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      /* unknown */
++      for (i=0x00910; i<=0x0091c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00920; i<=0x0092c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00940; i<=0x0094c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00960; i<=0x0096c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00980/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x009b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00);
++      INSTANCE_WR(ctx, 0x009d4/4, 0x00020000);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00aac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af8/4, 0x80800001);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c00/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c04/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c08/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c44/4, 0x00000001);
++      for (i=0x03008; i<=0x03080; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x05288; i<=0x08570; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x08628; i<=0x08e18; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0bd28; i<=0x0f010; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0f0c8; i<=0x0f8b8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x127c8; i<=0x15ab0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x15b68; i<=0x16358; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x19268; i<=0x1c550; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x1c608; i<=0x1cdf8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x1fd08; i<=0x22ff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x230a8; i<=0x23898; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x267a8; i<=0x29a90; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x29b48; i<=0x2a338; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
++      for (i = 0x00000178; i <= 0x00000180; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
++      for (i = 0x00000194; i <= 0x000001b0; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
++      for (i = 0x00000350; i <= 0x0000035c; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00000430/4, 0x00011100);
++      for (i = 0x0000044c; i <= 0x00000488; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00000538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00);
++      for (i = 0x000005dc; i <= 0x00000618; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i = 0x0000061c; i <= 0x00000658; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i = 0x0000069c; i <= 0x000006d8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i = 0x000006dc; i <= 0x00000718; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i = 0x0000071c; i <= 0x00000758; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i = 0x0000079c; i <= 0x000007d8; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i = 0x0000082c; i <= 0x00000838; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i = 0x0000083c; i <= 0x00000848; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i = 0x0000085c; i <= 0x00000868; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i = 0x0000087c; i <= 0x00000888; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00);
++      INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000);
++      INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff);
++      for (i = 0x00000ad4; i <= 0x00000ae4; i += 4)
++              INSTANCE_WR(ctx, i/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001);
++      for (i = 0x00002ee8; i <= 0x00002f60; i += 8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x00005168; i <= 0x00007358; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x00007368; i <= 0x00007758; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x0000a068; i <= 0x0000c258; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x0000c268; i <= 0x0000c658; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x0000ef68; i <= 0x00011158; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x00011168; i <= 0x00011558; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i = 0x00013e68; i <= 0x00016058; i += 24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i = 0x00016068; i <= 0x00016458; i += 16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++};
++
++static void
++nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) /* fill a fresh NV43 PGRAPH context object with its initial register values */
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private; /* used implicitly by the INSTANCE_WR() macro -- TODO confirm */
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); /* first word points back at this object's instance-memory start */
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00194/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00198/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0019c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001a4/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001a8/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001ac/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
++      for (i=0x0044c; i<=0x00488; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00);
++      for (i=0x005dc; i<=0x00618; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x0061c; i<=0x00658; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x0069c; i<=0x006d8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006dc; i<=0x00718; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x0071c; i<=0x00758; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x0079c; i<=0x007d8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x0082c; i<=0x00838; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x0083c; i<=0x00848; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x0085c; i<=0x00868; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x0087c; i<=0x00888; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x0089c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x00020000);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
++      for (i=0x02ec0; i<=0x02f38; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x04c80; i<=0x06e70; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x06e80; i<=0x07270; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x096c0; i<=0x0b8b0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0b8c0; i<=0x0bcb0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0e100; i<=0x102f0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x10300; i<=0x106f0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) /* fill a fresh NV46 PGRAPH context object with its initial register values */
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private; /* used implicitly by the INSTANCE_WR() macro -- TODO confirm */
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); /* first word points back at this object's instance-memory start */
++      INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0004c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00138/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00144/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00184/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0018c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00190/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00194/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00198/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001a4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x0036c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00370/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00374/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00378/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003a4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x003b8/4, 0x00003010);
++      INSTANCE_WR(ctx, 0x003dc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003e4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003e8/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00400/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00404/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00408/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0040c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00410/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00414/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00418/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004b0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004b4/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x004d0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x004d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004ec/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00500/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00504/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00508/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0050c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00510/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00514/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00518/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00520/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00524/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00528/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00530/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00534/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00538/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0053c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00011100);
++      for (i=0x00578; i<0x005b4; i+=4) /* NOTE(review): strict "<" here; every sibling loop uses "<=" -- verify against the hw ctx image */
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x005ec/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x00608/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00658/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00664/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00674/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x006c8/4, 0x00ffff00);
++      for (i=0x0070c; i<=0x00748; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x0074c; i<=0x00788; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x007cc; i<=0x00808; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x0080c; i<=0x00848; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x0084c; i<=0x00888; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x008cc; i<=0x00908; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x0095c; i<=0x00968; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x0096c; i<=0x00978; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x0098c; i<=0x00998; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x009ac; i<=0x009b8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00a00/4, 0x00000421);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x00a28/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00a60/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00aec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b30/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00b38/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bec/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000);
++      for (i=0x017f8; i<=0x01870; i+=8) /* large strided tables below; values 0x3f800000 (1.0f) and 0x00000001 -- presumably default vertex state, TODO confirm */
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x035b8; i<=0x057a8; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x057b8; i<=0x05ba8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x07f38; i<=0x0a128; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0a138; i<=0x0a528; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0c8b8; i<=0x0eaa8; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0eab8; i<=0x0eea8; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++/* This may only work on 7800 AGP cards, will include a warning */
++static void
++nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) /* fill a fresh NV47 PGRAPH context object with its initial register values */
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private; /* used implicitly by the INSTANCE_WR() macro -- TODO confirm */
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start); /* first word points back at this object's instance-memory start */
++      INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00000178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00000180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
++      for (i=0x00000194; i<=0x000001b0; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00000350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
++      for (i=0x000003c0; i<=0x000003fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00000454/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00000458/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x00000474/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000490/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000);
++      for (i=0x000004a4; i<=0x000004e0; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00000500/4, 0x00011100);
++      for (i=0x0000051c; i<=0x00000558; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00000590/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00000608/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00);
++      for (i=0x000006b0; i<=0x000006ec; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x000006f0; i<=0x0000072c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00000770; i<=0x000007ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x000007b0; i<=0x000007ec; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x000007f0; i<=0x0000082c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00000870; i<=0x000008ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80);
++      INSTANCE_WR(ctx, 0x00000910/4, 0x00000202);
++      INSTANCE_WR(ctx, 0x00000914/4, 0x00000202);
++      INSTANCE_WR(ctx, 0x00000918/4, 0x00000202);
++      INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202);
++      for (i=0x00000930; i<=0x0000095c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00000970/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00);
++      INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001);
++      for (i=0x00000b10; i<=0x00000b8c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff);
++      for (i=0x00000bdc; i<=0x00000bf8; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000);
++      for (i=0x00003000; i<=0x00003078; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00004dc0; i<=0x00006fb0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x00006fc0; i<=0x000073b0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00009800; i<=0x0000b9f0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0000ba00; i<=0x00010430; i+=24) /* NOTE(review): stride 24 here although every other 0x3f800000 run uses stride 16 -- verify against the hw ctx image */
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00010440; i<=0x00010830; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x00012c80; i<=0x00014e70; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x00014e80; i<=0x00015270; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x000176c0; i<=0x000198b0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x000198c0; i<=0x00019cb0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0001c100; i<=0x0001e2f0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0001e300; i<=0x0001e6f0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) /* fill a fresh NV49 PGRAPH context object with its initial register values */
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private; /* used implicitly by the INSTANCE_WR() macro -- TODO confirm */
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start); /* first word points back at this object's instance-memory start */
++      INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
++      for (i=0x00750; i<=0x0078c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00790; i<=0x007cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00810; i<=0x0084c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x00850; i<=0x0088c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00890; i<=0x008cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00910; i<=0x0094c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x009a0; i<=0x009ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x009b0; i<=0x009bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x009d0; i<=0x009dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x009f0; i<=0x009fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
++      INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
++      INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
++      for(i=0x030a0; i<=0x03118; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x098a0; i<=0x0ba90; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x0baa0; i<=0x0be90; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x0e2e0; i<=0x0fff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x10008; i<=0x104d0; i+=24) /* continues the previous +=24 run (0x0fff0 + 0x18 == 0x10008) */
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x104e0; i<=0x108d0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x12d20; i<=0x14f10; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x14f20; i<=0x15310; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x17760; i<=0x19950; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x19960; i<=0x19d50; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x1c1a0; i<=0x1e390; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x1e3a0; i<=0x1e790; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00003010);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
++      for (i=0x0044c; i<=0x00488; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00);
++      for (i=0x005d8; i<=0x00614; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00618; i<=0x00654; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00698; i<=0x006d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006d8; i<=0x00714; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00718; i<=0x00754; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00798; i<=0x007d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x00828; i<=0x00834; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00838; i<=0x00844; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00858; i<=0x00864; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00878; i<=0x00884; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00898/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008cc/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
++      for (i=0x016c0; i<=0x01738; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x03840; i<=0x05670; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x05680; i<=0x05a70; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x07e00; i<=0x09ff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0a000; i<=0x0a3f0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x0c780; i<=0x0e970; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x0e980; i<=0x0ed70; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
++      INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
++      INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
++      INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
++      for (i=0x00750; i<=0x0078c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00790; i<=0x007cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00810; i<=0x0084c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x00850; i<=0x0088c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00890; i<=0x008cc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00910; i<=0x0094c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x009a0; i<=0x009ac; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x009b0; i<=0x009bc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x009d0; i<=0x009dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x009f0; i<=0x009fc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
++      INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
++      INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
++      for(i=0x030a0; i<=0x03118; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x098a0; i<=0x0ba90; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x0baa0; i<=0x0be90; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x0e2e0; i<=0x0fff0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x10008; i<=0x104d0; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x104e0; i<=0x108d0; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x12d20; i<=0x14f10; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x14f20; i<=0x15310; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for(i=0x17760; i<=0x19950; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for(i=0x19960; i<=0x19d50; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f4/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0040c/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x0041c/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x0042c/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00434/4, 0x00011100);
++      for (i=0x00450; i<0x0048c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00498/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c4/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004cc/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004fc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00530/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00534/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00538/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x0053c/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x0054c/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x00550/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00554/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00564/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x0059c/4, 0x00ffff00);
++      for (i=0x005e0; i<=0x0061c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00620; i<=0x0065c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x006a0; i<=0x006dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006e0; i<=0x0071c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00720; i<=0x0075c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x007a0; i<=0x007dc; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x00830; i<=0x0083c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00840; i<=0x0084c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00860; i<=0x0086c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00880; i<=0x0088c; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x008a0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x008d8/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008dc/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008ec/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008f0/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008fc/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00934/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a0c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a74/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a80/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00a9c/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00ad8/4, 0x00000001);
++      for (i=0x016a0; i<0x01718; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x03460; i<0x05650; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x05660; i<0x05a50; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++static void
++nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i;
++
++      INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
++      INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
++      INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
++      INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
++      INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
++      INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
++      INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
++      INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
++      INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
++      for (i=0x0044c; i<=0x00488; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x07ff0000);
++      INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
++      INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
++      INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
++      INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
++      INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
++      INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
++      INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
++      INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00);
++      for (i=0x005d8; i<=0x00614; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00018488);
++      for (i=0x00618; i<=0x00654; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00028202);
++      for (i=0x00698; i<=0x006d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0000aae4);
++      for (i=0x006d8; i<=0x00714; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x01012000);
++      for (i=0x00718; i<=0x00754; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      for (i=0x00798; i<=0x007d4; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00100008);
++      for (i=0x00828; i<=0x00834; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x0001bc80);
++      for (i=0x00838; i<=0x00844; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000202);
++      for (i=0x00858; i<=0x00864; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00000008);
++      for (i=0x00878; i<=0x00884; i+=4)
++              INSTANCE_WR(ctx, i/4, 0x00080008);
++      INSTANCE_WR(ctx, 0x00898/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x008cc/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3);
++      INSTANCE_WR(ctx, 0x008d4/4, 0x00011001);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x00040000);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
++      INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
++      INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00a94/4, 0x00005555);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001);
++      for (i=0x01668; i<=0x016e0; i+=8)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++      for (i=0x03428; i<=0x05618; i+=24)
++              INSTANCE_WR(ctx, i/4, 0x00000001);
++      for (i=0x05628; i<=0x05a18; i+=16)
++              INSTANCE_WR(ctx, i/4, 0x3f800000);
++}
++
++int
++nv40_graph_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
++      int ret;
++
++      /* These functions populate the graphics context with a whole heap
++       * of default state.  All these functions are very similar, with
++       * a minimal amount of chipset-specific changes.  However, as we're
++       * currently dependant on the context programs used by the NVIDIA
++       * binary driver these functions must match the layout expected by
++       * them.  Hopefully at some point this will all change.
++       */
++      switch (dev_priv->chipset) {
++      case 0x40:
++              ctx_init = nv40_graph_context_init;
++              break;
++      case 0x41:
++      case 0x42:
++              ctx_init = nv41_graph_context_init;
++              break;
++      case 0x43:
++              ctx_init = nv43_graph_context_init;
++              break;
++      case 0x46:
++              ctx_init = nv46_graph_context_init;
++              break;
++      case 0x47:
++              ctx_init = nv47_graph_context_init;
++              break;
++      case 0x49:
++              ctx_init = nv49_graph_context_init;
++              break;
++      case 0x44:
++      case 0x4a:
++              ctx_init = nv4a_graph_context_init;
++              break;
++      case 0x4b:
++              ctx_init = nv4b_graph_context_init;
++              break;
++      case 0x4c:
++      case 0x67:
++              ctx_init = nv4c_graph_context_init;
++              break;
++      case 0x4e:
++              ctx_init = nv4e_graph_context_init;
++              break;
++      default:
++              ctx_init = nv40_graph_context_init;
++              break;
++      }
++
++      /* Allocate a 175KiB block of PRAMIN to store the context.  This
++       * is massive overkill for a lot of chipsets, but it should be safe
++       * until we're able to implement this properly (will happen at more
++       * or less the same time we're able to write our own context programs.
++       */
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
++                                        NVOBJ_FLAG_ZERO_ALLOC,
++                                        &chan->ramin_grctx)))
++              return ret;
++
++      /* Initialise default context values */
++      ctx_init(dev, chan->ramin_grctx->gpuobj);
++
++      return 0;
++}
++
++void
++nv40_graph_destroy_context(struct nouveau_channel *chan)
++{
++      nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
++}
++
++static int
++nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t old_cp, tv = 1000, tmp;
++      int i;
++
++      old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++
++      tmp  = NV_READ(NV40_PGRAPH_CTXCTL_0310);
++      tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
++                    NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
++      NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
++
++      tmp  = NV_READ(NV40_PGRAPH_CTXCTL_0304);
++      tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
++      NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
++
++      nouveau_wait_for_idle(dev);
++
++      for (i = 0; i < tv; i++) {
++              if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
++                      break;
++      }
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
++
++      if (i == tv) {
++              uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
++              DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
++              DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
++                        ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
++                        ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
++              DRM_ERROR("0x40030C = 0x%08x\n",
++                        NV_READ(NV40_PGRAPH_CTXCTL_030C));
++              return -EBUSY;
++      }
++
++      return 0;
++}
++
++/* Save current context (from PGRAPH) into the channel's context */
++int
++nv40_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      uint32_t inst;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      return nv40_graph_transfer_context(dev, inst, 1);
++}
++
++/* Restore the context for a specific channel into PGRAPH */
++int
++nv40_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++      int ret;
++
++      if (!chan->ramin_grctx)
++              return -EINVAL;
++      inst = chan->ramin_grctx->instance >> 4;
++
++      ret = nv40_graph_transfer_context(dev, inst, 0);
++      if (ret)
++              return ret;
++
++      /* 0x40032C, no idea of it's exact function.  Could simply be a
++       * record of the currently active PGRAPH context.  It's currently
++       * unknown as to what bit 24 does.  The nv ddx has it set, so we will
++       * set it here too.
++       */
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR,
++               (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) |
++                NV40_PGRAPH_CTXCTL_CUR_LOADED);
++      /* 0x32E0 records the instance address of the active FIFO's PGRAPH
++       * context.  If at any time this doesn't match 0x40032C, you will
++       * recieve PGRAPH_INTR_CONTEXT_SWITCH
++       */
++      NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst);
++      return 0;
++}
++
++/* These blocks of "magic numbers" are actually a microcode that the GPU uses
++ * to control how graphics contexts get saved and restored between PRAMIN
++ * and PGRAPH during a context switch.  We're currently using values seen
++ * in mmio-traces of the binary driver.
++ */
++static uint32_t nv40_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406,
++      0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216,
++      0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100,
++      0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029,
++      0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6,
++      0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b,
++      0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a,
++      0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
++      0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008,
++      0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901,
++      0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038,
++      0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
++      0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684,
++      0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68,
++      0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006,
++      0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284,
++      0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
++      0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
++      0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
++      ~0
++};
++
++static uint32_t nv41_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
++      0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800,
++      0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
++      0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
++      0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
++      0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
++      0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480,
++      0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a,
++      0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001,
++      0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
++      0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv43_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
++      0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
++      0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a,
++      0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965,
++      0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
++      0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
++      0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
++      0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350,
++      0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006,
++      0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
++      0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
++      0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
++      0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
++      ~0
++};
++
++static uint32_t nv44_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
++      0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5,
++      0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b,
++      0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d,
++      0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6,
++      0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158,
++      0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9,
++      0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0,
++      0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f,
++      0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec,
++      0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a,
++      0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691,
++      0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc,
++      0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
++      0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901,
++      0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
++      0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08,
++      0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8,
++      0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001,
++      0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029,
++      0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a,
++      0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000,
++      0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007,
++      0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a,
++      0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv46_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
++      0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a,
++      0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691,
++      0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x00200022,
++      0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
++      0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910,
++      0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, 0x00108a14,
++      0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80,
++      0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709,
++      0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
++      0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320,
++      0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081,
++      0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001,
++      0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
++      0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv47_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606,
++      0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12,
++      0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a,
++      0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
++      0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901,
++      0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19,
++      0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00,
++      0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
++      0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000,
++      0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a,
++      0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006,
++      0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318,
++      0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002,
++      0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f,
++      0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880,
++      0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c,
++      0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006,
++      0x0060000e, ~0
++};
++
++//this is used for nv49 and nv4b
++static uint32_t nv49_4b_ctx_prog[] ={
++      0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020,
++      0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000,
++      0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e,
++      0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000,
++      0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a,
++      0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210,
++      0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280,
++      0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140,
++      0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118,
++      0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d,
++      0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
++      0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800,
++      0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00,
++      0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e,
++      0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600,
++      0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a,
++      0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88,
++      0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f,
++      0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280,
++      0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68,
++      0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e,
++      0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b,
++      0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e,
++      0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60,
++      0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e,
++      0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005,
++      0x00700006, 0x0060000e, ~0
++};
++
++
++static uint32_t nv4a_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
++      0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
++      0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
++      0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
++      0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00,
++      0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
++      0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
++      0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a,
++      0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100,
++      0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a,
++      0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
++      0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004,
++      0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080,
++      0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88,
++      0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000,
++      0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
++      0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05,
++      0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv4c_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
++      0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
++      0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
++      0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6,
++      0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682,
++      0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
++      0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0,
++      0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a,
++      0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
++      0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
++      0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
++      0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
++      0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a,
++      0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080,
++      0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168,
++      0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68,
++      0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000,
++      0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306,
++      0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
++};
++
++static uint32_t nv4e_ctx_prog[] = {
++      0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
++      0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
++      0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
++      0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
++      0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
++      0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
++      0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
++      0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
++      0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
++      0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
++      0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
++      0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
++      0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
++      0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
++      0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
++      0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
++      0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
++      0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06,
++      0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
++      0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06,
++      0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006,
++      0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004,
++      0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
++      0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
++      0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
++      0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
++      0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
++      0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
++      0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
++      ~0
++};
++
++/*
++ * G70                0x47
++ * G71                0x49
++ * NV45               0x48
++ * G72[M]     0x46
++ * G73                0x4b
++ * C51_G7X    0x4c
++ * C51                0x4e
++ */
++int
++nv40_graph_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv =
++              (struct drm_nouveau_private *)dev->dev_private;
++      uint32_t *ctx_prog;
++      uint32_t vramsz, tmp;
++      int i, j;
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
++                      ~NV_PMC_ENABLE_PGRAPH);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
++                       NV_PMC_ENABLE_PGRAPH);
++
++      switch (dev_priv->chipset) {
++      case 0x40: ctx_prog = nv40_ctx_prog; break;
++      case 0x41:
++      case 0x42: ctx_prog = nv41_ctx_prog; break;
++      case 0x43: ctx_prog = nv43_ctx_prog; break;
++      case 0x44: ctx_prog = nv44_ctx_prog; break;
++      case 0x46: ctx_prog = nv46_ctx_prog; break;
++      case 0x47: ctx_prog = nv47_ctx_prog; break;
++      case 0x49: ctx_prog = nv49_4b_ctx_prog; break;
++      case 0x4a: ctx_prog = nv4a_ctx_prog; break;
++      case 0x4b: ctx_prog = nv49_4b_ctx_prog; break;
++      case 0x4c:
++      case 0x67: ctx_prog = nv4c_ctx_prog; break;
++      case 0x4e: ctx_prog = nv4e_ctx_prog; break;
++      default:
++              DRM_ERROR("Context program for 0x%02x unavailable\n",
++                        dev_priv->chipset);
++              ctx_prog = NULL;
++              break;
++      }
++
++      /* Load the context program onto the card */
++      if (ctx_prog) {
++              DRM_DEBUG("Loading context program\n");
++              i = 0;
++
++              NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
++              while (ctx_prog[i] != ~0) {
++                      NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]);
++                      i++;
++              }
++      }
++
++      /* No context present currently */
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
++
++      NV_WRITE(NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++      NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
++      NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xe0de8055);
++      NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
++      NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
++
++      NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++      NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++      NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);
++
++      j = NV_READ(0x1540) & 0xff;
++      if (j) {
++              for (i=0; !(j&1); j>>=1, i++);
++              NV_WRITE(0x405000, i);
++      }
++
++      if (dev_priv->chipset == 0x40) {
++              NV_WRITE(0x4009b0, 0x83280fff);
++              NV_WRITE(0x4009b4, 0x000000a0);
++      } else {
++              NV_WRITE(0x400820, 0x83280eff);
++              NV_WRITE(0x400824, 0x000000a0);
++      }
++
++      switch (dev_priv->chipset) {
++      case 0x40:
++      case 0x45:
++              NV_WRITE(0x4009b8, 0x0078e366);
++              NV_WRITE(0x4009bc, 0x0000014c);
++              break;
++      case 0x41:
++      case 0x42: /* pciid also 0x00Cx */
++//    case 0x0120: //XXX (pciid)
++              NV_WRITE(0x400828, 0x007596ff);
++              NV_WRITE(0x40082c, 0x00000108);
++              break;
++      case 0x43:
++              NV_WRITE(0x400828, 0x0072cb77);
++              NV_WRITE(0x40082c, 0x00000108);
++              break;
++      case 0x44:
++      case 0x46: /* G72 */
++      case 0x4a:
++      case 0x4c: /* G7x-based C51 */
++      case 0x4e:
++              NV_WRITE(0x400860, 0);
++              NV_WRITE(0x400864, 0);
++              break;
++      case 0x47: /* G70 */
++      case 0x49: /* G71 */
++      case 0x4b: /* G73 */
++              NV_WRITE(0x400828, 0x07830610);
++              NV_WRITE(0x40082c, 0x0000016A);
++              break;
++      default:
++              break;
++      }
++
++      NV_WRITE(0x400b38, 0x2ffff800);
++      NV_WRITE(0x400b3c, 0x00006000);
++
++      /* copy tile info from PFB */
++      switch (dev_priv->chipset) {
++      case 0x40: /* vanilla NV40 */
++              for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
++                      tmp = NV_READ(NV10_PFB_TILE(i));
++                      NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++                      tmp = NV_READ(NV10_PFB_TLIMIT(i));
++                      NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++                      tmp = NV_READ(NV10_PFB_TSIZE(i));
++                      NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++                      tmp = NV_READ(NV10_PFB_TSTATUS(i));
++                      NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++              }
++              break;
++      case 0x44:
++      case 0x4a:
++      case 0x4e: /* NV44-based cores don't have 0x406900? */
++              for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
++                      tmp = NV_READ(NV40_PFB_TILE(i));
++                      NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TLIMIT(i));
++                      NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSIZE(i));
++                      NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSTATUS(i));
++                      NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++              }
++              break;
++      case 0x46:
++      case 0x47:
++      case 0x49:
++      case 0x4b: /* G7X-based cores */
++              for (i=0; i<NV40_PFB_TILE__SIZE_1; i++) {
++                      tmp = NV_READ(NV40_PFB_TILE(i));
++                      NV_WRITE(NV47_PGRAPH_TILE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TLIMIT(i));
++                      NV_WRITE(NV47_PGRAPH_TLIMIT0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSIZE(i));
++                      NV_WRITE(NV47_PGRAPH_TSIZE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSTATUS(i));
++                      NV_WRITE(NV47_PGRAPH_TSTATUS0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++              }
++              break;
++      default: /* everything else */
++              for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
++                      tmp = NV_READ(NV40_PFB_TILE(i));
++                      NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TLIMIT(i));
++                      NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSIZE(i));
++                      NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
++                      tmp = NV_READ(NV40_PFB_TSTATUS(i));
++                      NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
++                      NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
++              }
++              break;
++      }
++
++      /* begin RAM config */
++      vramsz = drm_get_resource_len(dev, 0) - 1;
++      switch (dev_priv->chipset) {
++      case 0x40:
++              NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
++              NV_WRITE(0x4069A4, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x4069A8, NV_READ(NV04_PFB_CFG1));
++              NV_WRITE(0x400820, 0);
++              NV_WRITE(0x400824, 0);
++              NV_WRITE(0x400864, vramsz);
++              NV_WRITE(0x400868, vramsz);
++              break;
++      default:
++              switch (dev_priv->chipset) {
++              case 0x46:
++              case 0x47:
++              case 0x49:
++              case 0x4b:
++                      NV_WRITE(0x400DF0, NV_READ(NV04_PFB_CFG0));
++                      NV_WRITE(0x400DF4, NV_READ(NV04_PFB_CFG1));
++                      break;
++              default:
++                      NV_WRITE(0x4009F0, NV_READ(NV04_PFB_CFG0));
++                      NV_WRITE(0x4009F4, NV_READ(NV04_PFB_CFG1));
++                      break;
++              }
++              NV_WRITE(0x4069F0, NV_READ(NV04_PFB_CFG0));
++              NV_WRITE(0x4069F4, NV_READ(NV04_PFB_CFG1));
++              NV_WRITE(0x400840, 0);
++              NV_WRITE(0x400844, 0);
++              NV_WRITE(0x4008A0, vramsz);
++              NV_WRITE(0x4008A4, vramsz);
++              break;
++      }
++
++      /* per-context state, doesn't belong here */
++      NV_WRITE(0x400B20, 0x00000000);
++      NV_WRITE(0x400B04, 0xFFFFFFFF);
++
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++      tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
++      NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
++
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++      NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++
++      return 0;
++}
++
++void nv40_graph_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv40_mc.c git-nokia/drivers/gpu/drm-tungsten/nv40_mc.c
+--- git/drivers/gpu/drm-tungsten/nv40_mc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv40_mc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,38 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv40_mc_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t tmp;
++
++      /* Power up everything, resetting each individual unit will
++       * be done later if needed.
++       */
++      NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++      switch (dev_priv->chipset) {
++      case 0x44:
++      case 0x46: /* G72 */
++      case 0x4e:
++      case 0x4c: /* C51_G7X */
++              tmp = NV_READ(NV40_PFB_020C);
++              NV_WRITE(NV40_PMC_1700, tmp);
++              NV_WRITE(NV40_PMC_1704, 0);
++              NV_WRITE(NV40_PMC_1708, 0);
++              NV_WRITE(NV40_PMC_170C, tmp);
++              break;
++      default:
++              break;
++      }
++
++      return 0;
++}
++
++void
++nv40_mc_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_fifo.c git-nokia/drivers/gpu/drm-tungsten/nv50_fifo.c
+--- git/drivers/gpu/drm-tungsten/nv50_fifo.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_fifo.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,343 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++struct nv50_fifo_priv {
++      struct nouveau_gpuobj_ref *thingo[2];
++      int cur_thingo;
++};
++
++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
++
++static void
++nv50_fifo_init_thingo(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
++      struct nouveau_gpuobj_ref *cur;
++      int i, nr;
++
++      DRM_DEBUG("\n");
++
++      cur = priv->thingo[priv->cur_thingo];
++      priv->cur_thingo = !priv->cur_thingo;
++
++      /* We never schedule channel 0 or 127 */
++      for (i = 1, nr = 0; i < 127; i++) {
++              if (dev_priv->fifos[i]) {
++                      INSTANCE_WR(cur->gpuobj, nr++, i);
++              }
++      }
++      NV_WRITE(0x32f4, cur->instance >> 12);
++      NV_WRITE(0x32ec, nr);
++      NV_WRITE(0x2500, 0x101);
++}
++
++static int
++nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan = dev_priv->fifos[channel];
++      uint32_t inst;
++
++      DRM_DEBUG("ch%d\n", channel);
++
++      if (!chan->ramfc)
++              return -EINVAL;
++
++      if (IS_G80) inst = chan->ramfc->instance >> 12;
++      else        inst = chan->ramfc->instance >> 8;
++      NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
++               inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
++
++      if (!nt) nv50_fifo_init_thingo(dev);
++      return 0;
++}
++
++static void
++nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst;
++
++      DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
++
++      if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
++      else        inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
++      NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst);
++
++      if (!nt) nv50_fifo_init_thingo(dev);
++}
++
++static void
++nv50_fifo_init_reset(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  pmc_e);
++}
++
++static void
++nv50_fifo_init_intr(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
++      NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
++}
++
++static void
++nv50_fifo_init_context_table(struct drm_device *dev)
++{
++      int i;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++)
++              nv50_fifo_channel_disable(dev, i, 1);
++      nv50_fifo_init_thingo(dev);
++}
++
++static void
++nv50_fifo_init_regs__nv(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(0x250c, 0x6f3cfc34);
++}
++
++static void
++nv50_fifo_init_regs(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(0x2500, 0);
++      NV_WRITE(0x3250, 0);
++      NV_WRITE(0x3220, 0);
++      NV_WRITE(0x3204, 0);
++      NV_WRITE(0x3210, 0);
++      NV_WRITE(0x3270, 0);
++
++      /* Enable dummy channels setup by nv50_instmem.c */
++      nv50_fifo_channel_enable(dev, 0, 1);
++      nv50_fifo_channel_enable(dev, 127, 1);
++}
++
++int
++nv50_fifo_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nv50_fifo_priv *priv;
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
++      if (!priv)
++              return -ENOMEM;
++      dev_priv->Engine.fifo.priv = priv;
++
++      nv50_fifo_init_reset(dev);
++      nv50_fifo_init_intr(dev);
++
++      ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
++                                   NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
++      if (ret) {
++              DRM_ERROR("error creating thingo0: %d\n", ret);
++              return ret;
++      }
++
++      ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
++                                   NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
++      if (ret) {
++              DRM_ERROR("error creating thingo1: %d\n", ret);
++              return ret;
++      }
++
++      nv50_fifo_init_context_table(dev);
++      nv50_fifo_init_regs__nv(dev);
++      nv50_fifo_init_regs(dev);
++
++      return 0;
++}
++
++void
++nv50_fifo_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
++
++      DRM_DEBUG("\n");
++
++      if (!priv)
++              return;
++
++      nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
++      nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
++
++      dev_priv->Engine.fifo.priv = NULL;
++      drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
++}
++
++int
++nv50_fifo_channel_id(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
++                      NV50_PFIFO_CACHE1_PUSH1_CHID_MASK);
++}
++
++int
++nv50_fifo_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ramfc = NULL;
++      int ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      if (IS_G80) {
++              uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;
++              uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start;
++              ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset,
++                                            0x100, NVOBJ_FLAG_ZERO_ALLOC |
++                                            NVOBJ_FLAG_ZERO_FREE, &ramfc,
++                                            &chan->ramfc);
++              if (ret)
++                      return ret;
++      } else {
++              ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
++                                           NVOBJ_FLAG_ZERO_ALLOC |
++                                           NVOBJ_FLAG_ZERO_FREE,
++                                           &chan->ramfc);
++              if (ret)
++                      return ret;
++              ramfc = chan->ramfc->gpuobj;
++      }
++
++      INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4);
++      INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
++      INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? */
++      INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff);
++      INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff);
++      INSTANCE_WR(ramfc, 0x10/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x08/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x40/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0);
++      INSTANCE_WR(ramfc, 0x54/4, 0x000f0000);
++      INSTANCE_WR(ramfc, 0x7c/4, 0x30000001);
++      INSTANCE_WR(ramfc, 0x78/4, 0x00000000);
++      INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1);
++
++      if (!IS_G80) {
++              INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id);
++              INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance);
++
++              INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */
++              INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);
++      }
++
++      ret = nv50_fifo_channel_enable(dev, chan->id, 0);
++      if (ret) {
++              DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret);
++              nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++              return ret;
++      }
++
++      return 0;
++}
++
++void
++nv50_fifo_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      nv50_fifo_channel_disable(dev, chan->id, 0);
++
++      /* Dummy channel, also used on ch 127 */
++      if (chan->id == 0)
++              nv50_fifo_channel_disable(dev, 127, 0);
++
++      if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id)
++              NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127);
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++int
++nv50_fifo_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      /*XXX: incomplete, only touches the regs that NV does */
++
++      NV_WRITE(0x3244, 0);
++      NV_WRITE(0x3240, 0);
++
++      NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4));
++      NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4));
++      NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4));
++      NV_WRITE(0x3254, 1);
++      NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4));
++
++      if (!IS_G80) {
++              NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4));
++              NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4));
++      }
++
++      NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
++      return 0;
++}
++
++int
++nv50_fifo_save_context(struct nouveau_channel *chan)
++{
++      DRM_DEBUG("ch%d\n", chan->id);
++      DRM_ERROR("stub!\n");
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_graph.c git-nokia/drivers/gpu/drm-tungsten/nv50_graph.c
+--- git/drivers/gpu/drm-tungsten/nv50_graph.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_graph.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,8286 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
++
++static void
++nv50_graph_init_reset(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e);
++      NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  pmc_e);
++}
++
++static void
++nv50_graph_init_intr(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff);
++      NV_WRITE(0x400138, 0xffffffff);
++      NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff);
++}
++
++static void
++nv50_graph_init_regs__nv(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(0x400804, 0xc0000000);
++      NV_WRITE(0x406800, 0xc0000000);
++      NV_WRITE(0x400c04, 0xc0000000);
++      NV_WRITE(0x401804, 0xc0000000);
++      NV_WRITE(0x405018, 0xc0000000);
++      NV_WRITE(0x402000, 0xc0000000);
++
++      NV_WRITE(0x400108, 0xffffffff);
++
++      NV_WRITE(0x400824, 0x00004000);
++      NV_WRITE(0x400500, 0x00010001);
++}
++
++static void
++nv50_graph_init_regs(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */);
++}
++
++static uint32_t nv50_ctx_voodoo[] = {
++      0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
++      0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
++      0x00700009, 0x00417e4d, 0x00401e44, 0x00401e05, 0x00401e0d, 0x00415a06,
++      0x00600005, 0x004015c5, 0x00600011, 0x00401c0b, 0x0090ffff, 0x0091ffff,
++      0x00200020, 0x00600008, 0x0050004c, 0x00600009, 0x00415a45, 0x0041754d,
++      0x0070009d, 0x004022cf, 0x0070009f, 0x0050009f, 0x00401fc0, 0x00200080,
++      0x00600008, 0x00401f4f, 0x00401fc0, 0x004025cc, 0x00700081, 0x00200000,
++      0x00600006, 0x00700000, 0x00111bfc, 0x00700080, 0x00700083, 0x00200047,
++      0x00600006, 0x0011020a, 0x002005c0, 0x00600007, 0x00300000, 0x00c000ff,
++      0x00c800ff, 0x00416507, 0x00202627, 0x008000ff, 0x00403c8c, 0x005000cb, 
++      0x00a0023f, 0x00200040, 0x00600006, 0x0070000f, 0x00170202, 0x0011020a,
++      0x00200032, 0x0010020d, 0x001b0242, 0x00120302, 0x00140402, 0x00180500,
++      0x00130509, 0x00150550, 0x00110605, 0x001e0607, 0x00110700, 0x00110900,
++      0x00110902, 0x00110a00, 0x00160b02, 0x00110b28, 0x00140b2b, 0x00110c01,
++      0x00111400, 0x00111405, 0x00111407, 0x00111409, 0x0011140b, 0x002000ea,
++      0x00101500, 0x0040640f, 0x0040644b, 0x00213700, 0x00600007, 0x00200440,
++      0x008800ff, 0x0070008f, 0x0040648c, 0x005000cb, 0x00000000, 0x001118f8,
++      0x0020002b, 0x00101a05, 0x00131c00, 0x00111c04, 0x00141c20, 0x00111c25,
++      0x00131c40, 0x00111c44, 0x00141c60, 0x00111c65, 0x00131c80, 0x00111c84,
++      0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00111cc4, 0x00141ce0, 0x00111ce5,
++      0x00131d00, 0x00111d04, 0x00141d20, 0x00111d25, 0x00131d40, 0x00111d44,
++      0x00141d60, 0x00111d65, 0x00131f00, 0x00191f40, 0x00409ee0, 0x00200217,
++      0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
++      0x00122100, 0x00122103, 0x00162200, 0x0040960f, 0x0040964b, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040968c, 0x005000cb,
++      0x00000000, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
++      0x0011238b, 0x00192394, 0x0040b0e1, 0x00200285, 0x00600006, 0x00200044,
++      0x00102480, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500, 0x00122503,
++      0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702, 0x00122780,
++      0x0011278b, 0x00192794, 0x0040cce2, 0x002002f3, 0x00600006, 0x00200044,
++      0x00102880, 0x001128c6, 0x001528c9, 0x0040c00f, 0x0040c04b, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x0040c08c, 0x005000cb,
++      0x00000000, 0x001928d0, 0x00122900, 0x00122903, 0x00162a00, 0x00122a07,
++      0x00112a80, 0x00112b00, 0x00112b02, 0x00122b80, 0x00112b8b, 0x00192b94, 
++      0x0040dee3, 0x00200361, 0x00600006, 0x00200044, 0x00102c80, 0x00112cc6,
++      0x00152cc9, 0x00192cd0, 0x00122d00, 0x00122d03, 0x00162e00, 0x00122e07,
++      0x00112e80, 0x00112f00, 0x00112f02, 0x00122f80, 0x00112f8b, 0x00192f94,
++      0x0040fae4, 0x002003cf, 0x00600006, 0x00200044, 0x00103080, 0x0040ec0f,
++      0x0040ec4b, 0x00213700, 0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 
++      0x0040ec8c, 0x005000cb, 0x00000000, 0x001130c6, 0x001530c9, 0x001930d0,
++      0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
++      0x00113302, 0x00123380, 0x0011338b, 0x00193394, 0x00410ce5, 0x0020043d,
++      0x00600006, 0x00200044, 0x00103480, 0x001134c6, 0x001534c9, 0x001934d0,
++      0x00123500, 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700,
++      0x00113702, 0x00123780, 0x0011378b, 0x00193794, 0x004128e6, 0x002004ab,
++      0x00600006, 0x00200044, 0x00103880, 0x00411a0f, 0x00411a4b, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x0070008f, 0x00411a8c, 0x005000cb,
++      0x00000000, 0x001138c6, 0x001538c9, 0x001938d0, 0x00123900, 0x00123903,
++      0x00163a00, 0x00123a07, 0x00113a80, 0x00113b00, 0x00113b02, 0x00123b80,
++      0x00113b8b, 0x00193b94, 0x00413ae7, 0x00200519, 0x00600006, 0x00200044,
++      0x00103c80, 0x00113cc6, 0x00153cc9, 0x00193cd0, 0x00123d00, 0x00123d03,
++      0x00163e00, 0x00123e07, 0x00113e80, 0x00113f00, 0x00113f02, 0x00123f80,
++      0x00113f8b, 0x00193f94, 0x00000000, 0x0041410f, 0x005000cb, 0x00213700,
++      0x00600007, 0x00200440, 0x008800ff, 0x005000cb, 0x00414487, 0x0060000a,
++      0x00000000, 0x00415300, 0x007000a0, 0x00700080, 0x002005c0, 0x00600007,
++      0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000, 0x00200000,
++      0x00600006, 0x00111bfe, 0x0041754d, 0x00700000, 0x00200000, 0x00600006,
++      0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081, 0x00600004,
++      0x0050004a, 0x00415f88, 0x0060000b, 0x00200000, 0x00600006, 0x00700000,
++      0x0041750b, 0x00111bfd, 0x00402e4d, 0x00202627, 0x008000fd, 0x005000cb,
++      0x00c00002, 0x002005c0, 0x00600007, 0x0020015f, 0x00800002, 0x005000cb,
++      0x00c01802, 0x002024c8, 0x00800002, 0x005000cb, 0x00403a4d, 0x0060000b,
++      0x0041734d, 0x00700001, 0x00700003, 0x00417906, 0x00417a05, 0x0060000d,
++      0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e, 0x0070001c,
++      0x0060000c, ~0
++};
++
++static uint32_t nv84_ctx_voodoo[] = {
++      0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
++      0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
++      0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06,
++      0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
++      0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801,
++      0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
++      0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d,
++      0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
++      0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
++      0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007,
++      0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
++      0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007,
++      0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff,
++      0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
++      0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
++      0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
++      0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
++      0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
++      0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c,
++      0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
++      0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
++      0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4,
++      0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed,
++      0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
++      0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
++      0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1,
++      0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c,
++      0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500,
++      0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
++      0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb,
++      0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0,
++      0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00,
++      0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3,
++      0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c,
++      0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00,
++      0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02,
++      0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389,
++      0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0,
++      0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
++      0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5,
++      0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b,
++      0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c,
++      0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500,
++      0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
++      0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f,
++      0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb,
++      0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080,
++      0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb,
++      0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000,
++      0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d,
++      0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000,
++      0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916,
++      0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160,
++      0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb,
++      0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003,
++      0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006,
++      0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0
++};
++ 
++static uint32_t nv86_ctx_voodoo[] = {
++      0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
++      0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
++      0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906,
++      0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
++      0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801,
++      0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
++      0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d,
++      0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
++      0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
++      0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007,
++      0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
++      0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007,
++      0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff,
++      0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
++      0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
++      0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
++      0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
++      0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
++      0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
++      0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c,
++      0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
++      0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
++      0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044,
++      0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103,
++      0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
++      0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb,
++      0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387,
++      0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280,
++      0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000,
++      0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000,
++      0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081,
++      0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006,
++      0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd,
++      0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002,
++      0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d,
++      0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905,
++      0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e,
++      0x0060000c, ~0
++};
++
++static uint32_t nv92_ctx_voodoo[] = {
++      0x0070008E, 0x0070009C, 0x00200020, 0x00600008, 0x0050004C, 0x00400E89,
++      0x00200000, 0x00600007, 0x00300000, 0x00C000FF, 0x00200000, 0x008000FF,
++      0x00700009, 0x0041924D, 0x00402944, 0x00402905, 0x0040290D, 0x00416E06,
++      0x00600005, 0x004015C5, 0x00600011, 0x0040270B, 0x004021C5, 0x00700000,
++      0x00700081, 0x00600004, 0x0050004A, 0x00219600, 0x00600007, 0x00C02701,
++      0x0020002E, 0x00800001, 0x005000CB, 0x0090FFFF, 0x0091FFFF, 0x00200020,
++      0x00600008, 0x0050004C, 0x00600009, 0x00416E45, 0x0041894D, 0x0070009D,
++      0x00402DCF, 0x0070009F, 0x0050009F, 0x00402AC0, 0x00200080, 0x00600008,
++      0x00402A4F, 0x00402AC0, 0x004030CC, 0x00700081, 0x00200000, 0x00600006,
++      0x00700000, 0x00111BFC, 0x00700083, 0x00300000, 0x00219600, 0x00600007,
++      0x00C00A01, 0x0020001E, 0x00800001, 0x005000CB, 0x00C000FF, 0x00700080,
++      0x00700083, 0x00200047, 0x00600006, 0x0011020A, 0x00200540, 0x00600007,
++      0x00300000, 0x00C000FF, 0x00C800FF, 0x00417907, 0x00202DD2, 0x008000FF,
++      0x0040508C, 0x005000CB, 0x00A0023F, 0x00200040, 0x00600006, 0x0070000F,
++      0x00170202, 0x0011020A, 0x00200032, 0x0010020D, 0x001C0242, 0x00120302,
++      0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000F,
++      0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110A00, 0x00160B02,
++      0x00120B28, 0x00140B2B, 0x00110C01, 0x00111400, 0x00111405, 0x00111407,
++      0x00111409, 0x0011140B, 0x002000CB, 0x00101500, 0x0040790F, 0x0040794B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040798C,
++      0x005000CB, 0x00000000, 0x00141A05, 0x00131A0C, 0x00131C00, 0x00121C04,
++      0x00141C20, 0x00111C25, 0x00131C40, 0x00121C44, 0x00141C60, 0x00111C65,
++      0x00131C80, 0x00121C84, 0x00141CA0, 0x00111CA5, 0x00131CC0, 0x00121CC4,
++      0x00141CE0, 0x00111CE5, 0x00131F00, 0x00191F40, 0x0040A1E0, 0x002001C9,
++      0x00600006, 0x00200044, 0x00102080, 0x001120C6, 0x001520C9, 0x001920D0,
++      0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
++      0x00112302, 0x00122380, 0x0011238B, 0x00112394, 0x0011239C, 0x0040BEE1,
++      0x00200230, 0x00600006, 0x00200044, 0x00102480, 0x0040AF0F, 0x0040AF4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040AF8C,
++      0x005000CB, 0x00000000, 0x001124C6, 0x001524C9, 0x001924D0, 0x00122500,
++      0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
++      0x00122780, 0x0011278B, 0x00112794, 0x0011279C, 0x0040D1E2, 0x00200297,
++      0x00600006, 0x00200044, 0x00102880, 0x001128C6, 0x001528C9, 0x001928D0,
++      0x00122900, 0x00122903, 0x00162A00, 0x00122A07, 0x00112A80, 0x00112B00,
++      0x00112B02, 0x00122B80, 0x00112B8B, 0x00112B94, 0x00112B9C, 0x0040EEE3,
++      0x002002FE, 0x00600006, 0x00200044, 0x00102C80, 0x0040DF0F, 0x0040DF4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x0040DF8C,
++      0x005000CB, 0x00000000, 0x00112CC6, 0x00152CC9, 0x00192CD0, 0x00122D00,
++      0x00122D03, 0x00162E00, 0x00122E07, 0x00112E80, 0x00112F00, 0x00112F02,
++      0x00122F80, 0x00112F8B, 0x00112F94, 0x00112F9C, 0x004101E4, 0x00200365,
++      0x00600006, 0x00200044, 0x00103080, 0x001130C6, 0x001530C9, 0x001930D0,
++      0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
++      0x00113302, 0x00123380, 0x0011338B, 0x00113394, 0x0011339C, 0x00411EE5,
++      0x002003CC, 0x00600006, 0x00200044, 0x00103480, 0x00410F0F, 0x00410F4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00410F8C,
++      0x005000CB, 0x00000000, 0x001134C6, 0x001534C9, 0x001934D0, 0x00123500,
++      0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
++      0x00123780, 0x0011378B, 0x00113794, 0x0011379C, 0x004131E6, 0x00200433,
++      0x00600006, 0x00200044, 0x00103880, 0x001138C6, 0x001538C9, 0x001938D0,
++      0x00123900, 0x00123903, 0x00163A00, 0x00123A07, 0x00113A80, 0x00113B00,
++      0x00113B02, 0x00123B80, 0x00113B8B, 0x00113B94, 0x00113B9C, 0x00414EE7,
++      0x0020049A, 0x00600006, 0x00200044, 0x00103C80, 0x00413F0F, 0x00413F4B,
++      0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x0070008F, 0x00413F8C,
++      0x005000CB, 0x00000000, 0x00113CC6, 0x00153CC9, 0x00193CD0, 0x00123D00,
++      0x00123D03, 0x00163E00, 0x00123E07, 0x00113E80, 0x00113F00, 0x00113F02,
++      0x00123F80, 0x00113F8B, 0x00113F94, 0x00113F9C, 0x00000000, 0x0041550F,
++      0x005000CB, 0x00217400, 0x00600007, 0x0020043E, 0x008800FF, 0x005000CB,
++      0x00415887, 0x0060000A, 0x00000000, 0x00416700, 0x007000A0, 0x00700080,
++      0x00200540, 0x00600007, 0x00200004, 0x00C000FF, 0x008000FF, 0x005000CB,
++      0x00700000, 0x00200000, 0x00600006, 0x00111BFE, 0x0041894D, 0x00700000,
++      0x00200000, 0x00600006, 0x00111BFE, 0x00700080, 0x0070001D, 0x0040114D,
++      0x00700081, 0x00600004, 0x0050004A, 0x00417388, 0x0060000B, 0x00200000,
++      0x00600006, 0x00700000, 0x0041890B, 0x00111BFD, 0x0040424D, 0x00202DD2,
++      0x008000FD, 0x005000CB, 0x00C00002, 0x00200540, 0x00600007, 0x00200160,
++      0x00800002, 0x005000CB, 0x00C01802, 0x00202C72, 0x00800002, 0x005000CB,
++      0x00404E4D, 0x0060000B, 0x0041874D, 0x00700001, 0x00700003, 0x00418D06,
++      0x00418E05, 0x0060000D, 0x00700005, 0x0070000D, 0x00700006, 0x0070000B,
++      0x0070000E, 0x0070001C, 0x0060000C, ~0
++};
++
++static uint32_t nvaa_ctx_voodoo[] = {
++      0x0070009c, 0x00300000, 0x0044f109, 0x00402d09, 0x0040e551, 0x00400a44,
++      0x00400a05, 0x00400a0d, 0x0070008e, 0x0040124d, 0x0070009d, 0x0045004d,
++      0x00700097, 0x00450121, 0x004446a1, 0x0044764d, 0x0044824d, 0x0070001d,
++      0x00401806, 0x00600005, 0x00444445, 0x0044308b, 0x00401845, 0x0040234d,
++      0x00700081, 0x00401ccf, 0x0070009f, 0x0050009f, 0x0044dc4d, 0x00700017,
++      0x0040230b, 0x00447d4d, 0x00450221, 0x004456a1, 0x007000a0, 0x00700001,
++      0x00700003, 0x00402706, 0x00402805, 0x0060000d, 0x00700005, 0x0070000d,
++      0x00700006, 0x00700002, 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c,
++      0x00000000, 0x0090ffff, 0x0091ffff, 0x0044d44d, 0x00600009, 0x0048004d,
++      0x00700096, 0x00403acf, 0x0070009f, 0x0050009f, 0x0040e551, 0x004036c0,
++      0x00200080, 0x00600008, 0x0040364f, 0x004036c0, 0x00403ecc, 0x00403651,
++      0x00700016, 0x0048004d, 0x00600011, 0x0048004d, 0x0044364d, 0x0070008e,
++      0x00700081, 0x0044704d, 0x00447d4d, 0x00700083, 0x00300000, 0x00212740,
++      0x00600007, 0x00c00b01, 0x00200022, 0x00800001, 0x005000cb, 0x00c000ff,
++      0x00445e4d, 0x0048004d, 0x0044ce08, 0x0044734d, 0x00448b4d, 0x00445e4d,
++      0x0044e24d, 0x0044764d, 0x0044824d, 0x0048004d, 0x00700083, 0x0045034d,
++      0x00a0023f, 0x00200040, 0x00600006, 0x0044fc4d, 0x00448d4d, 0x002001d0,
++      0x0044b860, 0x00200280, 0x0038ffff, 0x0044cc4d, 0x00300000, 0x005000cb,
++      0x00451c4d, 0x005000cb, 0x0044d007, 0x0048004d, 0x0044794d, 0x00111bfc,
++      0x0048004d, 0x0044794d, 0x00111bfd, 0x0048004d, 0x0044794d, 0x00111bfe,
++      0x0048004d, 0x00200000, 0x00700000, 0x00600006, 0x0048004d, 0x00200001,
++      0x00600006, 0x0044fc4d, 0x0011020a, 0x0048004d, 0x00300000, 0x00c3ffff,
++      0x00200000, 0x00600007, 0x00700000, 0x00200008, 0x008000ff, 0x005000cb,
++      0x0048004d, 0x00000000, 0x0048004d, 0x00000000, 0x00170202, 0x00200032,
++      0x0010020d, 0x001e0242, 0x001102c0, 0x00120302, 0x00150402, 0x00180500,
++      0x00130509, 0x00150550, 0x00110605, 0x00200013, 0x00100607, 0x00110700,
++      0x00110900, 0x00120902, 0x00110a00, 0x00160b02, 0x00120b28, 0x00140b2b,
++      0x00110c01, 0x00110d01, 0x00111400, 0x00111405, 0x00111407, 0x00111409,
++      0x0011140b, 0x002000d4, 0x00101500, 0x00141a05, 0x00131a0c, 0x00131c00,
++      0x00131c04, 0x00141c20, 0x00131c25, 0x00131f00, 0x00131f04, 0x00111f08,
++      0x00111f0b, 0x00200015, 0x00101f40, 0x0048004d, 0x00600006, 0x00451c4d,
++      0x00112020, 0x00112022, 0x00200085, 0x00102040, 0x001120c8, 0x001420ca,
++      0x001b20cf, 0x00122100, 0x00122103, 0x00162140, 0x00122147, 0x00122153,
++      0x001121a0, 0x001221c0, 0x001121cb, 0x001121d4, 0x001521d8, 0x0048004d,
++      0x00000000, 0x0048004d, 0x0060000b, 0x0048004d, 0x0060000a, 0x0048004d,
++      0x0060000b, 0x0040d24d, 0x00200020, 0x00600008, 0x0050004c, 0x0048004d,
++      0x002003e8, 0x00600008, 0x0050004c, 0x0048004d, 0x00600004, 0x0050004a,
++      0x0048004d, 0x00c000ff, 0x00c800ff, 0x0048004d, 0x00c000ff, 0x00c800ff,
++      0x0048004d, 0x00700016, 0x0070008e, 0x00700082, 0x00500041, 0x0044d84d,
++      0x00700095, 0x005000d1, 0x00600016, 0x00500052, 0x00700002, 0x00700015,
++      0x0040284d, 0x0070008e, 0x0044d44d, 0x00200000, 0x00600007, 0x00300000,
++      0x00c000ff, 0x00200000, 0x008000ff, 0x00700009, 0x0070000e, 0x0048004d,
++      0x00700080, 0x00480017, 0x00700000, 0x0048004d, 0x0048004d, 0x0048004d,
++      0x0048004d, 0x0070008e, 0x0044d44d, 0x00700083, 0x0044df4d, 0x00450c4d,
++      0x0070000f, 0x00410b8c, 0x005000cb, 0x0048004d, 0x00200280, 0x00600007,
++      0x00452307, 0x00451187, 0x0048004d, 0x00000000, 0x00202070, 0x0044fc4d,
++      0x008000ff, 0x0048004d, 0x00210600, 0x00600007, 0x00200428, 0x0044fc4d,
++      0x008800ff, 0x0048004d, 0x0048000f, 0x0048004b, 0x0045164d, 0x0070008f,
++      0x0048008c, 0x005000cb, 0x0048004d, 0x00202070, 0x0044fc4d, 0x008000fd,
++      0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200161, 0x0044fc4d,
++      0x00800002, 0x005000cb, 0x00c00002, 0x00201f0e, 0x0044fc4d, 0x00800002,
++      0x005000cb, 0x0048004d, ~0
++};
++
++static int
++nv50_graph_init_ctxctl(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t *voodoo = NULL;
++
++      DRM_DEBUG("\n");
++
++      switch (dev_priv->chipset) {
++      case 0x50:
++              voodoo = nv50_ctx_voodoo;
++              break;
++      case 0x84:
++              voodoo = nv84_ctx_voodoo;
++              break;
++      case 0x86:
++              voodoo = nv86_ctx_voodoo;
++              break;
++      case 0x92:
++              voodoo = nv92_ctx_voodoo;
++              break;
++      case 0xaa:
++              voodoo = nvaa_ctx_voodoo;
++              break;
++      default:
++              DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset);
++              return -EINVAL;
++      }
++
++      NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
++      while (*voodoo != ~0) {
++              NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo);
++              voodoo++;
++      }
++
++      NV_WRITE(0x400320, 4);
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
++
++      return 0;
++}
++
++int
++nv50_graph_init(struct drm_device *dev)
++{
++      int ret;
++
++      DRM_DEBUG("\n");
++
++      nv50_graph_init_reset(dev);
++      nv50_graph_init_intr(dev);
++      nv50_graph_init_regs__nv(dev);
++      nv50_graph_init_regs(dev);
++
++      ret = nv50_graph_init_ctxctl(dev);
++      if (ret)
++              return ret;
++
++      return 0;
++}
++
++void
++nv50_graph_takedown(struct drm_device *dev)
++{
++      DRM_DEBUG("\n");
++}
++
++static void
++nv50_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++      INSTANCE_WR(ctx, 0x00120/4, 0xff400040);
++      INSTANCE_WR(ctx, 0x00124/4, 0xfff00080);
++      INSTANCE_WR(ctx, 0x00128/4, 0xfff70090);
++      INSTANCE_WR(ctx, 0x0012c/4, 0xffe806a8);
++      INSTANCE_WR(ctx, 0x001d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x001d8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00214/4, 0x0000fe0c);
++      INSTANCE_WR(ctx, 0x00228/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00254/4, 0x0001fd87);
++      INSTANCE_WR(ctx, 0x00268/4, 0x00001018);
++      INSTANCE_WR(ctx, 0x0026c/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002a4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x002a8/4, 0x0001005f);
++      INSTANCE_WR(ctx, 0x002b0/4, 0x00000600);
++      INSTANCE_WR(ctx, 0x002b4/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x002c8/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x002e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x002e8/4, 0x00300080);
++      INSTANCE_WR(ctx, 0x002ec/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00308/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0030c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00318/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0031c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00334/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00338/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0033c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00350/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00354/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00360/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x0000000a);
++      INSTANCE_WR(ctx, 0x003cc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00420/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00438/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00444/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00450/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00484/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0048c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00494/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004a8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x004c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004c8/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x004cc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x004e0/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x004f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00558/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000012);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00598/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005b8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x005d4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005e8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005fc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00600/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00628/4, 0x00000200);
++      INSTANCE_WR(ctx, 0x00630/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00634/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x00638/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00644/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00648/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x0064c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0065c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00660/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00668/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00678/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00680/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00688/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00690/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00698/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x006a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x006ac/4, 0x00000f80);
++      INSTANCE_WR(ctx, 0x006f4/4, 0x007f0080);
++      INSTANCE_WR(ctx, 0x00730/4, 0x007f0080);
++      INSTANCE_WR(ctx, 0x00754/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x00758/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00760/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00760/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00760/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x00760/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x00778/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x0077c/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00784/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00784/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00784/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x00784/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x0079c/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x007a0/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x007a8/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x007c0/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x007c4/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x007cc/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x007e4/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x007e8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x007f0/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x00808/4, 0x1b74f820);
++      INSTANCE_WR(ctx, 0x0080c/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00814/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00814/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00814/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x00814/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x0082c/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x00834/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x00840/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x00844/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x0085c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00860/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00864/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00874/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00878/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x0089c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x008a4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x008ac/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x008b4/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x008b8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x008dc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008f4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x008f8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x0091c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00924/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00934/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00938/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00960/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x0096c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00984/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00984/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00984/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00984/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x009cc/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x009e4/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x009e8/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x009f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00a14/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00a1c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00a2c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00a30/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00a5c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00a64/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00a6c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00a70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00a94/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00a98/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00a9c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00aac/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00ab0/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00ad4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00adc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ae4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00aec/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00af0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00b18/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x00b24/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00b64/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00b9c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00ba0/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00bcc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00bd0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00be4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00be8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00c0c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00c14/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00c24/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00c28/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00c4c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00c50/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00c54/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00c64/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00c68/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00c9c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00ca4/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00ca8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00cd0/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00cf4/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00d54/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00d58/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00d84/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d88/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00d8c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d9c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00da0/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00dc4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00dcc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00dd4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00ddc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00de0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00e04/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e08/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00e0c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e1c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00e20/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00e44/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00e4c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00e54/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00e5c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00e60/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00e88/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x00e94/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00ed4/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00f0c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00f10/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00f1c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00f3c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00f40/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00f44/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00f54/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00f58/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00f7c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00f84/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00f8c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00f94/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00f98/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00fbc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fc0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00fc4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fd4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00fd8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x00ffc/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01004/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0100c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01014/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01018/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01040/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x0104c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01064/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x01064/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x01064/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x01064/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0108c/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x010ac/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x010c4/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x010c8/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x010d4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x010f4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x010f8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x010fc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0110c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01110/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x01134/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x0113c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01144/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0114c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01150/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01174/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01178/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x0117c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0118c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01190/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x011b4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x011bc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x011c4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x011cc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x011d0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x011f8/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x01204/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x0121c/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01244/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x01244/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01244/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x01264/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x01264/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x0127c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x01280/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0128c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x012ac/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x012b0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x012b4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x012c4/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x012c8/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x012ec/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x012f4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x012fc/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01304/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01308/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0132c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01330/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x01334/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01344/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01348/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x0136c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01374/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0137c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01384/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01388/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x013b0/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x013bc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x013fc/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x0141c/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x01434/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x01438/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x01444/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x01444/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01444/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01444/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x01444/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x01444/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01444/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01464/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01468/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x0146c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0147c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01480/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x014a4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x014ac/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x014b4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x014bc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x014c0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x014e4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x014e8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x014ec/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x014fc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x01500/4, 0x000c0000);
++      INSTANCE_WR(ctx, 0x01524/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x0152c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01534/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0153c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x01540/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01568/4, 0x00007070);
++      INSTANCE_WR(ctx, 0x01574/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x0158c/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x015b4/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x015d4/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x015ec/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x015f0/4, 0x00007fff);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x000001ff);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x015fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02b40/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x02b60/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02b80/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02ba0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02bc0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02be0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c60/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02c80/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02ca0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x02cc0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c5e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0c600/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x44f80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x44fa0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x44fc0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x45000/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x45040/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x45060/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x45080/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x450e0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x45100/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x45160/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4c9a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4cc80/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4ce00/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4ce20/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4ce60/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4cee0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4cf20/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x4d080/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4d0a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4d0c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x4d1e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4d260/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4d480/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4d4a0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x4d4c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d4e0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d500/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d520/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4d940/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d960/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d980/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d9a0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d9c0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4d9e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da40/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da60/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4da80/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4daa0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4dac0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4dae0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4db00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4db20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x4db40/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x4db80/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01784/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01824/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x01a04/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x01bc4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01be4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c24/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c44/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x01c84/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x01e24/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x042e4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x04324/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e84/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x15524/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x15764/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15784/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x157c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x157e4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15804/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15824/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x15864/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x15924/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15964/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15984/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x159a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x159c4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x159e4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x15ac4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15b04/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15b24/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15b44/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15be4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x15c24/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15c44/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x15cc4/4, 0x04444480);
++      INSTANCE_WR(ctx, 0x16444/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x164e4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x16544/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x16584/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x165a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x165c4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x165e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16604/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16624/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x185a4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x185c4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x18664/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x187e4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x18804/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x16708/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x16768/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x16948/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x16a28/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16a48/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x16aa8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16d08/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x16de8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ee8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x16f08/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17108/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x171a8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x171c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x171e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x17268/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x17288/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x17508/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17528/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17548/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17568/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17588/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x175a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x175c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x175e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17608/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17628/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17648/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17668/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17688/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x176a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x176c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x176e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17708/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x17be8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x17c08/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x17c68/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17ca8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17cc8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17ce8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17d08/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x18108/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x18128/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x18608/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x18648/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18668/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18688/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x186a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x186c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x186e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18728/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x18768/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x188a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x188c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x188e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18908/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18ec8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18ee8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18f28/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x18fa8/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x18fc8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18fe8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19028/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19048/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x19088/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x190a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x190c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19108/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x19188/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x191a8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x19288/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x192a8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x199c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19a28/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1a148/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x1a168/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x1a1c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a4a8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1a508/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1a588/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1a5a8/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1aa68/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x1aaa8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1aae8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ab08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ab48/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1aba8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1abe8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ac08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ac48/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1ac68/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1ac88/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x1acc8/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x25528/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x25548/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x25588/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x255a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x255c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x25608/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x25648/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x256c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x256e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25708/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25728/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25748/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25768/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25788/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x257a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x257c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x257e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25808/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25828/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25848/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25868/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25888/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x258a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x25d48/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x25d68/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x25dc8/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x0180c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0184c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x019ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01a0c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01a6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01b4c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01c6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01c8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01ccc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01f4c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0216c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0218c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x021ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x021cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x021ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0220c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0222c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0224c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0226c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0228c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0230c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0232c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0234c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0268c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x026cc/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x027ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x027ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0282c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x029cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02acc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02bcc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02cac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02ccc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02cec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02d0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02d2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02d6c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x02dac/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0306c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0308c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x030ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x030cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x030ec/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0310c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0312c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x031ac/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x031cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03e4c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03e8c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0402c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0404c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x040ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0418c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x042ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x042cc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0430c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0458c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x047ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x047cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x047ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0480c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0482c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0484c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0486c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0488c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x048ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x048cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x048ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0490c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0492c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0494c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0496c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0498c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x04ccc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x04d0c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x04dec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0500c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0510c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0520c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x052ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0530c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0532c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0534c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0536c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x053ac/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x053ec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x056ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x056cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x056ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0570c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0572c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0574c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0576c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x057ec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0580c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0648c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x064cc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0666c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0668c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x066ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x067cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x068ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0690c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0694c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x06bcc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x06dec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06e8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06eac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06ecc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06eec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06f8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06fac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x06fcc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0730c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0734c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0742c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0746c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x074ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0764c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0774c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0784c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x078ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0790c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0792c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0794c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0796c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0798c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x079ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x079ec/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x07a2c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x07cec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07d0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07d2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07d4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07d6c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07d8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07dac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07e2c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x07e4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08acc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x08b0c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x08cac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08ccc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x08d2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08e0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x08f2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x08f4c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x08f8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0920c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0942c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0944c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0946c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0948c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x094ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x094cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x094ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0950c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0952c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0954c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0956c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0958c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x095ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x095cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x095ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0960c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0994c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0998c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x09a6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09aac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09aec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09c8c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x09d8c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x09e8c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x09f2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09f4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09f6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09f8c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x09fac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x09fcc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x09fec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a02c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0a06c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0a32c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a34c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0a36c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a38c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a3ac/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0a3cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a3ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0a46c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0a48c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b10c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0b14c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0b2ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b30c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0b36c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b44c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0b56c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b58c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0b5cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b84c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0ba6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0ba8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0baac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bacc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0baec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bb8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bbac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bbcc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bbec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bc0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bc2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bc4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0bf8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0bfcc/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0c0ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c0ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c12c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c2cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0c3cc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0c4cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0c56c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c58c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c5ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c5cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0c5ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c60c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0c62c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c66c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0c6ac/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0c96c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c98c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0c9ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c9cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0c9ec/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0ca0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ca2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0caac/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0cacc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0d74c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0d78c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0d92c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0d94c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0d9ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0da8c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0dbac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0dbcc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0dc0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0de8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0e0ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e0cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e0ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e10c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e12c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e14c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e16c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e18c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e1ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e1cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e1ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e20c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e22c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e24c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e26c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e28c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0e5cc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0e60c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0e6ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0e72c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0e76c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0e90c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0ea0c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0eb0c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0ebac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ebcc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ebec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ec0c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0ec2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ec4c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0ec6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0ecac/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x0ecec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0efac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0efcc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0efec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f00c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f02c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0f04c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f06c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0f0ec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0f10c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01730/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a10/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a30/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01ad0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b90/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bb0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02050/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02070/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x02090/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020b0/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020f0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x02110/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x02250/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16710/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16950/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ad0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16af0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b10/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b30/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b50/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cf0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16db0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f90/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17010/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17050/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17150/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x17230/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17250/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17290/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172b0/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172d0/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x17430/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17450/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17470/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17490/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174d0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17530/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17550/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17570/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17590/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17610/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17630/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17730/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17750/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17850/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17910/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bf0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cd0/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182b0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182d0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182f0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18310/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18330/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18350/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18370/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18390/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184b0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184f0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18510/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18530/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18550/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18570/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18590/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18610/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18630/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18650/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18670/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18690/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187f0/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18810/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18830/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18870/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18930/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189d0/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a50/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cb0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cd0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cf0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e90/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x19190/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x19210/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x19270/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19350/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19410/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19470/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194b0/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19510/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19530/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x19730/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19750/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197b0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19830/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19950/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19990/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199b0/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199d0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199f0/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a10/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a50/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a90/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d90/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e90/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19eb0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ed0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19ef0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f10/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3d0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a410/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a430/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a450/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a470/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a510/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a530/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea70/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecb0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee50/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee70/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee90/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eeb0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f050/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f110/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f330/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f350/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4b0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f510/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f590/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5f0/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f610/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f630/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7d0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f810/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f830/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f850/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f890/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f910/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f930/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f950/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f970/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f990/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa90/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fab0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbb0/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fdd0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fed0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff50/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fff0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30030/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x30070/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30610/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30630/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30650/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30670/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30690/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306b0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306d0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306f0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30710/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30810/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30830/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30850/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30870/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30890/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308f0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30910/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30930/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30950/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30970/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30990/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a10/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a50/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b50/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b90/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c30/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c50/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c90/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d30/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30db0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30dd0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fb0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ff0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31010/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x31030/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x31050/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310d0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311d0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314f0/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x31570/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x31610/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31630/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31730/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d0/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x31810/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31830/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31850/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31870/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31890/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a90/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ab0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ad0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b10/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b90/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cf0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d10/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d30/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d50/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d70/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31db0/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x01734/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019f4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a14/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a34/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01ad4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b34/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b74/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b94/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bb4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02054/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02074/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x02094/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020b4/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020f4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x02114/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x02254/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166f4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16714/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16954/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ad4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16af4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b14/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b34/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b54/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c74/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16db4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f54/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f74/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f94/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17014/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17054/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17154/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x17234/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17254/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17294/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172b4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172d4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x17434/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17474/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17494/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17534/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17554/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17574/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17594/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17614/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17634/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17734/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17754/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17854/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178b4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17914/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a74/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b74/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bf4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c14/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c54/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c94/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cd4/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d14/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d54/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182b4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182d4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182f4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18314/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18334/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18354/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18374/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18394/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183b4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184b4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184d4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184f4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18514/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18534/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18554/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18574/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18594/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185b4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185d4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185f4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18614/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18634/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18654/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18674/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18694/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186b4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187f4/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18814/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18834/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18874/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188d4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188f4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18934/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189d4/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a54/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a74/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c94/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cb4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cd4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cf4/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d74/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e74/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e94/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x19194/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x19214/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x19274/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19354/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19414/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19474/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194b4/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194d4/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194f4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19514/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19534/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x19734/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19754/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19774/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197b4/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19834/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19994/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199b4/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199d4/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199f4/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a14/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a54/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a94/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d74/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d94/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e34/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e94/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19eb4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ed4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19ef4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f14/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3d4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3f4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a414/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a434/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a454/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a474/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a514/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a534/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5b4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea54/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea74/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecb4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee34/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee54/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee74/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee94/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eeb4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f054/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f114/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2f4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f314/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f334/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f354/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f374/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3b4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4b4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f514/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f594/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5f4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f614/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f634/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f794/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f814/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f834/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f854/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f894/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f934/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f974/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f994/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa94/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fab4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbb4/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc14/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc74/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fdd4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fed4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff54/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff74/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fff4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30034/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x30074/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30614/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30634/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30654/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30674/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30694/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306b4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306d4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306f4/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30714/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30814/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30834/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30854/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30874/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30894/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308b4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308d4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308f4/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30914/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30934/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30954/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30974/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30994/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309b4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309d4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309f4/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a14/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a54/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b54/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b74/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b94/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bd4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c34/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c54/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c94/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d34/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30db4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30dd4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f14/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fb4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ff4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31014/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x31034/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x31054/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310d4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311d4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311f4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314f4/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x31574/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315d4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x31614/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31634/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31734/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31774/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d4/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x31814/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31834/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31854/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31874/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31894/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a94/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ab4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ad4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b14/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b94/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d14/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d34/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d54/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d74/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31db4/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x01738/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a18/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a38/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01ad8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b38/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b78/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b98/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02058/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02078/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x02098/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020b8/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020d8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020f8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x02118/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021d8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x02258/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16718/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16958/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16ad8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16af8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b18/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b38/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b58/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16db8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f58/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f98/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17018/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17058/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17158/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x17238/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17258/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17298/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172b8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172d8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x17438/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17458/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17478/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x17498/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174d8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17538/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17558/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17578/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17598/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17618/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17638/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17738/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17758/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17858/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178b8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17918/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b78/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bf8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c58/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cd8/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d58/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182b8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182d8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182f8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18318/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18338/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18358/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18378/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x18398/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183b8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184b8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184d8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184f8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18518/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18538/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18558/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18578/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18598/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185b8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185d8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185f8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18618/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18638/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18658/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18678/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18698/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186b8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186f8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187f8/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18818/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18838/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18878/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188d8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188f8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18938/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189d8/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a58/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a78/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cb8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cd8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cf8/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e78/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e98/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x19198/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x19218/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x19278/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19358/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19418/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19478/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194b8/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194d8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194f8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19518/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19538/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x19738/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19758/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197b8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19838/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19998/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199b8/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199d8/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199f8/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a18/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a58/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a98/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d78/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d98/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e38/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e98/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19eb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ed8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19ef8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f18/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3d8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a418/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a438/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a458/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a478/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a518/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a538/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea78/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecb8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee38/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee58/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee78/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee98/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eeb8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f058/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f118/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2f8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f318/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f338/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f358/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f378/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3b8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4b8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f518/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f598/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5f8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f618/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f638/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f798/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7d8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f818/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f838/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f858/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f898/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f918/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f938/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f978/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f998/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa98/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fab8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbb8/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc18/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc78/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fdd8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fed8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff58/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fff8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30038/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x30078/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30618/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30638/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30658/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30678/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30698/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306b8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306d8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306f8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30718/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30818/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30838/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30858/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30878/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30898/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308b8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308d8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308f8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30918/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30938/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30958/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30978/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30998/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309b8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309d8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309f8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a18/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a58/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b58/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b98/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c38/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c58/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c98/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d38/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30db8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30dd8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ff8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31018/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x31038/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x31058/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310d8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311d8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311f8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314f8/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x31578/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x31618/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31638/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31738/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d8/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x31818/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31838/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31858/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31878/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31898/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a98/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ab8/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31ad8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b18/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b98/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d18/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d38/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d58/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d78/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31db8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x0173c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x019fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01a3c/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x01adc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b3c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b7c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b9c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x01bbc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0205c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0207c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0209c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x020bc/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x020dc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x020fc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0211c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021dc/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x0225c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x166fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1671c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1695c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x16adc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16afc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b1c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b3c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16b5c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16c7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16cfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16dbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f5c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f9c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16fdc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ffc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1701c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1705c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1715c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x171bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1723c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1725c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1729c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172bc/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x172dc/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x1743c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1745c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1747c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1749c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x174dc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x174fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1753c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1755c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1757c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1759c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x175fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1761c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1763c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1773c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x1775c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1785c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x178bc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x178dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1791c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x179dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17a7c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17b7c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17bfc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17c1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c5c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17cdc/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x17d1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17d5c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x182bc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182dc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x182fc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1831c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1833c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1835c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1837c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x1839c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x183bc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x184bc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x184dc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x184fc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1851c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1853c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1855c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1857c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x1859c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185bc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x185dc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x185fc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1861c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1863c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1865c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1867c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x1869c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186bc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x186dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x186fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x187fc/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x1881c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1883c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1887c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x188dc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x188fc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1893c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x189dc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x18a5c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18a7c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18bbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18c5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18cbc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18cdc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18cfc/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18d7c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18e7c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18e9c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1919c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x1921c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1927c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x192bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x192dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1935c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1941c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1947c/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x194bc/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x194dc/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x194fc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1951c/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x1953c/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x1973c/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1975c/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1977c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x197bc/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x197dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1983c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1995c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1999c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x199bc/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x199dc/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x199fc/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x19a1c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x19a5c/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19a9c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d7c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19d9c/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x19e3c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e9c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19ebc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19edc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19efc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x19f1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a3dc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1a3fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a41c/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x1a43c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a45c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x1a47c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a51c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a53c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a5bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ea7c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2ecbc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2ee3c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2ee5c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee7c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ee9c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eebc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2efdc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f05c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f11c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f2fc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f31c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f33c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f35c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f37c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3bc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f4bc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f51c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f59c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f5fc/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f61c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f63c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f79c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f7dc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f7fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f81c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f83c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f85c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f89c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f8fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f91c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f93c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f95c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f97c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f99c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa9c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2fabc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2fbbc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x2fc1c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fc3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc7c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2fd3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fddc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fedc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2ff5c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2ff7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ffbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fffc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3003c/4, 0x000003ff);
++      INSTANCE_WR(ctx, 0x3007c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x300bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3061c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3063c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3065c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3067c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3069c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306bc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306dc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x306fc/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x3071c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x3081c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x3083c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3085c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3087c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3089c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308bc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308dc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x308fc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3091c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3093c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3095c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3097c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3099c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309bc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309dc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x309fc/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a1c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30a3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30a5c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x30b5c/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30b7c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30b9c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30bdc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30c3c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x30c5c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30c9c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x30d3c/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x30dbc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30ddc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30f1c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30fbc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30ffc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3101c/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x3103c/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x3105c/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x310dc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x311dc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x311fc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x314fc/4, 0x00003e60);
++      INSTANCE_WR(ctx, 0x3157c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x315dc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x3161c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3163c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x316bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3173c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x3177c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317dc/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x3181c/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x3183c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x3185c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x3187c/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x3189c/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x31a9c/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31abc/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31adc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b1c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31b3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31b9c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31cbc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31cfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31d1c/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31d3c/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31d5c/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x31d7c/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x31dbc/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x4dc00/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4dc40/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc60/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc80/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dca0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd00/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd60/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd80/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dda0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dde0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4de00/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df80/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dfa0/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfc0/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfe0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e040/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e0a0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0c0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e120/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e140/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e2a0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e380/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3a0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3c0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3e0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e400/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e420/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e440/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e460/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e4a0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e560/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e580/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5e0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e700/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e7a0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8e0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e900/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e920/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e940/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e960/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e980/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55e00/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc24/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc44/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc64/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc84/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dce4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd44/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd64/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd84/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddc4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4dde4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df64/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df84/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfa4/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfc4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e024/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e084/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0a4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e104/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e124/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e284/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e364/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e384/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3a4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3c4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e404/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e424/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e444/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e484/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e544/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e564/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5c4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6e4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e784/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8c4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e904/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e924/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e944/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e964/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e984/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55de4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e24/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc28/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc48/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc68/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc88/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dce8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd48/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd68/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd88/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddc8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4dde8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df68/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df88/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfa8/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfc8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e028/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e088/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0a8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e108/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e128/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e288/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e368/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e388/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3a8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3e8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e408/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e428/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e448/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e488/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e548/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e568/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5a8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5c8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6e8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e788/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8c8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8e8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e908/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e928/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e948/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e968/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e988/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55de8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e28/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc2c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc4c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc6c/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc8c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd4c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd6c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd8c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddcc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddec/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df6c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df8c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfac/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfcc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e02c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e08c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0ac/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e10c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e12c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e28c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e36c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e38c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3ac/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3cc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e40c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e42c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e44c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e48c/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e54c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e56c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5ac/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6ec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e78c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8cc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e90c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e92c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e94c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e96c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e98c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9cc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55dec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc30/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc50/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc70/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc90/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcf0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd50/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd70/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddd0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddf0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df90/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfb0/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfd0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e090/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0b0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e110/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e130/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e290/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e370/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e390/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3b0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3d0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e410/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e430/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e450/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e490/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e550/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e570/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5b0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5d0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6f0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e790/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8d0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e910/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e930/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e950/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e970/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e990/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55df0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc34/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc54/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc74/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc94/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcf4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd54/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd74/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd94/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddd4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddf4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df74/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df94/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfb4/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfd4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e034/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e094/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0b4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e114/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e134/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e294/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e374/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e394/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3b4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3d4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e414/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e434/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e494/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e554/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e574/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5b4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5d4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6f4/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e794/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e934/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e974/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e994/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9d4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55df4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc38/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc58/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc78/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc98/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd58/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd78/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4ddd8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddf8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df78/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df98/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfb8/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfd8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e038/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e098/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0b8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e118/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e138/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e298/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e378/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e398/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3b8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3d8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e418/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e438/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e458/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e498/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e558/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e578/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5b8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5d8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6f8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e798/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8d8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e918/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e938/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e978/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e998/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55df8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dc3c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4dc5c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dc7c/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dc9c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dcfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dd5c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd7c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4dd9c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4dddc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4ddfc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4df7c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x4df9c/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x4dfbc/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x4dfdc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e03c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e09c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0bc/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x4e0dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e11c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e13c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e29c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e37c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e39c/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3bc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3dc/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x4e3fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e41c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e43c/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x4e45c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e49c/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x4e55c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4e57c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e5bc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e5dc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x4e6fc/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x4e79c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4e8dc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e8fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e91c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e93c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x4e95c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e97c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e99c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4e9dc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x55dfc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x55e3c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00858/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00760/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00774/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00784/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00798/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007a8/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007bc/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007e0/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x007f0/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00804/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00814/4, 0x00000000);
++      INSTANCE_WR(ctx, 0x00828/4, 0x00000000);
++}
++
++static void
++nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++      INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x001d4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x001d8/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00218/4, 0x0000fe0c);
++      INSTANCE_WR(ctx, 0x0022c/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00258/4, 0x00000187);
++      INSTANCE_WR(ctx, 0x0026c/4, 0x00001018);
++      INSTANCE_WR(ctx, 0x00270/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002ac/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x002b0/4, 0x044d00df);
++      INSTANCE_WR(ctx, 0x002b8/4, 0x00000600);
++      INSTANCE_WR(ctx, 0x002d0/4, 0x01000000);
++      INSTANCE_WR(ctx, 0x002d4/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002dc/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x002f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x002f8/4, 0x000e0080);
++      INSTANCE_WR(ctx, 0x002fc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00318/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0031c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00328/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0032c/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00344/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00348/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0035c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00360/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00364/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00378/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0037c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00384/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0038c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00394/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00398/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x003a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003a8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003c0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003c8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003dc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00404/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00408/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x0040c/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00420/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0042c/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x00434/4, 0x00000029);
++      INSTANCE_WR(ctx, 0x00438/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x0043c/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x00440/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00444/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00448/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x00454/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0045c/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00460/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00464/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00468/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x0046c/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00470/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004b4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x004e4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x004e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004ec/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x004f0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x004f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00500/4, 0x00000012);
++      INSTANCE_WR(ctx, 0x00504/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00508/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x0050c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0051c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00520/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00530/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00534/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00560/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x00564/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00570/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0057c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00588/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0058c/4, 0x00000e00);
++      INSTANCE_WR(ctx, 0x00590/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x0059c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x00000200);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005c8/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x005cc/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x005d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x005e0/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x005f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005f4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x005fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0060c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00614/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0061c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00624/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0062c/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00630/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00634/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0063c/4, 0x00000f80);
++      INSTANCE_WR(ctx, 0x00684/4, 0x007f0080);
++      INSTANCE_WR(ctx, 0x006c0/4, 0x007f0080);
++
++      INSTANCE_WR(ctx, 0x006e4/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x006e8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x006f0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x006f4/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x006f8/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x006fc/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00700/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x0070c/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x00710/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00718/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0071c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00720/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00724/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00728/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x00734/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x00738/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00740/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00744/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00748/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x0074c/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00750/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x0075c/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x00760/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x00768/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x0076c/4, 0x0000001f);
++      INSTANCE_WR(ctx, 0x00770/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x00774/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x00778/4, 0xb7892080);
++
++      INSTANCE_WR(ctx, 0x00784/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x0078c/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x00798/4, 0x00010040);
++      INSTANCE_WR(ctx, 0x0079c/4, 0x00000022);
++
++      INSTANCE_WR(ctx, 0x007b4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007b8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x007bc/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x007d0/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x007f4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x007fc/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00804/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0080c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00810/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00834/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00838/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x0083c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0084c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00850/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00874/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x0087c/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00884/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x0088c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00890/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x008b8/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x008c4/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x008dc/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x008e8/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00904/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00908/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x0090c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00910/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00918/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00924/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00928/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x0092c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00930/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00934/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x0093c/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00940/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00950/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00954/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00958/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00968/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0096c/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00990/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00998/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x009a8/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x009ac/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x009d0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x009d4/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x009d8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x009e8/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x009ec/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00a10/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00a20/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00a28/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00a2c/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00a54/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00a60/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00a78/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00a7c/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00a80/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00a84/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00aa0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00aa4/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00aa8/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00aac/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00ab0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00ab4/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00ac0/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00ac4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00ac8/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00acc/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00ad0/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00ad8/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00adc/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00aec/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00af0/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00af4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00b04/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00b08/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00b2c/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00b34/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00b3c/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00b44/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00b48/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00b6c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00b70/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00b74/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00b84/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00b88/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00bac/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00bbc/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00bc4/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00bc8/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00bf0/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00c14/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00c18/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00c1c/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00c20/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00c3c/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00c40/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00c44/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00c48/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00c4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00c50/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00c5c/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00c60/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00c64/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00c68/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00c6c/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00c74/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00c78/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00c88/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00c8c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00c90/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00ca0/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00ca4/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00cc8/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00cd0/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00ce0/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00ce4/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00d08/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d0c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00d10/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00d20/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00d24/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00d48/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00d50/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00d58/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00d60/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00d64/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00d8c/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00d98/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00db0/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00db4/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00db8/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00dbc/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00dd8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00ddc/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00de0/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00de4/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00de8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00dec/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00df8/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00dfc/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00e00/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00e04/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00e08/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00e10/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00e14/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00e24/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e28/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00e2c/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00e3c/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00e40/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00e64/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00e6c/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00e74/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00e7c/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00e80/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00ea4/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00ea8/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00eac/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00ebc/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00ec0/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x00ee4/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00eec/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00ef4/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00efc/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00f00/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00f28/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x00f34/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x00f4c/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00f50/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00f54/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x00f58/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x00f74/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x00f78/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x00f7c/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x00f80/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x00f84/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00f88/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00f94/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x00f98/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00f9c/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x00fa0/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x00fa4/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00fac/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x00fb0/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x00fc0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fc4/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00fc8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00fd8/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00fdc/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x01000/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01008/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x01010/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01018/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x0101c/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x01040/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01044/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x01048/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x01058/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x0105c/4, 0x00880000);
++      INSTANCE_WR(ctx, 0x01080/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x01088/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x01090/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x01098/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x0109c/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x010c4/4, 0x00027070);
++      INSTANCE_WR(ctx, 0x010d0/4, 0x03ffffff);
++      INSTANCE_WR(ctx, 0x010e8/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x010ec/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x010f0/4, 0x05100202);
++      INSTANCE_WR(ctx, 0x010f4/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x01110/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x01114/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x01118/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x0111c/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x01120/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01124/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01130/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x01134/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x01138/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x0113c/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x01140/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x01148/4, 0x0077f005);
++      INSTANCE_WR(ctx, 0x0114c/4, 0x003f7fff);
++
++      INSTANCE_WR(ctx, 0x01230/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01284/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0130c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01324/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x0134c/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x014ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x014f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01504/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x0150c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01510/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01530/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x0156c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x015d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01630/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0164c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01650/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01670/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01690/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x016c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x016e4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01724/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01744/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x0176c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01784/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x0178c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x017cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01924/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x01a4c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01b30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b50/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01b70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01b90/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x01bb0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01bd0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01c70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01c8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01c90/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01cac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01ccc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01cec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d10/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01d2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d6c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d8c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01dac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01dcc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01dec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01e0c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01e2c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01e4c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0218c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x021cc/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x022ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x022ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0232c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x024cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x025cc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x026cc/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x027ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x027cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x027ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0280c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0282c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0284c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0286c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x028ac/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x028ec/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x02bac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02bcc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02bec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c2c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02c4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02cec/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02d0c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0398c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x039cc/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x03b6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03b8c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x03bec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03ccc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03dec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03e04/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x03e0c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x03e44/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03e4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x040cc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x042ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0430c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0432c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0434c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0436c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0438c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x043ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x043cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x043ec/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0440c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0442c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0444c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0446c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0448c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x044ac/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x044cc/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x0480c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0484c/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x0492c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0496c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x049a4/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x049ac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04b4c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04c4c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x04d4c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e4c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04e8c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x04eac/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04ecc/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x04eec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04f2c/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x04f6c/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x0522c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0524c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0526c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0528c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052ac/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x052cc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x052ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0536c/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x0538c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x083a0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x083c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x083e0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x08400/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08420/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08440/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x084a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x084c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x084e0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08500/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x08520/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x11e40/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x11e60/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x15044/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x152e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15304/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15324/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x15344/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x15384/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x15444/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15484/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x154a4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x154c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x154e4/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x15504/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x155e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15624/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15644/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15664/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15704/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x15744/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x15764/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x157e4/4, 0x04444480);
++      INSTANCE_WR(ctx, 0x15f64/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x16004/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x16064/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x160a4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x160c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x160e4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x16104/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16124/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16144/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x161b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x161c8/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x161d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x16228/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x16408/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x16410/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x164e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16508/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x16568/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16590/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x165b0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x165d0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x165f0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16610/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x16730/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x167b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x167c8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x16870/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x168a8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x169a8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x169c8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x16a10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16a30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16a50/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16a70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16a90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ab0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16ad0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16b10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x16bc8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16c10/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x16c68/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16c70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x16c88/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x16ca8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x16cf0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x16d10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16d28/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x16d48/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x16d50/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x16d70/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x16d90/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x16de8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ef0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f30/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16f50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16f90/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x16ff0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17008/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17010/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17028/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17030/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17048/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17050/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17068/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17070/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17088/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17090/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x170a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x170b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x170c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x170d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x170e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x170f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17108/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17128/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17148/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17168/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x17188/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171a8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171c8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171e8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x171f0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17208/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x17210/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x17310/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x17370/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17390/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17410/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x174d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17570/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17670/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x176e8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x176f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17708/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x17710/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17750/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17768/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x177a8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x177c8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x177d0/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x177e8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17808/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x17810/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17828/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x17850/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x17bc4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17be4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17c28/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x17c48/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x17c84/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17c88/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x17db0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17dd0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17df0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e04/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17e10/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e24/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x17e30/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e50/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e70/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17e90/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x17eb0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x17fb0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x17fd0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x17ff0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18010/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18030/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18050/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18070/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x18090/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x180b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x180d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x180f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18110/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18130/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18150/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18168/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x18170/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x18190/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x181a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x181b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x181c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x181d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x181e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x181f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x18208/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18228/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18248/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18288/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x182c8/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x182f0/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x18310/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18330/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x183d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x183f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x18408/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18428/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18430/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x18448/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18468/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x184d0/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x18550/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x18570/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x186b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18750/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x187b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x187d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x187f0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x18870/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18970/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x18990/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x18aa8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x18b08/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x18b48/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18b68/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18b88/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x18bc8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18be8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18c28/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x18c90/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x18cc8/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x18ce8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18d08/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x18d10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x18d28/4, 0x0000007f);
++      INSTANCE_WR(ctx, 0x18d68/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18d70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18d88/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x18db0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18dc8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x18dd0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18de8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18e08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x18e48/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x18e50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18ec8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18ee8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x18ef0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18f30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x18fb0/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x18fc8/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x18fe8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x18ff0/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x19010/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x19030/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x19050/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x19070/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x192d0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x192f0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x19310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19350/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x19370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x193d0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x194f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19530/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19550/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19570/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19590/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x195b0/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x195f0/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x19630/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19708/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x19768/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x198f0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19910/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19930/4, 0x00608080);
++      INSTANCE_WR(ctx, 0x199d0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19a30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19a50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19a70/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19a90/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19e88/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x19ea8/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x19f08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19f30/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19f50/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x19f70/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x19f90/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x19fb0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x19fd0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a070/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a090/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1a110/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a1e8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1a248/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1a2c8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1a2e8/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1a808/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x1a848/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x1a888/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a8a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a8e8/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1a948/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1a988/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a9a8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1a9e8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x1aa08/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1aa28/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x1aa68/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x2d2c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2d2e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2d328/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x2d348/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2d368/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2d3a8/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x2d3e8/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x2d468/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d488/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d4a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d4c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d4e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d508/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d528/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d548/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d568/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d588/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d5a8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d5c8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d5e8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d608/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d628/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2d648/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x2dae8/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x2db08/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x2db68/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x2e5b0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2e5d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x2e810/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2e990/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2e9b0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2e9d0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2e9f0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2ea10/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x2eb30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ebb0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ec70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee30/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee50/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2ee70/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ee90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2eeb0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2eed0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2ef10/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f010/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f070/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f0f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f110/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f150/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f170/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f190/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f330/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f350/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f390/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f3f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f410/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f430/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f450/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f470/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f490/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f4b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f4d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f4f0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f5f0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2f610/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x2f710/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x2f770/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2f790/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f810/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2f970/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fa70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x2faf0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x2fb10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fb50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fb90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fbd0/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x2fc10/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x2fc50/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x301b0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x301d0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x301f0/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30210/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30230/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30250/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30270/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x30290/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x302b0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x303b0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x303d0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x303f0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30410/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30430/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30450/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30470/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30490/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x304b0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x304d0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x304f0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30510/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30530/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30550/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30570/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30590/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x305b0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x305d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x305f0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x306f0/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x30710/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30730/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x307d0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x307f0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x30830/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x308d0/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x30950/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x30970/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30ab0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30b50/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x30b90/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x30bb0/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x30bd0/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x30bf0/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x30c70/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x30d70/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x30d90/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x31090/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x31110/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x31170/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x311b0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x311d0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31250/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x312f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31330/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x313b0/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x313f0/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x31410/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x31430/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x31450/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x31470/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x316d0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x316f0/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x31710/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31750/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x31770/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x317d0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x318f0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31930/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x31950/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31970/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31990/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x319b0/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x319f0/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x4a7e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4a800/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x4a820/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4a840/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x4a880/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4a8c0/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x4a8e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x4a900/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x4a960/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x4a980/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x4a9e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x52220/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x52500/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x526a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x526c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x52700/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x52780/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x527c0/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x52920/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x52940/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x52960/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x52a80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x52b00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x52d40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x52d60/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x52d80/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x52da0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x52dc0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x52de0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53200/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53220/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53240/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53260/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53280/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x532a0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x532c0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x532e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53300/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53320/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53340/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53360/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53380/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x533a0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x533c0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x533e0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x53400/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x53460/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x53500/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53524/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53540/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53544/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53560/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53564/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53580/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53584/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x535a0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x535e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53600/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53644/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53660/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53684/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x536a0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x536a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x536c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53824/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53840/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x53844/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53860/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x53864/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53880/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x53884/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x538a0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x538e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53900/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53944/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53960/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53984/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x539a0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x539a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x539c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53b04/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53b20/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53be4/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c00/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c04/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c20/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c24/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c40/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c44/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c60/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x53c64/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53c80/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53c84/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53ca0/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53ca4/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53cc0/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x53cc4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53ce0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x53d04/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x53d20/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x53dc4/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53de0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x53de4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x53e00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x53e24/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53e40/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x53e44/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x53e60/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x53f64/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x53f80/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x54004/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x54020/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x54144/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x54160/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x54164/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54180/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54184/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541a4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x541c0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x541c4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x541e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54200/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54204/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54220/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x54244/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x54260/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x5b6a4/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x5b6c0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x5b6e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x5b700/4, 0x00000001);
++}
++
++static void
++nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x10C/4, 0x30);
++      INSTANCE_WR(ctx, 0x1D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
++      INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
++      INSTANCE_WR(ctx, 0x22C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x258/4, 0x187);
++      INSTANCE_WR(ctx, 0x26C/4, 0x1018);
++      INSTANCE_WR(ctx, 0x270/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2AC/4, 0x4);
++      INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF);
++      INSTANCE_WR(ctx, 0x2B8/4, 0x600);
++      INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
++      INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x2F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F8/4, 0x80);
++      INSTANCE_WR(ctx, 0x2FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x318/4, 0x2);
++      INSTANCE_WR(ctx, 0x31C/4, 0x1);
++      INSTANCE_WR(ctx, 0x328/4, 0x1);
++      INSTANCE_WR(ctx, 0x32C/4, 0x100);
++      INSTANCE_WR(ctx, 0x344/4, 0x2);
++      INSTANCE_WR(ctx, 0x348/4, 0x1);
++      INSTANCE_WR(ctx, 0x34C/4, 0x1);
++      INSTANCE_WR(ctx, 0x35C/4, 0x1);
++      INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x36C/4, 0x1);
++      INSTANCE_WR(ctx, 0x370/4, 0x1);
++      INSTANCE_WR(ctx, 0x378/4, 0x1);
++      INSTANCE_WR(ctx, 0x37C/4, 0x1);
++      INSTANCE_WR(ctx, 0x380/4, 0x1);
++      INSTANCE_WR(ctx, 0x384/4, 0x4);
++      INSTANCE_WR(ctx, 0x388/4, 0x1);
++      INSTANCE_WR(ctx, 0x38C/4, 0x1);
++      INSTANCE_WR(ctx, 0x390/4, 0x1);
++      INSTANCE_WR(ctx, 0x394/4, 0x7);
++      INSTANCE_WR(ctx, 0x398/4, 0x1);
++      INSTANCE_WR(ctx, 0x39C/4, 0x7);
++      INSTANCE_WR(ctx, 0x3A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3C0/4, 0x100);
++      INSTANCE_WR(ctx, 0x3C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3D4/4, 0x100);
++      INSTANCE_WR(ctx, 0x3D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3DC/4, 0x100);
++      INSTANCE_WR(ctx, 0x3E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3F0/4, 0x100);
++      INSTANCE_WR(ctx, 0x404/4, 0x4);
++      INSTANCE_WR(ctx, 0x408/4, 0x70);
++      INSTANCE_WR(ctx, 0x40C/4, 0x80);
++      INSTANCE_WR(ctx, 0x420/4, 0xC);
++      INSTANCE_WR(ctx, 0x428/4, 0x8);
++      INSTANCE_WR(ctx, 0x42C/4, 0x14);
++      INSTANCE_WR(ctx, 0x434/4, 0x29);
++      INSTANCE_WR(ctx, 0x438/4, 0x27);
++      INSTANCE_WR(ctx, 0x43C/4, 0x26);
++      INSTANCE_WR(ctx, 0x440/4, 0x8);
++      INSTANCE_WR(ctx, 0x444/4, 0x4);
++      INSTANCE_WR(ctx, 0x448/4, 0x27);
++      INSTANCE_WR(ctx, 0x454/4, 0x1);
++      INSTANCE_WR(ctx, 0x458/4, 0x2);
++      INSTANCE_WR(ctx, 0x45C/4, 0x3);
++      INSTANCE_WR(ctx, 0x460/4, 0x4);
++      INSTANCE_WR(ctx, 0x464/4, 0x5);
++      INSTANCE_WR(ctx, 0x468/4, 0x6);
++      INSTANCE_WR(ctx, 0x46C/4, 0x7);
++      INSTANCE_WR(ctx, 0x470/4, 0x1);
++      INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x4E4/4, 0x80);
++      INSTANCE_WR(ctx, 0x4E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x4EC/4, 0x4);
++      INSTANCE_WR(ctx, 0x4F0/4, 0x3);
++      INSTANCE_WR(ctx, 0x4F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x500/4, 0x12);
++      INSTANCE_WR(ctx, 0x504/4, 0x10);
++      INSTANCE_WR(ctx, 0x508/4, 0xC);
++      INSTANCE_WR(ctx, 0x50C/4, 0x1);
++      INSTANCE_WR(ctx, 0x51C/4, 0x4);
++      INSTANCE_WR(ctx, 0x520/4, 0x2);
++      INSTANCE_WR(ctx, 0x524/4, 0x4);
++      INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x55C/4, 0x4);
++      INSTANCE_WR(ctx, 0x560/4, 0x14);
++      INSTANCE_WR(ctx, 0x564/4, 0x1);
++      INSTANCE_WR(ctx, 0x570/4, 0x2);
++      INSTANCE_WR(ctx, 0x57C/4, 0x1);
++      INSTANCE_WR(ctx, 0x584/4, 0x2);
++      INSTANCE_WR(ctx, 0x588/4, 0x1000);
++      INSTANCE_WR(ctx, 0x58C/4, 0xE00);
++      INSTANCE_WR(ctx, 0x590/4, 0x1000);
++      INSTANCE_WR(ctx, 0x594/4, 0x1E00);
++      INSTANCE_WR(ctx, 0x59C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BC/4, 0x200);
++      INSTANCE_WR(ctx, 0x5C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5C8/4, 0x70);
++      INSTANCE_WR(ctx, 0x5CC/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC/4, 0x70);
++      INSTANCE_WR(ctx, 0x5E0/4, 0x80);
++      INSTANCE_WR(ctx, 0x5F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5F4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x5FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x60C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x614/4, 0x2);
++      INSTANCE_WR(ctx, 0x61C/4, 0x1);
++      INSTANCE_WR(ctx, 0x624/4, 0x1);
++      INSTANCE_WR(ctx, 0x62C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x630/4, 0xCF);
++      INSTANCE_WR(ctx, 0x634/4, 0x1);
++      INSTANCE_WR(ctx, 0x63C/4, 0xF80);
++      INSTANCE_WR(ctx, 0x684/4, 0x7F0080);
++      INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080);
++      INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x6E8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x6F0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x6F4/4, 0x1F);
++      INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x700/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x710/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x718/4, 0x1000);
++      INSTANCE_WR(ctx, 0x71C/4, 0x1F);
++      INSTANCE_WR(ctx, 0x720/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x724/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x728/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x734/4, 0x10040);
++      INSTANCE_WR(ctx, 0x73C/4, 0x22);
++      INSTANCE_WR(ctx, 0x748/4, 0x10040);
++      INSTANCE_WR(ctx, 0x74C/4, 0x22);
++      INSTANCE_WR(ctx, 0x764/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x768/4, 0x160000);
++      INSTANCE_WR(ctx, 0x76C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x780/4, 0x8C0000);
++      INSTANCE_WR(ctx, 0x7A4/4, 0x10401);
++      INSTANCE_WR(ctx, 0x7AC/4, 0x78);
++      INSTANCE_WR(ctx, 0x7B4/4, 0xBF);
++      INSTANCE_WR(ctx, 0x7BC/4, 0x1210);
++      INSTANCE_WR(ctx, 0x7C0/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x7E4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7E8/4, 0x160000);
++      INSTANCE_WR(ctx, 0x7EC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x800/4, 0x8C0000);
++      INSTANCE_WR(ctx, 0x824/4, 0x10401);
++      INSTANCE_WR(ctx, 0x82C/4, 0x78);
++      INSTANCE_WR(ctx, 0x834/4, 0xBF);
++      INSTANCE_WR(ctx, 0x83C/4, 0x1210);
++      INSTANCE_WR(ctx, 0x840/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x868/4, 0x27070);
++      INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x88C/4, 0x120407);
++      INSTANCE_WR(ctx, 0x890/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x894/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x898/4, 0x30201);
++      INSTANCE_WR(ctx, 0x8B4/4, 0x40);
++      INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x8BC/4, 0x141210);
++      INSTANCE_WR(ctx, 0x8C0/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x8C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x8C8/4, 0x3);
++      INSTANCE_WR(ctx, 0x8D4/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x8D8/4, 0x100);
++      INSTANCE_WR(ctx, 0x8DC/4, 0x3800);
++      INSTANCE_WR(ctx, 0x8E0/4, 0x404040);
++      INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x8EC/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x7BA0/4, 0x21);
++      INSTANCE_WR(ctx, 0x7BC0/4, 0x1);
++      INSTANCE_WR(ctx, 0x7BE0/4, 0x2);
++      INSTANCE_WR(ctx, 0x7C00/4, 0x100);
++      INSTANCE_WR(ctx, 0x7C20/4, 0x100);
++      INSTANCE_WR(ctx, 0x7C40/4, 0x1);
++      INSTANCE_WR(ctx, 0x7CA0/4, 0x1);
++      INSTANCE_WR(ctx, 0x7CC0/4, 0x2);
++      INSTANCE_WR(ctx, 0x7CE0/4, 0x100);
++      INSTANCE_WR(ctx, 0x7D00/4, 0x100);
++      INSTANCE_WR(ctx, 0x7D20/4, 0x1);
++      INSTANCE_WR(ctx, 0x11640/4, 0x4);
++      INSTANCE_WR(ctx, 0x11660/4, 0x4);
++      INSTANCE_WR(ctx, 0x49FE0/4, 0x4);
++      INSTANCE_WR(ctx, 0x4A000/4, 0x4);
++      INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x4A040/4, 0x3);
++      INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x4A0E0/4, 0x1);
++      INSTANCE_WR(ctx, 0x4A100/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x4A180/4, 0x27);
++      INSTANCE_WR(ctx, 0x4A1E0/4, 0x1);
++      INSTANCE_WR(ctx, 0x51A20/4, 0x1);
++      INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x51F00/4, 0x80);
++      INSTANCE_WR(ctx, 0x51F80/4, 0x80);
++      INSTANCE_WR(ctx, 0x51FC0/4, 0x3F);
++      INSTANCE_WR(ctx, 0x52120/4, 0x2);
++      INSTANCE_WR(ctx, 0x52140/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x52160/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x52280/4, 0x4);
++      INSTANCE_WR(ctx, 0x52300/4, 0x4);
++      INSTANCE_WR(ctx, 0x52540/4, 0x1);
++      INSTANCE_WR(ctx, 0x52560/4, 0x1001);
++      INSTANCE_WR(ctx, 0x52580/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x52C00/4, 0x10);
++      INSTANCE_WR(ctx, 0x52C60/4, 0x3);
++      INSTANCE_WR(ctx, 0xA84/4, 0xF);
++      INSTANCE_WR(ctx, 0xB24/4, 0x20);
++      INSTANCE_WR(ctx, 0xD04/4, 0x1A);
++      INSTANCE_WR(ctx, 0xEC4/4, 0x4);
++      INSTANCE_WR(ctx, 0xEE4/4, 0x4);
++      INSTANCE_WR(ctx, 0xF24/4, 0x4);
++      INSTANCE_WR(ctx, 0xF44/4, 0x8);
++      INSTANCE_WR(ctx, 0xF84/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x1124/4, 0xF);
++      INSTANCE_WR(ctx, 0x3604/4, 0xF);
++      INSTANCE_WR(ctx, 0x3644/4, 0x1);
++      INSTANCE_WR(ctx, 0x41A4/4, 0xF);
++      INSTANCE_WR(ctx, 0x14844/4, 0xF);
++      INSTANCE_WR(ctx, 0x14AE4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14B04/4, 0x100);
++      INSTANCE_WR(ctx, 0x14B24/4, 0x100);
++      INSTANCE_WR(ctx, 0x14B44/4, 0x11);
++      INSTANCE_WR(ctx, 0x14B84/4, 0x8);
++      INSTANCE_WR(ctx, 0x14C44/4, 0x1);
++      INSTANCE_WR(ctx, 0x14C84/4, 0x1);
++      INSTANCE_WR(ctx, 0x14CA4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14CC4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14CE4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x14D04/4, 0x2);
++      INSTANCE_WR(ctx, 0x14DE4/4, 0x1);
++      INSTANCE_WR(ctx, 0x14E24/4, 0x1);
++      INSTANCE_WR(ctx, 0x14E44/4, 0x1);
++      INSTANCE_WR(ctx, 0x14E64/4, 0x1);
++      INSTANCE_WR(ctx, 0x14F04/4, 0x4);
++      INSTANCE_WR(ctx, 0x14F44/4, 0x1);
++      INSTANCE_WR(ctx, 0x14F64/4, 0x15);
++      INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480);
++      INSTANCE_WR(ctx, 0x15764/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x15804/4, 0x100);
++      INSTANCE_WR(ctx, 0x15864/4, 0x10001);
++      INSTANCE_WR(ctx, 0x158A4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x158C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x158E4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x15904/4, 0x1);
++      INSTANCE_WR(ctx, 0x15924/4, 0x4);
++      INSTANCE_WR(ctx, 0x15944/4, 0x2);
++      INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15C68/4, 0x4);
++      INSTANCE_WR(ctx, 0x15C88/4, 0x1A);
++      INSTANCE_WR(ctx, 0x15CE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x16028/4, 0xF);
++      INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16148/4, 0x11);
++      INSTANCE_WR(ctx, 0x16348/4, 0x4);
++      INSTANCE_WR(ctx, 0x163E8/4, 0x2);
++      INSTANCE_WR(ctx, 0x16408/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x16428/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x164A8/4, 0x5);
++      INSTANCE_WR(ctx, 0x164C8/4, 0x52);
++      INSTANCE_WR(ctx, 0x16568/4, 0x1);
++      INSTANCE_WR(ctx, 0x16788/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16808/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16828/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16848/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16868/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16888/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16908/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16928/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16948/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16968/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16988/4, 0x10);
++      INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x16E88/4, 0x5);
++      INSTANCE_WR(ctx, 0x16EE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16FA8/4, 0x3);
++      INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x173C8/4, 0x1A);
++      INSTANCE_WR(ctx, 0x17408/4, 0x3);
++      INSTANCE_WR(ctx, 0x178E8/4, 0x102);
++      INSTANCE_WR(ctx, 0x17928/4, 0x4);
++      INSTANCE_WR(ctx, 0x17948/4, 0x4);
++      INSTANCE_WR(ctx, 0x17968/4, 0x4);
++      INSTANCE_WR(ctx, 0x17988/4, 0x4);
++      INSTANCE_WR(ctx, 0x179A8/4, 0x4);
++      INSTANCE_WR(ctx, 0x179C8/4, 0x4);
++      INSTANCE_WR(ctx, 0x17A08/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17A48/4, 0x102);
++      INSTANCE_WR(ctx, 0x17B88/4, 0x4);
++      INSTANCE_WR(ctx, 0x17BA8/4, 0x4);
++      INSTANCE_WR(ctx, 0x17BC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x17BE8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18228/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18288/4, 0x804);
++      INSTANCE_WR(ctx, 0x182C8/4, 0x4);
++      INSTANCE_WR(ctx, 0x182E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18308/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18348/4, 0x4);
++      INSTANCE_WR(ctx, 0x18368/4, 0x4);
++      INSTANCE_WR(ctx, 0x183A8/4, 0x10);
++      INSTANCE_WR(ctx, 0x18448/4, 0x804);
++      INSTANCE_WR(ctx, 0x18468/4, 0x1);
++      INSTANCE_WR(ctx, 0x18488/4, 0x1A);
++      INSTANCE_WR(ctx, 0x184A8/4, 0x7F);
++      INSTANCE_WR(ctx, 0x184E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x18508/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18548/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18568/4, 0x4);
++      INSTANCE_WR(ctx, 0x18588/4, 0x4);
++      INSTANCE_WR(ctx, 0x185C8/4, 0x10);
++      INSTANCE_WR(ctx, 0x18648/4, 0x1);
++      INSTANCE_WR(ctx, 0x18668/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18748/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x18768/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18E88/4, 0x1);
++      INSTANCE_WR(ctx, 0x18EE8/4, 0x10);
++      INSTANCE_WR(ctx, 0x19608/4, 0x88);
++      INSTANCE_WR(ctx, 0x19628/4, 0x88);
++      INSTANCE_WR(ctx, 0x19688/4, 0x4);
++      INSTANCE_WR(ctx, 0x19968/4, 0x26);
++      INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x19A48/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19A68/4, 0x10);
++      INSTANCE_WR(ctx, 0x19F88/4, 0x52);
++      INSTANCE_WR(ctx, 0x19FC8/4, 0x26);
++      INSTANCE_WR(ctx, 0x1A008/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A028/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A068/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x1A108/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A128/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A168/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A188/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x24A48/4, 0x4);
++      INSTANCE_WR(ctx, 0x24A68/4, 0x4);
++      INSTANCE_WR(ctx, 0x24AA8/4, 0x80);
++      INSTANCE_WR(ctx, 0x24AC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x24AE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x24B28/4, 0x27);
++      INSTANCE_WR(ctx, 0x24B68/4, 0x26);
++      INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C08/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C28/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C48/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C68/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24C88/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D08/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D28/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D48/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D68/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24D88/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0xB0C/4, 0x2);
++      INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0xCEC/4, 0x1);
++      INSTANCE_WR(ctx, 0xD0C/4, 0x10);
++      INSTANCE_WR(ctx, 0xD6C/4, 0x1);
++      INSTANCE_WR(ctx, 0xE0C/4, 0x4);
++      INSTANCE_WR(ctx, 0xE2C/4, 0x400);
++      INSTANCE_WR(ctx, 0xE4C/4, 0x300);
++      INSTANCE_WR(ctx, 0xE6C/4, 0x1001);
++      INSTANCE_WR(ctx, 0xE8C/4, 0x15);
++      INSTANCE_WR(ctx, 0xF4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x106C/4, 0x1);
++      INSTANCE_WR(ctx, 0x108C/4, 0x10);
++      INSTANCE_WR(ctx, 0x10CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x134C/4, 0x10);
++      INSTANCE_WR(ctx, 0x156C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x158C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x160C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x162C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x164C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x166C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x168C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x170C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x172C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x1A8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x1ACC/4, 0x3F);
++      INSTANCE_WR(ctx, 0x1BAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1BEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1C2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1DCC/4, 0x11);
++      INSTANCE_WR(ctx, 0x1ECC/4, 0xF);
++      INSTANCE_WR(ctx, 0x1FCC/4, 0x11);
++      INSTANCE_WR(ctx, 0x20AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x20CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x20EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x210C/4, 0x2);
++      INSTANCE_WR(ctx, 0x212C/4, 0x1);
++      INSTANCE_WR(ctx, 0x214C/4, 0x2);
++      INSTANCE_WR(ctx, 0x216C/4, 0x1);
++      INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x24AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x24CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x24EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x250C/4, 0x1);
++      INSTANCE_WR(ctx, 0x252C/4, 0x2);
++      INSTANCE_WR(ctx, 0x254C/4, 0x1);
++      INSTANCE_WR(ctx, 0x256C/4, 0x1);
++      INSTANCE_WR(ctx, 0x25EC/4, 0x11);
++      INSTANCE_WR(ctx, 0x260C/4, 0x1);
++      INSTANCE_WR(ctx, 0x328C/4, 0x2);
++      INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x346C/4, 0x1);
++      INSTANCE_WR(ctx, 0x348C/4, 0x10);
++      INSTANCE_WR(ctx, 0x34EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x358C/4, 0x4);
++      INSTANCE_WR(ctx, 0x35AC/4, 0x400);
++      INSTANCE_WR(ctx, 0x35CC/4, 0x300);
++      INSTANCE_WR(ctx, 0x35EC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x360C/4, 0x15);
++      INSTANCE_WR(ctx, 0x36CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x37EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x380C/4, 0x10);
++      INSTANCE_WR(ctx, 0x384C/4, 0x1);
++      INSTANCE_WR(ctx, 0x3ACC/4, 0x10);
++      INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x420C/4, 0x10);
++      INSTANCE_WR(ctx, 0x424C/4, 0x3F);
++      INSTANCE_WR(ctx, 0x432C/4, 0x1);
++      INSTANCE_WR(ctx, 0x436C/4, 0x1);
++      INSTANCE_WR(ctx, 0x43AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x454C/4, 0x11);
++      INSTANCE_WR(ctx, 0x464C/4, 0xF);
++      INSTANCE_WR(ctx, 0x474C/4, 0x11);
++      INSTANCE_WR(ctx, 0x482C/4, 0x1);
++      INSTANCE_WR(ctx, 0x484C/4, 0x1);
++      INSTANCE_WR(ctx, 0x486C/4, 0x1);
++      INSTANCE_WR(ctx, 0x488C/4, 0x2);
++      INSTANCE_WR(ctx, 0x48AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x48CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x48EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x492C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x4C2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4C4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x4C6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4C8C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4CAC/4, 0x2);
++      INSTANCE_WR(ctx, 0x4CCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x4CEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x4D6C/4, 0x11);
++      INSTANCE_WR(ctx, 0x4D8C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA30/4, 0x4);
++      INSTANCE_WR(ctx, 0xCF0/4, 0x4);
++      INSTANCE_WR(ctx, 0xD10/4, 0x4);
++      INSTANCE_WR(ctx, 0xD30/4, 0x608080);
++      INSTANCE_WR(ctx, 0xDD0/4, 0x4);
++      INSTANCE_WR(ctx, 0xE30/4, 0x4);
++      INSTANCE_WR(ctx, 0xE50/4, 0x4);
++      INSTANCE_WR(ctx, 0xE70/4, 0x80);
++      INSTANCE_WR(ctx, 0xE90/4, 0x1E00);
++      INSTANCE_WR(ctx, 0xEB0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1350/4, 0x4);
++      INSTANCE_WR(ctx, 0x1370/4, 0x80);
++      INSTANCE_WR(ctx, 0x1390/4, 0x4);
++      INSTANCE_WR(ctx, 0x13B0/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x13D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x13F0/4, 0x1E00);
++      INSTANCE_WR(ctx, 0x1410/4, 0x4);
++      INSTANCE_WR(ctx, 0x14B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x14D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1550/4, 0x4);
++      INSTANCE_WR(ctx, 0x159F0/4, 0x4);
++      INSTANCE_WR(ctx, 0x15A10/4, 0x3);
++      INSTANCE_WR(ctx, 0x15C50/4, 0xF);
++      INSTANCE_WR(ctx, 0x15DD0/4, 0x4);
++      INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x15F70/4, 0x1);
++      INSTANCE_WR(ctx, 0x15FF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x160B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16250/4, 0x1);
++      INSTANCE_WR(ctx, 0x16270/4, 0x1);
++      INSTANCE_WR(ctx, 0x16290/4, 0x2);
++      INSTANCE_WR(ctx, 0x162B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x162D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x162F0/4, 0x2);
++      INSTANCE_WR(ctx, 0x16310/4, 0x1);
++      INSTANCE_WR(ctx, 0x16350/4, 0x11);
++      INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x164B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x16530/4, 0x11);
++      INSTANCE_WR(ctx, 0x16550/4, 0x1);
++      INSTANCE_WR(ctx, 0x16590/4, 0xCF);
++      INSTANCE_WR(ctx, 0x165B0/4, 0xCF);
++      INSTANCE_WR(ctx, 0x165D0/4, 0xCF);
++      INSTANCE_WR(ctx, 0x16730/4, 0x1);
++      INSTANCE_WR(ctx, 0x16750/4, 0x1);
++      INSTANCE_WR(ctx, 0x16770/4, 0x2);
++      INSTANCE_WR(ctx, 0x16790/4, 0x1);
++      INSTANCE_WR(ctx, 0x167B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x167D0/4, 0x2);
++      INSTANCE_WR(ctx, 0x167F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16830/4, 0x1);
++      INSTANCE_WR(ctx, 0x16850/4, 0x1);
++      INSTANCE_WR(ctx, 0x16870/4, 0x1);
++      INSTANCE_WR(ctx, 0x16890/4, 0x1);
++      INSTANCE_WR(ctx, 0x168B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x168D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x168F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16910/4, 0x1);
++      INSTANCE_WR(ctx, 0x16930/4, 0x11);
++      INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16A50/4, 0xF);
++      INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x16BB0/4, 0x11);
++      INSTANCE_WR(ctx, 0x16BD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16C50/4, 0x4);
++      INSTANCE_WR(ctx, 0x16D10/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB0/4, 0x11);
++      INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F30/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F50/4, 0x1);
++      INSTANCE_WR(ctx, 0x16F90/4, 0x1);
++      INSTANCE_WR(ctx, 0x16FD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17010/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17050/4, 0x1);
++      INSTANCE_WR(ctx, 0x17090/4, 0x1);
++      INSTANCE_WR(ctx, 0x175F0/4, 0x8);
++      INSTANCE_WR(ctx, 0x17610/4, 0x8);
++      INSTANCE_WR(ctx, 0x17630/4, 0x8);
++      INSTANCE_WR(ctx, 0x17650/4, 0x8);
++      INSTANCE_WR(ctx, 0x17670/4, 0x8);
++      INSTANCE_WR(ctx, 0x17690/4, 0x8);
++      INSTANCE_WR(ctx, 0x176B0/4, 0x8);
++      INSTANCE_WR(ctx, 0x176D0/4, 0x8);
++      INSTANCE_WR(ctx, 0x176F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17810/4, 0x400);
++      INSTANCE_WR(ctx, 0x17830/4, 0x400);
++      INSTANCE_WR(ctx, 0x17850/4, 0x400);
++      INSTANCE_WR(ctx, 0x17870/4, 0x400);
++      INSTANCE_WR(ctx, 0x17890/4, 0x400);
++      INSTANCE_WR(ctx, 0x178B0/4, 0x400);
++      INSTANCE_WR(ctx, 0x178D0/4, 0x400);
++      INSTANCE_WR(ctx, 0x178F0/4, 0x400);
++      INSTANCE_WR(ctx, 0x17910/4, 0x300);
++      INSTANCE_WR(ctx, 0x17930/4, 0x300);
++      INSTANCE_WR(ctx, 0x17950/4, 0x300);
++      INSTANCE_WR(ctx, 0x17970/4, 0x300);
++      INSTANCE_WR(ctx, 0x17990/4, 0x300);
++      INSTANCE_WR(ctx, 0x179B0/4, 0x300);
++      INSTANCE_WR(ctx, 0x179D0/4, 0x300);
++      INSTANCE_WR(ctx, 0x179F0/4, 0x300);
++      INSTANCE_WR(ctx, 0x17A10/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A30/4, 0xF);
++      INSTANCE_WR(ctx, 0x17B30/4, 0x20);
++      INSTANCE_WR(ctx, 0x17B50/4, 0x11);
++      INSTANCE_WR(ctx, 0x17B70/4, 0x100);
++      INSTANCE_WR(ctx, 0x17BB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17C10/4, 0x40);
++      INSTANCE_WR(ctx, 0x17C30/4, 0x100);
++      INSTANCE_WR(ctx, 0x17C70/4, 0x3);
++      INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17D90/4, 0x2);
++      INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17EF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17F90/4, 0x4);
++      INSTANCE_WR(ctx, 0x17FD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17FF0/4, 0x400);
++      INSTANCE_WR(ctx, 0x18010/4, 0x300);
++      INSTANCE_WR(ctx, 0x18030/4, 0x1001);
++      INSTANCE_WR(ctx, 0x180B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x181D0/4, 0xF);
++      INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18550/4, 0x11);
++      INSTANCE_WR(ctx, 0x185B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x185F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x18610/4, 0x1);
++      INSTANCE_WR(ctx, 0x18690/4, 0x1);
++      INSTANCE_WR(ctx, 0x18730/4, 0x1);
++      INSTANCE_WR(ctx, 0x18770/4, 0x1);
++      INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x18830/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x18850/4, 0x40);
++      INSTANCE_WR(ctx, 0x18870/4, 0x100);
++      INSTANCE_WR(ctx, 0x18890/4, 0x10100);
++      INSTANCE_WR(ctx, 0x188B0/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18B50/4, 0x1);
++      INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x18BB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x18D30/4, 0x1);
++      INSTANCE_WR(ctx, 0x18D70/4, 0x1);
++      INSTANCE_WR(ctx, 0x18D90/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x18E30/4, 0x1A);
++}
++
++static void
++nv92_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x10C/4, 0x30);
++      INSTANCE_WR(ctx, 0x1D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
++      INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
++      INSTANCE_WR(ctx, 0x22C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x258/4, 0x187);
++      INSTANCE_WR(ctx, 0x26C/4, 0x1018);
++      INSTANCE_WR(ctx, 0x270/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2AC/4, 0x4);
++      INSTANCE_WR(ctx, 0x2B0/4, 0x42500DF);
++      INSTANCE_WR(ctx, 0x2B8/4, 0x600);
++      INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
++      INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
++      INSTANCE_WR(ctx, 0x2DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x2F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F8/4, 0x80);
++      INSTANCE_WR(ctx, 0x2FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x318/4, 0x2);
++      INSTANCE_WR(ctx, 0x31C/4, 0x1);
++      INSTANCE_WR(ctx, 0x328/4, 0x1);
++      INSTANCE_WR(ctx, 0x32C/4, 0x100);
++      INSTANCE_WR(ctx, 0x344/4, 0x2);
++      INSTANCE_WR(ctx, 0x348/4, 0x1);
++      INSTANCE_WR(ctx, 0x34C/4, 0x1);
++      INSTANCE_WR(ctx, 0x35C/4, 0x1);
++      INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x36C/4, 0x1);
++      INSTANCE_WR(ctx, 0x370/4, 0x1);
++      INSTANCE_WR(ctx, 0x378/4, 0x1);
++      INSTANCE_WR(ctx, 0x37C/4, 0x1);
++      INSTANCE_WR(ctx, 0x380/4, 0x1);
++      INSTANCE_WR(ctx, 0x384/4, 0x4);
++      INSTANCE_WR(ctx, 0x388/4, 0x1);
++      INSTANCE_WR(ctx, 0x38C/4, 0x1);
++      INSTANCE_WR(ctx, 0x390/4, 0x1);
++      INSTANCE_WR(ctx, 0x394/4, 0x7);
++      INSTANCE_WR(ctx, 0x398/4, 0x1);
++      INSTANCE_WR(ctx, 0x39C/4, 0x7);
++      INSTANCE_WR(ctx, 0x3A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3C0/4, 0x100);
++      INSTANCE_WR(ctx, 0x3C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3D4/4, 0x100);
++      INSTANCE_WR(ctx, 0x3D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x3DC/4, 0x100);
++      INSTANCE_WR(ctx, 0x3E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x3F0/4, 0x100);
++      INSTANCE_WR(ctx, 0x404/4, 0x4);
++      INSTANCE_WR(ctx, 0x408/4, 0x70);
++      INSTANCE_WR(ctx, 0x40C/4, 0x80);
++      INSTANCE_WR(ctx, 0x420/4, 0xC);
++      INSTANCE_WR(ctx, 0x428/4, 0x8);
++      INSTANCE_WR(ctx, 0x42C/4, 0x14);
++      INSTANCE_WR(ctx, 0x434/4, 0x29);
++      INSTANCE_WR(ctx, 0x438/4, 0x27);
++      INSTANCE_WR(ctx, 0x43C/4, 0x26);
++      INSTANCE_WR(ctx, 0x440/4, 0x8);
++      INSTANCE_WR(ctx, 0x444/4, 0x4);
++      INSTANCE_WR(ctx, 0x448/4, 0x27);
++      INSTANCE_WR(ctx, 0x454/4, 0x1);
++      INSTANCE_WR(ctx, 0x458/4, 0x2);
++      INSTANCE_WR(ctx, 0x45C/4, 0x3);
++      INSTANCE_WR(ctx, 0x460/4, 0x4);
++      INSTANCE_WR(ctx, 0x464/4, 0x5);
++      INSTANCE_WR(ctx, 0x468/4, 0x6);
++      INSTANCE_WR(ctx, 0x46C/4, 0x7);
++      INSTANCE_WR(ctx, 0x470/4, 0x1);
++      INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x4E4/4, 0x80);
++      INSTANCE_WR(ctx, 0x4E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x4EC/4, 0x4);
++      INSTANCE_WR(ctx, 0x4F0/4, 0x3);
++      INSTANCE_WR(ctx, 0x4F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x500/4, 0x12);
++      INSTANCE_WR(ctx, 0x504/4, 0x10);
++      INSTANCE_WR(ctx, 0x508/4, 0xC);
++      INSTANCE_WR(ctx, 0x50C/4, 0x1);
++      INSTANCE_WR(ctx, 0x51C/4, 0x4);
++      INSTANCE_WR(ctx, 0x520/4, 0x2);
++      INSTANCE_WR(ctx, 0x524/4, 0x4);
++      INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x55C/4, 0x4);
++      INSTANCE_WR(ctx, 0x560/4, 0x14);
++      INSTANCE_WR(ctx, 0x564/4, 0x1);
++      INSTANCE_WR(ctx, 0x570/4, 0x2);
++      INSTANCE_WR(ctx, 0x57C/4, 0x1);
++      INSTANCE_WR(ctx, 0x584/4, 0x2);
++      INSTANCE_WR(ctx, 0x588/4, 0x1000);
++      INSTANCE_WR(ctx, 0x58C/4, 0xE00);
++      INSTANCE_WR(ctx, 0x590/4, 0x1000);
++      INSTANCE_WR(ctx, 0x594/4, 0x1E00);
++      INSTANCE_WR(ctx, 0x59C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BC/4, 0x200);
++      INSTANCE_WR(ctx, 0x5C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5C8/4, 0x70);
++      INSTANCE_WR(ctx, 0x5CC/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC/4, 0x70);
++      INSTANCE_WR(ctx, 0x5E0/4, 0x80);
++      INSTANCE_WR(ctx, 0x5F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5F4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x5FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x60C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x614/4, 0x2);
++      INSTANCE_WR(ctx, 0x61C/4, 0x1);
++      INSTANCE_WR(ctx, 0x624/4, 0x1);
++      INSTANCE_WR(ctx, 0x62C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x630/4, 0xCF);
++      INSTANCE_WR(ctx, 0x634/4, 0x1);
++      INSTANCE_WR(ctx, 0x63C/4, 0x1F80);
++      INSTANCE_WR(ctx, 0x654/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x658/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x660/4, 0x1000);
++      INSTANCE_WR(ctx, 0x664/4, 0x1F);
++      INSTANCE_WR(ctx, 0x668/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x66C/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x670/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x67C/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x680/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x688/4, 0x1000);
++      INSTANCE_WR(ctx, 0x68C/4, 0x1F);
++      INSTANCE_WR(ctx, 0x690/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x694/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x698/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x6A4/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x6A8/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x6B0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x6B4/4, 0x1F);
++      INSTANCE_WR(ctx, 0x6B8/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x6BC/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x6C0/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x6CC/4, 0x3B74F821);
++      INSTANCE_WR(ctx, 0x6D0/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x6D8/4, 0x1000);
++      INSTANCE_WR(ctx, 0x6DC/4, 0x1F);
++      INSTANCE_WR(ctx, 0x6E0/4, 0x27C10FA);
++      INSTANCE_WR(ctx, 0x6E4/4, 0x400000C0);
++      INSTANCE_WR(ctx, 0x6E8/4, 0xB7892080);
++      INSTANCE_WR(ctx, 0x6F4/4, 0x390040);
++      INSTANCE_WR(ctx, 0x6FC/4, 0x22);
++      INSTANCE_WR(ctx, 0x708/4, 0x390040);
++      INSTANCE_WR(ctx, 0x70C/4, 0x22);
++      INSTANCE_WR(ctx, 0x724/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x728/4, 0x160000);
++      INSTANCE_WR(ctx, 0x72C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x73C/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x740/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x764/4, 0x10401);
++      INSTANCE_WR(ctx, 0x76C/4, 0x78);
++      INSTANCE_WR(ctx, 0x774/4, 0xBF);
++      INSTANCE_WR(ctx, 0x77C/4, 0x1210);
++      INSTANCE_WR(ctx, 0x780/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x7A4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7A8/4, 0x160000);
++      INSTANCE_WR(ctx, 0x7AC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x7BC/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x7C0/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x7E4/4, 0x10401);
++      INSTANCE_WR(ctx, 0x7EC/4, 0x78);
++      INSTANCE_WR(ctx, 0x7F4/4, 0xBF);
++      INSTANCE_WR(ctx, 0x7FC/4, 0x1210);
++      INSTANCE_WR(ctx, 0x800/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x828/4, 0x27070);
++      INSTANCE_WR(ctx, 0x834/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x84C/4, 0x120407);
++      INSTANCE_WR(ctx, 0x850/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x854/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x858/4, 0x30201);
++      INSTANCE_WR(ctx, 0x874/4, 0x40);
++      INSTANCE_WR(ctx, 0x878/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x87C/4, 0x141210);
++      INSTANCE_WR(ctx, 0x880/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x884/4, 0x1);
++      INSTANCE_WR(ctx, 0x888/4, 0x3);
++      INSTANCE_WR(ctx, 0x894/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x898/4, 0x100);
++      INSTANCE_WR(ctx, 0x89C/4, 0x3800);
++      INSTANCE_WR(ctx, 0x8A0/4, 0x404040);
++      INSTANCE_WR(ctx, 0x8A4/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x8AC/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x8B0/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x8C0/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x8C4/4, 0x160000);
++      INSTANCE_WR(ctx, 0x8C8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x8D8/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x8DC/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x900/4, 0x10401);
++      INSTANCE_WR(ctx, 0x908/4, 0x78);
++      INSTANCE_WR(ctx, 0x910/4, 0xBF);
++      INSTANCE_WR(ctx, 0x918/4, 0x1210);
++      INSTANCE_WR(ctx, 0x91C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x940/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x944/4, 0x160000);
++      INSTANCE_WR(ctx, 0x948/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x958/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x95C/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x980/4, 0x10401);
++      INSTANCE_WR(ctx, 0x988/4, 0x78);
++      INSTANCE_WR(ctx, 0x990/4, 0xBF);
++      INSTANCE_WR(ctx, 0x998/4, 0x1210);
++      INSTANCE_WR(ctx, 0x99C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x9C4/4, 0x27070);
++      INSTANCE_WR(ctx, 0x9D0/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x9E8/4, 0x120407);
++      INSTANCE_WR(ctx, 0x9EC/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x9F0/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x9F4/4, 0x30201);
++      INSTANCE_WR(ctx, 0xA10/4, 0x40);
++      INSTANCE_WR(ctx, 0xA14/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xA18/4, 0x141210);
++      INSTANCE_WR(ctx, 0xA1C/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xA20/4, 0x1);
++      INSTANCE_WR(ctx, 0xA24/4, 0x3);
++      INSTANCE_WR(ctx, 0xA30/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xA34/4, 0x100);
++      INSTANCE_WR(ctx, 0xA38/4, 0x3800);
++      INSTANCE_WR(ctx, 0xA3C/4, 0x404040);
++      INSTANCE_WR(ctx, 0xA40/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xA48/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xA4C/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xA5C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xA60/4, 0x160000);
++      INSTANCE_WR(ctx, 0xA64/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xA74/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xA78/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xA9C/4, 0x10401);
++      INSTANCE_WR(ctx, 0xAA4/4, 0x78);
++      INSTANCE_WR(ctx, 0xAAC/4, 0xBF);
++      INSTANCE_WR(ctx, 0xAB4/4, 0x1210);
++      INSTANCE_WR(ctx, 0xAB8/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xADC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xAE0/4, 0x160000);
++      INSTANCE_WR(ctx, 0xAE4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xAF4/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xAF8/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xB1C/4, 0x10401);
++      INSTANCE_WR(ctx, 0xB24/4, 0x78);
++      INSTANCE_WR(ctx, 0xB2C/4, 0xBF);
++      INSTANCE_WR(ctx, 0xB34/4, 0x1210);
++      INSTANCE_WR(ctx, 0xB38/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xB60/4, 0x27070);
++      INSTANCE_WR(ctx, 0xB6C/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0xB84/4, 0x120407);
++      INSTANCE_WR(ctx, 0xB88/4, 0x5091507);
++      INSTANCE_WR(ctx, 0xB8C/4, 0x5010202);
++      INSTANCE_WR(ctx, 0xB90/4, 0x30201);
++      INSTANCE_WR(ctx, 0xBAC/4, 0x40);
++      INSTANCE_WR(ctx, 0xBB0/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xBB4/4, 0x141210);
++      INSTANCE_WR(ctx, 0xBB8/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xBBC/4, 0x1);
++      INSTANCE_WR(ctx, 0xBC0/4, 0x3);
++      INSTANCE_WR(ctx, 0xBCC/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xBD0/4, 0x100);
++      INSTANCE_WR(ctx, 0xBD4/4, 0x3800);
++      INSTANCE_WR(ctx, 0xBD8/4, 0x404040);
++      INSTANCE_WR(ctx, 0xBDC/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xBE4/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xBE8/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xBF8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xBFC/4, 0x160000);
++      INSTANCE_WR(ctx, 0xC00/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xC10/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xC14/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xC38/4, 0x10401);
++      INSTANCE_WR(ctx, 0xC40/4, 0x78);
++      INSTANCE_WR(ctx, 0xC48/4, 0xBF);
++      INSTANCE_WR(ctx, 0xC50/4, 0x1210);
++      INSTANCE_WR(ctx, 0xC54/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xC78/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xC7C/4, 0x160000);
++      INSTANCE_WR(ctx, 0xC80/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xC90/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xC94/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xCB8/4, 0x10401);
++      INSTANCE_WR(ctx, 0xCC0/4, 0x78);
++      INSTANCE_WR(ctx, 0xCC8/4, 0xBF);
++      INSTANCE_WR(ctx, 0xCD0/4, 0x1210);
++      INSTANCE_WR(ctx, 0xCD4/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xCFC/4, 0x27070);
++      INSTANCE_WR(ctx, 0xD08/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0xD20/4, 0x120407);
++      INSTANCE_WR(ctx, 0xD24/4, 0x5091507);
++      INSTANCE_WR(ctx, 0xD28/4, 0x5010202);
++      INSTANCE_WR(ctx, 0xD2C/4, 0x30201);
++      INSTANCE_WR(ctx, 0xD48/4, 0x40);
++      INSTANCE_WR(ctx, 0xD4C/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xD50/4, 0x141210);
++      INSTANCE_WR(ctx, 0xD54/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xD58/4, 0x1);
++      INSTANCE_WR(ctx, 0xD5C/4, 0x3);
++      INSTANCE_WR(ctx, 0xD68/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xD6C/4, 0x100);
++      INSTANCE_WR(ctx, 0xD70/4, 0x3800);
++      INSTANCE_WR(ctx, 0xD74/4, 0x404040);
++      INSTANCE_WR(ctx, 0xD78/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xD80/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xD84/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xD94/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xD98/4, 0x160000);
++      INSTANCE_WR(ctx, 0xD9C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xDAC/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xDB0/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xDD4/4, 0x10401);
++      INSTANCE_WR(ctx, 0xDDC/4, 0x78);
++      INSTANCE_WR(ctx, 0xDE4/4, 0xBF);
++      INSTANCE_WR(ctx, 0xDEC/4, 0x1210);
++      INSTANCE_WR(ctx, 0xDF0/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xE14/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xE18/4, 0x160000);
++      INSTANCE_WR(ctx, 0xE1C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xE2C/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xE30/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xE54/4, 0x10401);
++      INSTANCE_WR(ctx, 0xE5C/4, 0x78);
++      INSTANCE_WR(ctx, 0xE64/4, 0xBF);
++      INSTANCE_WR(ctx, 0xE6C/4, 0x1210);
++      INSTANCE_WR(ctx, 0xE70/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xE98/4, 0x27070);
++      INSTANCE_WR(ctx, 0xEA4/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0xEBC/4, 0x120407);
++      INSTANCE_WR(ctx, 0xEC0/4, 0x5091507);
++      INSTANCE_WR(ctx, 0xEC4/4, 0x5010202);
++      INSTANCE_WR(ctx, 0xEC8/4, 0x30201);
++      INSTANCE_WR(ctx, 0xEE4/4, 0x40);
++      INSTANCE_WR(ctx, 0xEE8/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0xEEC/4, 0x141210);
++      INSTANCE_WR(ctx, 0xEF0/4, 0x1F0);
++      INSTANCE_WR(ctx, 0xEF4/4, 0x1);
++      INSTANCE_WR(ctx, 0xEF8/4, 0x3);
++      INSTANCE_WR(ctx, 0xF04/4, 0x39E00);
++      INSTANCE_WR(ctx, 0xF08/4, 0x100);
++      INSTANCE_WR(ctx, 0xF0C/4, 0x3800);
++      INSTANCE_WR(ctx, 0xF10/4, 0x404040);
++      INSTANCE_WR(ctx, 0xF14/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0xF1C/4, 0x77F005);
++      INSTANCE_WR(ctx, 0xF20/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0xF30/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xF34/4, 0x160000);
++      INSTANCE_WR(ctx, 0xF38/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xF48/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xF4C/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xF70/4, 0x10401);
++      INSTANCE_WR(ctx, 0xF78/4, 0x78);
++      INSTANCE_WR(ctx, 0xF80/4, 0xBF);
++      INSTANCE_WR(ctx, 0xF88/4, 0x1210);
++      INSTANCE_WR(ctx, 0xF8C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0xFB0/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xFB4/4, 0x160000);
++      INSTANCE_WR(ctx, 0xFB8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0xFC8/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0xFCC/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0xFF0/4, 0x10401);
++      INSTANCE_WR(ctx, 0xFF8/4, 0x78);
++      INSTANCE_WR(ctx, 0x1000/4, 0xBF);
++      INSTANCE_WR(ctx, 0x1008/4, 0x1210);
++      INSTANCE_WR(ctx, 0x100C/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x1034/4, 0x27070);
++      INSTANCE_WR(ctx, 0x1040/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x1058/4, 0x120407);
++      INSTANCE_WR(ctx, 0x105C/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x1060/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x1064/4, 0x30201);
++      INSTANCE_WR(ctx, 0x1080/4, 0x40);
++      INSTANCE_WR(ctx, 0x1084/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x1088/4, 0x141210);
++      INSTANCE_WR(ctx, 0x108C/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x1090/4, 0x1);
++      INSTANCE_WR(ctx, 0x1094/4, 0x3);
++      INSTANCE_WR(ctx, 0x10A0/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x10A4/4, 0x100);
++      INSTANCE_WR(ctx, 0x10A8/4, 0x3800);
++      INSTANCE_WR(ctx, 0x10AC/4, 0x404040);
++      INSTANCE_WR(ctx, 0x10B0/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x10B8/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x10BC/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x10CC/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x10D0/4, 0x160000);
++      INSTANCE_WR(ctx, 0x10D4/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x10E4/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x10E8/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x110C/4, 0x10401);
++      INSTANCE_WR(ctx, 0x1114/4, 0x78);
++      INSTANCE_WR(ctx, 0x111C/4, 0xBF);
++      INSTANCE_WR(ctx, 0x1124/4, 0x1210);
++      INSTANCE_WR(ctx, 0x1128/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x114C/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1150/4, 0x160000);
++      INSTANCE_WR(ctx, 0x1154/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1164/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x1168/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x118C/4, 0x10401);
++      INSTANCE_WR(ctx, 0x1194/4, 0x78);
++      INSTANCE_WR(ctx, 0x119C/4, 0xBF);
++      INSTANCE_WR(ctx, 0x11A4/4, 0x1210);
++      INSTANCE_WR(ctx, 0x11A8/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x11D0/4, 0x27070);
++      INSTANCE_WR(ctx, 0x11DC/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x11F4/4, 0x120407);
++      INSTANCE_WR(ctx, 0x11F8/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x11FC/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x1200/4, 0x30201);
++      INSTANCE_WR(ctx, 0x121C/4, 0x40);
++      INSTANCE_WR(ctx, 0x1220/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x1224/4, 0x141210);
++      INSTANCE_WR(ctx, 0x1228/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x122C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1230/4, 0x3);
++      INSTANCE_WR(ctx, 0x123C/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x1240/4, 0x100);
++      INSTANCE_WR(ctx, 0x1244/4, 0x3800);
++      INSTANCE_WR(ctx, 0x1248/4, 0x404040);
++      INSTANCE_WR(ctx, 0x124C/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x1254/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x1258/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x1268/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x126C/4, 0x160000);
++      INSTANCE_WR(ctx, 0x1270/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1280/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x1284/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x12A8/4, 0x10401);
++      INSTANCE_WR(ctx, 0x12B0/4, 0x78);
++      INSTANCE_WR(ctx, 0x12B8/4, 0xBF);
++      INSTANCE_WR(ctx, 0x12C0/4, 0x1210);
++      INSTANCE_WR(ctx, 0x12C4/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x12E8/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x12EC/4, 0x160000);
++      INSTANCE_WR(ctx, 0x12F0/4, 0x1800000);
++      INSTANCE_WR(ctx, 0x1300/4, 0x3FFFF);
++      INSTANCE_WR(ctx, 0x1304/4, 0x118C0000);
++      INSTANCE_WR(ctx, 0x1328/4, 0x10401);
++      INSTANCE_WR(ctx, 0x1330/4, 0x78);
++      INSTANCE_WR(ctx, 0x1338/4, 0xBF);
++      INSTANCE_WR(ctx, 0x1340/4, 0x1210);
++      INSTANCE_WR(ctx, 0x1344/4, 0x8000080);
++      INSTANCE_WR(ctx, 0x136C/4, 0x27070);
++      INSTANCE_WR(ctx, 0x1378/4, 0x3FFFFFF);
++      INSTANCE_WR(ctx, 0x1390/4, 0x120407);
++      INSTANCE_WR(ctx, 0x1394/4, 0x5091507);
++      INSTANCE_WR(ctx, 0x1398/4, 0x5010202);
++      INSTANCE_WR(ctx, 0x139C/4, 0x30201);
++      INSTANCE_WR(ctx, 0x13B8/4, 0x40);
++      INSTANCE_WR(ctx, 0x13BC/4, 0xD0C0B0A);
++      INSTANCE_WR(ctx, 0x13C0/4, 0x141210);
++      INSTANCE_WR(ctx, 0x13C4/4, 0x1F0);
++      INSTANCE_WR(ctx, 0x13C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x13CC/4, 0x3);
++      INSTANCE_WR(ctx, 0x13D8/4, 0x39E00);
++      INSTANCE_WR(ctx, 0x13DC/4, 0x100);
++      INSTANCE_WR(ctx, 0x13E0/4, 0x3800);
++      INSTANCE_WR(ctx, 0x13E4/4, 0x404040);
++      INSTANCE_WR(ctx, 0x13E8/4, 0xFF0A);
++      INSTANCE_WR(ctx, 0x13F0/4, 0x77F005);
++      INSTANCE_WR(ctx, 0x13F4/4, 0x3F7FFF);
++      INSTANCE_WR(ctx, 0x8620/4, 0x21);
++      INSTANCE_WR(ctx, 0x8640/4, 0x1);
++      INSTANCE_WR(ctx, 0x8660/4, 0x2);
++      INSTANCE_WR(ctx, 0x8680/4, 0x100);
++      INSTANCE_WR(ctx, 0x86A0/4, 0x100);
++      INSTANCE_WR(ctx, 0x86C0/4, 0x1);
++      INSTANCE_WR(ctx, 0x8720/4, 0x1);
++      INSTANCE_WR(ctx, 0x8740/4, 0x2);
++      INSTANCE_WR(ctx, 0x8760/4, 0x100);
++      INSTANCE_WR(ctx, 0x8780/4, 0x100);
++      INSTANCE_WR(ctx, 0x87A0/4, 0x1);
++      INSTANCE_WR(ctx, 0x1B8C0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1B8E0/4, 0x4);
++      INSTANCE_WR(ctx, 0x54260/4, 0x4);
++      INSTANCE_WR(ctx, 0x54280/4, 0x4);
++      INSTANCE_WR(ctx, 0x542A0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x542C0/4, 0x3);
++      INSTANCE_WR(ctx, 0x54300/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x54340/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x54360/4, 0x1);
++      INSTANCE_WR(ctx, 0x54380/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x543E0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x54400/4, 0x27);
++      INSTANCE_WR(ctx, 0x54460/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BCA0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5BF80/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5C120/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C140/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C180/4, 0x80);
++      INSTANCE_WR(ctx, 0x5C200/4, 0x80);
++      INSTANCE_WR(ctx, 0x5C240/4, 0x3F);
++      INSTANCE_WR(ctx, 0x5C3A0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5C3C0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C3E0/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x5C500/4, 0x4);
++      INSTANCE_WR(ctx, 0x5C580/4, 0x4);
++      INSTANCE_WR(ctx, 0x5C7C0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5C7E0/4, 0x1001);
++      INSTANCE_WR(ctx, 0x5C800/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5C820/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5C840/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5C860/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5CC80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CCA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CCC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CCE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CD80/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CDA0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CDC0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CDE0/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE00/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE20/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE40/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE60/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x5CE80/4, 0x10);
++      INSTANCE_WR(ctx, 0x5CEE0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1584/4, 0xF);
++      INSTANCE_WR(ctx, 0x1624/4, 0x20);
++      INSTANCE_WR(ctx, 0x1804/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19C4/4, 0x4);
++      INSTANCE_WR(ctx, 0x19E4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A24/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A44/4, 0x8);
++      INSTANCE_WR(ctx, 0x1A84/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x1C24/4, 0xF);
++      INSTANCE_WR(ctx, 0x4104/4, 0xF);
++      INSTANCE_WR(ctx, 0x4144/4, 0x1);
++      INSTANCE_WR(ctx, 0x4CA4/4, 0xF);
++      INSTANCE_WR(ctx, 0x15344/4, 0xF);
++      INSTANCE_WR(ctx, 0x155E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x15604/4, 0x100);
++      INSTANCE_WR(ctx, 0x15624/4, 0x100);
++      INSTANCE_WR(ctx, 0x15644/4, 0x11);
++      INSTANCE_WR(ctx, 0x15684/4, 0x8);
++      INSTANCE_WR(ctx, 0x15744/4, 0x1);
++      INSTANCE_WR(ctx, 0x15784/4, 0x1);
++      INSTANCE_WR(ctx, 0x157A4/4, 0x1);
++      INSTANCE_WR(ctx, 0x157C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x157E4/4, 0xCF);
++      INSTANCE_WR(ctx, 0x15804/4, 0x2);
++      INSTANCE_WR(ctx, 0x158E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x15924/4, 0x1);
++      INSTANCE_WR(ctx, 0x15944/4, 0x1);
++      INSTANCE_WR(ctx, 0x15964/4, 0x1);
++      INSTANCE_WR(ctx, 0x15A04/4, 0x4);
++      INSTANCE_WR(ctx, 0x15A44/4, 0x1);
++      INSTANCE_WR(ctx, 0x15A64/4, 0x15);
++      INSTANCE_WR(ctx, 0x15AE4/4, 0x4444480);
++      INSTANCE_WR(ctx, 0x16264/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x16304/4, 0x100);
++      INSTANCE_WR(ctx, 0x16364/4, 0x10001);
++      INSTANCE_WR(ctx, 0x163A4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x163C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x163E4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x16404/4, 0x1);
++      INSTANCE_WR(ctx, 0x16424/4, 0x4);
++      INSTANCE_WR(ctx, 0x16444/4, 0x2);
++      INSTANCE_WR(ctx, 0x183C4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x183E4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18484/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18604/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x18624/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x16508/4, 0x3FFFFF);
++      INSTANCE_WR(ctx, 0x16568/4, 0x1FFF);
++      INSTANCE_WR(ctx, 0x16748/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x16828/4, 0x4);
++      INSTANCE_WR(ctx, 0x16848/4, 0x1A);
++      INSTANCE_WR(ctx, 0x168A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B08/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x16BE8/4, 0xF);
++      INSTANCE_WR(ctx, 0x16CE8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16D08/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F08/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FA8/4, 0x2);
++      INSTANCE_WR(ctx, 0x16FC8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x16FE8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x17068/4, 0x5);
++      INSTANCE_WR(ctx, 0x17088/4, 0x52);
++      INSTANCE_WR(ctx, 0x17128/4, 0x1);
++      INSTANCE_WR(ctx, 0x17348/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17368/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17388/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x173A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x173C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x173E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17408/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17428/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17448/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17468/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17488/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174A8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174C8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x174E8/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17508/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17528/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x17548/4, 0x10);
++      INSTANCE_WR(ctx, 0x17A28/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x17A48/4, 0x5);
++      INSTANCE_WR(ctx, 0x17AA8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AE8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B08/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B28/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B48/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x17B68/4, 0x3);
++      INSTANCE_WR(ctx, 0x17F68/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x17F88/4, 0x1A);
++      INSTANCE_WR(ctx, 0x17FC8/4, 0x3);
++      INSTANCE_WR(ctx, 0x184A8/4, 0x102);
++      INSTANCE_WR(ctx, 0x184E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18508/4, 0x4);
++      INSTANCE_WR(ctx, 0x18528/4, 0x4);
++      INSTANCE_WR(ctx, 0x18548/4, 0x4);
++      INSTANCE_WR(ctx, 0x18568/4, 0x4);
++      INSTANCE_WR(ctx, 0x18588/4, 0x4);
++      INSTANCE_WR(ctx, 0x185C8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x18608/4, 0x102);
++      INSTANCE_WR(ctx, 0x18748/4, 0x4);
++      INSTANCE_WR(ctx, 0x18768/4, 0x4);
++      INSTANCE_WR(ctx, 0x18788/4, 0x4);
++      INSTANCE_WR(ctx, 0x187A8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18DE8/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x18E48/4, 0x804);
++      INSTANCE_WR(ctx, 0x18E88/4, 0x4);
++      INSTANCE_WR(ctx, 0x18EA8/4, 0x4);
++      INSTANCE_WR(ctx, 0x18EC8/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x18F08/4, 0x4);
++      INSTANCE_WR(ctx, 0x18F28/4, 0x4);
++      INSTANCE_WR(ctx, 0x18F68/4, 0x10);
++      INSTANCE_WR(ctx, 0x19008/4, 0x804);
++      INSTANCE_WR(ctx, 0x19028/4, 0x1);
++      INSTANCE_WR(ctx, 0x19048/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19068/4, 0x7F);
++      INSTANCE_WR(ctx, 0x190A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x190C8/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x19108/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x19128/4, 0x4);
++      INSTANCE_WR(ctx, 0x19148/4, 0x4);
++      INSTANCE_WR(ctx, 0x19188/4, 0x10);
++      INSTANCE_WR(ctx, 0x19208/4, 0x1);
++      INSTANCE_WR(ctx, 0x19228/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x19308/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x19328/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x19A48/4, 0x1);
++      INSTANCE_WR(ctx, 0x19AA8/4, 0x10);
++      INSTANCE_WR(ctx, 0x1A1C8/4, 0x88);
++      INSTANCE_WR(ctx, 0x1A1E8/4, 0x88);
++      INSTANCE_WR(ctx, 0x1A248/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A528/4, 0x26);
++      INSTANCE_WR(ctx, 0x1A588/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x1A608/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1A628/4, 0x10);
++      INSTANCE_WR(ctx, 0x1AB48/4, 0x52);
++      INSTANCE_WR(ctx, 0x1AB88/4, 0x26);
++      INSTANCE_WR(ctx, 0x1ABC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1ABE8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1AC28/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1AC88/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x1ACC8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1ACE8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1AD28/4, 0x80);
++      INSTANCE_WR(ctx, 0x1AD48/4, 0x4);
++      INSTANCE_WR(ctx, 0x1AD68/4, 0x80C14);
++      INSTANCE_WR(ctx, 0x1ADA8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2D608/4, 0x4);
++      INSTANCE_WR(ctx, 0x2D628/4, 0x4);
++      INSTANCE_WR(ctx, 0x2D668/4, 0x80);
++      INSTANCE_WR(ctx, 0x2D688/4, 0x4);
++      INSTANCE_WR(ctx, 0x2D6A8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2D6E8/4, 0x27);
++      INSTANCE_WR(ctx, 0x2D728/4, 0x26);
++      INSTANCE_WR(ctx, 0x2D7A8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D7C8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D7E8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D808/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D828/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D848/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D868/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D888/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D8A8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D8C8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D8E8/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D908/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D928/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D948/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D968/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2D988/4, 0x4000000);
++      INSTANCE_WR(ctx, 0x2DE28/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x2DE48/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x2DEA8/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x160C/4, 0x2);
++      INSTANCE_WR(ctx, 0x164C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x180C/4, 0x10);
++      INSTANCE_WR(ctx, 0x186C/4, 0x1);
++      INSTANCE_WR(ctx, 0x190C/4, 0x4);
++      INSTANCE_WR(ctx, 0x192C/4, 0x400);
++      INSTANCE_WR(ctx, 0x194C/4, 0x300);
++      INSTANCE_WR(ctx, 0x196C/4, 0x1001);
++      INSTANCE_WR(ctx, 0x198C/4, 0x15);
++      INSTANCE_WR(ctx, 0x1A4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x1B6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1B8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x1BCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1E4C/4, 0x10);
++      INSTANCE_WR(ctx, 0x206C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x208C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x20AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x20CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x20EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x210C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x212C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x214C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x216C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x218C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x21AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x21CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x21EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x220C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x222C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x224C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x258C/4, 0x10);
++      INSTANCE_WR(ctx, 0x25CC/4, 0x3F);
++      INSTANCE_WR(ctx, 0x26AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x26EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x272C/4, 0x1);
++      INSTANCE_WR(ctx, 0x28CC/4, 0x11);
++      INSTANCE_WR(ctx, 0x29CC/4, 0xF);
++      INSTANCE_WR(ctx, 0x2ACC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2BAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2BCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2BEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2C0C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2C2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2C4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2C6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2CAC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2CEC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FCC/4, 0x2);
++      INSTANCE_WR(ctx, 0x2FEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x300C/4, 0x1);
++      INSTANCE_WR(ctx, 0x302C/4, 0x2);
++      INSTANCE_WR(ctx, 0x304C/4, 0x1);
++      INSTANCE_WR(ctx, 0x306C/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EC/4, 0x11);
++      INSTANCE_WR(ctx, 0x310C/4, 0x1);
++      INSTANCE_WR(ctx, 0x3D8C/4, 0x2);
++      INSTANCE_WR(ctx, 0x3DCC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x3F6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x3F8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x3FEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x408C/4, 0x4);
++      INSTANCE_WR(ctx, 0x40AC/4, 0x400);
++      INSTANCE_WR(ctx, 0x40CC/4, 0x300);
++      INSTANCE_WR(ctx, 0x40EC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x410C/4, 0x15);
++      INSTANCE_WR(ctx, 0x41CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x42EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x430C/4, 0x10);
++      INSTANCE_WR(ctx, 0x434C/4, 0x1);
++      INSTANCE_WR(ctx, 0x45CC/4, 0x10);
++      INSTANCE_WR(ctx, 0x47EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x480C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x482C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x484C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x486C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x488C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x48AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x48CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x48EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x490C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x492C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x494C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x496C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x498C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x49AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x49CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x4D0C/4, 0x10);
++      INSTANCE_WR(ctx, 0x4D4C/4, 0x3F);
++      INSTANCE_WR(ctx, 0x4E2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4E6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x4EAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x504C/4, 0x11);
++      INSTANCE_WR(ctx, 0x514C/4, 0xF);
++      INSTANCE_WR(ctx, 0x524C/4, 0x11);
++      INSTANCE_WR(ctx, 0x532C/4, 0x1);
++      INSTANCE_WR(ctx, 0x534C/4, 0x1);
++      INSTANCE_WR(ctx, 0x536C/4, 0x1);
++      INSTANCE_WR(ctx, 0x538C/4, 0x2);
++      INSTANCE_WR(ctx, 0x53AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x53CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x53EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x542C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x546C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x572C/4, 0x1);
++      INSTANCE_WR(ctx, 0x574C/4, 0x2);
++      INSTANCE_WR(ctx, 0x576C/4, 0x1);
++      INSTANCE_WR(ctx, 0x578C/4, 0x1);
++      INSTANCE_WR(ctx, 0x57AC/4, 0x2);
++      INSTANCE_WR(ctx, 0x57CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x57EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x586C/4, 0x11);
++      INSTANCE_WR(ctx, 0x588C/4, 0x1);
++      INSTANCE_WR(ctx, 0x650C/4, 0x2);
++      INSTANCE_WR(ctx, 0x654C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x66EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x670C/4, 0x10);
++      INSTANCE_WR(ctx, 0x676C/4, 0x1);
++      INSTANCE_WR(ctx, 0x680C/4, 0x4);
++      INSTANCE_WR(ctx, 0x682C/4, 0x400);
++      INSTANCE_WR(ctx, 0x684C/4, 0x300);
++      INSTANCE_WR(ctx, 0x686C/4, 0x1001);
++      INSTANCE_WR(ctx, 0x688C/4, 0x15);
++      INSTANCE_WR(ctx, 0x694C/4, 0x2);
++      INSTANCE_WR(ctx, 0x6A6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x6A8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x6ACC/4, 0x1);
++      INSTANCE_WR(ctx, 0x6D4C/4, 0x10);
++      INSTANCE_WR(ctx, 0x6F6C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6F8C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6FAC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6FCC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x6FEC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x700C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x702C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x704C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x706C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x708C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x70AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x70CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x70EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x710C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x712C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x714C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x748C/4, 0x10);
++      INSTANCE_WR(ctx, 0x74CC/4, 0x3F);
++      INSTANCE_WR(ctx, 0x75AC/4, 0x1);
++      INSTANCE_WR(ctx, 0x75EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x762C/4, 0x1);
++      INSTANCE_WR(ctx, 0x77CC/4, 0x11);
++      INSTANCE_WR(ctx, 0x78CC/4, 0xF);
++      INSTANCE_WR(ctx, 0x79CC/4, 0x11);
++      INSTANCE_WR(ctx, 0x7AAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7ACC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7AEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7B0C/4, 0x2);
++      INSTANCE_WR(ctx, 0x7B2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7B4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x7B6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7BAC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x7BEC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x7EAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7ECC/4, 0x2);
++      INSTANCE_WR(ctx, 0x7EEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x7F0C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7F2C/4, 0x2);
++      INSTANCE_WR(ctx, 0x7F4C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7F6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x7FEC/4, 0x11);
++      INSTANCE_WR(ctx, 0x800C/4, 0x1);
++      INSTANCE_WR(ctx, 0x8C8C/4, 0x2);
++      INSTANCE_WR(ctx, 0x8CCC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x8E6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x8E8C/4, 0x10);
++      INSTANCE_WR(ctx, 0x8EEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x8F8C/4, 0x4);
++      INSTANCE_WR(ctx, 0x8FAC/4, 0x400);
++      INSTANCE_WR(ctx, 0x8FCC/4, 0x300);
++      INSTANCE_WR(ctx, 0x8FEC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x900C/4, 0x15);
++      INSTANCE_WR(ctx, 0x90CC/4, 0x2);
++      INSTANCE_WR(ctx, 0x91EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x920C/4, 0x10);
++      INSTANCE_WR(ctx, 0x924C/4, 0x1);
++      INSTANCE_WR(ctx, 0x94CC/4, 0x10);
++      INSTANCE_WR(ctx, 0x96EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x970C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x972C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x974C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x976C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x978C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x97AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x97CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x97EC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x980C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x982C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x984C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x986C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x988C/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x98AC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x98CC/4, 0x3F800000);
++      INSTANCE_WR(ctx, 0x9C0C/4, 0x10);
++      INSTANCE_WR(ctx, 0x9C4C/4, 0x3F);
++      INSTANCE_WR(ctx, 0x9D2C/4, 0x1);
++      INSTANCE_WR(ctx, 0x9D6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x9DAC/4, 0x1);
++      INSTANCE_WR(ctx, 0x9F4C/4, 0x11);
++      INSTANCE_WR(ctx, 0xA04C/4, 0xF);
++      INSTANCE_WR(ctx, 0xA14C/4, 0x11);
++      INSTANCE_WR(ctx, 0xA22C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA24C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA26C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA28C/4, 0x2);
++      INSTANCE_WR(ctx, 0xA2AC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA2CC/4, 0x2);
++      INSTANCE_WR(ctx, 0xA2EC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA32C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0xA36C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0xA62C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA64C/4, 0x2);
++      INSTANCE_WR(ctx, 0xA66C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA68C/4, 0x1);
++      INSTANCE_WR(ctx, 0xA6AC/4, 0x2);
++      INSTANCE_WR(ctx, 0xA6CC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA6EC/4, 0x1);
++      INSTANCE_WR(ctx, 0xA76C/4, 0x11);
++      INSTANCE_WR(ctx, 0xA78C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1530/4, 0x4);
++      INSTANCE_WR(ctx, 0x17F0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1810/4, 0x4);
++      INSTANCE_WR(ctx, 0x1830/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18D0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1930/4, 0x4);
++      INSTANCE_WR(ctx, 0x1950/4, 0x4);
++      INSTANCE_WR(ctx, 0x1970/4, 0x80);
++      INSTANCE_WR(ctx, 0x1990/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E30/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E50/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E70/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E90/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EB0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1ED0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F70/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F90/4, 0x3);
++      INSTANCE_WR(ctx, 0x2010/4, 0x4);
++      INSTANCE_WR(ctx, 0x164B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x164D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x16710/4, 0xF);
++      INSTANCE_WR(ctx, 0x16890/4, 0x4);
++      INSTANCE_WR(ctx, 0x168B0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168D0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168F0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16910/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A30/4, 0x1);
++      INSTANCE_WR(ctx, 0x16AB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B70/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D10/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D30/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D50/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D70/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D90/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB0/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E10/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F10/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F70/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FF0/4, 0x11);
++      INSTANCE_WR(ctx, 0x17010/4, 0x1);
++      INSTANCE_WR(ctx, 0x17050/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17070/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17090/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17210/4, 0x1);
++      INSTANCE_WR(ctx, 0x17230/4, 0x2);
++      INSTANCE_WR(ctx, 0x17250/4, 0x1);
++      INSTANCE_WR(ctx, 0x17270/4, 0x1);
++      INSTANCE_WR(ctx, 0x17290/4, 0x2);
++      INSTANCE_WR(ctx, 0x172B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x172F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17310/4, 0x1);
++      INSTANCE_WR(ctx, 0x17330/4, 0x1);
++      INSTANCE_WR(ctx, 0x17350/4, 0x1);
++      INSTANCE_WR(ctx, 0x17370/4, 0x1);
++      INSTANCE_WR(ctx, 0x17390/4, 0x1);
++      INSTANCE_WR(ctx, 0x173B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x173D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x173F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x174F0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17510/4, 0xF);
++      INSTANCE_WR(ctx, 0x17610/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17670/4, 0x11);
++      INSTANCE_WR(ctx, 0x17690/4, 0x1);
++      INSTANCE_WR(ctx, 0x17710/4, 0x4);
++      INSTANCE_WR(ctx, 0x177D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x17870/4, 0x11);
++      INSTANCE_WR(ctx, 0x17970/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A10/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A50/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A90/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AD0/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B10/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B50/4, 0x1);
++      INSTANCE_WR(ctx, 0x180B0/4, 0x8);
++      INSTANCE_WR(ctx, 0x180D0/4, 0x8);
++      INSTANCE_WR(ctx, 0x180F0/4, 0x8);
++      INSTANCE_WR(ctx, 0x18110/4, 0x8);
++      INSTANCE_WR(ctx, 0x18130/4, 0x8);
++      INSTANCE_WR(ctx, 0x18150/4, 0x8);
++      INSTANCE_WR(ctx, 0x18170/4, 0x8);
++      INSTANCE_WR(ctx, 0x18190/4, 0x8);
++      INSTANCE_WR(ctx, 0x181B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x182B0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182D0/4, 0x400);
++      INSTANCE_WR(ctx, 0x182F0/4, 0x400);
++      INSTANCE_WR(ctx, 0x18310/4, 0x400);
++      INSTANCE_WR(ctx, 0x18330/4, 0x400);
++      INSTANCE_WR(ctx, 0x18350/4, 0x400);
++      INSTANCE_WR(ctx, 0x18370/4, 0x400);
++      INSTANCE_WR(ctx, 0x18390/4, 0x400);
++      INSTANCE_WR(ctx, 0x183B0/4, 0x400);
++      INSTANCE_WR(ctx, 0x183D0/4, 0x300);
++      INSTANCE_WR(ctx, 0x183F0/4, 0x300);
++      INSTANCE_WR(ctx, 0x18410/4, 0x300);
++      INSTANCE_WR(ctx, 0x18430/4, 0x300);
++      INSTANCE_WR(ctx, 0x18450/4, 0x300);
++      INSTANCE_WR(ctx, 0x18470/4, 0x300);
++      INSTANCE_WR(ctx, 0x18490/4, 0x300);
++      INSTANCE_WR(ctx, 0x184B0/4, 0x300);
++      INSTANCE_WR(ctx, 0x184D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x184F0/4, 0xF);
++      INSTANCE_WR(ctx, 0x185F0/4, 0x20);
++      INSTANCE_WR(ctx, 0x18610/4, 0x11);
++      INSTANCE_WR(ctx, 0x18630/4, 0x100);
++      INSTANCE_WR(ctx, 0x18670/4, 0x1);
++      INSTANCE_WR(ctx, 0x186D0/4, 0x40);
++      INSTANCE_WR(ctx, 0x186F0/4, 0x100);
++      INSTANCE_WR(ctx, 0x18730/4, 0x3);
++      INSTANCE_WR(ctx, 0x187D0/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18850/4, 0x2);
++      INSTANCE_WR(ctx, 0x18870/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A50/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A90/4, 0x1);
++      INSTANCE_WR(ctx, 0x18AB0/4, 0x400);
++      INSTANCE_WR(ctx, 0x18AD0/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AF0/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B70/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C90/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F90/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x19010/4, 0x11);
++      INSTANCE_WR(ctx, 0x19070/4, 0x4);
++      INSTANCE_WR(ctx, 0x190B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x190D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x19150/4, 0x1);
++      INSTANCE_WR(ctx, 0x191F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x19230/4, 0x1);
++      INSTANCE_WR(ctx, 0x192B0/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192F0/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x19310/4, 0x40);
++      INSTANCE_WR(ctx, 0x19330/4, 0x100);
++      INSTANCE_WR(ctx, 0x19350/4, 0x10100);
++      INSTANCE_WR(ctx, 0x19370/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195D0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195F0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x19610/4, 0x1);
++      INSTANCE_WR(ctx, 0x19650/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x19670/4, 0x1);
++      INSTANCE_WR(ctx, 0x196D0/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x19830/4, 0x1);
++      INSTANCE_WR(ctx, 0x19850/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19870/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19890/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198B0/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198F0/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19930/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BF0/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C10/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C30/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CD0/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D30/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D50/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D70/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D90/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A230/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A250/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A270/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A290/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2B0/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2D0/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A370/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A390/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A410/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8B0/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8D0/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB10/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC90/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECB0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECD0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECF0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED10/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE30/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF70/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F110/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F130/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F150/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F170/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F190/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1B0/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F210/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F310/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F370/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F410/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F450/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F470/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F490/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F610/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F630/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F650/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F670/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F690/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F710/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F730/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F750/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F770/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F790/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7F0/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8F0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F910/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA10/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA70/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA90/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB10/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC70/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDF0/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE10/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE50/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE90/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FED0/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF10/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF50/4, 0x1);
++      INSTANCE_WR(ctx, 0x304B0/4, 0x8);
++      INSTANCE_WR(ctx, 0x304D0/4, 0x8);
++      INSTANCE_WR(ctx, 0x304F0/4, 0x8);
++      INSTANCE_WR(ctx, 0x30510/4, 0x8);
++      INSTANCE_WR(ctx, 0x30530/4, 0x8);
++      INSTANCE_WR(ctx, 0x30550/4, 0x8);
++      INSTANCE_WR(ctx, 0x30570/4, 0x8);
++      INSTANCE_WR(ctx, 0x30590/4, 0x8);
++      INSTANCE_WR(ctx, 0x305B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x306B0/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306D0/4, 0x400);
++      INSTANCE_WR(ctx, 0x306F0/4, 0x400);
++      INSTANCE_WR(ctx, 0x30710/4, 0x400);
++      INSTANCE_WR(ctx, 0x30730/4, 0x400);
++      INSTANCE_WR(ctx, 0x30750/4, 0x400);
++      INSTANCE_WR(ctx, 0x30770/4, 0x400);
++      INSTANCE_WR(ctx, 0x30790/4, 0x400);
++      INSTANCE_WR(ctx, 0x307B0/4, 0x400);
++      INSTANCE_WR(ctx, 0x307D0/4, 0x300);
++      INSTANCE_WR(ctx, 0x307F0/4, 0x300);
++      INSTANCE_WR(ctx, 0x30810/4, 0x300);
++      INSTANCE_WR(ctx, 0x30830/4, 0x300);
++      INSTANCE_WR(ctx, 0x30850/4, 0x300);
++      INSTANCE_WR(ctx, 0x30870/4, 0x300);
++      INSTANCE_WR(ctx, 0x30890/4, 0x300);
++      INSTANCE_WR(ctx, 0x308B0/4, 0x300);
++      INSTANCE_WR(ctx, 0x308D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x308F0/4, 0xF);
++      INSTANCE_WR(ctx, 0x309F0/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A10/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A30/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A70/4, 0x1);
++      INSTANCE_WR(ctx, 0x30AD0/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AF0/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B30/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BD0/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C50/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DB0/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E50/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E90/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EB0/4, 0x400);
++      INSTANCE_WR(ctx, 0x30ED0/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EF0/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F70/4, 0x11);
++      INSTANCE_WR(ctx, 0x31070/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x31090/4, 0xF);
++      INSTANCE_WR(ctx, 0x31390/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x31410/4, 0x11);
++      INSTANCE_WR(ctx, 0x31470/4, 0x4);
++      INSTANCE_WR(ctx, 0x314B0/4, 0x1);
++      INSTANCE_WR(ctx, 0x314D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x31550/4, 0x1);
++      INSTANCE_WR(ctx, 0x315F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x31630/4, 0x1);
++      INSTANCE_WR(ctx, 0x316B0/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316F0/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x31710/4, 0x40);
++      INSTANCE_WR(ctx, 0x31730/4, 0x100);
++      INSTANCE_WR(ctx, 0x31750/4, 0x10100);
++      INSTANCE_WR(ctx, 0x31770/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319D0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319F0/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A10/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A50/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A70/4, 0x1);
++      INSTANCE_WR(ctx, 0x31AD0/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C30/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C50/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C70/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C90/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CB0/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CF0/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1534/4, 0x4);
++      INSTANCE_WR(ctx, 0x17F4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1814/4, 0x4);
++      INSTANCE_WR(ctx, 0x1834/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18D4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1934/4, 0x4);
++      INSTANCE_WR(ctx, 0x1954/4, 0x4);
++      INSTANCE_WR(ctx, 0x1974/4, 0x80);
++      INSTANCE_WR(ctx, 0x1994/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E34/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E54/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E74/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E94/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EB4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1ED4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F74/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F94/4, 0x3);
++      INSTANCE_WR(ctx, 0x2014/4, 0x4);
++      INSTANCE_WR(ctx, 0x164B4/4, 0x4);
++      INSTANCE_WR(ctx, 0x164D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x16714/4, 0xF);
++      INSTANCE_WR(ctx, 0x16894/4, 0x4);
++      INSTANCE_WR(ctx, 0x168B4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168D4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168F4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16914/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A34/4, 0x1);
++      INSTANCE_WR(ctx, 0x16AB4/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B74/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D14/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D34/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D54/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D74/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D94/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB4/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DD4/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E14/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F14/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F74/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FF4/4, 0x11);
++      INSTANCE_WR(ctx, 0x17014/4, 0x1);
++      INSTANCE_WR(ctx, 0x17054/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17074/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17094/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x17214/4, 0x1);
++      INSTANCE_WR(ctx, 0x17234/4, 0x2);
++      INSTANCE_WR(ctx, 0x17254/4, 0x1);
++      INSTANCE_WR(ctx, 0x17274/4, 0x1);
++      INSTANCE_WR(ctx, 0x17294/4, 0x2);
++      INSTANCE_WR(ctx, 0x172B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x172F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x17314/4, 0x1);
++      INSTANCE_WR(ctx, 0x17334/4, 0x1);
++      INSTANCE_WR(ctx, 0x17354/4, 0x1);
++      INSTANCE_WR(ctx, 0x17374/4, 0x1);
++      INSTANCE_WR(ctx, 0x17394/4, 0x1);
++      INSTANCE_WR(ctx, 0x173B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x173D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x173F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x174F4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17514/4, 0xF);
++      INSTANCE_WR(ctx, 0x17614/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17674/4, 0x11);
++      INSTANCE_WR(ctx, 0x17694/4, 0x1);
++      INSTANCE_WR(ctx, 0x17714/4, 0x4);
++      INSTANCE_WR(ctx, 0x177D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x17874/4, 0x11);
++      INSTANCE_WR(ctx, 0x17974/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A14/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A54/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A94/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AD4/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B14/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B54/4, 0x1);
++      INSTANCE_WR(ctx, 0x180B4/4, 0x8);
++      INSTANCE_WR(ctx, 0x180D4/4, 0x8);
++      INSTANCE_WR(ctx, 0x180F4/4, 0x8);
++      INSTANCE_WR(ctx, 0x18114/4, 0x8);
++      INSTANCE_WR(ctx, 0x18134/4, 0x8);
++      INSTANCE_WR(ctx, 0x18154/4, 0x8);
++      INSTANCE_WR(ctx, 0x18174/4, 0x8);
++      INSTANCE_WR(ctx, 0x18194/4, 0x8);
++      INSTANCE_WR(ctx, 0x181B4/4, 0x11);
++      INSTANCE_WR(ctx, 0x182B4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182D4/4, 0x400);
++      INSTANCE_WR(ctx, 0x182F4/4, 0x400);
++      INSTANCE_WR(ctx, 0x18314/4, 0x400);
++      INSTANCE_WR(ctx, 0x18334/4, 0x400);
++      INSTANCE_WR(ctx, 0x18354/4, 0x400);
++      INSTANCE_WR(ctx, 0x18374/4, 0x400);
++      INSTANCE_WR(ctx, 0x18394/4, 0x400);
++      INSTANCE_WR(ctx, 0x183B4/4, 0x400);
++      INSTANCE_WR(ctx, 0x183D4/4, 0x300);
++      INSTANCE_WR(ctx, 0x183F4/4, 0x300);
++      INSTANCE_WR(ctx, 0x18414/4, 0x300);
++      INSTANCE_WR(ctx, 0x18434/4, 0x300);
++      INSTANCE_WR(ctx, 0x18454/4, 0x300);
++      INSTANCE_WR(ctx, 0x18474/4, 0x300);
++      INSTANCE_WR(ctx, 0x18494/4, 0x300);
++      INSTANCE_WR(ctx, 0x184B4/4, 0x300);
++      INSTANCE_WR(ctx, 0x184D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x184F4/4, 0xF);
++      INSTANCE_WR(ctx, 0x185F4/4, 0x20);
++      INSTANCE_WR(ctx, 0x18614/4, 0x11);
++      INSTANCE_WR(ctx, 0x18634/4, 0x100);
++      INSTANCE_WR(ctx, 0x18674/4, 0x1);
++      INSTANCE_WR(ctx, 0x186D4/4, 0x40);
++      INSTANCE_WR(ctx, 0x186F4/4, 0x100);
++      INSTANCE_WR(ctx, 0x18734/4, 0x3);
++      INSTANCE_WR(ctx, 0x187D4/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18854/4, 0x2);
++      INSTANCE_WR(ctx, 0x18874/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A54/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A94/4, 0x1);
++      INSTANCE_WR(ctx, 0x18AB4/4, 0x400);
++      INSTANCE_WR(ctx, 0x18AD4/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AF4/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B74/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C94/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F94/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x19014/4, 0x11);
++      INSTANCE_WR(ctx, 0x19074/4, 0x4);
++      INSTANCE_WR(ctx, 0x190B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x190D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x19154/4, 0x1);
++      INSTANCE_WR(ctx, 0x191F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x19234/4, 0x1);
++      INSTANCE_WR(ctx, 0x192B4/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192F4/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x19314/4, 0x40);
++      INSTANCE_WR(ctx, 0x19334/4, 0x100);
++      INSTANCE_WR(ctx, 0x19354/4, 0x10100);
++      INSTANCE_WR(ctx, 0x19374/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195D4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195F4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x19614/4, 0x1);
++      INSTANCE_WR(ctx, 0x19654/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x19674/4, 0x1);
++      INSTANCE_WR(ctx, 0x196D4/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x19834/4, 0x1);
++      INSTANCE_WR(ctx, 0x19854/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19874/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19894/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198B4/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198F4/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19934/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BF4/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C14/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C34/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CD4/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D34/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D54/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D74/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D94/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A234/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A254/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A274/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A294/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2B4/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2D4/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A374/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A394/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A414/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8B4/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8D4/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB14/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC94/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECB4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECD4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECF4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED14/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE34/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEB4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF74/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F114/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F134/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F154/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F174/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F194/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1B4/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F214/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F314/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F374/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F414/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F454/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F474/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F494/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F614/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F634/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F654/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F674/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F694/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F714/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F734/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F754/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F774/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F794/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7F4/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8F4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F914/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA14/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA74/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA94/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB14/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBD4/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC74/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDF4/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE14/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE54/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE94/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FED4/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF14/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF54/4, 0x1);
++      INSTANCE_WR(ctx, 0x304B4/4, 0x8);
++      INSTANCE_WR(ctx, 0x304D4/4, 0x8);
++      INSTANCE_WR(ctx, 0x304F4/4, 0x8);
++      INSTANCE_WR(ctx, 0x30514/4, 0x8);
++      INSTANCE_WR(ctx, 0x30534/4, 0x8);
++      INSTANCE_WR(ctx, 0x30554/4, 0x8);
++      INSTANCE_WR(ctx, 0x30574/4, 0x8);
++      INSTANCE_WR(ctx, 0x30594/4, 0x8);
++      INSTANCE_WR(ctx, 0x305B4/4, 0x11);
++      INSTANCE_WR(ctx, 0x306B4/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306D4/4, 0x400);
++      INSTANCE_WR(ctx, 0x306F4/4, 0x400);
++      INSTANCE_WR(ctx, 0x30714/4, 0x400);
++      INSTANCE_WR(ctx, 0x30734/4, 0x400);
++      INSTANCE_WR(ctx, 0x30754/4, 0x400);
++      INSTANCE_WR(ctx, 0x30774/4, 0x400);
++      INSTANCE_WR(ctx, 0x30794/4, 0x400);
++      INSTANCE_WR(ctx, 0x307B4/4, 0x400);
++      INSTANCE_WR(ctx, 0x307D4/4, 0x300);
++      INSTANCE_WR(ctx, 0x307F4/4, 0x300);
++      INSTANCE_WR(ctx, 0x30814/4, 0x300);
++      INSTANCE_WR(ctx, 0x30834/4, 0x300);
++      INSTANCE_WR(ctx, 0x30854/4, 0x300);
++      INSTANCE_WR(ctx, 0x30874/4, 0x300);
++      INSTANCE_WR(ctx, 0x30894/4, 0x300);
++      INSTANCE_WR(ctx, 0x308B4/4, 0x300);
++      INSTANCE_WR(ctx, 0x308D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x308F4/4, 0xF);
++      INSTANCE_WR(ctx, 0x309F4/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A14/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A34/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A74/4, 0x1);
++      INSTANCE_WR(ctx, 0x30AD4/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AF4/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B34/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BD4/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C54/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DB4/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E54/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E94/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EB4/4, 0x400);
++      INSTANCE_WR(ctx, 0x30ED4/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EF4/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F74/4, 0x11);
++      INSTANCE_WR(ctx, 0x31074/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x31094/4, 0xF);
++      INSTANCE_WR(ctx, 0x31394/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x31414/4, 0x11);
++      INSTANCE_WR(ctx, 0x31474/4, 0x4);
++      INSTANCE_WR(ctx, 0x314B4/4, 0x1);
++      INSTANCE_WR(ctx, 0x314D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x31554/4, 0x1);
++      INSTANCE_WR(ctx, 0x315F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x31634/4, 0x1);
++      INSTANCE_WR(ctx, 0x316B4/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316F4/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x31714/4, 0x40);
++      INSTANCE_WR(ctx, 0x31734/4, 0x100);
++      INSTANCE_WR(ctx, 0x31754/4, 0x10100);
++      INSTANCE_WR(ctx, 0x31774/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319D4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319F4/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A14/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A54/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A74/4, 0x1);
++      INSTANCE_WR(ctx, 0x31AD4/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BF4/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C34/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C54/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C74/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C94/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CB4/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CF4/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1538/4, 0x4);
++      INSTANCE_WR(ctx, 0x17F8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1818/4, 0x4);
++      INSTANCE_WR(ctx, 0x1838/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18D8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1938/4, 0x4);
++      INSTANCE_WR(ctx, 0x1958/4, 0x4);
++      INSTANCE_WR(ctx, 0x1978/4, 0x80);
++      INSTANCE_WR(ctx, 0x1998/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E38/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E58/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E78/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E98/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EB8/4, 0x3);
++      INSTANCE_WR(ctx, 0x1ED8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F78/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F98/4, 0x3);
++      INSTANCE_WR(ctx, 0x2018/4, 0x4);
++      INSTANCE_WR(ctx, 0x164B8/4, 0x4);
++      INSTANCE_WR(ctx, 0x164D8/4, 0x3);
++      INSTANCE_WR(ctx, 0x16718/4, 0xF);
++      INSTANCE_WR(ctx, 0x16898/4, 0x4);
++      INSTANCE_WR(ctx, 0x168B8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168D8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168F8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16918/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A38/4, 0x1);
++      INSTANCE_WR(ctx, 0x16AB8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B78/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D18/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D38/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D58/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D78/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D98/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DB8/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DD8/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E18/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F18/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F78/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FF8/4, 0x11);
++      INSTANCE_WR(ctx, 0x17018/4, 0x1);
++      INSTANCE_WR(ctx, 0x17058/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17078/4, 0xCF);
++      INSTANCE_WR(ctx, 0x17098/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17218/4, 0x1);
++      INSTANCE_WR(ctx, 0x17238/4, 0x2);
++      INSTANCE_WR(ctx, 0x17258/4, 0x1);
++      INSTANCE_WR(ctx, 0x17278/4, 0x1);
++      INSTANCE_WR(ctx, 0x17298/4, 0x2);
++      INSTANCE_WR(ctx, 0x172B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x172F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17318/4, 0x1);
++      INSTANCE_WR(ctx, 0x17338/4, 0x1);
++      INSTANCE_WR(ctx, 0x17358/4, 0x1);
++      INSTANCE_WR(ctx, 0x17378/4, 0x1);
++      INSTANCE_WR(ctx, 0x17398/4, 0x1);
++      INSTANCE_WR(ctx, 0x173B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x173D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x173F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x174F8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x17518/4, 0xF);
++      INSTANCE_WR(ctx, 0x17618/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x17678/4, 0x11);
++      INSTANCE_WR(ctx, 0x17698/4, 0x1);
++      INSTANCE_WR(ctx, 0x17718/4, 0x4);
++      INSTANCE_WR(ctx, 0x177D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x17878/4, 0x11);
++      INSTANCE_WR(ctx, 0x17978/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A18/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A58/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A98/4, 0x1);
++      INSTANCE_WR(ctx, 0x17AD8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B18/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B58/4, 0x1);
++      INSTANCE_WR(ctx, 0x180B8/4, 0x8);
++      INSTANCE_WR(ctx, 0x180D8/4, 0x8);
++      INSTANCE_WR(ctx, 0x180F8/4, 0x8);
++      INSTANCE_WR(ctx, 0x18118/4, 0x8);
++      INSTANCE_WR(ctx, 0x18138/4, 0x8);
++      INSTANCE_WR(ctx, 0x18158/4, 0x8);
++      INSTANCE_WR(ctx, 0x18178/4, 0x8);
++      INSTANCE_WR(ctx, 0x18198/4, 0x8);
++      INSTANCE_WR(ctx, 0x181B8/4, 0x11);
++      INSTANCE_WR(ctx, 0x182B8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182D8/4, 0x400);
++      INSTANCE_WR(ctx, 0x182F8/4, 0x400);
++      INSTANCE_WR(ctx, 0x18318/4, 0x400);
++      INSTANCE_WR(ctx, 0x18338/4, 0x400);
++      INSTANCE_WR(ctx, 0x18358/4, 0x400);
++      INSTANCE_WR(ctx, 0x18378/4, 0x400);
++      INSTANCE_WR(ctx, 0x18398/4, 0x400);
++      INSTANCE_WR(ctx, 0x183B8/4, 0x400);
++      INSTANCE_WR(ctx, 0x183D8/4, 0x300);
++      INSTANCE_WR(ctx, 0x183F8/4, 0x300);
++      INSTANCE_WR(ctx, 0x18418/4, 0x300);
++      INSTANCE_WR(ctx, 0x18438/4, 0x300);
++      INSTANCE_WR(ctx, 0x18458/4, 0x300);
++      INSTANCE_WR(ctx, 0x18478/4, 0x300);
++      INSTANCE_WR(ctx, 0x18498/4, 0x300);
++      INSTANCE_WR(ctx, 0x184B8/4, 0x300);
++      INSTANCE_WR(ctx, 0x184D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x184F8/4, 0xF);
++      INSTANCE_WR(ctx, 0x185F8/4, 0x20);
++      INSTANCE_WR(ctx, 0x18618/4, 0x11);
++      INSTANCE_WR(ctx, 0x18638/4, 0x100);
++      INSTANCE_WR(ctx, 0x18678/4, 0x1);
++      INSTANCE_WR(ctx, 0x186D8/4, 0x40);
++      INSTANCE_WR(ctx, 0x186F8/4, 0x100);
++      INSTANCE_WR(ctx, 0x18738/4, 0x3);
++      INSTANCE_WR(ctx, 0x187D8/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x18858/4, 0x2);
++      INSTANCE_WR(ctx, 0x18878/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A58/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A98/4, 0x1);
++      INSTANCE_WR(ctx, 0x18AB8/4, 0x400);
++      INSTANCE_WR(ctx, 0x18AD8/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AF8/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B78/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C98/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F98/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x19018/4, 0x11);
++      INSTANCE_WR(ctx, 0x19078/4, 0x4);
++      INSTANCE_WR(ctx, 0x190B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x190D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x19158/4, 0x1);
++      INSTANCE_WR(ctx, 0x191F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x19238/4, 0x1);
++      INSTANCE_WR(ctx, 0x192B8/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192F8/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x19318/4, 0x40);
++      INSTANCE_WR(ctx, 0x19338/4, 0x100);
++      INSTANCE_WR(ctx, 0x19358/4, 0x10100);
++      INSTANCE_WR(ctx, 0x19378/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195D8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195F8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x19618/4, 0x1);
++      INSTANCE_WR(ctx, 0x19658/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x19678/4, 0x1);
++      INSTANCE_WR(ctx, 0x196D8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x19838/4, 0x1);
++      INSTANCE_WR(ctx, 0x19858/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x19878/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x19898/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198B8/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198F8/4, 0x1A);
++      INSTANCE_WR(ctx, 0x19938/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BF8/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C18/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C38/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CD8/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D38/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D58/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D78/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D98/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A238/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A258/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A278/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A298/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2B8/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2D8/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A378/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A398/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A418/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8B8/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8D8/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB18/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC98/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECB8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECD8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECF8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED18/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE38/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEB8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF78/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F118/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F138/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F158/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F178/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F198/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1B8/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F218/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F318/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F378/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F418/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F458/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F478/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F498/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F618/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F638/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F658/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F678/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F698/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F718/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F738/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F758/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F778/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F798/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7F8/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8F8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F918/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA18/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA78/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA98/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB18/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBD8/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC78/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDF8/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE18/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE58/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE98/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FED8/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF18/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF58/4, 0x1);
++      INSTANCE_WR(ctx, 0x304B8/4, 0x8);
++      INSTANCE_WR(ctx, 0x304D8/4, 0x8);
++      INSTANCE_WR(ctx, 0x304F8/4, 0x8);
++      INSTANCE_WR(ctx, 0x30518/4, 0x8);
++      INSTANCE_WR(ctx, 0x30538/4, 0x8);
++      INSTANCE_WR(ctx, 0x30558/4, 0x8);
++      INSTANCE_WR(ctx, 0x30578/4, 0x8);
++      INSTANCE_WR(ctx, 0x30598/4, 0x8);
++      INSTANCE_WR(ctx, 0x305B8/4, 0x11);
++      INSTANCE_WR(ctx, 0x306B8/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306D8/4, 0x400);
++      INSTANCE_WR(ctx, 0x306F8/4, 0x400);
++      INSTANCE_WR(ctx, 0x30718/4, 0x400);
++      INSTANCE_WR(ctx, 0x30738/4, 0x400);
++      INSTANCE_WR(ctx, 0x30758/4, 0x400);
++      INSTANCE_WR(ctx, 0x30778/4, 0x400);
++      INSTANCE_WR(ctx, 0x30798/4, 0x400);
++      INSTANCE_WR(ctx, 0x307B8/4, 0x400);
++      INSTANCE_WR(ctx, 0x307D8/4, 0x300);
++      INSTANCE_WR(ctx, 0x307F8/4, 0x300);
++      INSTANCE_WR(ctx, 0x30818/4, 0x300);
++      INSTANCE_WR(ctx, 0x30838/4, 0x300);
++      INSTANCE_WR(ctx, 0x30858/4, 0x300);
++      INSTANCE_WR(ctx, 0x30878/4, 0x300);
++      INSTANCE_WR(ctx, 0x30898/4, 0x300);
++      INSTANCE_WR(ctx, 0x308B8/4, 0x300);
++      INSTANCE_WR(ctx, 0x308D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x308F8/4, 0xF);
++      INSTANCE_WR(ctx, 0x309F8/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A18/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A38/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A78/4, 0x1);
++      INSTANCE_WR(ctx, 0x30AD8/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AF8/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B38/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BD8/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C58/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DB8/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E58/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E98/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EB8/4, 0x400);
++      INSTANCE_WR(ctx, 0x30ED8/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EF8/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F78/4, 0x11);
++      INSTANCE_WR(ctx, 0x31078/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x31098/4, 0xF);
++      INSTANCE_WR(ctx, 0x31398/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x31418/4, 0x11);
++      INSTANCE_WR(ctx, 0x31478/4, 0x4);
++      INSTANCE_WR(ctx, 0x314B8/4, 0x1);
++      INSTANCE_WR(ctx, 0x314D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x31558/4, 0x1);
++      INSTANCE_WR(ctx, 0x315F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x31638/4, 0x1);
++      INSTANCE_WR(ctx, 0x316B8/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316F8/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x31718/4, 0x40);
++      INSTANCE_WR(ctx, 0x31738/4, 0x100);
++      INSTANCE_WR(ctx, 0x31758/4, 0x10100);
++      INSTANCE_WR(ctx, 0x31778/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319D8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319F8/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A18/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A58/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A78/4, 0x1);
++      INSTANCE_WR(ctx, 0x31AD8/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BF8/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C38/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C58/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C78/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C98/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CB8/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CF8/4, 0x1A);
++      INSTANCE_WR(ctx, 0x153C/4, 0x4);
++      INSTANCE_WR(ctx, 0x17FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x181C/4, 0x4);
++      INSTANCE_WR(ctx, 0x183C/4, 0x608080);
++      INSTANCE_WR(ctx, 0x18DC/4, 0x4);
++      INSTANCE_WR(ctx, 0x193C/4, 0x4);
++      INSTANCE_WR(ctx, 0x195C/4, 0x4);
++      INSTANCE_WR(ctx, 0x197C/4, 0x80);
++      INSTANCE_WR(ctx, 0x199C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E3C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E5C/4, 0x80);
++      INSTANCE_WR(ctx, 0x1E7C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1E9C/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1EBC/4, 0x3);
++      INSTANCE_WR(ctx, 0x1EDC/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F7C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1F9C/4, 0x3);
++      INSTANCE_WR(ctx, 0x201C/4, 0x4);
++      INSTANCE_WR(ctx, 0x164BC/4, 0x4);
++      INSTANCE_WR(ctx, 0x164DC/4, 0x3);
++      INSTANCE_WR(ctx, 0x1671C/4, 0xF);
++      INSTANCE_WR(ctx, 0x1689C/4, 0x4);
++      INSTANCE_WR(ctx, 0x168BC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168DC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x168FC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x1691C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x16A3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16ABC/4, 0x1);
++      INSTANCE_WR(ctx, 0x16B7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D5C/4, 0x2);
++      INSTANCE_WR(ctx, 0x16D7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16D9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x16DBC/4, 0x2);
++      INSTANCE_WR(ctx, 0x16DDC/4, 0x1);
++      INSTANCE_WR(ctx, 0x16E1C/4, 0x11);
++      INSTANCE_WR(ctx, 0x16F1C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x16F7C/4, 0x4);
++      INSTANCE_WR(ctx, 0x16FFC/4, 0x11);
++      INSTANCE_WR(ctx, 0x1701C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1705C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x1707C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x1709C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x171FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1721C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1723C/4, 0x2);
++      INSTANCE_WR(ctx, 0x1725C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1727C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1729C/4, 0x2);
++      INSTANCE_WR(ctx, 0x172BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x172FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1731C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1733C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1735C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1737C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1739C/4, 0x1);
++      INSTANCE_WR(ctx, 0x173BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x173DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x173FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x174FC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x1751C/4, 0xF);
++      INSTANCE_WR(ctx, 0x1761C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x1767C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1769C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1771C/4, 0x4);
++      INSTANCE_WR(ctx, 0x177DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1787C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1797C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x179FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x17A1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17A9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17ADC/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x17B1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x17B5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x180BC/4, 0x8);
++      INSTANCE_WR(ctx, 0x180DC/4, 0x8);
++      INSTANCE_WR(ctx, 0x180FC/4, 0x8);
++      INSTANCE_WR(ctx, 0x1811C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1813C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1815C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1817C/4, 0x8);
++      INSTANCE_WR(ctx, 0x1819C/4, 0x8);
++      INSTANCE_WR(ctx, 0x181BC/4, 0x11);
++      INSTANCE_WR(ctx, 0x182BC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x182DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x182FC/4, 0x400);
++      INSTANCE_WR(ctx, 0x1831C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1833C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1835C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1837C/4, 0x400);
++      INSTANCE_WR(ctx, 0x1839C/4, 0x400);
++      INSTANCE_WR(ctx, 0x183BC/4, 0x400);
++      INSTANCE_WR(ctx, 0x183DC/4, 0x300);
++      INSTANCE_WR(ctx, 0x183FC/4, 0x300);
++      INSTANCE_WR(ctx, 0x1841C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1843C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1845C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1847C/4, 0x300);
++      INSTANCE_WR(ctx, 0x1849C/4, 0x300);
++      INSTANCE_WR(ctx, 0x184BC/4, 0x300);
++      INSTANCE_WR(ctx, 0x184DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x184FC/4, 0xF);
++      INSTANCE_WR(ctx, 0x185FC/4, 0x20);
++      INSTANCE_WR(ctx, 0x1861C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1863C/4, 0x100);
++      INSTANCE_WR(ctx, 0x1867C/4, 0x1);
++      INSTANCE_WR(ctx, 0x186DC/4, 0x40);
++      INSTANCE_WR(ctx, 0x186FC/4, 0x100);
++      INSTANCE_WR(ctx, 0x1873C/4, 0x3);
++      INSTANCE_WR(ctx, 0x187DC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x1885C/4, 0x2);
++      INSTANCE_WR(ctx, 0x1887C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x189BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x18A5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x18A9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x18ABC/4, 0x400);
++      INSTANCE_WR(ctx, 0x18ADC/4, 0x300);
++      INSTANCE_WR(ctx, 0x18AFC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x18B7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x18C7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x18C9C/4, 0xF);
++      INSTANCE_WR(ctx, 0x18F9C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x1901C/4, 0x11);
++      INSTANCE_WR(ctx, 0x1907C/4, 0x4);
++      INSTANCE_WR(ctx, 0x190BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x190DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1915C/4, 0x1);
++      INSTANCE_WR(ctx, 0x191FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1923C/4, 0x1);
++      INSTANCE_WR(ctx, 0x192BC/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x192FC/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x1931C/4, 0x40);
++      INSTANCE_WR(ctx, 0x1933C/4, 0x100);
++      INSTANCE_WR(ctx, 0x1935C/4, 0x10100);
++      INSTANCE_WR(ctx, 0x1937C/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x195DC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x195FC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x1961C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1965C/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x1967C/4, 0x1);
++      INSTANCE_WR(ctx, 0x196DC/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x197FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x1983C/4, 0x1);
++      INSTANCE_WR(ctx, 0x1985C/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x1987C/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x1989C/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x198BC/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x198FC/4, 0x1A);
++      INSTANCE_WR(ctx, 0x1993C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19BFC/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C1C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19C3C/4, 0x608080);
++      INSTANCE_WR(ctx, 0x19CDC/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D3C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x19D7C/4, 0x80);
++      INSTANCE_WR(ctx, 0x19D9C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A23C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A25C/4, 0x80);
++      INSTANCE_WR(ctx, 0x1A27C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A29C/4, 0x3020100);
++      INSTANCE_WR(ctx, 0x1A2BC/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A2DC/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A37C/4, 0x4);
++      INSTANCE_WR(ctx, 0x1A39C/4, 0x3);
++      INSTANCE_WR(ctx, 0x1A41C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8BC/4, 0x4);
++      INSTANCE_WR(ctx, 0x2E8DC/4, 0x3);
++      INSTANCE_WR(ctx, 0x2EB1C/4, 0xF);
++      INSTANCE_WR(ctx, 0x2EC9C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2ECBC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECDC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ECFC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2ED1C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x2EE3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EEBC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2EF7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F11C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F13C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F15C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F17C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F19C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F1BC/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F1DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F21C/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F31C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F37C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2F3FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F41C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F45C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F47C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F49C/4, 0xCF);
++      INSTANCE_WR(ctx, 0x2F5FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F61C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F63C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F65C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F67C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F69C/4, 0x2);
++      INSTANCE_WR(ctx, 0x2F6BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F6FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F71C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F73C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F75C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F77C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F79C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2F7FC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2F8FC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2F91C/4, 0xF);
++      INSTANCE_WR(ctx, 0x2FA1C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x2FA7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FA9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FB1C/4, 0x4);
++      INSTANCE_WR(ctx, 0x2FBDC/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FC7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FD7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x2FDFC/4, 0x11);
++      INSTANCE_WR(ctx, 0x2FE1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FE9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FEDC/4, 0x7FF);
++      INSTANCE_WR(ctx, 0x2FF1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x2FF5C/4, 0x1);
++      INSTANCE_WR(ctx, 0x304BC/4, 0x8);
++      INSTANCE_WR(ctx, 0x304DC/4, 0x8);
++      INSTANCE_WR(ctx, 0x304FC/4, 0x8);
++      INSTANCE_WR(ctx, 0x3051C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3053C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3055C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3057C/4, 0x8);
++      INSTANCE_WR(ctx, 0x3059C/4, 0x8);
++      INSTANCE_WR(ctx, 0x305BC/4, 0x11);
++      INSTANCE_WR(ctx, 0x306BC/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x306DC/4, 0x400);
++      INSTANCE_WR(ctx, 0x306FC/4, 0x400);
++      INSTANCE_WR(ctx, 0x3071C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3073C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3075C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3077C/4, 0x400);
++      INSTANCE_WR(ctx, 0x3079C/4, 0x400);
++      INSTANCE_WR(ctx, 0x307BC/4, 0x400);
++      INSTANCE_WR(ctx, 0x307DC/4, 0x300);
++      INSTANCE_WR(ctx, 0x307FC/4, 0x300);
++      INSTANCE_WR(ctx, 0x3081C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3083C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3085C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3087C/4, 0x300);
++      INSTANCE_WR(ctx, 0x3089C/4, 0x300);
++      INSTANCE_WR(ctx, 0x308BC/4, 0x300);
++      INSTANCE_WR(ctx, 0x308DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x308FC/4, 0xF);
++      INSTANCE_WR(ctx, 0x309FC/4, 0x20);
++      INSTANCE_WR(ctx, 0x30A1C/4, 0x11);
++      INSTANCE_WR(ctx, 0x30A3C/4, 0x100);
++      INSTANCE_WR(ctx, 0x30A7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x30ADC/4, 0x40);
++      INSTANCE_WR(ctx, 0x30AFC/4, 0x100);
++      INSTANCE_WR(ctx, 0x30B3C/4, 0x3);
++      INSTANCE_WR(ctx, 0x30BDC/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x30C5C/4, 0x2);
++      INSTANCE_WR(ctx, 0x30C7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x30DBC/4, 0x1);
++      INSTANCE_WR(ctx, 0x30E5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x30E9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x30EBC/4, 0x400);
++      INSTANCE_WR(ctx, 0x30EDC/4, 0x300);
++      INSTANCE_WR(ctx, 0x30EFC/4, 0x1001);
++      INSTANCE_WR(ctx, 0x30F7C/4, 0x11);
++      INSTANCE_WR(ctx, 0x3107C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x3109C/4, 0xF);
++      INSTANCE_WR(ctx, 0x3139C/4, 0x1FFE67);
++      INSTANCE_WR(ctx, 0x3141C/4, 0x11);
++      INSTANCE_WR(ctx, 0x3147C/4, 0x4);
++      INSTANCE_WR(ctx, 0x314BC/4, 0x1);
++      INSTANCE_WR(ctx, 0x314DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3155C/4, 0x1);
++      INSTANCE_WR(ctx, 0x315FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x3163C/4, 0x1);
++      INSTANCE_WR(ctx, 0x316BC/4, 0x2A712488);
++      INSTANCE_WR(ctx, 0x316FC/4, 0x4085C000);
++      INSTANCE_WR(ctx, 0x3171C/4, 0x40);
++      INSTANCE_WR(ctx, 0x3173C/4, 0x100);
++      INSTANCE_WR(ctx, 0x3175C/4, 0x10100);
++      INSTANCE_WR(ctx, 0x3177C/4, 0x2800000);
++      INSTANCE_WR(ctx, 0x319DC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x319FC/4, 0x4E3BFDF);
++      INSTANCE_WR(ctx, 0x31A1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x31A5C/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31A7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x31ADC/4, 0xFFFF00);
++      INSTANCE_WR(ctx, 0x31BFC/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C3C/4, 0x1);
++      INSTANCE_WR(ctx, 0x31C5C/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x31C7C/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x31C9C/4, 0xB8A89888);
++      INSTANCE_WR(ctx, 0x31CBC/4, 0xF8E8D8C8);
++      INSTANCE_WR(ctx, 0x31CFC/4, 0x1A);
++      INSTANCE_WR(ctx, 0x5D000/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D040/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D060/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D080/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D0A0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D100/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D160/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D1A0/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1C0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D340/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D360/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D380/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D3A0/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D400/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D460/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D4A0/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4C0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D620/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D700/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D720/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D740/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D760/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D780/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D7A0/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7C0/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7E0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D820/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8E0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D900/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D940/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D960/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA80/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB20/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC60/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC80/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCA0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCC0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCE0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD00/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD20/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD60/4, 0x4);
++      INSTANCE_WR(ctx, 0x651C0/4, 0x11);
++      INSTANCE_WR(ctx, 0x65200/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D024/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D044/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D064/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D084/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D144/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D184/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1A4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D324/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D344/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D364/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D384/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D444/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D484/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4A4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D604/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6E4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D704/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D724/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D744/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D764/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D784/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7A4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7C4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D804/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8C4/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8E4/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D924/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D944/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA64/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB04/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC44/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC64/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC84/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCA4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCC4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCE4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD04/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD44/4, 0x4);
++      INSTANCE_WR(ctx, 0x651A4/4, 0x11);
++      INSTANCE_WR(ctx, 0x651E4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D028/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D048/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D068/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D088/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D148/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D188/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1A8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D328/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D348/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D368/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D388/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D448/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D488/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4A8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D608/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6E8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D708/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D728/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D748/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D768/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D788/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7A8/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7C8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D808/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8C8/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8E8/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D928/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D948/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA68/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB08/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC48/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC68/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC88/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCA8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCC8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCE8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD08/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD48/4, 0x4);
++      INSTANCE_WR(ctx, 0x651A8/4, 0x11);
++      INSTANCE_WR(ctx, 0x651E8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D02C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D04C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D06C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D08C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D14C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D18C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1AC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D32C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D34C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D36C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D38C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D44C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D48C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4AC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D60C/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6EC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D70C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D72C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D74C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D76C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D78C/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7AC/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7CC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D80C/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8CC/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8EC/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D92C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D94C/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA6C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB0C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC4C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC6C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC8C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCAC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCCC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCEC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD0C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD4C/4, 0x4);
++      INSTANCE_WR(ctx, 0x651AC/4, 0x11);
++      INSTANCE_WR(ctx, 0x651EC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D030/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D050/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D070/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D090/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D150/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D190/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1B0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D330/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D350/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D370/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D390/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D450/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D490/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4B0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D610/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6F0/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D710/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D730/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D750/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D770/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D790/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7B0/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7D0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D810/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8D0/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8F0/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D930/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D950/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA70/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB10/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC50/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC70/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC90/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCB0/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCD0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCF0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD10/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD50/4, 0x4);
++      INSTANCE_WR(ctx, 0x651B0/4, 0x11);
++      INSTANCE_WR(ctx, 0x651F0/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D034/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D054/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D074/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D094/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D154/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D194/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1B4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D334/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D354/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D374/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D394/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D454/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D494/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4B4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D614/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6F4/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D714/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D734/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D754/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D774/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D794/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7B4/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7D4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D814/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8D4/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8F4/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D934/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D954/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA74/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB14/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC54/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC74/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC94/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCB4/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCD4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCF4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD14/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD54/4, 0x4);
++      INSTANCE_WR(ctx, 0x651B4/4, 0x11);
++      INSTANCE_WR(ctx, 0x651F4/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D038/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D058/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D078/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D098/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D158/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D198/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1B8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D338/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D358/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D378/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D398/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D458/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D498/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4B8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D618/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6F8/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D718/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D738/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D758/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D778/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D798/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7B8/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7D8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D818/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8D8/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8F8/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D938/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D958/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA78/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB18/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC58/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC78/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC98/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCB8/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCD8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCF8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD18/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD58/4, 0x4);
++      INSTANCE_WR(ctx, 0x651B8/4, 0x11);
++      INSTANCE_WR(ctx, 0x651F8/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D03C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D05C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D07C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D09C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D0FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D15C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D19C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D1BC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D33C/4, 0x80);
++      INSTANCE_WR(ctx, 0x5D35C/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x5D37C/4, 0x4000400);
++      INSTANCE_WR(ctx, 0x5D39C/4, 0x1000);
++      INSTANCE_WR(ctx, 0x5D3FC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D45C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D49C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D4BC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D61C/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D6FC/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D71C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D73C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D75C/4, 0xFFFF);
++      INSTANCE_WR(ctx, 0x5D77C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D79C/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7BC/4, 0x10001);
++      INSTANCE_WR(ctx, 0x5D7DC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5D81C/4, 0x1FE21);
++      INSTANCE_WR(ctx, 0x5D8DC/4, 0x8100C12);
++      INSTANCE_WR(ctx, 0x5D8FC/4, 0x4);
++      INSTANCE_WR(ctx, 0x5D93C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5D95C/4, 0x11);
++      INSTANCE_WR(ctx, 0x5DA7C/4, 0xFAC6881);
++      INSTANCE_WR(ctx, 0x5DB1C/4, 0x4);
++      INSTANCE_WR(ctx, 0x5DC5C/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DC7C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DC9C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCBC/4, 0x2);
++      INSTANCE_WR(ctx, 0x5DCDC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DCFC/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD1C/4, 0x1);
++      INSTANCE_WR(ctx, 0x5DD5C/4, 0x4);
++      INSTANCE_WR(ctx, 0x651BC/4, 0x11);
++      INSTANCE_WR(ctx, 0x651FC/4, 0x1);
++}
++
++static void
++nvaa_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ctx = ref->gpuobj;
++
++      INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
++      INSTANCE_WR(ctx, 0x001d0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x001d4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00220/4, 0x0000fe0c);
++      INSTANCE_WR(ctx, 0x00238/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x00264/4, 0x00000187);
++      INSTANCE_WR(ctx, 0x00278/4, 0x00001018);
++      INSTANCE_WR(ctx, 0x0027c/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x002cc/4, 0x042500df);
++      INSTANCE_WR(ctx, 0x002d4/4, 0x00000600);
++      INSTANCE_WR(ctx, 0x002ec/4, 0x01000000);
++      INSTANCE_WR(ctx, 0x002f0/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x002f8/4, 0x00000800);
++      INSTANCE_WR(ctx, 0x00310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00310/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00310/4, 0x000e0080);
++      INSTANCE_WR(ctx, 0x00310/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00338/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0033c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00350/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00368/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00384/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x00388/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00394/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003e4/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003ec/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x003f8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x003fc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00400/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00408/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00414/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x00428/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0042c/4, 0x00000070);
++      INSTANCE_WR(ctx, 0x00430/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00444/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x0044c/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00450/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000029);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00458/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000006);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000007);
++      INSTANCE_WR(ctx, 0x00478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x004d8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00508/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000012);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00524/4, 0x0000000c);
++      INSTANCE_WR(ctx, 0x00524/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00540/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00544/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00548/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00558/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x0055c/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x00584/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00588/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0058c/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00594/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00598/4, 0x00000014);
++      INSTANCE_WR(ctx, 0x0059c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005a8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005bc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00000e00);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x005c4/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005dc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x005fc/4, 0x00000200);
++      INSTANCE_WR(ctx, 0x00604/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00608/4, 0x000000f0);
++      INSTANCE_WR(ctx, 0x0060c/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x00618/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0061c/4, 0x000000f0);
++      INSTANCE_WR(ctx, 0x00620/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x00628/4, 0x00000009);
++      INSTANCE_WR(ctx, 0x00634/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00638/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00640/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00650/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00658/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00660/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00668/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00670/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00674/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x00678/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00680/4, 0x00001f80);
++      INSTANCE_WR(ctx, 0x00698/4, 0x3b74f821);
++      INSTANCE_WR(ctx, 0x0069c/4, 0x89058001);
++      INSTANCE_WR(ctx, 0x006a4/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x006a8/4, 0x000000ff);
++      INSTANCE_WR(ctx, 0x006b0/4, 0x027c10fa);
++      INSTANCE_WR(ctx, 0x006b4/4, 0x400000c0);
++      INSTANCE_WR(ctx, 0x006b8/4, 0xb7892080);
++      INSTANCE_WR(ctx, 0x006cc/4, 0x003d0040);
++      INSTANCE_WR(ctx, 0x006d4/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x006f4/4, 0x003d0040);
++      INSTANCE_WR(ctx, 0x006f8/4, 0x00000022);
++      INSTANCE_WR(ctx, 0x00740/4, 0x0000ff0a);
++      INSTANCE_WR(ctx, 0x00748/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0074c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00750/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00760/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00764/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00788/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00790/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00798/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x007a0/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x007a4/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x007b0/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x007c8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007cc/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x007d0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x007e0/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x007e4/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00808/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00810/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00818/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00820/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00824/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00830/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x00848/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x0084c/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x00850/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x00860/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x00864/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00888/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00890/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00898/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x008a0/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x008a4/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x008b0/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x008c8/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008cc/4, 0x00160000);
++      INSTANCE_WR(ctx, 0x008d0/4, 0x01800000);
++      INSTANCE_WR(ctx, 0x008e0/4, 0x0003ffff);
++      INSTANCE_WR(ctx, 0x008e4/4, 0x300c0000);
++      INSTANCE_WR(ctx, 0x00908/4, 0x00010401);
++      INSTANCE_WR(ctx, 0x00910/4, 0x00000078);
++      INSTANCE_WR(ctx, 0x00918/4, 0x000000bf);
++      INSTANCE_WR(ctx, 0x00920/4, 0x00001210);
++      INSTANCE_WR(ctx, 0x00924/4, 0x08000080);
++      INSTANCE_WR(ctx, 0x00930/4, 0x0000003e);
++      INSTANCE_WR(ctx, 0x0094c/4, 0x01127070);
++      INSTANCE_WR(ctx, 0x0095c/4, 0x07ffffff);
++      INSTANCE_WR(ctx, 0x00978/4, 0x00120407);
++      INSTANCE_WR(ctx, 0x00978/4, 0x05091507);
++      INSTANCE_WR(ctx, 0x00978/4, 0x05010202);
++      INSTANCE_WR(ctx, 0x00978/4, 0x00030201);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x0d0c0b0a);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00141210);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x000001f0);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x009a0/4, 0x00008000);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00039e00);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00003800);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x003fe006);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x003fe000);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x00404040);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x0cf7f007);
++      INSTANCE_WR(ctx, 0x009c0/4, 0x02bf7fff);
++      INSTANCE_WR(ctx, 0x07ba0/4, 0x00000021);
++      INSTANCE_WR(ctx, 0x07bc0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07be0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07c00/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07c20/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07c40/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07ca0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x07cc0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x07ce0/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07d00/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x07d20/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1a7c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a7e0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a800/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a820/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a840/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a860/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a880/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a8a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a8c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a8e0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a900/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a920/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a940/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a960/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a980/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1a9a0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1ae40/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1ae60/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x1aec0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x1aee0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x1af80/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x1b020/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x1b080/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x1b0c0/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x1b0e0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1b100/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x1b120/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1b140/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1b160/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1be20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1bf00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1bf20/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1bf80/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1c1e0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1c2c0/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1c3c0/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x1c3e0/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x1c5e0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1c640/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1c6a0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x1c6c0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1c6e0/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x1c760/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x1c780/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x1c820/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1ca40/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1ca60/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1ca80/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1caa0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cac0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cae0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb40/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb60/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cb80/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cba0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cbc0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cbe0/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cc00/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cc20/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x1cc40/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x1d120/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x1d140/4, 0x00000005);
++      INSTANCE_WR(ctx, 0x1d1a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x1d1e0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d200/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d220/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d240/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x1d260/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1d2e0/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x1d300/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x1d340/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x1dae0/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x1db20/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1db40/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1db60/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1db80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dca0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dcc0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dd00/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x1dd40/4, 0x00000102);
++      INSTANCE_WR(ctx, 0x1de80/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dea0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dec0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x1dee0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00a04/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00a24/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00a64/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x00a84/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00aa4/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x00ae4/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x0b344/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0b364/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0b3a4/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x0b3c4/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0b3e4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0b424/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x0b464/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x010c8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x010e8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39a68/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39a88/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39aa8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x39ac8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x39b08/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x39b48/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x39b68/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x39b88/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39ba8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x39bc8/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x39c28/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x39c48/4, 0x00000027);
++      INSTANCE_WR(ctx, 0x39ca8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x414e8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x417c8/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00a2c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00acc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x00b6c/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x00d6c/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x00f2c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00f4c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00f8c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00fac/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x00fec/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x0118c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0362c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x0366c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x041cc/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x1484c/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x15950/4, 0x003fffff);
++      INSTANCE_WR(ctx, 0x159b0/4, 0x00001fff);
++      INSTANCE_WR(ctx, 0x00a34/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x00bb4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00bd4/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00c74/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00c94/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x00e14/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00e54/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x00ff4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01014/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01074/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01114/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01134/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x01154/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x01174/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x01194/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x01254/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01374/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01394/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x013d4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01654/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01874/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01894/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x018b4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x018d4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x018f4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01914/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01934/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01954/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01974/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01994/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x019b4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x019d4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x019f4/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01a14/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01a34/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01a54/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01d94/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01dd4/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x01eb4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01ef4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01f34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01f94/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x02114/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02214/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02314/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x023f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02414/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02434/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02454/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02474/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02494/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x024b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x024f4/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x02534/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x028b4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x028d4/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x028f4/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02914/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02934/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x02954/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02974/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02a14/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x02a34/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00a18/4, 0x0000003f);
++      INSTANCE_WR(ctx, 0x00b78/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x00b98/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x00bb8/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x00cd8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00d58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00f98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00fb8/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x00fd8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x00ff8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x01018/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x01038/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x01458/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01478/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01498/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x014b8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x014d8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x014f8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01518/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01538/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01558/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01578/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01598/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x015b8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x015d8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x015f8/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01618/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01638/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x01658/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x016b8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x01878/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x01898/4, 0x04000000);
++      INSTANCE_WR(ctx, 0x018d8/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01958/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x01a38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01a58/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x01a78/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x01a98/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x01ad8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x01b98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01bd8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01bf8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01c18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01c38/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x01c58/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x01d38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01d78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01d98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01db8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01e58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x01e98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x01eb8/4, 0x00000015);
++      INSTANCE_WR(ctx, 0x01f38/4, 0x04444480);
++      INSTANCE_WR(ctx, 0x02698/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x026d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02758/4, 0x2a712488);
++      INSTANCE_WR(ctx, 0x02798/4, 0x4085c000);
++      INSTANCE_WR(ctx, 0x027b8/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x027d8/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x027f8/4, 0x00010100);
++      INSTANCE_WR(ctx, 0x02818/4, 0x02800000);
++      INSTANCE_WR(ctx, 0x02b58/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x02cd8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x02cf8/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02d18/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02d38/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02d58/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x02e78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02ef8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x02fb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03018/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03178/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03198/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x031b8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x031d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x031f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03218/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03238/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03278/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03378/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x033d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x03458/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03478/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x034b8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x034d8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x034f8/4, 0x000000cf);
++      INSTANCE_WR(ctx, 0x03658/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03678/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03698/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x036b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x036d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x036f8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x03718/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03758/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03798/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x037b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x037d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x037f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03818/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03838/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03858/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03958/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x03978/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x03a78/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x03ad8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03af8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03b78/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x03c38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03cd8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03dd8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x03e58/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x03e78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03eb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03ef8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03f38/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x03f78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x03fb8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04518/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04538/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04558/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04578/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04598/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x045b8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x045d8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x045f8/4, 0x00000008);
++      INSTANCE_WR(ctx, 0x04618/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04718/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x04738/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04758/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04778/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04798/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x047b8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x047d8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x047f8/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04818/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04838/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04858/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04878/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04898/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x048b8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x048d8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x048f8/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04918/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04938/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04958/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x04a58/4, 0x00000020);
++      INSTANCE_WR(ctx, 0x04a78/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x04a98/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x04ad8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04b38/4, 0x00000040);
++      INSTANCE_WR(ctx, 0x04b58/4, 0x00000100);
++      INSTANCE_WR(ctx, 0x04b98/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x04c38/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x04cb8/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x04cd8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x04e18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04eb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x04ef8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x04f18/4, 0x00000400);
++      INSTANCE_WR(ctx, 0x04f38/4, 0x00000300);
++      INSTANCE_WR(ctx, 0x04f58/4, 0x00001001);
++      INSTANCE_WR(ctx, 0x04fd8/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x050d8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x050f8/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x053f8/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x05418/4, 0x001ffe67);
++      INSTANCE_WR(ctx, 0x05498/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x054f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x05538/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05558/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x055d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05678/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x05718/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x05758/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05778/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x057d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05938/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05958/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05978/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05998/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x059b8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x059d8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x059f8/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05a18/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05a38/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x05b38/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x05b58/4, 0x0000000f);
++      INSTANCE_WR(ctx, 0x05c58/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x05c78/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05df8/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x05e18/4, 0x04e3bfdf);
++      INSTANCE_WR(ctx, 0x05e38/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05e78/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x05e98/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x05ef8/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x06018/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x06058/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x06078/4, 0x30201000);
++      INSTANCE_WR(ctx, 0x06098/4, 0x70605040);
++      INSTANCE_WR(ctx, 0x060b8/4, 0xb8a89888);
++      INSTANCE_WR(ctx, 0x060d8/4, 0xf8e8d8c8);
++      INSTANCE_WR(ctx, 0x06118/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x06158/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x063f8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06418/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06438/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x064d8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06538/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06558/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06578/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x06598/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x065b8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06a58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06a78/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x06a98/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06ab8/4, 0x03020100);
++      INSTANCE_WR(ctx, 0x06ad8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x06af8/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x06b18/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06bb8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x06bd8/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x06c58/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0aef8/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x0af18/4, 0x00000003);
++      INSTANCE_WR(ctx, 0x00abc/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x00b1c/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x00b5c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00b7c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00b9c/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00bdc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00bfc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00c3c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00cdc/4, 0x00000804);
++      INSTANCE_WR(ctx, 0x00cfc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00d1c/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x00d3c/4, 0x0000007f);
++      INSTANCE_WR(ctx, 0x00d7c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00d9c/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x00ddc/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00dfc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00e1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x00e5c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x00edc/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x00efc/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x00fdc/4, 0x000007ff);
++      INSTANCE_WR(ctx, 0x00ffc/4, 0x00080c14);
++      INSTANCE_WR(ctx, 0x0171c/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0177c/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x01e9c/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x01ebc/4, 0x00000088);
++      INSTANCE_WR(ctx, 0x01f1c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x021fc/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x0225c/4, 0x3f800000);
++      INSTANCE_WR(ctx, 0x022dc/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x022fc/4, 0x00000010);
++      INSTANCE_WR(ctx, 0x0281c/4, 0x00000052);
++      INSTANCE_WR(ctx, 0x0285c/4, 0x00000026);
++      INSTANCE_WR(ctx, 0x0289c/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x028bc/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x028fc/4, 0x0000001a);
++      INSTANCE_WR(ctx, 0x0295c/4, 0x00ffff00);
++      INSTANCE_WR(ctx, 0x41800/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x41840/4, 0x00000080);
++      INSTANCE_WR(ctx, 0x41860/4, 0x80007004);
++      INSTANCE_WR(ctx, 0x41880/4, 0x04000400);
++      INSTANCE_WR(ctx, 0x418a0/4, 0x000000c0);
++      INSTANCE_WR(ctx, 0x418c0/4, 0x00001000);
++      INSTANCE_WR(ctx, 0x41920/4, 0x00000e00);
++      INSTANCE_WR(ctx, 0x41940/4, 0x00001e00);
++      INSTANCE_WR(ctx, 0x41960/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x419c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x41a00/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x41a20/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x41ba0/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x41be0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x41ca0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41cc0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41ce0/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41d00/4, 0x0000ffff);
++      INSTANCE_WR(ctx, 0x41d20/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x41d40/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x41d60/4, 0x00010001);
++      INSTANCE_WR(ctx, 0x41d80/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x41dc0/4, 0x0001fe21);
++      INSTANCE_WR(ctx, 0x41e80/4, 0x08100c12);
++      INSTANCE_WR(ctx, 0x41ea0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x41ee0/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x41f00/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x42020/4, 0x0fac6881);
++      INSTANCE_WR(ctx, 0x420c0/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x42200/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x42220/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x42240/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x42260/4, 0x00000002);
++      INSTANCE_WR(ctx, 0x42280/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x422a0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x422c0/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x42300/4, 0x00000004);
++      INSTANCE_WR(ctx, 0x49700/4, 0x00000011);
++      INSTANCE_WR(ctx, 0x49740/4, 0x00000001);
++      INSTANCE_WR(ctx, 0x0012c/4, 0x00000002);
++}
++
++int
++nv50_graph_create_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
++      struct nouveau_engine *engine = &dev_priv->Engine;
++      int grctx_size = 0x70000, hdr;
++      int ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
++                                   NVOBJ_FLAG_ZERO_ALLOC |
++                                   NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
++      if (ret)
++              return ret;
++
++      hdr = IS_G80 ? 0x200 : 0x20;
++      INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002);
++      INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
++                                         grctx_size - 1);
++      INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
++      INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0);
++      INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
++      INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
++
++      INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4,
++                  chan->ramin->instance >> 12);
++      if (dev_priv->chipset == 0xaa)
++              INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00004/4, 0x00000002);
++      else
++              INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002);
++
++      switch (dev_priv->chipset) {
++      case 0x50:
++              nv50_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0x84:
++              nv84_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0x86:
++              nv86_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0x92:
++              nv92_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      case 0xaa:
++              nvaa_graph_init_ctxvals(dev, chan->ramin_grctx);
++              break;
++      default:
++              /* This is complete crack, it accidently used to make at
++               * least some G8x cards work partially somehow, though there's
++               * no good reason why - and it stopped working as the rest
++               * of the code got off the drugs..
++               */
++              ret = engine->graph.load_context(chan);
++              if (ret) {
++                      DRM_ERROR("Error hacking up context: %d\n", ret);
++                      return ret;
++              }
++              break;
++      }
++
++      return 0;
++}
++
++void
++nv50_graph_destroy_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      int i, hdr;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      hdr = IS_G80 ? 0x200 : 0x20;
++      for (i=hdr; i<hdr+24; i+=4)
++              INSTANCE_WR(chan->ramin->gpuobj, i/4, 0);
++
++      nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
++}
++
++static int
++nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t old_cp, tv = 20000;
++      int i;
++
++      DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save);
++
++      old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(0x400824, NV_READ(0x400824) |
++               (save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
++                       NV40_PGRAPH_CTXCTL_0310_XFER_LOAD));
++      NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
++
++      for (i = 0; i < tv; i++) {
++              if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
++                      break;
++      }
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
++
++      if (i == tv) {
++              DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
++              DRM_ERROR("0x40030C = 0x%08x\n",
++                        NV_READ(NV40_PGRAPH_CTXCTL_030C));
++              return -EBUSY;
++      }
++
++      return 0;
++}
++
++int
++nv50_graph_load_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      uint32_t inst = chan->ramin->instance >> 12;
++      int ret; (void)ret;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++#if 0
++      if ((ret = nv50_graph_transfer_context(dev, inst, 0)))
++              return ret;
++#endif
++
++      NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++      NV_WRITE(0x400320, 4);
++      NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31));
++
++      return 0;
++}
++
++int
++nv50_graph_save_context(struct nouveau_channel *chan)
++{
++      struct drm_device *dev = chan->dev;
++      uint32_t inst = chan->ramin->instance >> 12;
++
++      DRM_DEBUG("ch%d\n", chan->id);
++
++      return nv50_graph_transfer_context(dev, inst, 1);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_instmem.c git-nokia/drivers/gpu/drm-tungsten/nv50_instmem.c
+--- git/drivers/gpu/drm-tungsten/nv50_instmem.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_instmem.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,324 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++typedef struct {
++      uint32_t save1700[5]; /* 0x1700->0x1710 */
++
++      struct nouveau_gpuobj_ref *pramin_pt;
++      struct nouveau_gpuobj_ref *pramin_bar;
++} nv50_instmem_priv;
++
++#define NV50_INSTMEM_PAGE_SHIFT 12
++#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
++#define NV50_INSTMEM_PT_SIZE(a)       (((a) >> 12) << 3)
++
++/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
++ */
++#define BAR0_WI32(g,o,v) do {                                     \
++      uint32_t offset;                                          \
++      if ((g)->im_backing) {                                    \
++              offset = (g)->im_backing->start;                  \
++      } else {                                                  \
++              offset  = chan->ramin->gpuobj->im_backing->start; \
++              offset += (g)->im_pramin->start;                  \
++      }                                                         \
++      offset += (o);                                            \
++      NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v));             \
++} while(0)
++
++int
++nv50_instmem_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      struct nouveau_channel *chan;
++      uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
++      nv50_instmem_priv *priv;
++      int ret, i;
++      uint32_t v;
++
++      priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
++      if (!priv)
++              return -ENOMEM;
++      dev_priv->Engine.instmem.priv = priv;
++
++      /* Save state, will restore at takedown. */
++      for (i = 0x1700; i <= 0x1710; i+=4)
++              priv->save1700[(i-0x1700)/4] = NV_READ(i);
++
++      /* Reserve the last MiB of VRAM, we should probably try to avoid
++       * setting up the below tables over the top of the VBIOS image at
++       * some point.
++       */
++      dev_priv->ramin_rsvd_vram = 1 << 20;
++      c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
++      c_size   = 128 << 10;
++      c_vmpd   = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
++      c_ramfc  = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
++      c_base   = c_vmpd + 0x4000;
++      pt_size  = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size);
++
++      DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset);
++      DRM_DEBUG("    VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
++      DRM_DEBUG("  Aperture size: %d MiB\n",
++                (uint32_t)dev_priv->ramin->size >> 20);
++      DRM_DEBUG("        PT size: %d KiB\n", pt_size >> 10);
++
++      NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
++
++      /* Create a fake channel, and use it as our "dummy" channels 0/127.
++       * The main reason for creating a channel is so we can use the gpuobj
++       * code.  However, it's probably worth noting that NVIDIA also setup
++       * their channels 0/127 with the same values they configure here.
++       * So, there may be some other reason for doing this.
++       *
++       * Have to create the entire channel manually, as the real channel
++       * creation code assumes we have PRAMIN access, and we don't until
++       * we're done here.
++       */
++      chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER);
++      if (!chan)
++              return -ENOMEM;
++      chan->id = 0;
++      chan->dev = dev;
++      chan->file_priv = (struct drm_file *)-2;
++      dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
++
++      /* Channel's PRAMIN object + heap */
++      if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0,
++                                         NULL, &chan->ramin)))
++              return ret;
++
++      if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
++              return -ENOMEM;
++
++      /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
++      if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
++                                         0x4000, 0, NULL, &chan->ramfc)))
++              return ret;
++
++      for (i = 0; i < c_vmpd; i += 4)
++              BAR0_WI32(chan->ramin->gpuobj, i, 0);
++
++      /* VM page directory */
++      if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
++                                         0x4000, 0, &chan->vm_pd, NULL)))
++              return ret;
++      for (i = 0; i < 0x4000; i += 8) {
++              BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
++              BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
++      }
++
++      /* PRAMIN page table, cheat and map into VM at 0x0000000000.
++       * We map the entire fake channel into the start of the PRAMIN BAR
++       */
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
++                                        0, &priv->pramin_pt)))
++              return ret;
++
++      for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) {
++              if (v < (c_offset + c_size))
++                      BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
++              else
++                      BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
++              BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
++      }
++
++      BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
++      BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
++
++      /* DMA object for PRAMIN BAR */
++      if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
++                                        &priv->pramin_bar)))
++              return ret;
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
++      BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
++
++      /* Poke the relevant regs, and pray it works :) */
++      NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
++      NV_WRITE(NV50_PUNK_UNK1710, 0);
++      NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
++                                       NV50_PUNK_BAR_CFG_BASE_VALID);
++      NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0);
++      NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
++                                      NV50_PUNK_BAR3_CTXDMA_VALID);
++
++      /* Assume that praying isn't enough, check that we can re-read the
++       * entire fake channel back from the PRAMIN BAR */
++      for (i = 0; i < c_size; i+=4) {
++              if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) {
++                      DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i);
++                      return -EINVAL;
++              }
++      }
++
++      /* Global PRAMIN heap */
++      if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
++                                c_size, dev_priv->ramin->size - c_size)) {
++              dev_priv->ramin_heap = NULL;
++              DRM_ERROR("Failed to init RAMIN heap\n");
++      }
++
++      /*XXX: incorrect, but needed to make hash func "work" */
++      dev_priv->ramht_offset = 0x10000;
++      dev_priv->ramht_bits   = 9;
++      dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
++      return 0;
++}
++
++void
++nv50_instmem_takedown(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
++      struct nouveau_channel *chan = dev_priv->fifos[0];
++      int i;
++
++      DRM_DEBUG("\n");
++
++      if (!priv)
++              return;
++
++      /* Restore state from before init */
++      for (i = 0x1700; i <= 0x1710; i+=4)
++              NV_WRITE(i, priv->save1700[(i-0x1700)/4]);
++
++      nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
++      nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
++
++      /* Destroy dummy channel */
++      if (chan) {
++              nouveau_gpuobj_del(dev, &chan->vm_pd);
++              nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++              nouveau_gpuobj_ref_del(dev, &chan->ramin);
++              nouveau_mem_takedown(&chan->ramin_heap);
++
++              dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
++              drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
++      }
++
++      dev_priv->Engine.instmem.priv = NULL;
++      drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
++}
++
++int
++nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
++{
++      if (gpuobj->im_backing)
++              return -EINVAL;
++
++      *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
++      if (*sz == 0)
++              return -EINVAL;
++
++      gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE,
++                                             *sz, NOUVEAU_MEM_FB |
++                                             NOUVEAU_MEM_NOVM,
++                                             (struct drm_file *)-2);
++      if (!gpuobj->im_backing) {
++              DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n");
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++void
++nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      if (gpuobj && gpuobj->im_backing) {
++              if (gpuobj->im_bound)
++                      dev_priv->Engine.instmem.unbind(dev, gpuobj);
++              nouveau_mem_free(dev, gpuobj->im_backing);
++              gpuobj->im_backing = NULL;
++      }
++}
++
++int
++nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
++      uint32_t pte, pte_end, vram;
++
++      if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
++              return -EINVAL;
++
++      DRM_DEBUG("st=0x%0llx sz=0x%0llx\n",
++                gpuobj->im_pramin->start, gpuobj->im_pramin->size);
++
++      pte     = (gpuobj->im_pramin->start >> 12) << 3;
++      pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
++      vram    = gpuobj->im_backing->start;
++
++      DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n",
++                gpuobj->im_pramin->start, pte, pte_end);
++      DRM_DEBUG("first vram page: 0x%llx\n",
++                gpuobj->im_backing->start);
++
++      while (pte < pte_end) {
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
++
++              pte += 8;
++              vram += NV50_INSTMEM_PAGE_SIZE;
++      }
++
++      gpuobj->im_bound = 1;
++      return 0;
++}
++
++int
++nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++      nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
++      uint32_t pte, pte_end;
++
++      if (gpuobj->im_bound == 0)
++              return -EINVAL;
++
++      pte     = (gpuobj->im_pramin->start >> 12) << 3;
++      pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
++      while (pte < pte_end) {
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
++              INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
++              pte += 8;
++      }
++
++      gpuobj->im_bound = 0;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv50_mc.c git-nokia/drivers/gpu/drm-tungsten/nv50_mc.c
+--- git/drivers/gpu/drm-tungsten/nv50_mc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv50_mc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,43 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++int
++nv50_mc_init(struct drm_device *dev)
++{
++      struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++      NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++      return 0;
++}
++
++void nv50_mc_takedown(struct drm_device *dev)
++{
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/nv_drv.c git-nokia/drivers/gpu/drm-tungsten/nv_drv.c
+--- git/drivers/gpu/drm-tungsten/nv_drv.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv_drv.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,94 @@
++/* nv_drv.c -- nv driver -*- linux-c -*-
++ * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * Copyright 2005 Lars Knoll <lars@trolltech.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Daryll Strauss <daryll@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Lars Knoll <lars@trolltech.com>
++ */
++
++#include "drmP.h"
++#include "nv_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      nv_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_MTRR | DRIVER_USE_AGP,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init nv_init(void)
++{
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit nv_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(nv_init);
++module_exit(nv_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/nv_drv.h git-nokia/drivers/gpu/drm-tungsten/nv_drv.h
+--- git/drivers/gpu/drm-tungsten/nv_drv.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/nv_drv.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,52 @@
++/* nv_drv.h -- NV DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
++ *
++ * Copyright 2005 Lars Knoll <lars@trolltech.com>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Lars Knoll <lars@trolltech.com>
++ */
++
++#ifndef __NV_H__
++#define __NV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Lars Knoll"
++
++#define DRIVER_NAME           "nv"
++#define DRIVER_DESC           "NV"
++#define DRIVER_DATE           "20051006"
++
++#define DRIVER_MAJOR          0
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     1
++
++#define NV04 04
++#define NV10 10
++#define NV20 20
++#define NV30 30
++#define NV40 40
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drm.h git-nokia/drivers/gpu/drm-tungsten/pvr2d_drm.h
+--- git/drivers/gpu/drm-tungsten/pvr2d_drm.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drm.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,42 @@
++/* pvr2d_drm.h -- Public header for the PVR2D helper module -*- linux-c -*- */
++
++#ifndef __PVR2D_DRM_H__
++#define __PVR2D_DRM_H__
++
++
++/* This wouldn't work with 64 bit userland */
++struct drm_pvr2d_virt2phys {
++      uint32_t virt;
++      uint32_t length;
++      uint32_t phys_array;
++      uint32_t handle;
++};
++
++struct drm_pvr2d_buf_release {
++      uint32_t handle;
++};
++
++enum drm_pvr2d_cflush_type {
++      DRM_PVR2D_CFLUSH_FROM_GPU = 1,
++      DRM_PVR2D_CFLUSH_TO_GPU = 2
++};
++
++struct drm_pvr2d_cflush {
++      enum drm_pvr2d_cflush_type type;
++      uint32_t virt;
++      uint32_t length;
++};
++
++#define DRM_PVR2D_VIRT2PHYS   0x0
++#define DRM_PVR2D_BUF_RELEASE 0x1
++#define DRM_PVR2D_CFLUSH      0x2
++
++#define DRM_IOCTL_PVR2D_VIRT2PHYS DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR2D_VIRT2PHYS, \
++                                        struct drm_pvr2d_virt2phys)
++#define DRM_IOCTL_PVR2D_BUF_RELEASE DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_BUF_RELEASE, \
++                                        struct drm_pvr2d_buf_release)
++#define DRM_IOCTL_PVR2D_CFLUSH DRM_IOW(DRM_COMMAND_BASE + DRM_PVR2D_CFLUSH, \
++                                     struct drm_pvr2d_cflush)
++
++
++#endif /* __PVR2D_DRM_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drv.c git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.c
+--- git/drivers/gpu/drm-tungsten/pvr2d_drv.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,537 @@
++
++#include "drmP.h"
++#include "drm_pciids.h"
++
++#include "pvr2d_drm.h"
++#include "pvr2d_drv.h"
++
++#define PVR2D_SHMEM_HASH_ORDER 12
++
++struct pvr2d_dev {
++      rwlock_t hash_lock;
++      struct drm_open_hash shmem_hash;
++};
++
++struct pvr2d_buf {
++      struct pvr2d_dev *dev_priv;
++      struct drm_hash_item hash;
++      struct page **pages;
++      struct kref kref;
++      uint32_t num_pages;
++};
++
++/*
++ * This pvr2d_ref object is needed strictly because
++ * idr_for_each doesn't exist in 2.6.22. With kernels
++ * supporting this function, we can use it to traverse
++ * the file list of buffers at file release.
++ */
++
++struct pvr2d_ref{
++      struct list_head head;
++      struct pvr2d_buf *buf;
++};
++
++struct pvr2d_file {
++      spinlock_t lock;
++      struct list_head ref_list;
++      struct idr buf_idr;
++};
++
++static inline struct pvr2d_dev *pvr2d_dp(struct drm_device *dev)
++{
++      return (struct pvr2d_dev *) dev->dev_private;
++}
++
++static inline struct pvr2d_file *pvr2d_fp(struct drm_file *file_priv)
++{
++      return (struct pvr2d_file *) file_priv->driver_priv;
++}
++
++
++static void
++pvr2d_free_buf(struct pvr2d_buf *buf)
++{
++      uint32_t i;
++
++      for (i=0; i<buf->num_pages; ++i) {
++              struct page *page = buf->pages[i];
++
++              if (!PageReserved(page))
++                      set_page_dirty_lock(page);
++
++              put_page(page);
++      }
++
++      kfree(buf->pages);
++      kfree(buf);
++}
++
++static void
++pvr2d_release_buf(struct kref *kref)
++{
++      struct pvr2d_buf *buf =
++              container_of(kref, struct pvr2d_buf, kref);
++
++      struct pvr2d_dev *dev_priv = buf->dev_priv;
++
++      drm_ht_remove_item(&dev_priv->shmem_hash, &buf->hash);
++      write_unlock(&dev_priv->hash_lock);
++      pvr2d_free_buf(buf);
++      write_lock(&dev_priv->hash_lock);
++}
++
++static struct pvr2d_buf *
++pvr2d_alloc_buf(struct pvr2d_dev *dev_priv, uint32_t num_pages)
++{
++      struct pvr2d_buf *buf = kmalloc(sizeof(*buf), GFP_KERNEL);
++
++      if (unlikely(!buf))
++              return NULL;
++
++      buf->pages = kmalloc(num_pages * sizeof(*buf->pages), GFP_KERNEL);
++      if (unlikely(!buf->pages))
++              goto out_err0;
++
++      buf->dev_priv = dev_priv;
++      buf->num_pages = num_pages;
++
++
++      DRM_DEBUG("pvr2d_alloc_buf successfully completed.\n");
++      return buf;
++
++out_err0:
++      kfree(buf);
++
++      return NULL;
++}
++
++
++static struct pvr2d_buf*
++pvr2d_lookup_buf(struct pvr2d_dev *dev_priv, struct page *first_phys)
++{
++      struct drm_hash_item *hash;
++      struct pvr2d_buf *buf = NULL;
++      int ret;
++
++      read_lock(&dev_priv->hash_lock);
++      ret = drm_ht_find_item(&dev_priv->shmem_hash,
++                             (unsigned long)first_phys,
++                             &hash);
++
++      if (likely(ret == 0)) {
++              buf = drm_hash_entry(hash, struct pvr2d_buf, hash);
++              kref_get(&buf->kref);
++      }
++      read_unlock(&dev_priv->hash_lock);
++
++      if (buf != NULL) {
++              DRM_INFO("pvr2d_lookup_buf found already used buffer.\n");
++      }
++
++      return buf;
++}
++
++
++static int
++pvr2d_virt2phys(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_pvr2d_virt2phys *v2p = data;
++      uint32_t i;
++      unsigned nr_pages = ((v2p->virt & ~PAGE_MASK) + v2p->length + PAGE_SIZE -
++                           1) / PAGE_SIZE;
++      struct page *first_page;
++      struct pvr2d_buf *buf = NULL;
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++      struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv);
++      struct pvr2d_ref *ref;
++      int ret;
++
++
++      /*
++       * Obtain a global hash key for the pvr2d buffer structure.
++       * We use the address of the struct page of the first
++       * page.
++       */
++
++      down_read(&current->mm->mmap_sem);
++        ret = get_user_pages(current, current->mm, v2p->virt & PAGE_MASK,
++                             1, WRITE, 0, &first_page, NULL);
++        up_read(&current->mm->mmap_sem);
++
++      if (unlikely(ret < 1)) {
++              DRM_ERROR("Failed getting first page: %d\n", ret);
++              return -ENOMEM;
++      }
++
++      /*
++       * Look up buffer already in the hash table, or create
++       * and insert a new one.
++       */
++
++      while(buf == NULL) {
++              buf = pvr2d_lookup_buf(dev_priv, first_page);
++
++              if (likely(buf != NULL))
++                      break;
++
++              buf = pvr2d_alloc_buf(dev_priv, nr_pages);
++              if (unlikely(buf == NULL)) {
++                      DRM_ERROR("Failed allocating pvr2d buffer.\n");
++                      ret = -ENOMEM;
++                      goto out_put;
++              }
++
++              down_read(&current->mm->mmap_sem);
++              ret = get_user_pages(current, current->mm, v2p->virt & PAGE_MASK,
++                                   nr_pages, WRITE, 0, buf->pages, NULL);
++              up_read(&current->mm->mmap_sem);
++
++              if (unlikely(ret < nr_pages)) {
++                      DRM_ERROR("Failed getting user pages.\n");
++                      buf->num_pages = ret;
++                      ret = -ENOMEM;
++                      pvr2d_free_buf(buf);
++                      goto out_put;
++              }
++
++              kref_init(&buf->kref);
++              buf->hash.key = (unsigned long) first_page;
++
++              write_lock(&dev_priv->hash_lock);
++              ret = drm_ht_insert_item(&dev_priv->shmem_hash, &buf->hash);
++              write_unlock(&dev_priv->hash_lock);
++
++              if (unlikely(ret == -EINVAL)) {
++
++                      /*
++                       * Somebody raced us and already
++                       * inserted this buffer.
++                       * Very unlikely, but retry anyway.
++                       */
++
++                      pvr2d_free_buf(buf);
++                      buf = NULL;
++              }
++      }
++
++      /*
++       * Create a reference object that is used for unreferencing
++       * either by user action or when the drm file is closed.
++       */
++
++      ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++      if (unlikely(ref == NULL))
++              goto out_err0;
++
++      ref->buf = buf;
++      do {
++              if (idr_pre_get(&pvr2d_fpriv->buf_idr, GFP_KERNEL) == 0) {
++                      ret = -ENOMEM;
++                      DRM_ERROR("Failed idr_pre_get\n");
++                      goto out_err1;
++              }
++
++              spin_lock( &pvr2d_fpriv->lock );
++              ret = idr_get_new( &pvr2d_fpriv->buf_idr, ref, &v2p->handle);
++
++              if (likely(ret == 0))
++                      list_add_tail(&ref->head, &pvr2d_fpriv->ref_list);
++
++              spin_unlock( &pvr2d_fpriv->lock );
++
++      } while (unlikely(ret == -EAGAIN));
++
++      if (unlikely(ret != 0))
++              goto out_err1;
++
++
++      /*
++       * Copy info to user-space.
++       */
++
++      DRM_DEBUG("Converting range of %u bytes at virtual 0x%08x, physical array at 0x%08x\n",
++               v2p->length, v2p->virt, v2p->phys_array);
++
++      for (i = 0; i < nr_pages; i++) {
++              uint32_t physical = (uint32_t)page_to_pfn(buf->pages[i]) << PAGE_SHIFT;
++              DRM_DEBUG("Virtual 0x%08lx => Physical 0x%08x\n",
++                       v2p->virt + i * PAGE_SIZE, physical);
++
++              if (DRM_COPY_TO_USER((void*)(v2p->phys_array +
++                                           i * sizeof(uint32_t)),
++                                   &physical, sizeof(uint32_t))) {
++                      ret = -EFAULT;
++                      goto out_err2;
++              }
++
++      }
++
++#ifdef CONFIG_X86
++      /* XXX: Quick'n'dirty hack to avoid corruption on Poulsbo, remove when
++       * there's a better solution
++       */
++      wbinvd();
++#endif
++
++      DRM_DEBUG("pvr2d_virt2phys returning handle 0x%08x\n",
++               v2p->handle);
++
++out_put:
++      put_page(first_page);
++      return ret;
++
++out_err2:
++      spin_lock( &pvr2d_fpriv->lock );
++      list_del(&ref->head);
++      idr_remove( &pvr2d_fpriv->buf_idr, v2p->handle);
++      spin_unlock( &pvr2d_fpriv->lock );
++out_err1:
++      kfree(ref);
++out_err0:
++      write_lock(&dev_priv->hash_lock);
++      kref_put(&buf->kref, &pvr2d_release_buf);
++      write_unlock(&dev_priv->hash_lock);
++      put_page(first_page);
++      return ret;
++}
++
++
++static int
++pvr2d_buf_release(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++      struct drm_pvr2d_buf_release *br = data;
++      struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv);
++      struct pvr2d_buf *buf;
++      struct pvr2d_ref *ref;
++
++      DRM_DEBUG("pvr2d_buf_release releasing 0x%08x\n",
++                br->handle);
++
++      spin_lock( &pvr2d_fpriv->lock );
++      ref = idr_find( &pvr2d_fpriv->buf_idr, br->handle);
++
++      if (unlikely(ref == NULL)) {
++              spin_unlock( &pvr2d_fpriv->lock );
++              DRM_ERROR("Could not find pvr2d buf to unref.\n");
++              return -EINVAL;
++      }
++      (void) idr_remove( &pvr2d_fpriv->buf_idr, br->handle);
++      list_del(&ref->head);
++      spin_unlock( &pvr2d_fpriv->lock );
++
++      buf = ref->buf;
++      kfree(ref);
++
++      write_lock(&dev_priv->hash_lock);
++      kref_put(&buf->kref, &pvr2d_release_buf);
++      write_unlock(&dev_priv->hash_lock);
++
++      return 0;
++}
++
++static int
++pvr2d_cflush(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_pvr2d_cflush *cf = data;
++
++      switch (cf->type) {
++      case DRM_PVR2D_CFLUSH_FROM_GPU:
++              DRM_DEBUG("DRM_PVR2D_CFLUSH_FROM_GPU 0x%08x, length 0x%08x\n",
++                        cf->virt, cf->length);
++#ifdef CONFIG_ARM
++              dmac_inv_range((const void*)cf->virt,
++                             (const void*)(cf->virt + cf->length));
++#endif
++              return 0;
++      case DRM_PVR2D_CFLUSH_TO_GPU:
++              DRM_DEBUG("DRM_PVR2D_CFLUSH_TO_GPU 0x%08x, length 0x%08x\n",
++                        cf->virt, cf->length);
++#ifdef CONFIG_ARM
++              dmac_clean_range((const void*)cf->virt,
++                               (const void*)(cf->virt + cf->length));
++#endif
++              return 0;
++      default:
++              DRM_ERROR("Invalid cflush type 0x%x\n", cf->type);
++              return -EINVAL;
++      }
++}
++
++static int
++pvr2d_open(struct inode *inode, struct file *filp)
++{
++      int ret;
++      struct pvr2d_file *pvr2d_fpriv;
++      struct drm_file *file_priv;
++
++      pvr2d_fpriv = kmalloc(sizeof(*pvr2d_fpriv), GFP_KERNEL);
++      if (unlikely(pvr2d_fpriv == NULL))
++              return -ENOMEM;
++
++      pvr2d_fpriv->lock = SPIN_LOCK_UNLOCKED;
++      INIT_LIST_HEAD(&pvr2d_fpriv->ref_list);
++      idr_init(&pvr2d_fpriv->buf_idr);
++
++      ret = drm_open(inode, filp);
++
++      if (unlikely(ret != 0)) {
++              idr_destroy(&pvr2d_fpriv->buf_idr);
++              kfree(pvr2d_fpriv);
++              return ret;
++      }
++
++      file_priv = filp->private_data;
++      file_priv->driver_priv = pvr2d_fpriv;
++
++      DRM_DEBUG("pvr2d_open completed successfully.\n");
++      return 0;
++};
++
++
++static int
++pvr2d_release(struct inode *inode, struct file *filp)
++{
++      struct drm_file *file_priv = filp->private_data;
++      struct drm_device *dev = file_priv->minor->dev;
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++      struct pvr2d_file *pvr2d_fpriv = pvr2d_fp(file_priv);
++      struct pvr2d_buf *buf;
++      struct pvr2d_ref *ref, *next;
++
++      /*
++       * At this point we're the only user of the list, so
++       * it should be safe to release the file lock whenever we want to.
++       */
++
++      spin_lock(&pvr2d_fpriv->lock);
++
++      list_for_each_entry_safe(ref, next, &pvr2d_fpriv->ref_list,
++                               head) {
++              list_del(&ref->head);
++              buf = ref->buf;
++              kfree(ref);
++              spin_unlock(&pvr2d_fpriv->lock);
++              write_lock(&dev_priv->hash_lock);
++              kref_put(&buf->kref, &pvr2d_release_buf);
++              write_unlock(&dev_priv->hash_lock);
++              spin_lock(&pvr2d_fpriv->lock);
++      }
++
++      idr_remove_all(&pvr2d_fpriv->buf_idr);
++      idr_destroy(&pvr2d_fpriv->buf_idr);
++      spin_unlock(&pvr2d_fpriv->lock);
++
++      kfree(pvr2d_fpriv);
++
++      DRM_DEBUG("pvr2d_release calling drm_release.\n");
++      return drm_release(inode, filp);
++}
++
++static int pvr2d_load(struct drm_device *dev, unsigned long chipset)
++{
++      struct pvr2d_dev *dev_priv;
++      int ret;
++
++      dev_priv = kmalloc(sizeof(*dev_priv), GFP_KERNEL);
++      if (unlikely(dev_priv == NULL))
++              return -ENOMEM;
++
++      rwlock_init(&dev_priv->hash_lock);
++      ret = drm_ht_create(&dev_priv->shmem_hash,
++                         PVR2D_SHMEM_HASH_ORDER);
++
++      if (unlikely(ret != 0))
++              goto out_err0;
++
++      dev->dev_private = dev_priv;
++
++      DRM_DEBUG("pvr2d_load completed successfully.\n");
++      return 0;
++out_err0:
++      kfree(dev_priv);
++      return ret;
++}
++
++
++static int pvr2d_unload(struct drm_device *dev)
++{
++      struct pvr2d_dev *dev_priv = pvr2d_dp(dev);
++
++      drm_ht_remove(&dev_priv->shmem_hash);
++      kfree(dev_priv);
++      DRM_DEBUG("pvr2d_unload completed successfully.\n");
++      return 0;
++}
++
++static struct pci_device_id pciidlist[] = {
++      pvr2d_PCI_IDS
++};
++
++struct drm_ioctl_desc pvr2d_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_PVR2D_VIRT2PHYS, pvr2d_virt2phys, 0),
++      DRM_IOCTL_DEF(DRM_PVR2D_BUF_RELEASE, pvr2d_buf_release, 0),
++      DRM_IOCTL_DEF(DRM_PVR2D_CFLUSH, pvr2d_cflush, 0)
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_MTRR,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = pvr2d_ioctls,
++      .num_ioctls = DRM_ARRAY_SIZE(pvr2d_ioctls),
++      .load = pvr2d_load,
++      .unload = pvr2d_unload,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = pvr2d_open,
++              .release = pvr2d_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init pvr2d_init(void)
++{
++#ifdef CONFIG_PCI
++      return drm_init(&driver, pciidlist);
++#else
++      return drm_get_dev(NULL, NULL, &driver);
++#endif
++}
++
++static void __exit pvr2d_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(pvr2d_init);
++module_exit(pvr2d_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/pvr2d_drv.h git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.h
+--- git/drivers/gpu/drm-tungsten/pvr2d_drv.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/pvr2d_drv.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,19 @@
++/* -*- linux-c -*- */
++
++#ifndef __PVR2D_H__
++#define __PVR2D_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Tungsten Graphics Inc."
++
++#define DRIVER_NAME           "pvr2d"
++#define DRIVER_DESC           "PVR2D kernel helper"
++#define DRIVER_DATE           "20080811"
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     0
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_cce.c git-nokia/drivers/gpu/drm-tungsten/r128_cce.c
+--- git/drivers/gpu/drm-tungsten/r128_cce.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_cce.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,933 @@
++/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
++ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
++ */
++/*
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++#define R128_FIFO_DEBUG               0
++
++/* CCE microcode (from ATI) */
++static u32 r128_cce_microcode[] = {
++      0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
++      1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
++      599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
++      11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
++      262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
++      1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
++      30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
++      1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
++      15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
++      12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
++      46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
++      459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
++      18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
++      15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
++      268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
++      15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
++      1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
++      3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
++      1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
++      15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
++      180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
++      114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
++      33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
++      1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
++      14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
++      1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
++      198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
++      114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
++      1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
++      1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
++      16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
++      174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
++      33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
++      33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
++      409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++};
++
++static int R128_READ_PLL(struct drm_device * dev, int addr)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++
++      R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
++      return R128_READ(R128_CLOCK_CNTL_DATA);
++}
++
++#if R128_FIFO_DEBUG
++static void r128_status(drm_r128_private_t * dev_priv)
++{
++      printk("GUI_STAT           = 0x%08x\n",
++             (unsigned int)R128_READ(R128_GUI_STAT));
++      printk("PM4_STAT           = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_STAT));
++      printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
++      printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
++      printk("PM4_MICRO_CNTL     = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
++      printk("PM4_BUFFER_CNTL    = 0x%08x\n",
++             (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
++}
++#endif
++
++/* ================================================================
++ * Engine, FIFO control
++ */
++
++static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
++{
++      u32 tmp;
++      int i;
++
++      tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
++      R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
++{
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
++              if (slots >= entries)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
++{
++      int i, ret;
++
++      ret = r128_do_wait_for_fifo(dev_priv, 64);
++      if (ret)
++              return ret;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
++                      r128_do_pixcache_flush(dev_priv);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++/* ================================================================
++ * CCE control, initialization
++ */
++
++/* Load the microcode for the CCE */
++static void r128_cce_load_microcode(drm_r128_private_t * dev_priv)
++{
++      int i;
++
++      DRM_DEBUG("\n");
++
++      r128_do_wait_for_idle(dev_priv);
++
++      R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
++      for (i = 0; i < 256; i++) {
++              R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
++              R128_WRITE(R128_PM4_MICROCODE_DATAL,
++                         r128_cce_microcode[i * 2 + 1]);
++      }
++}
++
++/* Flush any pending commands to the CCE.  This should only be used just
++ * prior to a wait for idle, as it informs the engine that the command
++ * stream is ending.
++ */
++static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
++{
++      u32 tmp;
++
++      tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
++      R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
++}
++
++/* Wait for the CCE to go idle.
++ */
++int r128_do_cce_idle(drm_r128_private_t * dev_priv)
++{
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
++                      int pm4stat = R128_READ(R128_PM4_STAT);
++                      if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
++                           dev_priv->cce_fifo_size) &&
++                          !(pm4stat & (R128_PM4_BUSY |
++                                       R128_PM4_GUI_ACTIVE))) {
++                              return r128_do_pixcache_flush(dev_priv);
++                      }
++              }
++              DRM_UDELAY(1);
++      }
++
++#if R128_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      r128_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++/* Start the Concurrent Command Engine.
++ */
++static void r128_do_cce_start(drm_r128_private_t * dev_priv)
++{
++      r128_do_wait_for_idle(dev_priv);
++
++      R128_WRITE(R128_PM4_BUFFER_CNTL,
++                 dev_priv->cce_mode | dev_priv->ring.size_l2qw
++                 | R128_PM4_BUFFER_CNTL_NOUPDATE);
++      R128_READ(R128_PM4_BUFFER_ADDR);        /* as per the sample code */
++      R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
++
++      dev_priv->cce_running = 1;
++}
++
++/* Reset the Concurrent Command Engine.  This will not flush any pending
++ * commands, so you must wait for the CCE command stream to complete
++ * before calling this routine.
++ */
++static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
++{
++      R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
++      R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
++      dev_priv->ring.tail = 0;
++}
++
++/* Stop the Concurrent Command Engine.  This will not flush any pending
++ * commands, so you must flush the command stream and wait for the CCE
++ * to go idle before calling this routine.
++ */
++static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
++{
++      R128_WRITE(R128_PM4_MICRO_CNTL, 0);
++      R128_WRITE(R128_PM4_BUFFER_CNTL,
++                 R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
++
++      dev_priv->cce_running = 0;
++}
++
++/* Reset the engine.  This will stop the CCE if it is running.
++ */
++static int r128_do_engine_reset(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
++
++      r128_do_pixcache_flush(dev_priv);
++
++      clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
++      mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
++
++      R128_WRITE_PLL(R128_MCLK_CNTL,
++                     mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
++
++      gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
++
++      /* Taken from the sample code - do not change */
++      R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
++      R128_READ(R128_GEN_RESET_CNTL);
++      R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
++      R128_READ(R128_GEN_RESET_CNTL);
++
++      R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
++      R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
++      R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
++
++      /* Reset the CCE ring */
++      r128_do_cce_reset(dev_priv);
++
++      /* The CCE is no longer running after an engine reset */
++      dev_priv->cce_running = 0;
++
++      /* Reset any pending vertex, indirect buffers */
++      r128_freelist_reset(dev);
++
++      return 0;
++}
++
++static void r128_cce_init_ring_buffer(struct drm_device * dev,
++                                    drm_r128_private_t * dev_priv)
++{
++      u32 ring_start;
++      u32 tmp;
++
++      DRM_DEBUG("\n");
++
++      /* The manual (p. 2) says this address is in "VM space".  This
++       * means it's an offset from the start of AGP space.
++       */
++#if __OS_HAS_AGP
++      if (!dev_priv->is_pci)
++              ring_start = dev_priv->cce_ring->offset - dev->agp->base;
++      else
++#endif
++              ring_start = dev_priv->cce_ring->offset -
++                              (unsigned long)dev->sg->virtual;
++
++      R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
++
++      R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
++      R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
++
++      /* Set watermark control */
++      R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
++                 ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
++                 | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
++                 | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
++                 | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
++
++      /* Force read.  Why?  Because it's in the examples... */
++      R128_READ(R128_PM4_BUFFER_ADDR);
++
++      /* Turn on bus mastering */
++      tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
++      R128_WRITE(R128_BUS_CNTL, tmp);
++}
++
++static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
++{
++      drm_r128_private_t *dev_priv;
++
++      DRM_DEBUG("\n");
++
++      dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_r128_private_t));
++
++      dev_priv->is_pci = init->is_pci;
++
++      if (dev_priv->is_pci && !dev->sg) {
++              DRM_ERROR("PCI GART memory not allocated!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->usec_timeout = init->usec_timeout;
++      if (dev_priv->usec_timeout < 1 ||
++          dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
++              DRM_DEBUG("TIMEOUT problem!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->cce_mode = init->cce_mode;
++
++      /* GH: Simple idle check.
++       */
++      atomic_set(&dev_priv->idle_count, 0);
++
++      /* We don't support anything other than bus-mastering ring mode,
++       * but the ring can be in either AGP or PCI space for the ring
++       * read pointer.
++       */
++      if ((init->cce_mode != R128_PM4_192BM) &&
++          (init->cce_mode != R128_PM4_128BM_64INDBM) &&
++          (init->cce_mode != R128_PM4_64BM_128INDBM) &&
++          (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
++              DRM_DEBUG("Bad cce_mode!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      switch (init->cce_mode) {
++      case R128_PM4_NONPM4:
++              dev_priv->cce_fifo_size = 0;
++              break;
++      case R128_PM4_192PIO:
++      case R128_PM4_192BM:
++              dev_priv->cce_fifo_size = 192;
++              break;
++      case R128_PM4_128PIO_64INDBM:
++      case R128_PM4_128BM_64INDBM:
++              dev_priv->cce_fifo_size = 128;
++              break;
++      case R128_PM4_64PIO_128INDBM:
++      case R128_PM4_64BM_128INDBM:
++      case R128_PM4_64PIO_64VCBM_64INDBM:
++      case R128_PM4_64BM_64VCBM_64INDBM:
++      case R128_PM4_64PIO_64VCPIO_64INDPIO:
++              dev_priv->cce_fifo_size = 64;
++              break;
++      }
++
++      switch (init->fb_bpp) {
++      case 16:
++              dev_priv->color_fmt = R128_DATATYPE_RGB565;
++              break;
++      case 32:
++      default:
++              dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
++              break;
++      }
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      switch (init->depth_bpp) {
++      case 16:
++              dev_priv->depth_fmt = R128_DATATYPE_RGB565;
++              break;
++      case 24:
++      case 32:
++      default:
++              dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
++              break;
++      }
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++      dev_priv->span_offset = init->span_offset;
++
++      dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
++                                        (dev_priv->front_offset >> 5));
++      dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
++                                       (dev_priv->back_offset >> 5));
++      dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
++                                        (dev_priv->depth_offset >> 5) |
++                                        R128_DST_TILE);
++      dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
++                                       (dev_priv->span_offset >> 5));
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio) {
++              DRM_ERROR("could not find mmio region!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++      dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
++      if (!dev_priv->cce_ring) {
++              DRM_ERROR("could not find cce ring region!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++      dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
++      if (!dev_priv->ring_rptr) {
++              DRM_ERROR("could not find ring read pointer!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++      dev->agp_buffer_token = init->buffers_offset;
++      dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
++      if (!dev->agp_buffer_map) {
++              DRM_ERROR("could not find dma buffer region!\n");
++              dev->dev_private = (void *)dev_priv;
++              r128_do_cleanup_cce(dev);
++              return -EINVAL;
++      }
++
++      if (!dev_priv->is_pci) {
++              dev_priv->agp_textures =
++                  drm_core_findmap(dev, init->agp_textures_offset);
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("could not find agp texture region!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      r128_do_cleanup_cce(dev);
++                      return -EINVAL;
++              }
++      }
++
++      dev_priv->sarea_priv =
++          (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                                init->sarea_priv_offset);
++
++#if __OS_HAS_AGP
++      if (!dev_priv->is_pci) {
++              drm_core_ioremap(dev_priv->cce_ring, dev);
++              drm_core_ioremap(dev_priv->ring_rptr, dev);
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev_priv->cce_ring->handle ||
++                  !dev_priv->ring_rptr->handle ||
++                  !dev->agp_buffer_map->handle) {
++                      DRM_ERROR("Could not ioremap agp regions!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      r128_do_cleanup_cce(dev);
++                      return -ENOMEM;
++              }
++      } else
++#endif
++      {
++              dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
++              dev_priv->ring_rptr->handle =
++                  (void *)dev_priv->ring_rptr->offset;
++              dev->agp_buffer_map->handle =
++                  (void *)dev->agp_buffer_map->offset;
++      }
++
++#if __OS_HAS_AGP
++      if (!dev_priv->is_pci)
++              dev_priv->cce_buffers_offset = dev->agp->base;
++      else
++#endif
++              dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
++
++      dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
++      dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
++                            + init->ring_size / sizeof(u32));
++      dev_priv->ring.size = init->ring_size;
++      dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
++
++      dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
++
++      dev_priv->ring.high_mark = 128;
++
++      dev_priv->sarea_priv->last_frame = 0;
++      R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
++
++      dev_priv->sarea_priv->last_dispatch = 0;
++      R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
++
++#if __OS_HAS_AGP
++      if (dev_priv->is_pci) {
++#endif
++              dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
++              dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
++              dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
++              dev_priv->gart_info.addr = NULL;
++              dev_priv->gart_info.bus_addr = 0;
++              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++              if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
++                      DRM_ERROR("failed to init PCI GART!\n");
++                      dev->dev_private = (void *)dev_priv;
++                      r128_do_cleanup_cce(dev);
++                      return -ENOMEM;
++              }
++              R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
++#if __OS_HAS_AGP
++      }
++#endif
++
++      r128_cce_init_ring_buffer(dev, dev_priv);
++      r128_cce_load_microcode(dev_priv);
++
++      dev->dev_private = (void *)dev_priv;
++
++      r128_do_engine_reset(dev);
++
++      return 0;
++}
++
++int r128_do_cleanup_cce(struct drm_device * dev)
++{
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++      if (dev->dev_private) {
++              drm_r128_private_t *dev_priv = dev->dev_private;
++
++#if __OS_HAS_AGP
++              if (!dev_priv->is_pci) {
++                      if (dev_priv->cce_ring != NULL)
++                              drm_core_ioremapfree(dev_priv->cce_ring, dev);
++                      if (dev_priv->ring_rptr != NULL)
++                              drm_core_ioremapfree(dev_priv->ring_rptr, dev);
++                      if (dev->agp_buffer_map != NULL) {
++                              drm_core_ioremapfree(dev->agp_buffer_map, dev);
++                              dev->agp_buffer_map = NULL;
++                      }
++              } else
++#endif
++              {
++                      if (dev_priv->gart_info.bus_addr)
++                              if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
++                                      DRM_ERROR("failed to cleanup PCI GART!\n");
++              }
++
++              drm_free(dev->dev_private, sizeof(drm_r128_private_t),
++                       DRM_MEM_DRIVER);
++              dev->dev_private = NULL;
++      }
++
++      return 0;
++}
++
++int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_init_t *init = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case R128_INIT_CCE:
++              return r128_do_init_cce(dev, init);
++      case R128_CLEANUP_CCE:
++              return r128_do_cleanup_cce(dev);
++      }
++
++      return -EINVAL;
++}
++
++int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
++              DRM_DEBUG("while CCE running\n");
++              return 0;
++      }
++
++      r128_do_cce_start(dev_priv);
++
++      return 0;
++}
++
++/* Stop the CCE.  The engine must have been idled before calling this
++ * routine.
++ */
++int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_cce_stop_t *stop = data;
++      int ret;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Flush any pending CCE commands.  This ensures any outstanding
++       * commands are executed by the engine before we turn it off.
++       */
++      if (stop->flush) {
++              r128_do_cce_flush(dev_priv);
++      }
++
++      /* If we fail to make the engine go idle, we return an error
++       * code so that the DRM ioctl wrapper can try again.
++       */
++      if (stop->idle) {
++              ret = r128_do_cce_idle(dev_priv);
++              if (ret)
++                      return ret;
++      }
++
++      /* Finally, we can turn off the CCE.  If the engine isn't idle,
++       * we will get some dropped triangles as they won't be fully
++       * rendered before the CCE is shut down.
++       */
++      r128_do_cce_stop(dev_priv);
++
++      /* Reset the engine */
++      r128_do_engine_reset(dev);
++
++      return 0;
++}
++
++/* Just reset the CCE ring.  Called as part of an X Server engine reset.
++ */
++int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_DEBUG("called before init done\n");
++              return -EINVAL;
++      }
++
++      r128_do_cce_reset(dev_priv);
++
++      /* The CCE is no longer running after an engine reset */
++      dev_priv->cce_running = 0;
++
++      return 0;
++}
++
++int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dev_priv->cce_running) {
++              r128_do_cce_flush(dev_priv);
++      }
++
++      return r128_do_cce_idle(dev_priv);
++}
++
++int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return r128_do_engine_reset(dev);
++}
++
++int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      return -EINVAL;
++}
++
++/* ================================================================
++ * Freelist management
++ */
++#define R128_BUFFER_USED      0xffffffff
++#define R128_BUFFER_FREE      0
++
++#if 0
++static int r128_freelist_init(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_freelist_t *entry;
++      int i;
++
++      dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
++      if (dev_priv->head == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
++      dev_priv->head->age = R128_BUFFER_USED;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++
++              entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
++              if (!entry)
++                      return -ENOMEM;
++
++              entry->age = R128_BUFFER_FREE;
++              entry->buf = buf;
++              entry->prev = dev_priv->head;
++              entry->next = dev_priv->head->next;
++              if (!entry->next)
++                      dev_priv->tail = entry;
++
++              buf_priv->discard = 0;
++              buf_priv->dispatched = 0;
++              buf_priv->list_entry = entry;
++
++              dev_priv->head->next = entry;
++
++              if (dev_priv->head->next)
++                      dev_priv->head->next->prev = entry;
++      }
++
++      return 0;
++
++}
++#endif
++
++static struct drm_buf *r128_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv;
++      struct drm_buf *buf;
++      int i, t;
++
++      /* FIXME: Optimize -- use freelist code */
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              buf_priv = buf->dev_private;
++              if (buf->file_priv == 0)
++                      return buf;
++      }
++
++      for (t = 0; t < dev_priv->usec_timeout; t++) {
++              u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
++
++              for (i = 0; i < dma->buf_count; i++) {
++                      buf = dma->buflist[i];
++                      buf_priv = buf->dev_private;
++                      if (buf->pending && buf_priv->age <= done_age) {
++                              /* The buffer has been processed, so it
++                               * can now be used.
++                               */
++                              buf->pending = 0;
++                              return buf;
++                      }
++              }
++              DRM_UDELAY(1);
++      }
++
++      DRM_DEBUG("returning NULL!\n");
++      return NULL;
++}
++
++void r128_freelist_reset(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int i;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++              buf_priv->age = 0;
++      }
++}
++
++/* ================================================================
++ * CCE command submission
++ */
++
++int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
++{
++      drm_r128_ring_buffer_t *ring = &dev_priv->ring;
++      int i;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              r128_update_ring_snapshot(dev_priv);
++              if (ring->space >= n)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++      /* FIXME: This is being ignored... */
++      DRM_ERROR("failed!\n");
++      return -EBUSY;
++}
++
++static int r128_cce_get_buffers(struct drm_device * dev,
++                              struct drm_file *file_priv,
++                              struct drm_dma * d)
++{
++      int i;
++      struct drm_buf *buf;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = r128_freelist_get(dev);
++              if (!buf)
++                      return -EAGAIN;
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
++                                   sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
++                                   sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int ret = 0;
++      struct drm_dma *d = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = r128_cce_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_drm.h git-nokia/drivers/gpu/drm-tungsten/r128_drm.h
+--- git/drivers/gpu/drm-tungsten/r128_drm.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_drm.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,326 @@
++/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
++ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
++ */
++/*
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Kevin E. Martin <martin@valinux.com>
++ */
++
++#ifndef __R128_DRM_H__
++#define __R128_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the X server file (r128_sarea.h)
++ */
++#ifndef __R128_SAREA_DEFINES__
++#define __R128_SAREA_DEFINES__
++
++/* What needs to be changed for the current vertex buffer?
++ */
++#define R128_UPLOAD_CONTEXT           0x001
++#define R128_UPLOAD_SETUP             0x002
++#define R128_UPLOAD_TEX0              0x004
++#define R128_UPLOAD_TEX1              0x008
++#define R128_UPLOAD_TEX0IMAGES                0x010
++#define R128_UPLOAD_TEX1IMAGES                0x020
++#define R128_UPLOAD_CORE              0x040
++#define R128_UPLOAD_MASKS             0x080
++#define R128_UPLOAD_WINDOW            0x100
++#define R128_UPLOAD_CLIPRECTS         0x200   /* handled client-side */
++#define R128_REQUIRE_QUIESCENCE               0x400
++#define R128_UPLOAD_ALL                       0x7ff
++
++#define R128_FRONT                    0x1
++#define R128_BACK                     0x2
++#define R128_DEPTH                    0x4
++
++/* Primitive types
++ */
++#define R128_POINTS                   0x1
++#define R128_LINES                    0x2
++#define R128_LINE_STRIP                       0x3
++#define R128_TRIANGLES                        0x4
++#define R128_TRIANGLE_FAN             0x5
++#define R128_TRIANGLE_STRIP           0x6
++
++/* Vertex/indirect buffer size
++ */
++#define R128_BUFFER_SIZE              16384
++
++/* Byte offsets for indirect buffer data
++ */
++#define R128_INDEX_PRIM_OFFSET                20
++#define R128_HOSTDATA_BLIT_OFFSET     32
++
++/* Keep these small for testing.
++ */
++#define R128_NR_SAREA_CLIPRECTS               12
++
++/* There are 2 heaps (local/AGP).  Each region within a heap is a
++ *  minimum of 64k, and there are at most 64 of them per heap.
++ */
++#define R128_LOCAL_TEX_HEAP           0
++#define R128_AGP_TEX_HEAP             1
++#define R128_NR_TEX_HEAPS             2
++#define R128_NR_TEX_REGIONS           64
++#define R128_LOG_TEX_GRANULARITY      16
++
++#define R128_NR_CONTEXT_REGS          12
++
++#define R128_MAX_TEXTURE_LEVELS               11
++#define R128_MAX_TEXTURE_UNITS                2
++
++#endif                                /* __R128_SAREA_DEFINES__ */
++
++typedef struct {
++      /* Context state - can be written in one large chunk */
++      unsigned int dst_pitch_offset_c;
++      unsigned int dp_gui_master_cntl_c;
++      unsigned int sc_top_left_c;
++      unsigned int sc_bottom_right_c;
++      unsigned int z_offset_c;
++      unsigned int z_pitch_c;
++      unsigned int z_sten_cntl_c;
++      unsigned int tex_cntl_c;
++      unsigned int misc_3d_state_cntl_reg;
++      unsigned int texture_clr_cmp_clr_c;
++      unsigned int texture_clr_cmp_msk_c;
++      unsigned int fog_color_c;
++
++      /* Texture state */
++      unsigned int tex_size_pitch_c;
++      unsigned int constant_color_c;
++
++      /* Setup state */
++      unsigned int pm4_vc_fpu_setup;
++      unsigned int setup_cntl;
++
++      /* Mask state */
++      unsigned int dp_write_mask;
++      unsigned int sten_ref_mask_c;
++      unsigned int plane_3d_mask_c;
++
++      /* Window state */
++      unsigned int window_xy_offset;
++
++      /* Core state */
++      unsigned int scale_3d_cntl;
++} drm_r128_context_regs_t;
++
++/* Setup registers for each texture unit
++ */
++typedef struct {
++      unsigned int tex_cntl;
++      unsigned int tex_combine_cntl;
++      unsigned int tex_size_pitch;
++      unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
++      unsigned int tex_border_color;
++} drm_r128_texture_regs_t;
++
++typedef struct drm_r128_sarea {
++      /* The channel for communication of state information to the kernel
++       * on firing a vertex buffer.
++       */
++      drm_r128_context_regs_t context_state;
++      drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
++      unsigned int dirty;
++      unsigned int vertsize;
++      unsigned int vc_format;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Counters for client-side throttling of rendering clients.
++       */
++      unsigned int last_frame;
++      unsigned int last_dispatch;
++
++      struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
++      unsigned int tex_age[R128_NR_TEX_HEAPS];
++      int ctx_owner;
++      int pfAllowPageFlip;    /* number of 3d windows (0,1,2 or more) */
++      int pfCurrentPage;      /* which buffer is being displayed? */
++} drm_r128_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (xf86drmR128.h)
++ */
++
++/* Rage 128 specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_R128_INIT       0x00
++#define DRM_R128_CCE_START  0x01
++#define DRM_R128_CCE_STOP   0x02
++#define DRM_R128_CCE_RESET  0x03
++#define DRM_R128_CCE_IDLE   0x04
++/* 0x05 not used */
++#define DRM_R128_RESET      0x06
++#define DRM_R128_SWAP       0x07
++#define DRM_R128_CLEAR      0x08
++#define DRM_R128_VERTEX     0x09
++#define DRM_R128_INDICES    0x0a
++#define DRM_R128_BLIT       0x0b
++#define DRM_R128_DEPTH      0x0c
++#define DRM_R128_STIPPLE    0x0d
++/* 0x0e not used */
++#define DRM_R128_INDIRECT   0x0f
++#define DRM_R128_FULLSCREEN 0x10
++#define DRM_R128_CLEAR2     0x11
++#define DRM_R128_GETPARAM   0x12
++#define DRM_R128_FLIP       0x13
++
++#define DRM_IOCTL_R128_INIT       DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
++#define DRM_IOCTL_R128_CCE_START  DRM_IO(  DRM_COMMAND_BASE + DRM_R128_CCE_START)
++#define DRM_IOCTL_R128_CCE_STOP   DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
++#define DRM_IOCTL_R128_CCE_RESET  DRM_IO(  DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
++#define DRM_IOCTL_R128_CCE_IDLE   DRM_IO(  DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
++/* 0x05 not used */
++#define DRM_IOCTL_R128_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_R128_RESET)
++#define DRM_IOCTL_R128_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_R128_SWAP)
++#define DRM_IOCTL_R128_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
++#define DRM_IOCTL_R128_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
++#define DRM_IOCTL_R128_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
++#define DRM_IOCTL_R128_BLIT       DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
++#define DRM_IOCTL_R128_DEPTH      DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
++#define DRM_IOCTL_R128_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
++/* 0x0e not used */
++#define DRM_IOCTL_R128_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
++#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
++#define DRM_IOCTL_R128_CLEAR2     DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
++#define DRM_IOCTL_R128_GETPARAM   DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
++#define DRM_IOCTL_R128_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_R128_FLIP)
++
++typedef struct drm_r128_init {
++      enum {
++              R128_INIT_CCE = 0x01,
++              R128_CLEANUP_CCE = 0x02
++      } func;
++      unsigned long sarea_priv_offset;
++      int is_pci;
++      int cce_mode;
++      int cce_secure;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++      unsigned int span_offset;
++
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long ring_offset;
++      unsigned long ring_rptr_offset;
++      unsigned long buffers_offset;
++      unsigned long agp_textures_offset;
++} drm_r128_init_t;
++
++typedef struct drm_r128_cce_stop {
++      int flush;
++      int idle;
++} drm_r128_cce_stop_t;
++
++typedef struct drm_r128_clear {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;
++} drm_r128_clear_t;
++
++typedef struct drm_r128_vertex {
++      int prim;
++      int idx;                /* Index of vertex buffer */
++      int count;              /* Number of vertices in buffer */
++      int discard;            /* Client finished with buffer? */
++} drm_r128_vertex_t;
++
++typedef struct drm_r128_indices {
++      int prim;
++      int idx;
++      int start;
++      int end;
++      int discard;            /* Client finished with buffer? */
++} drm_r128_indices_t;
++
++typedef struct drm_r128_blit {
++      int idx;
++      int pitch;
++      int offset;
++      int format;
++      unsigned short x, y;
++      unsigned short width, height;
++} drm_r128_blit_t;
++
++typedef struct drm_r128_depth {
++      enum {
++              R128_WRITE_SPAN = 0x01,
++              R128_WRITE_PIXELS = 0x02,
++              R128_READ_SPAN = 0x03,
++              R128_READ_PIXELS = 0x04
++      } func;
++      int n;
++      int __user *x;
++      int __user *y;
++      unsigned int __user *buffer;
++      unsigned char __user *mask;
++} drm_r128_depth_t;
++
++typedef struct drm_r128_stipple {
++      unsigned int __user *mask;
++} drm_r128_stipple_t;
++
++typedef struct drm_r128_indirect {
++      int idx;
++      int start;
++      int end;
++      int discard;
++} drm_r128_indirect_t;
++
++typedef struct drm_r128_fullscreen {
++      enum {
++              R128_INIT_FULLSCREEN = 0x01,
++              R128_CLEANUP_FULLSCREEN = 0x02
++      } func;
++} drm_r128_fullscreen_t;
++
++/* 2.3: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define R128_PARAM_IRQ_NR            1
++
++typedef struct drm_r128_getparam {
++      int param;
++      void __user *value;
++} drm_r128_getparam_t;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_drv.c git-nokia/drivers/gpu/drm-tungsten/r128_drv.c
+--- git/drivers/gpu/drm-tungsten/r128_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,113 @@
++/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
++ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      r128_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
++          DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
++      .dev_priv_size = sizeof(drm_r128_buf_priv_t),
++      .preclose = r128_driver_preclose,
++      .lastclose = r128_driver_lastclose,
++      .get_vblank_counter = r128_get_vblank_counter,
++      .enable_vblank = r128_enable_vblank,
++      .disable_vblank = r128_disable_vblank,
++      .irq_preinstall = r128_driver_irq_preinstall,
++      .irq_postinstall = r128_driver_irq_postinstall,
++      .irq_uninstall = r128_driver_irq_uninstall,
++      .irq_handler = r128_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = r128_ioctls,
++      .dma_ioctl = r128_cce_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = r128_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init r128_init(void)
++{
++      driver.num_ioctls = r128_max_ioctl;
++
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit r128_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(r128_init);
++module_exit(r128_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_drv.h git-nokia/drivers/gpu/drm-tungsten/r128_drv.h
+--- git/drivers/gpu/drm-tungsten/r128_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,525 @@
++/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
++ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
++ */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Michel D�zer <daenzerm@student.ethz.ch>
++ */
++
++#ifndef __R128_DRV_H__
++#define __R128_DRV_H__
++
++/* General customization:
++ */
++#define DRIVER_AUTHOR         "Gareth Hughes, VA Linux Systems Inc."
++
++#define DRIVER_NAME           "r128"
++#define DRIVER_DESC           "ATI Rage 128"
++#define DRIVER_DATE           "20030725"
++
++/* Interface history:
++ *
++ * ??  - ??
++ * 2.4 - Add support for ycbcr textures (no new ioctls)
++ * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
++ */
++#define DRIVER_MAJOR          2
++#define DRIVER_MINOR          5
++#define DRIVER_PATCHLEVEL     0
++
++#define GET_RING_HEAD(dev_priv)               R128_READ( R128_PM4_BUFFER_DL_RPTR )
++
++typedef struct drm_r128_freelist {
++      unsigned int age;
++      struct drm_buf *buf;
++      struct drm_r128_freelist *next;
++      struct drm_r128_freelist *prev;
++} drm_r128_freelist_t;
++
++typedef struct drm_r128_ring_buffer {
++      u32 *start;
++      u32 *end;
++      int size;
++      int size_l2qw;
++
++      u32 tail;
++      u32 tail_mask;
++      int space;
++
++      int high_mark;
++} drm_r128_ring_buffer_t;
++
++typedef struct drm_r128_private {
++      drm_r128_ring_buffer_t ring;
++      drm_r128_sarea_t *sarea_priv;
++
++      int cce_mode;
++      int cce_fifo_size;
++      int cce_running;
++
++      drm_r128_freelist_t *head;
++      drm_r128_freelist_t *tail;
++
++      int usec_timeout;
++      int is_pci;
++      unsigned long cce_buffers_offset;
++
++      atomic_t idle_count;
++
++      int page_flipping;
++      int current_page;
++      u32 crtc_offset;
++      u32 crtc_offset_cntl;
++
++      atomic_t vbl_received;
++
++      u32 color_fmt;
++      unsigned int front_offset;
++      unsigned int front_pitch;
++      unsigned int back_offset;
++      unsigned int back_pitch;
++
++      u32 depth_fmt;
++      unsigned int depth_offset;
++      unsigned int depth_pitch;
++      unsigned int span_offset;
++
++      u32 front_pitch_offset_c;
++      u32 back_pitch_offset_c;
++      u32 depth_pitch_offset_c;
++      u32 span_pitch_offset_c;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *cce_ring;
++      drm_local_map_t *ring_rptr;
++      drm_local_map_t *agp_textures;
++      struct drm_ati_pcigart_info gart_info;
++} drm_r128_private_t;
++
++typedef struct drm_r128_buf_priv {
++      u32 age;
++      int prim;
++      int discard;
++      int dispatched;
++      drm_r128_freelist_t *list_entry;
++} drm_r128_buf_priv_t;
++
++extern struct drm_ioctl_desc r128_ioctls[];
++extern int r128_max_ioctl;
++
++                              /* r128_cce.c */
++extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
++
++extern void r128_freelist_reset(struct drm_device * dev);
++
++extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
++
++extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
++extern int r128_do_cleanup_cce(struct drm_device * dev);
++
++extern int r128_enable_vblank(struct drm_device *dev, int crtc);
++extern void r128_disable_vblank(struct drm_device *dev, int crtc);
++extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
++extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
++extern void r128_driver_irq_preinstall(struct drm_device * dev);
++extern int r128_driver_irq_postinstall(struct drm_device * dev);
++extern void r128_driver_irq_uninstall(struct drm_device * dev);
++extern void r128_driver_lastclose(struct drm_device * dev);
++extern void r128_driver_preclose(struct drm_device * dev,
++                               struct drm_file *file_priv);
++
++extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
++                            unsigned long arg);
++
++/* Register definitions, register access macros and drmAddMap constants
++ * for Rage 128 kernel driver.
++ */
++
++#define R128_AUX_SC_CNTL              0x1660
++#     define R128_AUX1_SC_EN                  (1 << 0)
++#     define R128_AUX1_SC_MODE_OR             (0 << 1)
++#     define R128_AUX1_SC_MODE_NAND           (1 << 1)
++#     define R128_AUX2_SC_EN                  (1 << 2)
++#     define R128_AUX2_SC_MODE_OR             (0 << 3)
++#     define R128_AUX2_SC_MODE_NAND           (1 << 3)
++#     define R128_AUX3_SC_EN                  (1 << 4)
++#     define R128_AUX3_SC_MODE_OR             (0 << 5)
++#     define R128_AUX3_SC_MODE_NAND           (1 << 5)
++#define R128_AUX1_SC_LEFT             0x1664
++#define R128_AUX1_SC_RIGHT            0x1668
++#define R128_AUX1_SC_TOP              0x166c
++#define R128_AUX1_SC_BOTTOM           0x1670
++#define R128_AUX2_SC_LEFT             0x1674
++#define R128_AUX2_SC_RIGHT            0x1678
++#define R128_AUX2_SC_TOP              0x167c
++#define R128_AUX2_SC_BOTTOM           0x1680
++#define R128_AUX3_SC_LEFT             0x1684
++#define R128_AUX3_SC_RIGHT            0x1688
++#define R128_AUX3_SC_TOP              0x168c
++#define R128_AUX3_SC_BOTTOM           0x1690
++
++#define R128_BRUSH_DATA0              0x1480
++#define R128_BUS_CNTL                 0x0030
++#     define R128_BUS_MASTER_DIS              (1 << 6)
++
++#define R128_CLOCK_CNTL_INDEX         0x0008
++#define R128_CLOCK_CNTL_DATA          0x000c
++#     define R128_PLL_WR_EN                   (1 << 7)
++#define R128_CONSTANT_COLOR_C         0x1d34
++#define R128_CRTC_OFFSET              0x0224
++#define R128_CRTC_OFFSET_CNTL         0x0228
++#     define R128_CRTC_OFFSET_FLIP_CNTL       (1 << 16)
++
++#define R128_DP_GUI_MASTER_CNTL               0x146c
++#       define R128_GMC_SRC_PITCH_OFFSET_CNTL (1    <<  0)
++#       define R128_GMC_DST_PITCH_OFFSET_CNTL (1    <<  1)
++#     define R128_GMC_BRUSH_SOLID_COLOR       (13   <<  4)
++#     define R128_GMC_BRUSH_NONE              (15   <<  4)
++#     define R128_GMC_DST_16BPP               (4    <<  8)
++#     define R128_GMC_DST_24BPP               (5    <<  8)
++#     define R128_GMC_DST_32BPP               (6    <<  8)
++#       define R128_GMC_DST_DATATYPE_SHIFT    8
++#     define R128_GMC_SRC_DATATYPE_COLOR      (3    << 12)
++#     define R128_DP_SRC_SOURCE_MEMORY        (2    << 24)
++#     define R128_DP_SRC_SOURCE_HOST_DATA     (3    << 24)
++#     define R128_GMC_CLR_CMP_CNTL_DIS        (1    << 28)
++#     define R128_GMC_AUX_CLIP_DIS            (1    << 29)
++#     define R128_GMC_WR_MSK_DIS              (1    << 30)
++#     define R128_ROP3_S                      0x00cc0000
++#     define R128_ROP3_P                      0x00f00000
++#define R128_DP_WRITE_MASK            0x16cc
++#define R128_DST_PITCH_OFFSET_C               0x1c80
++#     define R128_DST_TILE                    (1 << 31)
++
++#define R128_GEN_INT_CNTL             0x0040
++#     define R128_CRTC_VBLANK_INT_EN          (1 <<  0)
++#define R128_GEN_INT_STATUS           0x0044
++#     define R128_CRTC_VBLANK_INT             (1 <<  0)
++#     define R128_CRTC_VBLANK_INT_AK          (1 <<  0)
++#define R128_GEN_RESET_CNTL           0x00f0
++#     define R128_SOFT_RESET_GUI              (1 <<  0)
++
++#define R128_GUI_SCRATCH_REG0         0x15e0
++#define R128_GUI_SCRATCH_REG1         0x15e4
++#define R128_GUI_SCRATCH_REG2         0x15e8
++#define R128_GUI_SCRATCH_REG3         0x15ec
++#define R128_GUI_SCRATCH_REG4         0x15f0
++#define R128_GUI_SCRATCH_REG5         0x15f4
++
++#define R128_GUI_STAT                 0x1740
++#     define R128_GUI_FIFOCNT_MASK            0x0fff
++#     define R128_GUI_ACTIVE                  (1 << 31)
++
++#define R128_MCLK_CNTL                        0x000f
++#     define R128_FORCE_GCP                   (1 << 16)
++#     define R128_FORCE_PIPE3D_CP             (1 << 17)
++#     define R128_FORCE_RCP                   (1 << 18)
++
++#define R128_PC_GUI_CTLSTAT           0x1748
++#define R128_PC_NGUI_CTLSTAT          0x0184
++#     define R128_PC_FLUSH_GUI                (3 << 0)
++#     define R128_PC_RI_GUI                   (1 << 2)
++#     define R128_PC_FLUSH_ALL                0x00ff
++#     define R128_PC_BUSY                     (1 << 31)
++
++#define R128_PCI_GART_PAGE            0x017c
++#define R128_PRIM_TEX_CNTL_C          0x1cb0
++
++#define R128_SCALE_3D_CNTL            0x1a00
++#define R128_SEC_TEX_CNTL_C           0x1d00
++#define R128_SEC_TEXTURE_BORDER_COLOR_C       0x1d3c
++#define R128_SETUP_CNTL                       0x1bc4
++#define R128_STEN_REF_MASK_C          0x1d40
++
++#define R128_TEX_CNTL_C                       0x1c9c
++#     define R128_TEX_CACHE_FLUSH             (1 << 23)
++
++#define R128_WAIT_UNTIL                       0x1720
++#     define R128_EVENT_CRTC_OFFSET           (1 << 0)
++#define R128_WINDOW_XY_OFFSET         0x1bcc
++
++/* CCE registers
++ */
++#define R128_PM4_BUFFER_OFFSET                0x0700
++#define R128_PM4_BUFFER_CNTL          0x0704
++#     define R128_PM4_MASK                    (15 << 28)
++#     define R128_PM4_NONPM4                  (0  << 28)
++#     define R128_PM4_192PIO                  (1  << 28)
++#     define R128_PM4_192BM                   (2  << 28)
++#     define R128_PM4_128PIO_64INDBM          (3  << 28)
++#     define R128_PM4_128BM_64INDBM           (4  << 28)
++#     define R128_PM4_64PIO_128INDBM          (5  << 28)
++#     define R128_PM4_64BM_128INDBM           (6  << 28)
++#     define R128_PM4_64PIO_64VCBM_64INDBM    (7  << 28)
++#     define R128_PM4_64BM_64VCBM_64INDBM     (8  << 28)
++#     define R128_PM4_64PIO_64VCPIO_64INDPIO  (15 << 28)
++#     define R128_PM4_BUFFER_CNTL_NOUPDATE    (1  << 27)
++
++#define R128_PM4_BUFFER_WM_CNTL               0x0708
++#     define R128_WMA_SHIFT                   0
++#     define R128_WMB_SHIFT                   8
++#     define R128_WMC_SHIFT                   16
++#     define R128_WB_WM_SHIFT                 24
++
++#define R128_PM4_BUFFER_DL_RPTR_ADDR  0x070c
++#define R128_PM4_BUFFER_DL_RPTR               0x0710
++#define R128_PM4_BUFFER_DL_WPTR               0x0714
++#     define R128_PM4_BUFFER_DL_DONE          (1 << 31)
++
++#define R128_PM4_VC_FPU_SETUP         0x071c
++
++#define R128_PM4_IW_INDOFF            0x0738
++#define R128_PM4_IW_INDSIZE           0x073c
++
++#define R128_PM4_STAT                 0x07b8
++#     define R128_PM4_FIFOCNT_MASK            0x0fff
++#     define R128_PM4_BUSY                    (1 << 16)
++#     define R128_PM4_GUI_ACTIVE              (1 << 31)
++
++#define R128_PM4_MICROCODE_ADDR               0x07d4
++#define R128_PM4_MICROCODE_RADDR      0x07d8
++#define R128_PM4_MICROCODE_DATAH      0x07dc
++#define R128_PM4_MICROCODE_DATAL      0x07e0
++
++#define R128_PM4_BUFFER_ADDR          0x07f0
++#define R128_PM4_MICRO_CNTL           0x07fc
++#     define R128_PM4_MICRO_FREERUN           (1 << 30)
++
++#define R128_PM4_FIFO_DATA_EVEN               0x1000
++#define R128_PM4_FIFO_DATA_ODD                0x1004
++
++/* CCE command packets
++ */
++#define R128_CCE_PACKET0              0x00000000
++#define R128_CCE_PACKET1              0x40000000
++#define R128_CCE_PACKET2              0x80000000
++#define R128_CCE_PACKET3              0xC0000000
++#     define R128_CNTL_HOSTDATA_BLT           0x00009400
++#     define R128_CNTL_PAINT_MULTI            0x00009A00
++#     define R128_CNTL_BITBLT_MULTI           0x00009B00
++#     define R128_3D_RNDR_GEN_INDX_PRIM       0x00002300
++
++#define R128_CCE_PACKET_MASK          0xC0000000
++#define R128_CCE_PACKET_COUNT_MASK    0x3fff0000
++#define R128_CCE_PACKET0_REG_MASK     0x000007ff
++#define R128_CCE_PACKET1_REG0_MASK    0x000007ff
++#define R128_CCE_PACKET1_REG1_MASK    0x003ff800
++
++#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE               0x00000000
++#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT      0x00000001
++#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE               0x00000002
++#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE  0x00000003
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST   0x00000004
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN    0x00000005
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP  0x00000006
++#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2  0x00000007
++#define R128_CCE_VC_CNTL_PRIM_WALK_IND                0x00000010
++#define R128_CCE_VC_CNTL_PRIM_WALK_LIST               0x00000020
++#define R128_CCE_VC_CNTL_PRIM_WALK_RING               0x00000030
++#define R128_CCE_VC_CNTL_NUM_SHIFT            16
++
++#define R128_DATATYPE_VQ              0
++#define R128_DATATYPE_CI4             1
++#define R128_DATATYPE_CI8             2
++#define R128_DATATYPE_ARGB1555                3
++#define R128_DATATYPE_RGB565          4
++#define R128_DATATYPE_RGB888          5
++#define R128_DATATYPE_ARGB8888                6
++#define R128_DATATYPE_RGB332          7
++#define R128_DATATYPE_Y8              8
++#define R128_DATATYPE_RGB8            9
++#define R128_DATATYPE_CI16            10
++#define R128_DATATYPE_YVYU422         11
++#define R128_DATATYPE_VYUY422         12
++#define R128_DATATYPE_AYUV444         14
++#define R128_DATATYPE_ARGB4444                15
++
++/* Constants */
++#define R128_AGP_OFFSET                       0x02000000
++
++#define R128_WATERMARK_L              16
++#define R128_WATERMARK_M              8
++#define R128_WATERMARK_N              8
++#define R128_WATERMARK_K              128
++
++#define R128_MAX_USEC_TIMEOUT         100000  /* 100 ms */
++
++#define R128_LAST_FRAME_REG           R128_GUI_SCRATCH_REG0
++#define R128_LAST_DISPATCH_REG                R128_GUI_SCRATCH_REG1
++#define R128_MAX_VB_AGE                       0x7fffffff
++#define R128_MAX_VB_VERTS             (0xffff)
++
++#define R128_RING_HIGH_MARK           128
++
++#define R128_PERFORMANCE_BOXES                0
++
++#define R128_PCIGART_TABLE_SIZE         32768
++
++#define R128_READ(reg)                DRM_READ32(  dev_priv->mmio, (reg) )
++#define R128_WRITE(reg,val)   DRM_WRITE32( dev_priv->mmio, (reg), (val) )
++#define R128_READ8(reg)               DRM_READ8(   dev_priv->mmio, (reg) )
++#define R128_WRITE8(reg,val)  DRM_WRITE8(  dev_priv->mmio, (reg), (val) )
++
++#define R128_WRITE_PLL(addr,val)                                      \
++do {                                                                  \
++      R128_WRITE8(R128_CLOCK_CNTL_INDEX,                              \
++                  ((addr) & 0x1f) | R128_PLL_WR_EN);                  \
++      R128_WRITE(R128_CLOCK_CNTL_DATA, (val));                        \
++} while (0)
++
++#define CCE_PACKET0( reg, n )         (R128_CCE_PACKET0 |             \
++                                       ((n) << 16) | ((reg) >> 2))
++#define CCE_PACKET1( reg0, reg1 )     (R128_CCE_PACKET1 |             \
++                                       (((reg1) >> 2) << 11) | ((reg0) >> 2))
++#define CCE_PACKET2()                 (R128_CCE_PACKET2)
++#define CCE_PACKET3( pkt, n )         (R128_CCE_PACKET3 |             \
++                                       (pkt) | ((n) << 16))
++
++static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
++{
++      drm_r128_ring_buffer_t *ring = &dev_priv->ring;
++      ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
++      if (ring->space <= 0)
++              ring->space += ring->size;
++}
++
++/* ================================================================
++ * Misc helper macros
++ */
++
++#define RING_SPACE_TEST_WITH_RETURN( dev_priv )                               \
++do {                                                                  \
++      drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i;          \
++      if ( ring->space < ring->high_mark ) {                          \
++              for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {      \
++                      r128_update_ring_snapshot( dev_priv );          \
++                      if ( ring->space >= ring->high_mark )           \
++                              goto __ring_space_done;                 \
++                      DRM_UDELAY(1);                          \
++              }                                                       \
++              DRM_ERROR( "ring space check failed!\n" );              \
++              return -EBUSY;                          \
++      }                                                               \
++ __ring_space_done:                                                   \
++      ;                                                               \
++} while (0)
++
++#define VB_AGE_TEST_WITH_RETURN( dev_priv )                           \
++do {                                                                  \
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;            \
++      if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) {           \
++              int __ret = r128_do_cce_idle( dev_priv );               \
++              if ( __ret ) return __ret;                              \
++              sarea_priv->last_dispatch = 0;                          \
++              r128_freelist_reset( dev );                             \
++      }                                                               \
++} while (0)
++
++#define R128_WAIT_UNTIL_PAGE_FLIPPED() do {                           \
++      OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) );                  \
++      OUT_RING( R128_EVENT_CRTC_OFFSET );                             \
++} while (0)
++
++/* ================================================================
++ * Ring control
++ */
++
++#define R128_VERBOSE  0
++
++#define RING_LOCALS                                                   \
++      int write, _nr; unsigned int tail_mask; volatile u32 *ring;
++
++#define BEGIN_RING( n ) do {                                          \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "BEGIN_RING( %d )\n", (n));                   \
++      }                                                               \
++      if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {              \
++              COMMIT_RING();                                          \
++              r128_wait_ring( dev_priv, (n) * sizeof(u32) );          \
++      }                                                               \
++      _nr = n; dev_priv->ring.space -= (n) * sizeof(u32);             \
++      ring = dev_priv->ring.start;                                    \
++      write = dev_priv->ring.tail;                                    \
++      tail_mask = dev_priv->ring.tail_mask;                           \
++} while (0)
++
++/* You can set this to zero if you want.  If the card locks up, you'll
++ * need to keep this set.  It works around a bug in early revs of the
++ * Rage 128 chipset, where the CCE would read 32 dwords past the end of
++ * the ring buffer before wrapping around.
++ */
++#define R128_BROKEN_CCE       1
++
++#define ADVANCE_RING() do {                                           \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
++                        write, dev_priv->ring.tail );                 \
++      }                                                               \
++      if ( R128_BROKEN_CCE && write < 32 ) {                          \
++              memcpy( dev_priv->ring.end,                             \
++                      dev_priv->ring.start,                           \
++                      write * sizeof(u32) );                          \
++      }                                                               \
++      if (((dev_priv->ring.tail + _nr) & tail_mask) != write) {       \
++              DRM_ERROR(                                              \
++                      "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",        \
++                      ((dev_priv->ring.tail + _nr) & tail_mask),      \
++                      write, __LINE__);                               \
++      } else                                                          \
++              dev_priv->ring.tail = write;                            \
++} while (0)
++
++#define COMMIT_RING() do {                                            \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "COMMIT_RING() tail=0x%06x\n",                \
++                      dev_priv->ring.tail );                          \
++      }                                                               \
++      DRM_MEMORYBARRIER();                                            \
++      R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail );     \
++      R128_READ( R128_PM4_BUFFER_DL_WPTR );                           \
++} while (0)
++
++#define OUT_RING( x ) do {                                            \
++      if ( R128_VERBOSE ) {                                           \
++              DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",            \
++                         (unsigned int)(x), write );                  \
++      }                                                               \
++      ring[write++] = cpu_to_le32( x );                               \
++      write &= tail_mask;                                             \
++} while (0)
++
++#endif                                /* __R128_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_ioc32.c git-nokia/drivers/gpu/drm-tungsten/r128_ioc32.c
+--- git/drivers/gpu/drm-tungsten/r128_ioc32.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_ioc32.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,222 @@
++/**
++ * \file r128_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the R128 DRM.
++ *
++ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++
++typedef struct drm_r128_init32 {
++      int func;
++      unsigned int sarea_priv_offset;
++      int is_pci;
++      int cce_mode;
++      int cce_secure;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++      unsigned int span_offset;
++
++      unsigned int fb_offset;
++      unsigned int mmio_offset;
++      unsigned int ring_offset;
++      unsigned int ring_rptr_offset;
++      unsigned int buffers_offset;
++      unsigned int agp_textures_offset;
++} drm_r128_init32_t;
++
++static int compat_r128_init(struct file *file, unsigned int cmd,
++                          unsigned long arg)
++{
++      drm_r128_init32_t init32;
++      drm_r128_init_t __user *init;
++
++      if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++              return -EFAULT;
++
++      init = compat_alloc_user_space(sizeof(*init));
++      if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++          || __put_user(init32.func, &init->func)
++          || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++          || __put_user(init32.is_pci, &init->is_pci)
++          || __put_user(init32.cce_mode, &init->cce_mode)
++          || __put_user(init32.cce_secure, &init->cce_secure)
++          || __put_user(init32.ring_size, &init->ring_size)
++          || __put_user(init32.usec_timeout, &init->usec_timeout)
++          || __put_user(init32.fb_bpp, &init->fb_bpp)
++          || __put_user(init32.front_offset, &init->front_offset)
++          || __put_user(init32.front_pitch, &init->front_pitch)
++          || __put_user(init32.back_offset, &init->back_offset)
++          || __put_user(init32.back_pitch, &init->back_pitch)
++          || __put_user(init32.depth_bpp, &init->depth_bpp)
++          || __put_user(init32.depth_offset, &init->depth_offset)
++          || __put_user(init32.depth_pitch, &init->depth_pitch)
++          || __put_user(init32.span_offset, &init->span_offset)
++          || __put_user(init32.fb_offset, &init->fb_offset)
++          || __put_user(init32.mmio_offset, &init->mmio_offset)
++          || __put_user(init32.ring_offset, &init->ring_offset)
++          || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
++          || __put_user(init32.buffers_offset, &init->buffers_offset)
++          || __put_user(init32.agp_textures_offset,
++                        &init->agp_textures_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_INIT, (unsigned long)init);
++}
++
++
++typedef struct drm_r128_depth32 {
++      int func;
++      int n;
++      u32 x;
++      u32 y;
++      u32 buffer;
++      u32 mask;
++} drm_r128_depth32_t;
++
++static int compat_r128_depth(struct file *file, unsigned int cmd,
++                           unsigned long arg)
++{
++      drm_r128_depth32_t depth32;
++      drm_r128_depth_t __user *depth;
++
++      if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
++              return -EFAULT;
++
++      depth = compat_alloc_user_space(sizeof(*depth));
++      if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
++          || __put_user(depth32.func, &depth->func)
++          || __put_user(depth32.n, &depth->n)
++          || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
++          || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
++          || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
++                        &depth->buffer)
++          || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
++                        &depth->mask))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
++
++}
++
++typedef struct drm_r128_stipple32 {
++      u32 mask;
++} drm_r128_stipple32_t;
++
++static int compat_r128_stipple(struct file *file, unsigned int cmd,
++                             unsigned long arg)
++{
++      drm_r128_stipple32_t stipple32;
++      drm_r128_stipple_t __user *stipple;
++
++      if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
++              return -EFAULT;
++
++      stipple = compat_alloc_user_space(sizeof(*stipple));
++      if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
++          || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
++                        &stipple->mask))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
++}
++
++typedef struct drm_r128_getparam32 {
++      int param;
++      u32 value;
++} drm_r128_getparam32_t;
++
++static int compat_r128_getparam(struct file *file, unsigned int cmd,
++                              unsigned long arg)
++{
++      drm_r128_getparam32_t getparam32;
++      drm_r128_getparam_t __user *getparam;
++
++      if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
++              return -EFAULT;
++
++      getparam = compat_alloc_user_space(sizeof(*getparam));
++      if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
++          || __put_user(getparam32.param, &getparam->param)
++          || __put_user((void __user *)(unsigned long)getparam32.value,
++                        &getparam->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
++}
++
++drm_ioctl_compat_t *r128_compat_ioctls[] = {
++      [DRM_R128_INIT] = compat_r128_init,
++      [DRM_R128_DEPTH] = compat_r128_depth,
++      [DRM_R128_STIPPLE] = compat_r128_stipple,
++      [DRM_R128_GETPARAM] = compat_r128_getparam,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
++              fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_irq.c git-nokia/drivers/gpu/drm-tungsten/r128_irq.c
+--- git/drivers/gpu/drm-tungsten/r128_irq.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_irq.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,116 @@
++/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Eric Anholt <anholt@FreeBSD.org>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      const drm_r128_private_t *dev_priv = dev->dev_private;
++
++      if (crtc != 0)
++              return 0;
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
++      int status;
++
++      status = R128_READ(R128_GEN_INT_STATUS);
++
++      /* VBLANK interrupt */
++      if (status & R128_CRTC_VBLANK_INT) {
++              R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
++              atomic_inc(&dev_priv->vbl_received);
++              drm_handle_vblank(dev, 0);
++              return IRQ_HANDLED;
++      }
++      return IRQ_NONE;
++}
++
++int r128_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++
++      if (crtc != 0) {
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++              return -EINVAL;
++      }
++
++      R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
++      return 0;
++}
++
++void r128_disable_vblank(struct drm_device *dev, int crtc)
++{
++      if (crtc != 0)
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++
++      /*
++       * FIXME: implement proper interrupt disable by using the vblank
++       * counter register (if available)
++       *
++       * R128_WRITE(R128_GEN_INT_CNTL,
++       *            R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
++       */
++}
++
++void r128_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
++
++      /* Disable *all* interrupts */
++      R128_WRITE(R128_GEN_INT_CNTL, 0);
++      /* Clear vblank bit if it's already high */
++      R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
++}
++
++int r128_driver_irq_postinstall(struct drm_device * dev)
++{
++      return drm_vblank_init(dev, 1);
++}
++
++void r128_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      /* Disable *all* interrupts */
++      R128_WRITE(R128_GEN_INT_CNTL, 0);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r128_state.c git-nokia/drivers/gpu/drm-tungsten/r128_state.c
+--- git/drivers/gpu/drm-tungsten/r128_state.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r128_state.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1681 @@
++/* r128_state.c -- State support for r128 -*- linux-c -*-
++ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "r128_drm.h"
++#include "r128_drv.h"
++
++/* ================================================================
++ * CCE hardware state programming functions
++ */
++
++static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
++                               struct drm_clip_rect * boxes, int count)
++{
++      u32 aux_sc_cntl = 0x00000000;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
++
++      if (count >= 1) {
++              OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
++              OUT_RING(boxes[0].x1);
++              OUT_RING(boxes[0].x2 - 1);
++              OUT_RING(boxes[0].y1);
++              OUT_RING(boxes[0].y2 - 1);
++
++              aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
++      }
++      if (count >= 2) {
++              OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
++              OUT_RING(boxes[1].x1);
++              OUT_RING(boxes[1].x2 - 1);
++              OUT_RING(boxes[1].y1);
++              OUT_RING(boxes[1].y2 - 1);
++
++              aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
++      }
++      if (count >= 3) {
++              OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
++              OUT_RING(boxes[2].x1);
++              OUT_RING(boxes[2].x2 - 1);
++              OUT_RING(boxes[2].y1);
++              OUT_RING(boxes[2].y2 - 1);
++
++              aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
++      }
++
++      OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
++      OUT_RING(aux_sc_cntl);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
++      OUT_RING(ctx->scale_3d_cntl);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(13);
++
++      OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
++      OUT_RING(ctx->dst_pitch_offset_c);
++      OUT_RING(ctx->dp_gui_master_cntl_c);
++      OUT_RING(ctx->sc_top_left_c);
++      OUT_RING(ctx->sc_bottom_right_c);
++      OUT_RING(ctx->z_offset_c);
++      OUT_RING(ctx->z_pitch_c);
++      OUT_RING(ctx->z_sten_cntl_c);
++      OUT_RING(ctx->tex_cntl_c);
++      OUT_RING(ctx->misc_3d_state_cntl_reg);
++      OUT_RING(ctx->texture_clr_cmp_clr_c);
++      OUT_RING(ctx->texture_clr_cmp_msk_c);
++      OUT_RING(ctx->fog_color_c);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(3);
++
++      OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
++      OUT_RING(ctx->setup_cntl);
++      OUT_RING(ctx->pm4_vc_fpu_setup);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(5);
++
++      OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
++      OUT_RING(ctx->dp_write_mask);
++
++      OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
++      OUT_RING(ctx->sten_ref_mask_c);
++      OUT_RING(ctx->plane_3d_mask_c);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
++      OUT_RING(ctx->window_xy_offset);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
++      drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
++
++      OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
++                           2 + R128_MAX_TEXTURE_LEVELS));
++      OUT_RING(tex->tex_cntl);
++      OUT_RING(tex->tex_combine_cntl);
++      OUT_RING(ctx->tex_size_pitch_c);
++      for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
++              OUT_RING(tex->tex_offset[i]);
++      }
++
++      OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
++      OUT_RING(ctx->constant_color_c);
++      OUT_RING(tex->tex_border_color);
++
++      ADVANCE_RING();
++}
++
++static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
++
++      OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
++      OUT_RING(tex->tex_cntl);
++      OUT_RING(tex->tex_combine_cntl);
++      for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
++              OUT_RING(tex->tex_offset[i]);
++      }
++
++      OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
++      OUT_RING(tex->tex_border_color);
++
++      ADVANCE_RING();
++}
++
++static void r128_emit_state(drm_r128_private_t * dev_priv)
++{
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      unsigned int dirty = sarea_priv->dirty;
++
++      DRM_DEBUG("dirty=0x%08x\n", dirty);
++
++      if (dirty & R128_UPLOAD_CORE) {
++              r128_emit_core(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_CORE;
++      }
++
++      if (dirty & R128_UPLOAD_CONTEXT) {
++              r128_emit_context(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
++      }
++
++      if (dirty & R128_UPLOAD_SETUP) {
++              r128_emit_setup(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
++      }
++
++      if (dirty & R128_UPLOAD_MASKS) {
++              r128_emit_masks(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
++      }
++
++      if (dirty & R128_UPLOAD_WINDOW) {
++              r128_emit_window(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
++      }
++
++      if (dirty & R128_UPLOAD_TEX0) {
++              r128_emit_tex0(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
++      }
++
++      if (dirty & R128_UPLOAD_TEX1) {
++              r128_emit_tex1(dev_priv);
++              sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
++      }
++
++      /* Turn off the texture cache flushing */
++      sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
++
++      sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
++}
++
++#if R128_PERFORMANCE_BOXES
++/* ================================================================
++ * Performance monitoring functions
++ */
++
++static void r128_clear_box(drm_r128_private_t * dev_priv,
++                         int x, int y, int w, int h, int r, int g, int b)
++{
++      u32 pitch, offset;
++      u32 fb_bpp, color;
++      RING_LOCALS;
++
++      switch (dev_priv->fb_bpp) {
++      case 16:
++              fb_bpp = R128_GMC_DST_16BPP;
++              color = (((r & 0xf8) << 8) |
++                       ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
++              break;
++      case 24:
++              fb_bpp = R128_GMC_DST_24BPP;
++              color = ((r << 16) | (g << 8) | b);
++              break;
++      case 32:
++              fb_bpp = R128_GMC_DST_32BPP;
++              color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
++              break;
++      default:
++              return;
++      }
++
++      offset = dev_priv->back_offset;
++      pitch = dev_priv->back_pitch >> 3;
++
++      BEGIN_RING(6);
++
++      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++               R128_GMC_BRUSH_SOLID_COLOR |
++               fb_bpp |
++               R128_GMC_SRC_DATATYPE_COLOR |
++               R128_ROP3_P |
++               R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
++
++      OUT_RING((pitch << 21) | (offset >> 5));
++      OUT_RING(color);
++
++      OUT_RING((x << 16) | y);
++      OUT_RING((w << 16) | h);
++
++      ADVANCE_RING();
++}
++
++static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
++{
++      if (atomic_read(&dev_priv->idle_count) == 0) {
++              r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
++      } else {
++              atomic_set(&dev_priv->idle_count, 0);
++      }
++}
++
++#endif
++
++/* ================================================================
++ * CCE command dispatch functions
++ */
++
++static void r128_print_dirty(const char *msg, unsigned int flags)
++{
++      DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
++               msg,
++               flags,
++               (flags & R128_UPLOAD_CORE) ? "core, " : "",
++               (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
++               (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
++               (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
++               (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
++               (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
++               (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
++               (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
++               (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
++}
++
++static void r128_cce_dispatch_clear(struct drm_device * dev,
++                                  drm_r128_clear_t * clear)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      unsigned int flags = clear->flags;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      if (dev_priv->page_flipping && dev_priv->current_page == 1) {
++              unsigned int tmp = flags;
++
++              flags &= ~(R128_FRONT | R128_BACK);
++              if (tmp & R128_FRONT)
++                      flags |= R128_BACK;
++              if (tmp & R128_BACK)
++                      flags |= R128_FRONT;
++      }
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
++                        pbox[i].x1, pbox[i].y1, pbox[i].x2,
++                        pbox[i].y2, flags);
++
++              if (flags & (R128_FRONT | R128_BACK)) {
++                      BEGIN_RING(2);
++
++                      OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
++                      OUT_RING(clear->color_mask);
++
++                      ADVANCE_RING();
++              }
++
++              if (flags & R128_FRONT) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->color_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_AUX_CLIP_DIS);
++
++                      OUT_RING(dev_priv->front_pitch_offset_c);
++                      OUT_RING(clear->clear_color);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((w << 16) | h);
++
++                      ADVANCE_RING();
++              }
++
++              if (flags & R128_BACK) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->color_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_AUX_CLIP_DIS);
++
++                      OUT_RING(dev_priv->back_pitch_offset_c);
++                      OUT_RING(clear->clear_color);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((w << 16) | h);
++
++                      ADVANCE_RING();
++              }
++
++              if (flags & R128_DEPTH) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->depth_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
++
++                      OUT_RING(dev_priv->depth_pitch_offset_c);
++                      OUT_RING(clear->clear_depth);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((w << 16) | h);
++
++                      ADVANCE_RING();
++              }
++      }
++}
++
++/* Blit the back buffer to the front buffer inside every SAREA cliprect,
++ * honouring page flipping (front/back roles may be swapped), then emit
++ * the incremented last_frame counter so clients can throttle swaps.
++ * @dev: DRM device; its private state holds the SAREA and the CCE ring.
++ */
++static void r128_cce_dispatch_swap(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++#if R128_PERFORMANCE_BOXES
++      /* Do some trivial performance monitoring...
++       */
++      r128_cce_performance_boxes(dev_priv);
++#endif
++
++      /* One BITBLT_MULTI packet per cliprect. */
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              BEGIN_RING(7);
++
++              OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
++              OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
++                       R128_GMC_DST_PITCH_OFFSET_CNTL |
++                       R128_GMC_BRUSH_NONE |
++                       (dev_priv->color_fmt << 8) |
++                       R128_GMC_SRC_DATATYPE_COLOR |
++                       R128_ROP3_S |
++                       R128_DP_SRC_SOURCE_MEMORY |
++                       R128_GMC_CLR_CMP_CNTL_DIS |
++                       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
++
++              /* Make this work even if front & back are flipped:
++               */
++              if (dev_priv->current_page == 0) {
++                      OUT_RING(dev_priv->back_pitch_offset_c);
++                      OUT_RING(dev_priv->front_pitch_offset_c);
++              } else {
++                      OUT_RING(dev_priv->front_pitch_offset_c);
++                      OUT_RING(dev_priv->back_pitch_offset_c);
++              }
++
++              /* Source and destination use the same rectangle. */
++              OUT_RING((x << 16) | y);
++              OUT_RING((x << 16) | y);
++              OUT_RING((w << 16) | h);
++
++              ADVANCE_RING();
++      }
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
++      OUT_RING(dev_priv->sarea_priv->last_frame);
++
++      ADVANCE_RING();
++}
++
++/* Queue a CRTC page flip: wait for the previous flip to complete, then
++ * point R128_CRTC_OFFSET at the other buffer.  Toggles current_page /
++ * pfCurrentPage and bumps last_frame for client-side throttling.
++ * @dev: DRM device whose private state tracks the current page.
++ */
++static void r128_cce_dispatch_flip(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++      DRM_DEBUG("page=%d pfCurrentPage=%d\n",
++                dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
++
++#if R128_PERFORMANCE_BOXES
++      /* Do some trivial performance monitoring...
++       */
++      r128_cce_performance_boxes(dev_priv);
++#endif
++
++      BEGIN_RING(4);
++
++      R128_WAIT_UNTIL_PAGE_FLIPPED();
++      OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
++
++      /* Scan out the buffer we were NOT displaying. */
++      if (dev_priv->current_page == 0) {
++              OUT_RING(dev_priv->back_offset);
++      } else {
++              OUT_RING(dev_priv->front_offset);
++      }
++
++      ADVANCE_RING();
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++      dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
++          1 - dev_priv->current_page;
++
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
++      OUT_RING(dev_priv->sarea_priv->last_frame);
++
++      ADVANCE_RING();
++}
++
++/* Render a client vertex buffer, re-emitting it once per group of up to
++ * three cliprects.  Emits dirty state first, ages the buffer if the
++ * client asked for it to be discarded, and bumps last_dispatch.
++ * @dev: DRM device.
++ * @buf: DMA buffer to render; buf->used holds the vertex count (set by
++ *       r128_cce_vertex), not a byte size.
++ */
++static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int format = sarea_priv->vc_format;
++      int offset = buf->bus_address;
++      int size = buf->used;
++      int prim = buf_priv->prim;
++      int i = 0;
++      RING_LOCALS;
++      DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
++
++      if (0)
++              r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
++
++      if (buf->used) {
++              buf_priv->dispatched = 1;
++
++              /* Flush any dirty state other than cliprects first. */
++              if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
++                      r128_emit_state(dev_priv);
++              }
++
++              /* do/while so the buffer is dispatched at least once even
++               * when nbox == 0.
++               */
++              do {
++                      /* Emit the next set of up to three cliprects */
++                      if (i < sarea_priv->nbox) {
++                              r128_emit_clip_rects(dev_priv,
++                                                   &sarea_priv->boxes[i],
++                                                   sarea_priv->nbox - i);
++                      }
++
++                      /* Emit the vertex buffer rendering commands */
++                      BEGIN_RING(5);
++
++                      OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
++                      OUT_RING(offset);
++                      OUT_RING(size);
++                      OUT_RING(format);
++                      OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
++                               (size << R128_CCE_VC_CNTL_NUM_SHIFT));
++
++                      ADVANCE_RING();
++
++                      i += 3;
++              } while (i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              buf_priv->age = dev_priv->sarea_priv->last_dispatch;
++
++              /* Emit the vertex buffer age */
++              BEGIN_RING(2);
++
++              OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
++              OUT_RING(buf_priv->age);
++
++              ADVANCE_RING();
++
++              buf->pending = 1;
++              buf->used = 0;
++              /* FIXME: Check dispatched field */
++              buf_priv->dispatched = 0;
++      }
++
++      dev_priv->sarea_priv->last_dispatch++;
++
++      sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
++      sarea_priv->nbox = 0;
++}
++
++/* Fire the [start, end) byte range of an indirect buffer at the CCE,
++ * padding to an even dword count with a Type-2 packet, then age the
++ * buffer if the client discarded it.
++ * @dev:   DRM device.
++ * @buf:   indirect DMA buffer (backed by dev->agp_buffer_map).
++ * @start: first byte of the range to dispatch.
++ * @end:   one past the last byte; start == end dispatches nothing.
++ */
++static void r128_cce_dispatch_indirect(struct drm_device * dev,
++                                     struct drm_buf * buf, int start, int end)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++      RING_LOCALS;
++      DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
++
++      if (start != end) {
++              int offset = buf->bus_address + start;
++              /* Round the byte length up to whole dwords. */
++              int dwords = (end - start + 3) / sizeof(u32);
++
++              /* Indirect buffer data must be an even number of
++               * dwords, so if we've been given an odd number we must
++               * pad the data with a Type-2 CCE packet.
++               */
++              if (dwords & 1) {
++                      u32 *data = (u32 *)
++                          ((char *)dev->agp_buffer_map->handle
++                           + buf->offset + start);
++                      data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
++              }
++
++              buf_priv->dispatched = 1;
++
++              /* Fire off the indirect buffer */
++              BEGIN_RING(3);
++
++              OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
++              OUT_RING(offset);
++              OUT_RING(dwords);
++
++              ADVANCE_RING();
++      }
++
++      if (buf_priv->discard) {
++              buf_priv->age = dev_priv->sarea_priv->last_dispatch;
++
++              /* Emit the indirect buffer age */
++              BEGIN_RING(2);
++
++              OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
++              OUT_RING(buf_priv->age);
++
++              ADVANCE_RING();
++
++              buf->pending = 1;
++              buf->used = 0;
++              /* FIXME: Check dispatched field */
++              buf_priv->dispatched = 0;
++      }
++
++      dev_priv->sarea_priv->last_dispatch++;
++}
++
++/* Render indexed primitives from a client buffer.  Patches a PACKET3
++ * header and draw parameters directly into the buffer memory, then
++ * re-dispatches it via r128_cce_dispatch_indirect once per group of up
++ * to three cliprects.
++ * @dev:   DRM device.
++ * @buf:   DMA buffer holding the indices at [start, end).
++ * @start: first byte of index data (header is written over data[0..4]).
++ * @end:   one past the last index byte.
++ * @count: number of indices to draw.
++ */
++static void r128_cce_dispatch_indices(struct drm_device * dev,
++                                    struct drm_buf * buf,
++                                    int start, int end, int count)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_buf_priv_t *buf_priv = buf->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int format = sarea_priv->vc_format;
++      int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
++      int prim = buf_priv->prim;
++      u32 *data;
++      int dwords;
++      int i = 0;
++      RING_LOCALS;
++      DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
++
++      if (0)
++              r128_print_dirty("dispatch_indices", sarea_priv->dirty);
++
++      if (start != end) {
++              buf_priv->dispatched = 1;
++
++              /* Flush any dirty state other than cliprects first. */
++              if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
++                      r128_emit_state(dev_priv);
++              }
++
++              /* Round the byte length up to whole dwords. */
++              dwords = (end - start + 3) / sizeof(u32);
++
++              data = (u32 *) ((char *)dev->agp_buffer_map->handle
++                              + buf->offset + start);
++
++              /* Build the draw packet in place, in front of the indices. */
++              data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
++                                                dwords - 2));
++
++              data[1] = cpu_to_le32(offset);
++              data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
++              data[3] = cpu_to_le32(format);
++              data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
++                                     (count << 16)));
++
++              /* Indices are 16-bit; an odd count leaves half of the last
++               * dword unused, so mask out the stale half-word.
++               */
++              if (count & 0x1) {
++#ifdef __LITTLE_ENDIAN
++                      data[dwords - 1] &= 0x0000ffff;
++#else
++                      data[dwords - 1] &= 0xffff0000;
++#endif
++              }
++
++              /* do/while so the buffer is dispatched at least once even
++               * when nbox == 0.
++               */
++              do {
++                      /* Emit the next set of up to three cliprects */
++                      if (i < sarea_priv->nbox) {
++                              r128_emit_clip_rects(dev_priv,
++                                                   &sarea_priv->boxes[i],
++                                                   sarea_priv->nbox - i);
++                      }
++
++                      r128_cce_dispatch_indirect(dev, buf, start, end);
++
++                      i += 3;
++              } while (i < sarea_priv->nbox);
++      }
++
++      if (buf_priv->discard) {
++              buf_priv->age = dev_priv->sarea_priv->last_dispatch;
++
++              /* Emit the vertex buffer age */
++              BEGIN_RING(2);
++
++              OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
++              OUT_RING(buf_priv->age);
++
++              ADVANCE_RING();
++
++              buf->pending = 1;
++              /* FIXME: Check dispatched field */
++              buf_priv->dispatched = 0;
++      }
++
++      dev_priv->sarea_priv->last_dispatch++;
++
++      sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
++      sarea_priv->nbox = 0;
++}
++
++/* Upload texture data with a HOSTDATA_BLT built in an indirect buffer.
++ * Flushes the pixel cache before and after the blit so texture data is
++ * not mixed with stale pixel data.
++ * @dev:       DRM device.
++ * @file_priv: caller's file handle; must own the buffer blit->idx.
++ * @blit:      user blit request (format, buffer index, geometry).
++ * Returns 0 on success or -EINVAL on a bad format or buffer.
++ */
++static int r128_cce_dispatch_blit(struct drm_device * dev,
++                                struct drm_file *file_priv,
++                                drm_r128_blit_t * blit)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      u32 *data;
++      int dword_shift, dwords;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* The compiler won't optimize away a division by a variable,
++       * even if the only legal values are powers of two.  Thus, we'll
++       * use a shift instead.
++       */
++      switch (blit->format) {
++      case R128_DATATYPE_ARGB8888:
++              dword_shift = 0;
++              break;
++      case R128_DATATYPE_ARGB1555:
++      case R128_DATATYPE_RGB565:
++      case R128_DATATYPE_ARGB4444:
++      case R128_DATATYPE_YVYU422:
++      case R128_DATATYPE_VYUY422:
++              dword_shift = 1;
++              break;
++      case R128_DATATYPE_CI8:
++      case R128_DATATYPE_RGB8:
++              dword_shift = 2;
++              break;
++      default:
++              DRM_ERROR("invalid blit format %d\n", blit->format);
++              return -EINVAL;
++      }
++
++      /* Flush the pixel cache, and mark the contents as Read Invalid.
++       * This ensures no pixel data gets mixed up with the texture
++       * data from the host data blit, otherwise part of the texture
++       * image may be corrupted.
++       */
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
++      OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
++
++      ADVANCE_RING();
++
++      /* Dispatch the indirect buffer.
++       */
++      buf = dma->buflist[blit->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", blit->idx);
++              return -EINVAL;
++      }
++
++      buf_priv->discard = 1;
++
++      /* Payload size in dwords; dword_shift converts pixels to dwords. */
++      dwords = (blit->width * blit->height) >> dword_shift;
++
++      data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
++
++      /* Build the HOSTDATA_BLT header in front of the pixel payload. */
++      data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
++      data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
++                             R128_GMC_BRUSH_NONE |
++                             (blit->format << 8) |
++                             R128_GMC_SRC_DATATYPE_COLOR |
++                             R128_ROP3_S |
++                             R128_DP_SRC_SOURCE_HOST_DATA |
++                             R128_GMC_CLR_CMP_CNTL_DIS |
++                             R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
++
++      data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
++      data[3] = cpu_to_le32(0xffffffff);
++      data[4] = cpu_to_le32(0xffffffff);
++      data[5] = cpu_to_le32((blit->y << 16) | blit->x);
++      data[6] = cpu_to_le32((blit->height << 16) | blit->width);
++      data[7] = cpu_to_le32(dwords);
++
++      /* Header (8 dwords) plus payload. */
++      buf->used = (dwords + 8) * sizeof(u32);
++
++      r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
++
++      /* Flush the pixel cache after the blit completes.  This ensures
++       * the texture data is written out to memory before rendering
++       * continues.
++       */
++      BEGIN_RING(2);
++
++      OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
++      OUT_RING(R128_PC_FLUSH_GUI);
++
++      ADVANCE_RING();
++
++      return 0;
++}
++
++/* ================================================================
++ * Tiled depth buffer management
++ *
++ * FIXME: These should all set the destination write mask for when we
++ * have hardware stencil support.
++ */
++
++/* Write depth->n depth values along a horizontal span starting at the
++ * user-supplied (x, y), one 1x1 PAINT_MULTI per pixel, optionally
++ * skipping pixels whose mask byte is zero.
++ * @dev:   DRM device.
++ * @depth: user request; x/y/buffer/mask are user-space pointers.
++ * Returns 0, or -EMSGSIZE / -EFAULT / -ENOMEM on bad input.
++ */
++static int r128_cce_dispatch_write_span(struct drm_device * dev,
++                                      drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, x, y;
++      u32 *buffer;
++      u8 *mask;
++      int i, buffer_size, mask_size;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      /* Only the span origin is read; x increments per pixel below. */
++      if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
++              return -EFAULT;
++      }
++
++      buffer_size = depth->n * sizeof(u32);
++      buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
++      if (buffer == NULL)
++              return -ENOMEM;
++      if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
++              drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      mask_size = depth->n * sizeof(u8);
++      if (depth->mask) {
++              mask = drm_alloc(mask_size, DRM_MEM_BUFS);
++              if (mask == NULL) {
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      return -ENOMEM;
++              }
++              if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      drm_free(mask, mask_size, DRM_MEM_BUFS);
++                      return -EFAULT;
++              }
++
++              /* Masked path: emit only pixels with a non-zero mask byte. */
++              for (i = 0; i < count; i++, x++) {
++                      if (mask[i]) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                                       R128_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->depth_fmt << 8) |
++                                       R128_GMC_SRC_DATATYPE_COLOR |
++                                       R128_ROP3_P |
++                                       R128_GMC_CLR_CMP_CNTL_DIS |
++                                       R128_GMC_WR_MSK_DIS);
++
++                              OUT_RING(dev_priv->depth_pitch_offset_c);
++                              OUT_RING(buffer[i]);
++
++                              OUT_RING((x << 16) | y);
++                              OUT_RING((1 << 16) | 1);
++
++                              ADVANCE_RING();
++                      }
++              }
++
++              drm_free(mask, mask_size, DRM_MEM_BUFS);
++      } else {
++              /* Unmasked path: emit every pixel. */
++              for (i = 0; i < count; i++, x++) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->depth_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_WR_MSK_DIS);
++
++                      OUT_RING(dev_priv->depth_pitch_offset_c);
++                      OUT_RING(buffer[i]);
++
++                      OUT_RING((x << 16) | y);
++                      OUT_RING((1 << 16) | 1);
++
++                      ADVANCE_RING();
++              }
++      }
++
++      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++
++      return 0;
++}
++
++/* Write depth->n depth values at arbitrary user-supplied (x[i], y[i])
++ * coordinates, one 1x1 PAINT_MULTI per pixel, optionally skipping
++ * pixels whose mask byte is zero.
++ * @dev:   DRM device.
++ * @depth: user request; x/y/buffer/mask are user-space pointers.
++ * Returns 0, or -EMSGSIZE / -EFAULT / -ENOMEM on bad input.
++ */
++static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
++                                        drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, *x, *y;
++      u32 *buffer;
++      u8 *mask;
++      int i, xbuf_size, ybuf_size, buffer_size, mask_size;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      xbuf_size = count * sizeof(*x);
++      ybuf_size = count * sizeof(*y);
++      x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
++      if (x == NULL) {
++              return -ENOMEM;
++      }
++      y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
++      if (y == NULL) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++      /* Use ybuf_size here (was xbuf_size): the sizes are equal today,
++       * but the copy into y must be bounded by y's allocation, matching
++       * r128_cce_dispatch_read_pixels.
++       */
++      if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      buffer_size = depth->n * sizeof(u32);
++      buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
++      if (buffer == NULL) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      if (depth->mask) {
++              mask_size = depth->n * sizeof(u8);
++              mask = drm_alloc(mask_size, DRM_MEM_BUFS);
++              if (mask == NULL) {
++                      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++                      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      return -ENOMEM;
++              }
++              if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
++                      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++                      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++                      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++                      drm_free(mask, mask_size, DRM_MEM_BUFS);
++                      return -EFAULT;
++              }
++
++              /* Masked path: emit only pixels with a non-zero mask byte. */
++              for (i = 0; i < count; i++) {
++                      if (mask[i]) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                                       R128_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->depth_fmt << 8) |
++                                       R128_GMC_SRC_DATATYPE_COLOR |
++                                       R128_ROP3_P |
++                                       R128_GMC_CLR_CMP_CNTL_DIS |
++                                       R128_GMC_WR_MSK_DIS);
++
++                              OUT_RING(dev_priv->depth_pitch_offset_c);
++                              OUT_RING(buffer[i]);
++
++                              OUT_RING((x[i] << 16) | y[i]);
++                              OUT_RING((1 << 16) | 1);
++
++                              ADVANCE_RING();
++                      }
++              }
++
++              drm_free(mask, mask_size, DRM_MEM_BUFS);
++      } else {
++              /* Unmasked path: emit every pixel. */
++              for (i = 0; i < count; i++) {
++                      BEGIN_RING(6);
++
++                      OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
++                      OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
++                               R128_GMC_BRUSH_SOLID_COLOR |
++                               (dev_priv->depth_fmt << 8) |
++                               R128_GMC_SRC_DATATYPE_COLOR |
++                               R128_ROP3_P |
++                               R128_GMC_CLR_CMP_CNTL_DIS |
++                               R128_GMC_WR_MSK_DIS);
++
++                      OUT_RING(dev_priv->depth_pitch_offset_c);
++                      OUT_RING(buffer[i]);
++
++                      OUT_RING((x[i] << 16) | y[i]);
++                      OUT_RING((1 << 16) | 1);
++
++                      ADVANCE_RING();
++              }
++      }
++
++      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++      drm_free(buffer, buffer_size, DRM_MEM_BUFS);
++
++      return 0;
++}
++
++/* Blit a horizontal span of depth->n depth values starting at (x, y)
++ * from the depth buffer into the driver's span buffer with a single
++ * BITBLT_MULTI packet.
++ * @dev:   DRM device.
++ * @depth: user request; x and y are user-space pointers.
++ * Returns 0, or -EMSGSIZE / -EFAULT on bad input.
++ */
++static int r128_cce_dispatch_read_span(struct drm_device * dev,
++                                     drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, x, y;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
++              return -EFAULT;
++      }
++
++      BEGIN_RING(7);
++
++      OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
++      OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
++               R128_GMC_DST_PITCH_OFFSET_CNTL |
++               R128_GMC_BRUSH_NONE |
++               (dev_priv->depth_fmt << 8) |
++               R128_GMC_SRC_DATATYPE_COLOR |
++               R128_ROP3_S |
++               R128_DP_SRC_SOURCE_MEMORY |
++               R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
++
++      OUT_RING(dev_priv->depth_pitch_offset_c);
++      OUT_RING(dev_priv->span_pitch_offset_c);
++
++      /* count x 1 rectangle from (x, y) to (0, 0) of the span buffer. */
++      OUT_RING((x << 16) | y);
++      OUT_RING((0 << 16) | 0);
++      OUT_RING((count << 16) | 1);
++
++      ADVANCE_RING();
++
++      return 0;
++}
++
++/* Gather depth values from arbitrary user-supplied (x[i], y[i])
++ * coordinates into consecutive columns of the span buffer, one 1x1
++ * BITBLT_MULTI per pixel.
++ * @dev:   DRM device.
++ * @depth: user request; x and y are user-space pointers.
++ * Returns 0, or -EMSGSIZE / -ENOMEM / -EFAULT on bad input.
++ */
++static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
++                                       drm_r128_depth_t * depth)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int count, *x, *y;
++      int i, xbuf_size, ybuf_size;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      count = depth->n;
++      if (count > 4096 || count <= 0)
++              return -EMSGSIZE;
++
++      /* Results land in one row of the span buffer, so clamp to its
++       * width (the depth pitch).
++       */
++      if (count > dev_priv->depth_pitch) {
++              count = dev_priv->depth_pitch;
++      }
++
++      xbuf_size = count * sizeof(*x);
++      ybuf_size = count * sizeof(*y);
++      x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
++      if (x == NULL) {
++              return -ENOMEM;
++      }
++      y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
++      if (y == NULL) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++      if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
++              drm_free(x, xbuf_size, DRM_MEM_BUFS);
++              drm_free(y, ybuf_size, DRM_MEM_BUFS);
++              return -EFAULT;
++      }
++
++      for (i = 0; i < count; i++) {
++              BEGIN_RING(7);
++
++              OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
++              OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
++                       R128_GMC_DST_PITCH_OFFSET_CNTL |
++                       R128_GMC_BRUSH_NONE |
++                       (dev_priv->depth_fmt << 8) |
++                       R128_GMC_SRC_DATATYPE_COLOR |
++                       R128_ROP3_S |
++                       R128_DP_SRC_SOURCE_MEMORY |
++                       R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
++
++              OUT_RING(dev_priv->depth_pitch_offset_c);
++              OUT_RING(dev_priv->span_pitch_offset_c);
++
++              /* Pixel i is copied to column i, row 0 of the span buffer. */
++              OUT_RING((x[i] << 16) | y[i]);
++              OUT_RING((i << 16) | 0);
++              OUT_RING((1 << 16) | 1);
++
++              ADVANCE_RING();
++      }
++
++      drm_free(x, xbuf_size, DRM_MEM_BUFS);
++      drm_free(y, ybuf_size, DRM_MEM_BUFS);
++
++      return 0;
++}
++
++/* ================================================================
++ * Polygon stipple
++ */
++
++/* Load a 32x32 polygon stipple pattern into the 32 brush data
++ * registers with a single PACKET0 burst.
++ * @dev:     DRM device.
++ * @stipple: 32 dwords of pattern data (caller-validated).
++ */
++static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* 1 header dword + 32 data dwords. */
++      BEGIN_RING(33);
++
++      OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
++      for (i = 0; i < 32; i++) {
++              OUT_RING(stipple[i]);
++      }
++
++      ADVANCE_RING();
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++
++/* DRM_IOCTL_R128_CLEAR handler: validate lock and ring space, clamp
++ * the cliprect count, dispatch the clear and mark 3D state dirty so it
++ * is re-emitted on the next draw.
++ * Returns 0, or an error from the lock/ring tests.
++ */
++static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_r128_clear_t *clear = data;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
++
++      r128_cce_dispatch_clear(dev, clear);
++      COMMIT_RING();
++
++      /* Make sure we restore the 3D state next time.
++       */
++      dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
++
++      return 0;
++}
++
++/* Enable page flipping: save the current CRTC offset registers so they
++ * can be restored at cleanup, point the CRTC at the front buffer and
++ * reset the page-tracking state.  Always returns 0.
++ */
++static int r128_do_init_pageflip(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      /* Saved for r128_do_cleanup_pageflip(). */
++      dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
++      dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
++
++      R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
++      R128_WRITE(R128_CRTC_OFFSET_CNTL,
++                 dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
++
++      dev_priv->page_flipping = 1;
++      dev_priv->current_page = 0;
++      dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
++
++      return 0;
++}
++
++/* Disable page flipping: restore the CRTC offset registers saved by
++ * r128_do_init_pageflip() and, if the back page is being displayed,
++ * flip once more so the front buffer is visible.  Always returns 0.
++ */
++static int r128_do_cleanup_pageflip(struct drm_device * dev)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
++      R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
++
++      /* Leave the hardware showing page 0 (the front buffer). */
++      if (dev_priv->current_page != 0) {
++              r128_cce_dispatch_flip(dev);
++              COMMIT_RING();
++      }
++
++      dev_priv->page_flipping = 0;
++      return 0;
++}
++
++/* Swapping and flipping are different operations, need different ioctls.
++ * They can & should be intermixed to support multiple 3d windows.
++ */
++
++/* DRM_IOCTL_R128_FLIP handler: lazily initialise page flipping on first
++ * use, then queue a flip.  Returns 0 or an error from the lock/ring
++ * tests.
++ */
++static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (!dev_priv->page_flipping)
++              r128_do_init_pageflip(dev);
++
++      r128_cce_dispatch_flip(dev);
++
++      COMMIT_RING();
++      return 0;
++}
++
++/* DRM_IOCTL_R128_SWAP handler: clamp the cliprect count, dispatch a
++ * back-to-front blit and mark 3D state dirty for re-emission.  Returns
++ * 0 or an error from the lock/ring tests.
++ */
++static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
++
++      r128_cce_dispatch_swap(dev);
++      dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
++                                      R128_UPLOAD_MASKS);
++
++      COMMIT_RING();
++      return 0;
++}
++
++/* DRM_IOCTL_R128_VERTEX handler: validate the buffer index, primitive
++ * type and ownership, then hand the buffer to
++ * r128_cce_dispatch_vertex().  Returns 0 or -EINVAL.
++ */
++static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_vertex_t *vertex = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
++                DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        vertex->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (vertex->prim < 0 ||
++          vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
++              DRM_ERROR("buffer prim %d\n", vertex->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[vertex->idx];
++      buf_priv = buf->dev_private;
++
++      /* The buffer must belong to the calling client and be idle. */
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", vertex->idx);
++              return -EINVAL;
++      }
++
++      /* buf->used carries the vertex count into dispatch_vertex. */
++      buf->used = vertex->count;
++      buf_priv->prim = vertex->prim;
++      buf_priv->discard = vertex->discard;
++
++      r128_cce_dispatch_vertex(dev, buf);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_indices_t *elts = data;
++      int count;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
++                elts->idx, elts->start, elts->end, elts->discard);
++
++      if (elts->idx < 0 || elts->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        elts->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (elts->prim < 0 ||
++          elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
++              DRM_ERROR("buffer prim %d\n", elts->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[elts->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", elts->idx);
++              return -EINVAL;
++      }
++
++      count = (elts->end - elts->start) / sizeof(u16);
++      elts->start -= R128_INDEX_PRIM_OFFSET;
++
++      if (elts->start & 0x7) {
++              DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
++              return -EINVAL;
++      }
++      if (elts->start < buf->used) {
++              DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
++              return -EINVAL;
++      }
++
++      buf->used = elts->end;
++      buf_priv->prim = elts->prim;
++      buf_priv->discard = elts->discard;
++
++      r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_blit_t *blit = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
++
++      if (blit->idx < 0 || blit->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        blit->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      ret = r128_cce_dispatch_blit(dev, file_priv, blit);
++
++      COMMIT_RING();
++      return ret;
++}
++
++static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_depth_t *depth = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      ret = -EINVAL;
++      switch (depth->func) {
++      case R128_WRITE_SPAN:
++              ret = r128_cce_dispatch_write_span(dev, depth);
++              break;
++      case R128_WRITE_PIXELS:
++              ret = r128_cce_dispatch_write_pixels(dev, depth);
++              break;
++      case R128_READ_SPAN:
++              ret = r128_cce_dispatch_read_span(dev, depth);
++              break;
++      case R128_READ_PIXELS:
++              ret = r128_cce_dispatch_read_pixels(dev, depth);
++              break;
++      }
++
++      COMMIT_RING();
++      return ret;
++}
++
++static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_stipple_t *stipple = data;
++      u32 mask[32];
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
++              return -EFAULT;
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      r128_cce_dispatch_stipple(dev, mask);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_r128_buf_priv_t *buf_priv;
++      drm_r128_indirect_t *indirect = data;
++#if 0
++      RING_LOCALS;
++#endif
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
++                indirect->idx, indirect->start, indirect->end,
++                indirect->discard);
++
++      if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        indirect->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      buf = dma->buflist[indirect->idx];
++      buf_priv = buf->dev_private;
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", indirect->idx);
++              return -EINVAL;
++      }
++
++      if (indirect->start < buf->used) {
++              DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
++                        indirect->start, buf->used);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf->used = indirect->end;
++      buf_priv->discard = indirect->discard;
++
++#if 0
++      /* Wait for the 3D stream to idle before the indirect buffer
++       * containing 2D acceleration commands is processed.
++       */
++      BEGIN_RING(2);
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      ADVANCE_RING();
++#endif
++
++      /* Dispatch the indirect buffer full of commands from the
++       * X server.  This is insecure and is thus only available to
++       * privileged clients.
++       */
++      r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_r128_private_t *dev_priv = dev->dev_private;
++      drm_r128_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      switch (param->param) {
++      case R128_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
++{
++      if (dev->dev_private) {
++              drm_r128_private_t *dev_priv = dev->dev_private;
++              if (dev_priv->page_flipping) {
++                      r128_do_cleanup_pageflip(dev);
++              }
++      }
++}
++
++void r128_driver_lastclose(struct drm_device * dev)
++{
++      r128_do_cleanup_cce(dev);
++}
++
++struct drm_ioctl_desc r128_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
++};
++
++int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/r300_cmdbuf.c git-nokia/drivers/gpu/drm-tungsten/r300_cmdbuf.c
+--- git/drivers/gpu/drm-tungsten/r300_cmdbuf.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r300_cmdbuf.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1198 @@
++/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
++ *
++ * Copyright (C) The Weather Channel, Inc.  2002.
++ * Copyright (C) 2004 Nicolai Haehnle.
++ * All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Nicolai Haehnle <prefect_@gmx.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++#include "r300_reg.h"
++
++#define R300_SIMULTANEOUS_CLIPRECTS           4
++
++/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
++ */
++static const int r300_cliprect_cntl[4] = {
++      0xAAAA,
++      0xEEEE,
++      0xFEFE,
++      0xFFFE
++};
++
++/**
++ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
++ * buffer, starting with index n.
++ */
++static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
++                             drm_radeon_kcmd_buffer_t *cmdbuf, int n)
++{
++      struct drm_clip_rect box;
++      int nr;
++      int i;
++      RING_LOCALS;
++
++      nr = cmdbuf->nbox - n;
++      if (nr > R300_SIMULTANEOUS_CLIPRECTS)
++              nr = R300_SIMULTANEOUS_CLIPRECTS;
++
++      DRM_DEBUG("%i cliprects\n", nr);
++
++      if (nr) {
++              BEGIN_RING(6 + nr * 2);
++              OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
++
++              for (i = 0; i < nr; ++i) {
++                      if (DRM_COPY_FROM_USER_UNCHECKED
++                          (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
++                              DRM_ERROR("copy cliprect faulted\n");
++                              return -EFAULT;
++                      }
++
++                      box.x2--; /* Hardware expects inclusive bottom-right corner */
++                      box.y2--;
++
++                      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
++                              box.x1 = (box.x1) &
++                                      R300_CLIPRECT_MASK;
++                              box.y1 = (box.y1) &
++                                      R300_CLIPRECT_MASK;
++                              box.x2 = (box.x2) &
++                                      R300_CLIPRECT_MASK;
++                              box.y2 = (box.y2) &
++                                      R300_CLIPRECT_MASK;
++                      } else {
++                              box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                              box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                              box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                              box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
++                                      R300_CLIPRECT_MASK;
++                      }
++
++                      OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
++                               (box.y1 << R300_CLIPRECT_Y_SHIFT));
++                      OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
++                               (box.y2 << R300_CLIPRECT_Y_SHIFT));
++
++              }
++
++              OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
++
++              /* TODO/SECURITY: Force scissors to a safe value, otherwise the
++               * client might be able to trample over memory.
++               * The impact should be very limited, but I'd rather be safe than
++               * sorry.
++               */
++              OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
++              OUT_RING(0);
++              OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
++              ADVANCE_RING();
++      } else {
++              /* Why we allow zero cliprect rendering:
++               * There are some commands in a command buffer that must be submitted
++               * even when there are no cliprects, e.g. DMA buffer discard
++               * or state setting (though state setting could be avoided by
++               * simulating a loss of context).
++               *
++               * Now since the cmdbuf interface is so chaotic right now (and is
++               * bound to remain that way for a bit until things settle down),
++               * it is basically impossible to filter out the commands that are
++               * necessary and those that aren't.
++               *
++               * So I choose the safe way and don't do any filtering at all;
++               * instead, I simply set up the engine so that all rendering
++               * can't produce any fragments.
++               */
++              BEGIN_RING(2);
++              OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
++              ADVANCE_RING();
++      }
++
++      /* flush cache and wait idle clean after cliprect change */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(R300_RB3D_DC_FLUSH);
++      ADVANCE_RING();
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      ADVANCE_RING();
++      /* set flush flag */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
++
++      return 0;
++}
++
++static u8 r300_reg_flags[0x10000 >> 2];
++
++void r300_init_reg_flags(struct drm_device *dev)
++{
++      int i;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      memset(r300_reg_flags, 0, 0x10000 >> 2);
++#define ADD_RANGE_MARK(reg, count,mark) \
++              for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
++                      r300_reg_flags[i]|=(mark);
++
++#define MARK_SAFE             1
++#define MARK_CHECK_OFFSET     2
++
++#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
++
++      /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
++      ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
++      ADD_RANGE(R300_VAP_CNTL, 1);
++      ADD_RANGE(R300_SE_VTE_CNTL, 2);
++      ADD_RANGE(0x2134, 2);
++      ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
++      ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
++      ADD_RANGE(0x21DC, 1);
++      ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
++      ADD_RANGE(R300_VAP_CLIP_X_0, 4);
++      ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
++      ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
++      ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
++      ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
++      ADD_RANGE(R300_GB_ENABLE, 1);
++      ADD_RANGE(R300_GB_MSPOS0, 5);
++      ADD_RANGE(R300_TX_INVALTAGS, 1);
++      ADD_RANGE(R300_TX_ENABLE, 1);
++      ADD_RANGE(0x4200, 4);
++      ADD_RANGE(0x4214, 1);
++      ADD_RANGE(R300_RE_POINTSIZE, 1);
++      ADD_RANGE(0x4230, 3);
++      ADD_RANGE(R300_RE_LINE_CNT, 1);
++      ADD_RANGE(R300_RE_UNK4238, 1);
++      ADD_RANGE(0x4260, 3);
++      ADD_RANGE(R300_RE_SHADE, 4);
++      ADD_RANGE(R300_RE_POLYGON_MODE, 5);
++      ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
++      ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
++      ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
++      ADD_RANGE(R300_RE_CULL_CNTL, 1);
++      ADD_RANGE(0x42C0, 2);
++      ADD_RANGE(R300_RS_CNTL_0, 2);
++
++      ADD_RANGE(R300_SC_HYPERZ, 2);
++      ADD_RANGE(0x43E8, 1);
++
++      ADD_RANGE(0x46A4, 5);
++
++      ADD_RANGE(R300_RE_FOG_STATE, 1);
++      ADD_RANGE(R300_FOG_COLOR_R, 3);
++      ADD_RANGE(R300_PP_ALPHA_TEST, 2);
++      ADD_RANGE(0x4BD8, 1);
++      ADD_RANGE(R300_PFS_PARAM_0_X, 64);
++      ADD_RANGE(0x4E00, 1);
++      ADD_RANGE(R300_RB3D_CBLEND, 2);
++      ADD_RANGE(R300_RB3D_COLORMASK, 1);
++      ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
++      ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);   /* check offset */
++      ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
++      ADD_RANGE(0x4E50, 9);
++      ADD_RANGE(0x4E88, 1);
++      ADD_RANGE(0x4EA0, 2);
++      ADD_RANGE(R300_ZB_CNTL, 3);
++      ADD_RANGE(R300_ZB_FORMAT, 4);
++      ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);      /* check offset */
++      ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
++      ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
++      ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
++
++      ADD_RANGE(R300_TX_FILTER_0, 16);
++      ADD_RANGE(R300_TX_FILTER1_0, 16);
++      ADD_RANGE(R300_TX_SIZE_0, 16);
++      ADD_RANGE(R300_TX_FORMAT_0, 16);
++      ADD_RANGE(R300_TX_PITCH_0, 16);
++      /* Texture offset is dangerous and needs more checking */
++      ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
++      ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
++      ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
++
++      /* Sporadic registers used as primitives are emitted */
++      ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
++      ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
++      ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
++      ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
++              ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
++              ADD_RANGE(R500_US_CONFIG, 2);
++              ADD_RANGE(R500_US_CODE_ADDR, 3);
++              ADD_RANGE(R500_US_FC_CTRL, 1);
++              ADD_RANGE(R500_RS_IP_0, 16);
++              ADD_RANGE(R500_RS_INST_0, 16);
++              ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
++              ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
++              ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
++      } else {
++              ADD_RANGE(R300_PFS_CNTL_0, 3);
++              ADD_RANGE(R300_PFS_NODE_0, 4);
++              ADD_RANGE(R300_PFS_TEXI_0, 64);
++              ADD_RANGE(R300_PFS_INSTR0_0, 64);
++              ADD_RANGE(R300_PFS_INSTR1_0, 64);
++              ADD_RANGE(R300_PFS_INSTR2_0, 64);
++              ADD_RANGE(R300_PFS_INSTR3_0, 64);
++              ADD_RANGE(R300_RS_INTERP_0, 8);
++              ADD_RANGE(R300_RS_ROUTE_0, 8);
++
++      }
++}
++
++static __inline__ int r300_check_range(unsigned reg, int count)
++{
++      int i;
++      if (reg & ~0xffff)
++              return -1;
++      for (i = (reg >> 2); i < (reg >> 2) + count; i++)
++              if (r300_reg_flags[i] != MARK_SAFE)
++                      return 1;
++      return 0;
++}
++
++static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
++                                                        dev_priv,
++                                                        drm_radeon_kcmd_buffer_t
++                                                        * cmdbuf,
++                                                        drm_r300_cmd_header_t
++                                                        header)
++{
++      int reg;
++      int sz;
++      int i;
++      int values[64];
++      RING_LOCALS;
++
++      sz = header.packet0.count;
++      reg = (header.packet0.reghi << 8) | header.packet0.reglo;
++
++      if ((sz > 64) || (sz < 0)) {
++              DRM_ERROR
++                  ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
++                   reg, sz);
++              return -EINVAL;
++      }
++      for (i = 0; i < sz; i++) {
++              values[i] = ((int *)cmdbuf->buf)[i];
++              switch (r300_reg_flags[(reg >> 2) + i]) {
++              case MARK_SAFE:
++                      break;
++              case MARK_CHECK_OFFSET:
++                      if (!radeon_check_offset(dev_priv, (u32) values[i])) {
++                              DRM_ERROR
++                                  ("Offset failed range check (reg=%04x sz=%d)\n",
++                                   reg, sz);
++                              return -EINVAL;
++                      }
++                      break;
++              default:
++                      DRM_ERROR("Register %04x failed check as flag=%02x\n",
++                                reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
++                      return -EINVAL;
++              }
++      }
++
++      BEGIN_RING(1 + sz);
++      OUT_RING(CP_PACKET0(reg, sz - 1));
++      OUT_RING_TABLE(values, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * 4;
++      cmdbuf->bufsz -= sz * 4;
++
++      return 0;
++}
++
++/**
++ * Emits a packet0 setting arbitrary registers.
++ * Called by r300_do_cp_cmdbuf.
++ *
++ * Note that checks are performed on contents and addresses of the registers
++ */
++static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
++                                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                                      drm_r300_cmd_header_t header)
++{
++      int reg;
++      int sz;
++      RING_LOCALS;
++
++      sz = header.packet0.count;
++      reg = (header.packet0.reghi << 8) | header.packet0.reglo;
++
++      DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz);
++      if (!sz)
++              return 0;
++
++      if (sz * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      if (reg + sz * 4 >= 0x10000) {
++              DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
++                        sz);
++              return -EINVAL;
++      }
++
++      if (r300_check_range(reg, sz)) {
++              /* go and check everything */
++              return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
++                                                         header);
++      }
++      /* the rest of the data is safe to emit, whatever the values the user passed */
++
++      BEGIN_RING(1 + sz);
++      OUT_RING(CP_PACKET0(reg, sz - 1));
++      OUT_RING_TABLE((int *)cmdbuf->buf, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * 4;
++      cmdbuf->bufsz -= sz * 4;
++
++      return 0;
++}
++
++/**
++ * Uploads user-supplied vertex program instructions or parameters onto
++ * the graphics card.
++ * Called by r300_do_cp_cmdbuf.
++ */
++static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
++                                  drm_radeon_kcmd_buffer_t *cmdbuf,
++                                  drm_r300_cmd_header_t header)
++{
++      int sz;
++      int addr;
++      RING_LOCALS;
++
++      sz = header.vpu.count;
++      addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
++
++      if (!sz)
++              return 0;
++      if (sz * 16 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      /* VAP is very sensitive so we purge cache before we program it
++       * and we also flush its state before & after */
++      BEGIN_RING(6);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(R300_RB3D_DC_FLUSH);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++      /* set flush flag */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
++
++      BEGIN_RING(3 + sz * 4);
++      OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
++      OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
++      OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
++      ADVANCE_RING();
++
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * 16;
++      cmdbuf->bufsz -= sz * 16;
++
++      return 0;
++}
++
++/**
++ * Emit a clear packet from userspace.
++ * Called by r300_emit_packet3.
++ */
++static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
++                                    drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      RING_LOCALS;
++
++      if (8 * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      BEGIN_RING(10);
++      OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
++      OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
++               (1 << R300_PRIM_NUM_VERTICES_SHIFT));
++      OUT_RING_TABLE((int *)cmdbuf->buf, 8);
++      ADVANCE_RING();
++
++      BEGIN_RING(4);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(R300_RB3D_DC_FLUSH);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      ADVANCE_RING();
++      /* set flush flag */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED;
++
++      cmdbuf->buf += 8 * 4;
++      cmdbuf->bufsz -= 8 * 4;
++
++      return 0;
++}
++
++static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
++                                             drm_radeon_kcmd_buffer_t *cmdbuf,
++                                             u32 header)
++{
++      int count, i, k;
++#define MAX_ARRAY_PACKET  64
++      u32 payload[MAX_ARRAY_PACKET];
++      u32 narrays;
++      RING_LOCALS;
++
++      count = (header >> 16) & 0x3fff;
++
++      if ((count + 1) > MAX_ARRAY_PACKET) {
++              DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
++                        count);
++              return -EINVAL;
++      }
++      memset(payload, 0, MAX_ARRAY_PACKET * 4);
++      memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
++
++      /* carefully check packet contents */
++
++      narrays = payload[0];
++      k = 0;
++      i = 1;
++      while ((k < narrays) && (i < (count + 1))) {
++              i++;            /* skip attribute field */
++              if (!radeon_check_offset(dev_priv, payload[i])) {
++                      DRM_ERROR
++                          ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
++                           k, i);
++                      return -EINVAL;
++              }
++              k++;
++              i++;
++              if (k == narrays)
++                      break;
++              /* have one more to process, they come in pairs */
++              if (!radeon_check_offset(dev_priv, payload[i])) {
++                      DRM_ERROR
++                          ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
++                           k, i);
++                      return -EINVAL;
++              }
++              k++;
++              i++;
++      }
++      /* do the counts match what we expect ? */
++      if ((k != narrays) || (i != (count + 1))) {
++              DRM_ERROR
++                  ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
++                   k, i, narrays, count + 1);
++              return -EINVAL;
++      }
++
++      /* all clear, output packet */
++
++      BEGIN_RING(count + 2);
++      OUT_RING(header);
++      OUT_RING_TABLE(payload, count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count + 2) * 4;
++      cmdbuf->bufsz -= (count + 2) * 4;
++
++      return 0;
++}
++
++static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
++                                           drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      u32 *cmd = (u32 *) cmdbuf->buf;
++      int count, ret;
++      RING_LOCALS;
++
++      count=(cmd[0]>>16) & 0x3fff;
++
++      if (cmd[0] & 0x8000) {
++              u32 offset;
++
++              if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
++                            | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[2] << 10;
++                      ret = !radeon_check_offset(dev_priv, offset);
++                      if (ret) {
++                              DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
++                              return -EINVAL;
++                      }
++              }
++
++              if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
++                  (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[3] << 10;
++                      ret = !radeon_check_offset(dev_priv, offset);
++                      if (ret) {
++                              DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
++                              return -EINVAL;
++                      }
++
++              }
++      }
++
++      BEGIN_RING(count+2);
++      OUT_RING(cmd[0]);
++      OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count+2)*4;
++      cmdbuf->bufsz -= (count+2)*4;
++
++      return 0;
++}
++
++static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
++                                          drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      u32 *cmd;
++      int count;
++      int expected_count;
++      RING_LOCALS;
++
++      cmd = (u32 *) cmdbuf->buf;
++      count = (cmd[0]>>16) & 0x3fff;
++      expected_count = cmd[1] >> 16;
++      if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
++              expected_count = (expected_count+1)/2;
++
++      if (count && count != expected_count) {
++              DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
++                      count, expected_count);
++              return -EINVAL;
++      }
++
++      BEGIN_RING(count+2);
++      OUT_RING(cmd[0]);
++      OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count+2)*4;
++      cmdbuf->bufsz -= (count+2)*4;
++
++      if (!count) {
++              drm_r300_cmd_header_t header;
++
++              if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
++                      DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
++                      return -EINVAL;
++              }
++
++              header.u = *(unsigned int *)cmdbuf->buf;
++
++              cmdbuf->buf += sizeof(header);
++              cmdbuf->bufsz -= sizeof(header);
++              cmd = (u32 *) cmdbuf->buf;
++
++              if (header.header.cmd_type != R300_CMD_PACKET3 ||
++                  header.packet3.packet != R300_CMD_PACKET3_RAW ||
++                  cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
++                      DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
++                      return -EINVAL;
++              }
++
++              if ((cmd[1] & 0x8000ffff) != 0x80000810) {
++                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
++                      return -EINVAL;
++              }
++              if (!radeon_check_offset(dev_priv, cmd[2])) {
++                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
++                      return -EINVAL;
++              }
++              if (cmd[3] != expected_count) {
++                      DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
++                              cmd[3], expected_count);
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(4);
++              OUT_RING(cmd[0]);
++              OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
++              ADVANCE_RING();
++
++              cmdbuf->buf += 4*4;
++              cmdbuf->bufsz -= 4*4;
++      }
++
++      return 0;
++}
++
++static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
++                                          drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      u32 header;
++      int count;
++      RING_LOCALS;
++
++      if (4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      /* Fixme !! This simply emits a packet without much checking.
++         We need to be smarter. */
++
++      /* obtain first word - actual packet3 header */
++      header = *(u32 *) cmdbuf->buf;
++
++      /* Is it packet 3 ? */
++      if ((header >> 30) != 0x3) {
++              DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
++              return -EINVAL;
++      }
++
++      count = (header >> 16) & 0x3fff;
++
++      /* Check again now that we know how much data to expect */
++      if ((count + 2) * 4 > cmdbuf->bufsz) {
++              DRM_ERROR
++                  ("Expected packet3 of length %d but have only %d bytes left\n",
++                   (count + 2) * 4, cmdbuf->bufsz);
++              return -EINVAL;
++      }
++
++      /* Is it a packet type we know about ? */
++      switch (header & 0xff00) {
++      case RADEON_3D_LOAD_VBPNTR:     /* load vertex array pointers */
++              return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
++
++      case RADEON_CNTL_BITBLT_MULTI:
++              return r300_emit_bitblt_multi(dev_priv, cmdbuf);
++
++      case RADEON_CP_INDX_BUFFER:
++              DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
++              return -EINVAL;
++      case RADEON_CP_3D_DRAW_IMMD_2:
++              /* triggers drawing using in-packet vertex data */
++      case RADEON_CP_3D_DRAW_VBUF_2:
++              /* triggers drawing of vertex buffers setup elsewhere */
++              dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
++                                         RADEON_PURGE_EMITED);
++              break;
++      case RADEON_CP_3D_DRAW_INDX_2:
++              /* triggers drawing using indices to vertex buffer */
++              /* whenever we send vertex we clear flush & purge */
++              dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
++                                         RADEON_PURGE_EMITED);
++              return r300_emit_draw_indx_2(dev_priv, cmdbuf);
++      case RADEON_WAIT_FOR_IDLE:
++      case RADEON_CP_NOP:
++              /* these packets are safe */
++              break;
++      default:
++              DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
++              return -EINVAL;
++      }
++
++      BEGIN_RING(count + 2);
++      OUT_RING(header);
++      OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
++      ADVANCE_RING();
++
++      cmdbuf->buf += (count + 2) * 4;
++      cmdbuf->bufsz -= (count + 2) * 4;
++
++      return 0;
++}
++
++/**
++ * Emit a rendering packet3 from userspace.
++ * Called by r300_do_cp_cmdbuf.
++ */
++static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
++                                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                                      drm_r300_cmd_header_t header)
++{
++      int n;
++      int ret;
++      char *orig_buf = cmdbuf->buf;
++      int orig_bufsz = cmdbuf->bufsz;
++
++      /* This is a do-while-loop so that we run the interior at least once,
++       * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
++       */
++      n = 0;
++      do {
++              if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
++                      ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
++                      if (ret)
++                              return ret;
++
++                      cmdbuf->buf = orig_buf;
++                      cmdbuf->bufsz = orig_bufsz;
++              }
++
++              switch (header.packet3.packet) {
++              case R300_CMD_PACKET3_CLEAR:
++                      DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
++                      ret = r300_emit_clear(dev_priv, cmdbuf);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_clear failed\n");
++                              return ret;
++                      }
++                      break;
++
++              case R300_CMD_PACKET3_RAW:
++                      DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
++                      ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_raw_packet3 failed\n");
++                              return ret;
++                      }
++                      break;
++
++              default:
++                      DRM_ERROR("bad packet3 type %i at %p\n",
++                                header.packet3.packet,
++                                cmdbuf->buf - sizeof(header));
++                      return -EINVAL;
++              }
++
++              n += R300_SIMULTANEOUS_CLIPRECTS;
++      } while (n < cmdbuf->nbox);
++
++      return 0;
++}
++
++/* Some of the R300 chips seem to be extremely touchy about the two registers
++ * that are configured in r300_pacify.
++ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
++ * sends a command buffer that contains only state setting commands and a
++ * vertex program/parameter upload sequence, this will eventually lead to a
++ * lockup, unless the sequence is bracketed by calls to r300_pacify.
++ * So we should take great care to *always* call r300_pacify before
++ * *anything* 3D related, and again afterwards. This is what the
++ * call bracket in r300_do_cp_cmdbuf is for.
++ */
++
++/**
++ * Emit the sequence to pacify R300.
++ */
++static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
++{
++      uint32_t cache_z, cache_3d, cache_2d;
++      RING_LOCALS;
++
++      cache_z = R300_ZC_FLUSH;
++      cache_2d = R300_RB2D_DC_FLUSH;
++      cache_3d = R300_RB3D_DC_FLUSH;
++      if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
++              /* we can purge, primitive where draw since last purge */
++              cache_z |= R300_ZC_FREE;
++              cache_2d |= R300_RB2D_DC_FREE;
++              cache_3d |= R300_RB3D_DC_FREE;
++      }
++
++      /* flush & purge zbuffer */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
++      OUT_RING(cache_z);
++      ADVANCE_RING();
++      /* flush & purge 3d */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(cache_3d);
++      ADVANCE_RING();
++      /* flush & purge texture */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++      /* FIXME: is this one really needed ? */
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
++      OUT_RING(0);
++      ADVANCE_RING();
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
++      ADVANCE_RING();
++      /* flush & purge 2d through E2 as RB2D will trigger lockup */
++      BEGIN_RING(4);
++      OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
++      OUT_RING(cache_2d);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
++               RADEON_WAIT_HOST_IDLECLEAN);
++      ADVANCE_RING();
++      /* set flush & purge flags */
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
++}
++
++/**
++ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
++ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
++ * be careful about how this function is called.
++ */
++static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
++
++      buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
++      buf->pending = 1;
++      buf->used = 0;
++}
++
++static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
++                        drm_r300_cmd_header_t header)
++{
++      u32 wait_until;
++      RING_LOCALS;
++
++      if (!header.wait.flags)
++              return;
++
++      wait_until = 0;
++
++      switch(header.wait.flags) {
++      case R300_WAIT_2D:
++              wait_until = RADEON_WAIT_2D_IDLE;
++              break;
++      case R300_WAIT_3D:
++              wait_until = RADEON_WAIT_3D_IDLE;
++              break;
++      case R300_NEW_WAIT_2D_3D:
++              wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
++              break;
++      case R300_NEW_WAIT_2D_2D_CLEAN:
++              wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
++              break;
++      case R300_NEW_WAIT_3D_3D_CLEAN:
++              wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
++              break;
++      case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
++              wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
++              wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
++              break;
++      default:
++              return;
++      }
++
++      BEGIN_RING(2);
++      OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
++      OUT_RING(wait_until);
++      ADVANCE_RING();
++}
++
++static int r300_scratch(drm_radeon_private_t *dev_priv,
++                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                      drm_r300_cmd_header_t header)
++{
++      u32 *ref_age_base;
++      u32 i, buf_idx, h_pending;
++      RING_LOCALS;
++
++      if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) {
++              return -EINVAL;
++      }
++
++      if (header.scratch.reg >= 5) {
++              return -EINVAL;
++      }
++
++      dev_priv->scratch_ages[header.scratch.reg] ++;
++
++      ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
++
++      cmdbuf->buf += sizeof(uint64_t);
++      cmdbuf->bufsz -= sizeof(uint64_t);
++
++      for (i=0; i < header.scratch.n_bufs; i++) {
++              buf_idx = *(u32 *)cmdbuf->buf;
++              buf_idx *= 2; /* 8 bytes per buf */
++
++              if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
++                      return -EINVAL;
++              }
++
++              if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
++                      return -EINVAL;
++              }
++
++              if (h_pending == 0) {
++                      return -EINVAL;
++              }
++
++              h_pending--;
++
++              if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
++                      return -EINVAL;
++              }
++
++              cmdbuf->buf += sizeof(buf_idx);
++              cmdbuf->bufsz -= sizeof(buf_idx);
++      }
++
++      BEGIN_RING(2);
++      OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
++      OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
++      ADVANCE_RING();
++
++      return 0;
++}
++
++/**
++ * Uploads user-supplied vertex program instructions or parameters onto
++ * the graphics card.
++ * Called by r300_do_cp_cmdbuf.
++ */
++static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
++                                     drm_radeon_kcmd_buffer_t *cmdbuf,
++                                     drm_r300_cmd_header_t header)
++{
++      int sz;
++      int addr;
++      int type;
++      int clamp;
++      int stride;
++      RING_LOCALS;
++
++      sz = header.r500fp.count;
++      /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
++      addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
++
++      type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
++      clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
++
++      addr |= (type << 16);
++      addr |= (clamp << 17);
++
++      stride = type ? 4 : 6;
++
++      DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
++      if (!sz)
++              return 0;
++      if (sz * stride * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      BEGIN_RING(3 + sz * stride);
++      OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
++      OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
++      OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
++
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * stride * 4;
++      cmdbuf->bufsz -= sz * stride * 4;
++
++      return 0;
++}
++
++
++/**
++ * Parses and validates a user-supplied command buffer and emits appropriate
++ * commands on the DMA ring buffer.
++ * Called by the ioctl handler function radeon_cp_cmdbuf.
++ */
++int r300_do_cp_cmdbuf(struct drm_device *dev,
++                    struct drm_file *file_priv,
++                    drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf = NULL;
++      int emit_dispatch_age = 0;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      /* pacify */
++      r300_pacify(dev_priv);
++
++      if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
++              ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
++              if (ret)
++                      goto cleanup;
++      }
++
++      while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
++              int idx;
++              drm_r300_cmd_header_t header;
++
++              header.u = *(unsigned int *)cmdbuf->buf;
++
++              cmdbuf->buf += sizeof(header);
++              cmdbuf->bufsz -= sizeof(header);
++
++              switch (header.header.cmd_type) {
++              case R300_CMD_PACKET0:
++                      ret = r300_emit_packet0(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_packet0 failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_VPU:
++                      DRM_DEBUG("R300_CMD_VPU\n");
++                      ret = r300_emit_vpu(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_vpu failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_PACKET3:
++                      DRM_DEBUG("R300_CMD_PACKET3\n");
++                      ret = r300_emit_packet3(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_packet3 failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_END3D:
++                      DRM_DEBUG("R300_CMD_END3D\n");
++                      /* TODO:
++                         Ideally userspace driver should not need to issue this call,
++                         i.e. the drm driver should issue it automatically and prevent
++                         lockups.
++
++                         In practice, we do not understand why this call is needed and what
++                         it does (except for some vague guesses that it has to do with cache
++                         coherence) and so the user space driver does it.
++
++                         Once we are sure which uses prevent lockups the code could be moved
++                         into the kernel and the userspace driver will not
++                         need to use this command.
++
++                         Note that issuing this command does not hurt anything
++                         except, possibly, performance */
++                      r300_pacify(dev_priv);
++                      break;
++
++              case R300_CMD_CP_DELAY:
++                      /* simple enough, we can do it here */
++                      DRM_DEBUG("R300_CMD_CP_DELAY\n");
++                      {
++                              int i;
++                              RING_LOCALS;
++
++                              BEGIN_RING(header.delay.count);
++                              for (i = 0; i < header.delay.count; i++)
++                                      OUT_RING(RADEON_CP_PACKET2);
++                              ADVANCE_RING();
++                      }
++                      break;
++
++              case R300_CMD_DMA_DISCARD:
++                      DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
++                      idx = header.dma.buf_idx;
++                      if (idx < 0 || idx >= dma->buf_count) {
++                              DRM_ERROR("buffer index %d (of %d max)\n",
++                                        idx, dma->buf_count - 1);
++                              ret = -EINVAL;
++                              goto cleanup;
++                      }
++
++                      buf = dma->buflist[idx];
++                      if (buf->file_priv != file_priv || buf->pending) {
++                              DRM_ERROR("bad buffer %p %p %d\n",
++                                        buf->file_priv, file_priv,
++                                        buf->pending);
++                              ret = -EINVAL;
++                              goto cleanup;
++                      }
++
++                      emit_dispatch_age = 1;
++                      r300_discard_buffer(dev, buf);
++                      break;
++
++              case R300_CMD_WAIT:
++                      DRM_DEBUG("R300_CMD_WAIT\n");
++                      r300_cmd_wait(dev_priv, header);
++                      break;
++
++              case R300_CMD_SCRATCH:
++                      DRM_DEBUG("R300_CMD_SCRATCH\n");
++                      ret = r300_scratch(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_scratch failed\n");
++                              goto cleanup;
++                      }
++                      break;
++
++              case R300_CMD_R500FP:
++                      if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
++                              DRM_ERROR("Calling r500 command on r300 card\n");
++                              ret = -EINVAL;
++                              goto cleanup;
++                      }
++                      DRM_DEBUG("R300_CMD_R500FP\n");
++                      ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
++                      if (ret) {
++                              DRM_ERROR("r300_emit_r500fp failed\n");
++                              goto cleanup;
++                      }
++                      break;
++              default:
++                      DRM_ERROR("bad cmd_type %i at %p\n",
++                                header.header.cmd_type,
++                                cmdbuf->buf - sizeof(header));
++                      ret = -EINVAL;
++                      goto cleanup;
++              }
++      }
++
++      DRM_DEBUG("END\n");
++
++      cleanup:
++      r300_pacify(dev_priv);
++
++      /* We emit the vertex buffer age here, outside the pacifier "brackets"
++       * for two reasons:
++       *  (1) This may coalesce multiple age emissions into a single one and
++       *  (2) more importantly, some chips lock up hard when scratch registers
++       *      are written inside the pacifier bracket.
++       */
++      if (emit_dispatch_age) {
++              RING_LOCALS;
++
++              /* Emit the vertex buffer age */
++              BEGIN_RING(2);
++              RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
++              ADVANCE_RING();
++      }
++
++      COMMIT_RING();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/r300_reg.h git-nokia/drivers/gpu/drm-tungsten/r300_reg.h
+--- git/drivers/gpu/drm-tungsten/r300_reg.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/r300_reg.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1778 @@
++/**************************************************************************
++
++Copyright (C) 2004-2005 Nicolai Haehnle et al.
++
++Permission is hereby granted, free of charge, to any person obtaining a
++copy of this software and associated documentation files (the "Software"),
++to deal in the Software without restriction, including without limitation
++on the rights to use, copy, modify, merge, publish, distribute, sub
++license, and/or sell copies of the Software, and to permit persons to whom
++the Software is furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice (including the next
++paragraph) shall be included in all copies or substantial portions of the
++Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
++DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++USE OR OTHER DEALINGS IN THE SOFTWARE.
++
++**************************************************************************/
++
++/* *INDENT-OFF* */
++
++#ifndef _R300_REG_H
++#define _R300_REG_H
++
++#define R300_MC_INIT_MISC_LAT_TIMER   0x180
++#     define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT      0
++#     define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT       4
++#     define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT   8
++#     define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT   12
++#     define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT    16
++#     define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT      20
++#     define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT    24
++#     define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT    28
++
++
++#define R300_MC_INIT_GFX_LAT_TIMER    0x154
++#     define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT    0
++#     define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT    4
++#     define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT    8
++#     define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT    12
++#     define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT     16
++#     define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT     20
++#     define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT    24
++#     define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT    28
++
++/*
++ * This file contains registers and constants for the R300. They have been
++ * found mostly by examining command buffers captured using glxtest, as well
++ * as by extrapolating some known registers and constants from the R200.
++ * I am fairly certain that they are correct unless stated otherwise
++ * in comments.
++ */
++
++#define R300_SE_VPORT_XSCALE                0x1D98
++#define R300_SE_VPORT_XOFFSET               0x1D9C
++#define R300_SE_VPORT_YSCALE                0x1DA0
++#define R300_SE_VPORT_YOFFSET               0x1DA4
++#define R300_SE_VPORT_ZSCALE                0x1DA8
++#define R300_SE_VPORT_ZOFFSET               0x1DAC
++
++
++/*
++ * Vertex Array Processing (VAP) Control
++ * Stolen from r200 code from Christoph Brill (It's a guess!)
++ */
++#define R300_VAP_CNTL 0x2080
++
++/* This register is written directly and also starts data section
++ * in many 3d CP_PACKET3's
++ */
++#define R300_VAP_VF_CNTL      0x2084
++#     define  R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
++#     define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
++#     define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
++
++#     define  R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
++      /* State based - direct writes to registers trigger vertex
++           generation */
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
++#     define  R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
++
++      /* I don't think I saw these three used.. */
++#     define  R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
++#     define  R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
++#     define  R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
++
++      /* index size - when not set the indices are assumed to be 16 bit */
++#     define  R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
++      /* number of vertices */
++#     define  R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
++
++/* BEGIN: Wild guesses */
++#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
++#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
++#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
++#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
++
++#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
++      /* each of the following is 3 bits wide, specifies number
++         of components */
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
++#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
++/* END: Wild guesses */
++
++#define R300_SE_VTE_CNTL                  0x20b0
++#     define     R300_VPORT_X_SCALE_ENA                0x00000001
++#     define     R300_VPORT_X_OFFSET_ENA               0x00000002
++#     define     R300_VPORT_Y_SCALE_ENA                0x00000004
++#     define     R300_VPORT_Y_OFFSET_ENA               0x00000008
++#     define     R300_VPORT_Z_SCALE_ENA                0x00000010
++#     define     R300_VPORT_Z_OFFSET_ENA               0x00000020
++#     define     R300_VTX_XY_FMT                       0x00000100
++#     define     R300_VTX_Z_FMT                        0x00000200
++#     define     R300_VTX_W0_FMT                       0x00000400
++#     define     R300_VTX_W0_NORMALIZE                 0x00000800
++#     define     R300_VTX_ST_DENORMALIZED              0x00001000
++
++/* BEGIN: Vertex data assembly - lots of uncertainties */
++
++/* gap */
++
++#define R300_VAP_CNTL_STATUS              0x2140
++#     define R300_VC_NO_SWAP                  (0 << 0)
++#     define R300_VC_16BIT_SWAP               (1 << 0)
++#     define R300_VC_32BIT_SWAP               (2 << 0)
++#     define R300_VAP_TCL_BYPASS              (1 << 8)
++
++/* gap */
++
++/* Where do we get our vertex data?
++ *
++ * Vertex data either comes either from immediate mode registers or from
++ * vertex arrays.
++ * There appears to be no mixed mode (though we can force the pitch of
++ * vertex arrays to 0, effectively reusing the same element over and over
++ * again).
++ *
++ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
++ * if these registers influence vertex array processing.
++ *
++ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
++ *
++ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
++ *
++ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
++ * into the vertex processor's input registers.
++ * The first word routes the first input, the second word the second, etc.
++ * The corresponding input is routed into the register with the given index.
++ * The list is ended by a word with INPUT_ROUTE_END set.
++ *
++ * Always set COMPONENTS_4 in immediate mode.
++ */
++
++#define R300_VAP_INPUT_ROUTE_0_0            0x2150
++#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
++#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
++#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
++#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
++#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
++#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
++#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
++#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
++#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
++#define R300_VAP_INPUT_ROUTE_0_1            0x2154
++#define R300_VAP_INPUT_ROUTE_0_2            0x2158
++#define R300_VAP_INPUT_ROUTE_0_3            0x215C
++#define R300_VAP_INPUT_ROUTE_0_4            0x2160
++#define R300_VAP_INPUT_ROUTE_0_5            0x2164
++#define R300_VAP_INPUT_ROUTE_0_6            0x2168
++#define R300_VAP_INPUT_ROUTE_0_7            0x216C
++
++/* gap */
++
++/* Notes:
++ *  - always set up to produce at least two attributes:
++ *    if vertex program uses only position, fglrx will set normal, too
++ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
++ */
++#define R300_VAP_INPUT_CNTL_0               0x2180
++#       define R300_INPUT_CNTL_0_COLOR           0x00000001
++#define R300_VAP_INPUT_CNTL_1               0x2184
++#       define R300_INPUT_CNTL_POS               0x00000001
++#       define R300_INPUT_CNTL_NORMAL            0x00000002
++#       define R300_INPUT_CNTL_COLOR             0x00000004
++#       define R300_INPUT_CNTL_TC0               0x00000400
++#       define R300_INPUT_CNTL_TC1               0x00000800
++#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
++#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
++#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
++#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
++#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
++#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
++
++/* gap */
++
++/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
++ * are set to a swizzling bit pattern, other words are 0.
++ *
++ * In immediate mode, the pattern is always set to xyzw. In vertex array
++ * mode, the swizzling pattern is e.g. used to set zw components in texture
++ * coordinates with only two components.
++ */
++#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
++#       define R300_INPUT_ROUTE_SELECT_X    0
++#       define R300_INPUT_ROUTE_SELECT_Y    1
++#       define R300_INPUT_ROUTE_SELECT_Z    2
++#       define R300_INPUT_ROUTE_SELECT_W    3
++#       define R300_INPUT_ROUTE_SELECT_ZERO 4
++#       define R300_INPUT_ROUTE_SELECT_ONE  5
++#       define R300_INPUT_ROUTE_SELECT_MASK 7
++#       define R300_INPUT_ROUTE_X_SHIFT     0
++#       define R300_INPUT_ROUTE_Y_SHIFT     3
++#       define R300_INPUT_ROUTE_Z_SHIFT     6
++#       define R300_INPUT_ROUTE_W_SHIFT     9
++#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
++#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
++#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
++#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
++#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
++#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
++#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
++#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
++
++/* END: Vertex data assembly */
++
++/* gap */
++
++/* BEGIN: Upload vertex program and data */
++
++/*
++ * The programmable vertex shader unit has a memory bank of unknown size
++ * that can be written to in 16 byte units by writing the address into
++ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
++ *
++ * Pointers into the memory bank are always in multiples of 16 bytes.
++ *
++ * The memory bank is divided into areas with fixed meaning.
++ *
++ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
++ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
++ * whereas the difference between known addresses suggests size 512.
++ *
++ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
++ * Native reported limits and the VPI layout suggest size 256, whereas
++ * difference between known addresses suggests size 512.
++ *
++ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
++ * floating point pointsize. The exact purpose of this state is uncertain,
++ * as there is also the R300_RE_POINTSIZE register.
++ *
++ * Multiple vertex programs and parameter sets can be loaded at once,
++ * which could explain the size discrepancy.
++ */
++#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
++#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
++#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
++#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
++
++/* gap */
++
++#define R300_VAP_PVS_UPLOAD_DATA            0x2208
++
++/* END: Upload vertex program and data */
++
++/* gap */
++
++/* I do not know the purpose of this register. However, I do know that
++ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
++ * for normal rendering.
++ */
++#define R300_VAP_UNKNOWN_221C               0x221C
++#       define R300_221C_NORMAL                  0x00000000
++#       define R300_221C_CLEAR                   0x0001C000
++
++/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
++ * plane is per-pixel and the second plane is per-vertex.
++ *
++ * This was determined by experimentation alone but I believe it is correct.
++ *
++ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
++ */
++#define R300_VAP_CLIP_X_0                   0x2220
++#define R300_VAP_CLIP_X_1                   0x2224
++#define R300_VAP_CLIP_Y_0                   0x2228
++#define R300_VAP_CLIP_Y_1                   0x2230
++
++/* gap */
++
++/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
++ * rendering commands and overwriting vertex program parameters.
++ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
++ * avoids bugs caused by still running shaders reading bad data from memory.
++ */
++#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
++
++/* Absolutely no clue what this register is about. */
++#define R300_VAP_UNKNOWN_2288               0x2288
++#       define R300_2288_R300                    0x00750000 /* -- nh */
++#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
++
++/* gap */
++
++/* Addresses are relative to the vertex program instruction area of the
++ * memory bank. PROGRAM_END points to the last instruction of the active
++ * program
++ *
++ * The meaning of the two UNKNOWN fields is obviously not known. However,
++ * experiments so far have shown that both *must* point to an instruction
++ * inside the vertex program, otherwise the GPU locks up.
++ *
++ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
++ * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to
++ * position takes place.
++ *
++ * Most likely this is used to ignore the rest of the program in cases
++ * where a group of vertices is not visible. For some reason this "section"
++ * sometimes accepts other instructions that have no relationship with
++ * position calculations.
++ */
++#define R300_VAP_PVS_CNTL_1                 0x22D0
++#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
++#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
++#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
++/* Addresses are relative to the vertex program parameters area. */
++#define R300_VAP_PVS_CNTL_2                 0x22D4
++#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
++#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
++#define R300_VAP_PVS_CNTL_3              0x22D8
++#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
++#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
++
++/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
++ * immediate vertices
++ */
++#define R300_VAP_VTX_COLOR_R                0x2464
++#define R300_VAP_VTX_COLOR_G                0x2468
++#define R300_VAP_VTX_COLOR_B                0x246C
++#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
++#define R300_VAP_VTX_POS_0_Y_1              0x2494
++#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
++#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
++#define R300_VAP_VTX_POS_0_Y_2              0x24A4
++#define R300_VAP_VTX_POS_0_Z_2              0x24A8
++/* write 0 to indicate end of packet? */
++#define R300_VAP_VTX_END_OF_PKT             0x24AC
++
++/* gap */
++
++/* These are values from r300_reg/r300_reg.h - they are known to be correct
++ * and are here so we can use one register file instead of several
++ * - Vladimir
++ */
++#define R300_GB_VAP_RASTER_VTX_FMT_0  0x4000
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT        (1<<0)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT    (1<<1)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT    (1<<2)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT    (1<<3)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT    (1<<4)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE        (0xf<<5)
++#     define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT    (0x1<<16)
++
++#define R300_GB_VAP_RASTER_VTX_FMT_1  0x4004
++      /* each of the following is 3 bits wide, specifies number
++         of components */
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT       0
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT       3
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT       6
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT       9
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT       12
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT       15
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT       18
++#     define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT       21
++
++/* UNK30 seems to enable point-to-quad transformation on textures
++ * (or something closely related to that).
++ * This bit is rather fatal at the time being due to shortcomings on the
++ * pixel shader side
++ */
++#define R300_GB_ENABLE        0x4008
++#     define R300_GB_POINT_STUFF_ENABLE       (1<<0)
++#     define R300_GB_LINE_STUFF_ENABLE        (1<<1)
++#     define R300_GB_TRIANGLE_STUFF_ENABLE    (1<<2)
++#     define R300_GB_STENCIL_AUTO_ENABLE      (1<<4)
++#     define R300_GB_UNK31                    (1<<31)
++      /* each of the following is 2 bits wide */
++#define R300_GB_TEX_REPLICATE 0
++#define R300_GB_TEX_ST                1
++#define R300_GB_TEX_STR               2
++#     define R300_GB_TEX0_SOURCE_SHIFT        16
++#     define R300_GB_TEX1_SOURCE_SHIFT        18
++#     define R300_GB_TEX2_SOURCE_SHIFT        20
++#     define R300_GB_TEX3_SOURCE_SHIFT        22
++#     define R300_GB_TEX4_SOURCE_SHIFT        24
++#     define R300_GB_TEX5_SOURCE_SHIFT        26
++#     define R300_GB_TEX6_SOURCE_SHIFT        28
++#     define R300_GB_TEX7_SOURCE_SHIFT        30
++
++/* MSPOS - positions for multisample antialiasing (?) */
++#define R300_GB_MSPOS0        0x4010
++      /* shifts - each of the fields is 4 bits */
++#     define R300_GB_MSPOS0__MS_X0_SHIFT      0
++#     define R300_GB_MSPOS0__MS_Y0_SHIFT      4
++#     define R300_GB_MSPOS0__MS_X1_SHIFT      8
++#     define R300_GB_MSPOS0__MS_Y1_SHIFT      12
++#     define R300_GB_MSPOS0__MS_X2_SHIFT      16
++#     define R300_GB_MSPOS0__MS_Y2_SHIFT      20
++#     define R300_GB_MSPOS0__MSBD0_Y          24
++#     define R300_GB_MSPOS0__MSBD0_X          28
++
++#define R300_GB_MSPOS1        0x4014
++#     define R300_GB_MSPOS1__MS_X3_SHIFT      0
++#     define R300_GB_MSPOS1__MS_Y3_SHIFT      4
++#     define R300_GB_MSPOS1__MS_X4_SHIFT      8
++#     define R300_GB_MSPOS1__MS_Y4_SHIFT      12
++#     define R300_GB_MSPOS1__MS_X5_SHIFT      16
++#     define R300_GB_MSPOS1__MS_Y5_SHIFT      20
++#     define R300_GB_MSPOS1__MSBD1            24
++
++
++#define R300_GB_TILE_CONFIG   0x4018
++#     define R300_GB_TILE_ENABLE      (1<<0)
++#     define R300_GB_TILE_PIPE_COUNT_RV300    0
++#     define R300_GB_TILE_PIPE_COUNT_R300     (3<<1)
++#     define R300_GB_TILE_PIPE_COUNT_R420     (7<<1)
++#     define R300_GB_TILE_PIPE_COUNT_RV410    (3<<1)
++#     define R300_GB_TILE_SIZE_8              0
++#     define R300_GB_TILE_SIZE_16             (1<<4)
++#     define R300_GB_TILE_SIZE_32             (2<<4)
++#     define R300_GB_SUPER_SIZE_1             (0<<6)
++#     define R300_GB_SUPER_SIZE_2             (1<<6)
++#     define R300_GB_SUPER_SIZE_4             (2<<6)
++#     define R300_GB_SUPER_SIZE_8             (3<<6)
++#     define R300_GB_SUPER_SIZE_16            (4<<6)
++#     define R300_GB_SUPER_SIZE_32            (5<<6)
++#     define R300_GB_SUPER_SIZE_64            (6<<6)
++#     define R300_GB_SUPER_SIZE_128           (7<<6)
++#     define R300_GB_SUPER_X_SHIFT            9       /* 3 bits wide */
++#     define R300_GB_SUPER_Y_SHIFT            12      /* 3 bits wide */
++#     define R300_GB_SUPER_TILE_A             0
++#     define R300_GB_SUPER_TILE_B             (1<<15)
++#     define R300_GB_SUBPIXEL_1_12            0
++#     define R300_GB_SUBPIXEL_1_16            (1<<16)
++
++#define R300_GB_FIFO_SIZE     0x4024
++      /* each of the following is 2 bits wide */
++#define R300_GB_FIFO_SIZE_32  0
++#define R300_GB_FIFO_SIZE_64  1
++#define R300_GB_FIFO_SIZE_128 2
++#define R300_GB_FIFO_SIZE_256 3
++#     define R300_SC_IFIFO_SIZE_SHIFT 0
++#     define R300_SC_TZFIFO_SIZE_SHIFT        2
++#     define R300_SC_BFIFO_SIZE_SHIFT 4
++
++#     define R300_US_OFIFO_SIZE_SHIFT 12
++#     define R300_US_WFIFO_SIZE_SHIFT 14
++      /* the following use the same constants as above, but the meaning
++         is times 2 (i.e. instead of 32 words it means 64) */
++#     define R300_RS_TFIFO_SIZE_SHIFT 6
++#     define R300_RS_CFIFO_SIZE_SHIFT 8
++#     define R300_US_RAM_SIZE_SHIFT           10
++      /* watermarks, 3 bits wide */
++#     define R300_RS_HIGHWATER_COL_SHIFT      16
++#     define R300_RS_HIGHWATER_TEX_SHIFT      19
++#     define R300_OFIFO_HIGHWATER_SHIFT       22      /* two bits only */
++#     define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT       24
++
++#define R300_GB_SELECT        0x401C
++#     define R300_GB_FOG_SELECT_C0A           0
++#     define R300_GB_FOG_SELECT_C1A           1
++#     define R300_GB_FOG_SELECT_C2A           2
++#     define R300_GB_FOG_SELECT_C3A           3
++#     define R300_GB_FOG_SELECT_1_1_W 4
++#     define R300_GB_FOG_SELECT_Z             5
++#     define R300_GB_DEPTH_SELECT_Z           0
++#     define R300_GB_DEPTH_SELECT_1_1_W       (1<<3)
++#     define R300_GB_W_SELECT_1_W             0
++#     define R300_GB_W_SELECT_1               (1<<4)
++
++#define R300_GB_AA_CONFIG             0x4020
++#     define R300_AA_DISABLE                  0x00
++#     define R300_AA_ENABLE                   0x01
++#     define R300_AA_SUBSAMPLES_2             0
++#     define R300_AA_SUBSAMPLES_3             (1<<1)
++#     define R300_AA_SUBSAMPLES_4             (2<<1)
++#     define R300_AA_SUBSAMPLES_6             (3<<1)
++
++/* gap */
++
++/* Zero to flush caches. */
++#define R300_TX_INVALTAGS                   0x4100
++#define R300_TX_FLUSH                       0x0
++
++/* The upper enable bits are guessed, based on fglrx reported limits. */
++#define R300_TX_ENABLE                      0x4104
++#       define R300_TX_ENABLE_0                  (1 << 0)
++#       define R300_TX_ENABLE_1                  (1 << 1)
++#       define R300_TX_ENABLE_2                  (1 << 2)
++#       define R300_TX_ENABLE_3                  (1 << 3)
++#       define R300_TX_ENABLE_4                  (1 << 4)
++#       define R300_TX_ENABLE_5                  (1 << 5)
++#       define R300_TX_ENABLE_6                  (1 << 6)
++#       define R300_TX_ENABLE_7                  (1 << 7)
++#       define R300_TX_ENABLE_8                  (1 << 8)
++#       define R300_TX_ENABLE_9                  (1 << 9)
++#       define R300_TX_ENABLE_10                 (1 << 10)
++#       define R300_TX_ENABLE_11                 (1 << 11)
++#       define R300_TX_ENABLE_12                 (1 << 12)
++#       define R300_TX_ENABLE_13                 (1 << 13)
++#       define R300_TX_ENABLE_14                 (1 << 14)
++#       define R300_TX_ENABLE_15                 (1 << 15)
++
++/* The pointsize is given in multiples of 6. The pointsize can be
++ * enormous: Clear() renders a single point that fills the entire
++ * framebuffer.
++ */
++#define R300_RE_POINTSIZE                   0x421C
++#       define R300_POINTSIZE_Y_SHIFT            0
++#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
++#       define R300_POINTSIZE_X_SHIFT            16
++#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
++#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
++
++/* The line width is given in multiples of 6.
++ * In default mode lines are classified as vertical lines.
++ * HO: horizontal
++ * VE: vertical or horizontal
++ * HO & VE: no classification
++ */
++#define R300_RE_LINE_CNT                      0x4234
++#       define R300_LINESIZE_SHIFT            0
++#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
++#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
++#       define R300_LINE_CNT_HO               (1 << 16)
++#       define R300_LINE_CNT_VE               (1 << 17)
++
++/* Some sort of scale or clamp value for texcoordless textures. */
++#define R300_RE_UNK4238                       0x4238
++
++/* Something shade related */
++#define R300_RE_SHADE                         0x4274
++
++#define R300_RE_SHADE_MODEL                   0x4278
++#     define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
++#     define R300_RE_SHADE_MODEL_FLAT       0x39595
++
++/* Dangerous */
++#define R300_RE_POLYGON_MODE                  0x4288
++#     define R300_PM_ENABLED                (1 << 0)
++#     define R300_PM_FRONT_POINT            (0 << 0)
++#     define R300_PM_BACK_POINT             (0 << 0)
++#     define R300_PM_FRONT_LINE             (1 << 4)
++#     define R300_PM_FRONT_FILL             (1 << 5)
++#     define R300_PM_BACK_LINE              (1 << 7)
++#     define R300_PM_BACK_FILL              (1 << 8)
++
++/* Fog parameters */
++#define R300_RE_FOG_SCALE                     0x4294
++#define R300_RE_FOG_START                     0x4298
++
++/* Not sure why there are duplicate of factor and constant values.
++ * My best guess so far is that there are separate zbiases for test and write.
++ * Ordering might be wrong.
++ * Some of the tests indicate that fgl has a fallback implementation of zbias
++ * via pixel shaders.
++ */
++#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
++#define R300_RE_ZBIAS_T_FACTOR                0x42A4
++#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
++#define R300_RE_ZBIAS_W_FACTOR                0x42AC
++#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
++
++/* This register needs to be set to (1<<1) for RV350 to correctly
++ * perform depth test (see --vb-triangles in r300_demo)
++ * Don't know about other chips. - Vladimir
++ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
++ * My guess is that there are two bits for each zbias primitive
++ * (FILL, LINE, POINT).
++ *  One to enable depth test and one for depth write.
++ * Yet this doesn't explain why depth writes work ...
++ */
++#define R300_RE_OCCLUSION_CNTL                    0x42B4
++#     define R300_OCCLUSION_ON                (1<<1)
++
++#define R300_RE_CULL_CNTL                   0x42B8
++#       define R300_CULL_FRONT                   (1 << 0)
++#       define R300_CULL_BACK                    (1 << 1)
++#       define R300_FRONT_FACE_CCW               (0 << 2)
++#       define R300_FRONT_FACE_CW                (1 << 2)
++
++
++/* BEGIN: Rasterization / Interpolators - many guesses */
++
++/* 0_UNKNOWN_18 has always been set except for clear operations.
++ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
++ * on the vertex program, *not* the fragment program)
++ */
++#define R300_RS_CNTL_0                      0x4300
++#       define R300_RS_CNTL_TC_CNT_SHIFT         2
++#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
++      /* number of color interpolators used */
++#     define R300_RS_CNTL_CI_CNT_SHIFT         7
++#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
++      /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
++         register. */
++#define R300_RS_CNTL_1                      0x4304
++
++/* gap */
++
++/* Only used for texture coordinates.
++ * Use the source field to route texture coordinate input from the
++ * vertex program to the desired interpolator. Note that the source
++ * field is relative to the outputs the vertex program *actually*
++ * writes. If a vertex program only writes texcoord[1], this will
++ * be source index 0.
++ * Set INTERP_USED on all interpolators that produce data used by
++ * the fragment program. INTERP_USED looks like a swizzling mask,
++ * but I haven't seen it used that way.
++ *
++ * Note: The _UNKNOWN constants are always set in their respective
++ * register. I don't know if this is necessary.
++ */
++#define R300_RS_INTERP_0                    0x4310
++#define R300_RS_INTERP_1                    0x4314
++#       define R300_RS_INTERP_1_UNKNOWN          0x40
++#define R300_RS_INTERP_2                    0x4318
++#       define R300_RS_INTERP_2_UNKNOWN          0x80
++#define R300_RS_INTERP_3                    0x431C
++#       define R300_RS_INTERP_3_UNKNOWN          0xC0
++#define R300_RS_INTERP_4                    0x4320
++#define R300_RS_INTERP_5                    0x4324
++#define R300_RS_INTERP_6                    0x4328
++#define R300_RS_INTERP_7                    0x432C
++#       define R300_RS_INTERP_SRC_SHIFT          2
++#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
++#       define R300_RS_INTERP_USED               0x00D10000
++
++/* These DWORDs control how vertex data is routed into fragment program
++ * registers, after interpolators.
++ */
++#define R300_RS_ROUTE_0                     0x4330
++#define R300_RS_ROUTE_1                     0x4334
++#define R300_RS_ROUTE_2                     0x4338
++#define R300_RS_ROUTE_3                     0x433C /* GUESS */
++#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
++#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
++#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
++#define R300_RS_ROUTE_7                     0x434C /* GUESS */
++#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
++#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
++#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
++#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
++#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
++#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
++#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
++#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
++#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
++#       define R300_RS_ROUTE_DEST_SHIFT          6
++#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
++
++/* Special handling for color: When the fragment program uses color,
++ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
++ * color register index.
++ *
++ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
++ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
++ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
++ * correct or not. - Oliver.
++ */
++#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
++#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
++#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
++/* As above, but for secondary color */
++#             define R300_RS_ROUTE_1_COLOR1            (1 << 14)
++#             define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
++#             define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
++#             define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
++/* END: Rasterization / Interpolators - many guesses */
++
++/* Hierarchical Z Enable */
++#define R300_SC_HYPERZ                   0x43a4
++#     define R300_SC_HYPERZ_DISABLE     (0 << 0)
++#     define R300_SC_HYPERZ_ENABLE      (1 << 0)
++#     define R300_SC_HYPERZ_MIN         (0 << 1)
++#     define R300_SC_HYPERZ_MAX         (1 << 1)
++#     define R300_SC_HYPERZ_ADJ_256     (0 << 2)
++#     define R300_SC_HYPERZ_ADJ_128     (1 << 2)
++#     define R300_SC_HYPERZ_ADJ_64      (2 << 2)
++#     define R300_SC_HYPERZ_ADJ_32      (3 << 2)
++#     define R300_SC_HYPERZ_ADJ_16      (4 << 2)
++#     define R300_SC_HYPERZ_ADJ_8       (5 << 2)
++#     define R300_SC_HYPERZ_ADJ_4       (6 << 2)
++#     define R300_SC_HYPERZ_ADJ_2       (7 << 2)
++#     define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
++#     define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
++#     define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
++#     define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
++
++#define R300_SC_EDGERULE                 0x43a8
++
++/* BEGIN: Scissors and cliprects */
++
++/* There are four clipping rectangles. Their corner coordinates are inclusive.
++ * Every pixel is assigned a number from 0 and 15 by setting bits 0-3 depending
++ * on whether the pixel is inside cliprects 0-3, respectively. For example,
++ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
++ * the number 3 (binary 0011).
++ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
++ * the pixel is rasterized.
++ *
++ * In addition to this, there is a scissors rectangle. Only pixels inside the
++ * scissors rectangle are drawn. (coordinates are inclusive)
++ *
++ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
++ * for the purpose of clipping and scissors.
++ */
++#define R300_RE_CLIPRECT_TL_0               0x43B0
++#define R300_RE_CLIPRECT_BR_0               0x43B4
++#define R300_RE_CLIPRECT_TL_1               0x43B8
++#define R300_RE_CLIPRECT_BR_1               0x43BC
++#define R300_RE_CLIPRECT_TL_2               0x43C0
++#define R300_RE_CLIPRECT_BR_2               0x43C4
++#define R300_RE_CLIPRECT_TL_3               0x43C8
++#define R300_RE_CLIPRECT_BR_3               0x43CC
++#       define R300_CLIPRECT_OFFSET              1440
++#       define R300_CLIPRECT_MASK                0x1FFF
++#       define R300_CLIPRECT_X_SHIFT             0
++#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
++#       define R300_CLIPRECT_Y_SHIFT             13
++#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
++#define R300_RE_CLIPRECT_CNTL               0x43D0
++#       define R300_CLIP_OUT                     (1 << 0)
++#       define R300_CLIP_0                       (1 << 1)
++#       define R300_CLIP_1                       (1 << 2)
++#       define R300_CLIP_10                      (1 << 3)
++#       define R300_CLIP_2                       (1 << 4)
++#       define R300_CLIP_20                      (1 << 5)
++#       define R300_CLIP_21                      (1 << 6)
++#       define R300_CLIP_210                     (1 << 7)
++#       define R300_CLIP_3                       (1 << 8)
++#       define R300_CLIP_30                      (1 << 9)
++#       define R300_CLIP_31                      (1 << 10)
++#       define R300_CLIP_310                     (1 << 11)
++#       define R300_CLIP_32                      (1 << 12)
++#       define R300_CLIP_320                     (1 << 13)
++#       define R300_CLIP_321                     (1 << 14)
++#       define R300_CLIP_3210                    (1 << 15)
++
++/* gap */
++
++#define R300_RE_SCISSORS_TL                 0x43E0
++#define R300_RE_SCISSORS_BR                 0x43E4
++#       define R300_SCISSORS_OFFSET              1440
++#       define R300_SCISSORS_X_SHIFT             0
++#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
++#       define R300_SCISSORS_Y_SHIFT             13
++#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
++/* END: Scissors and cliprects */
++
++/* BEGIN: Texture specification */
++
++/*
++ * The texture specification dwords are grouped by meaning and not by texture
++ * unit. This means that e.g. the offset for texture image unit N is found in
++ * register TX_OFFSET_0 + (4*N)
++ */
++#define R300_TX_FILTER_0                    0x4400
++#       define R300_TX_REPEAT                    0
++#       define R300_TX_MIRRORED                  1
++#       define R300_TX_CLAMP                     4
++#       define R300_TX_CLAMP_TO_EDGE             2
++#       define R300_TX_CLAMP_TO_BORDER           6
++#       define R300_TX_WRAP_S_SHIFT              0
++#       define R300_TX_WRAP_S_MASK               (7 << 0)
++#       define R300_TX_WRAP_T_SHIFT              3
++#       define R300_TX_WRAP_T_MASK               (7 << 3)
++#       define R300_TX_WRAP_Q_SHIFT              6
++#       define R300_TX_WRAP_Q_MASK               (7 << 6)
++#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
++#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
++#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
++#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
++#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
++#     define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
++#     define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
++#     define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
++#     define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
++
++/* NOTE: NEAREST doesn't seem to exist.
++ * I'm not setting MAG_FILTER_MASK and (3 << 11) on for all
++ * anisotropy modes because that would void selected mag filter
++ */
++#     define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
++#     define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
++#     define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
++#     define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
++#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
++#     define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
++#     define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
++#     define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
++#     define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
++#     define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
++#     define R300_TX_MAX_ANISO_MASK    (14 << 21)
++
++#define R300_TX_FILTER1_0                      0x4440
++#     define R300_CHROMA_KEY_MODE_DISABLE    0
++#     define R300_CHROMA_KEY_FORCE           1
++#     define R300_CHROMA_KEY_BLEND           2
++#     define R300_MC_ROUND_NORMAL            (0<<2)
++#     define R300_MC_ROUND_MPEG4             (1<<2)
++#     define R300_LOD_BIAS_MASK           0x1fff
++#     define R300_EDGE_ANISO_EDGE_DIAG       (0<<13)
++#     define R300_EDGE_ANISO_EDGE_ONLY       (1<<13)
++#     define R300_MC_COORD_TRUNCATE_DISABLE  (0<<14)
++#     define R300_MC_COORD_TRUNCATE_MPEG     (1<<14)
++#     define R300_TX_TRI_PERF_0_8            (0<<15)
++#     define R300_TX_TRI_PERF_1_8            (1<<15)
++#     define R300_TX_TRI_PERF_1_4            (2<<15)
++#     define R300_TX_TRI_PERF_3_8            (3<<15)
++#     define R300_ANISO_THRESHOLD_MASK       (7<<17)
++
++#define R300_TX_SIZE_0                      0x4480
++#       define R300_TX_WIDTHMASK_SHIFT           0
++#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
++#       define R300_TX_HEIGHTMASK_SHIFT          11
++#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
++#       define R300_TX_UNK23                     (1 << 23)
++#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
++#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
++#       define R300_TX_SIZE_PROJECTED            (1<<30)
++#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
++#define R300_TX_FORMAT_0                    0x44C0
++      /* The interpretation of the format word by Wladimir van der Laan */
++      /* The X, Y, Z and W refer to the layout of the components.
++         They are given meanings as R, G, B and Alpha by the swizzle
++         specification */
++#     define R300_TX_FORMAT_X8                    0x0
++#     define R300_TX_FORMAT_X16                   0x1
++#     define R300_TX_FORMAT_Y4X4                  0x2
++#     define R300_TX_FORMAT_Y8X8                  0x3
++#     define R300_TX_FORMAT_Y16X16                0x4
++#     define R300_TX_FORMAT_Z3Y3X2                0x5
++#     define R300_TX_FORMAT_Z5Y6X5                0x6
++#     define R300_TX_FORMAT_Z6Y5X5                0x7
++#     define R300_TX_FORMAT_Z11Y11X10             0x8
++#     define R300_TX_FORMAT_Z10Y11X11             0x9
++#     define R300_TX_FORMAT_W4Z4Y4X4              0xA
++#     define R300_TX_FORMAT_W1Z5Y5X5              0xB
++#     define R300_TX_FORMAT_W8Z8Y8X8              0xC
++#     define R300_TX_FORMAT_W2Z10Y10X10           0xD
++#     define R300_TX_FORMAT_W16Z16Y16X16          0xE
++#     define R300_TX_FORMAT_DXT1                  0xF
++#     define R300_TX_FORMAT_DXT3                  0x10
++#     define R300_TX_FORMAT_DXT5                  0x11
++#     define R300_TX_FORMAT_D3DMFT_CxV8U8         0x12     /* no swizzle */
++#     define R300_TX_FORMAT_A8R8G8B8              0x13     /* no swizzle */
++#     define R300_TX_FORMAT_B8G8_B8G8             0x14     /* no swizzle */
++#     define R300_TX_FORMAT_G8R8_G8B8             0x15     /* no swizzle */
++      /* 0x16 - some 16 bit green format.. ?? */
++#     define R300_TX_FORMAT_UNK25                (1 << 25) /* no swizzle */
++#     define R300_TX_FORMAT_CUBIC_MAP            (1 << 26)
++
++      /* gap */
++      /* Floating point formats */
++      /* Note - hardware supports both 16 and 32 bit floating point */
++#     define R300_TX_FORMAT_FL_I16                0x18
++#     define R300_TX_FORMAT_FL_I16A16             0x19
++#     define R300_TX_FORMAT_FL_R16G16B16A16       0x1A
++#     define R300_TX_FORMAT_FL_I32                0x1B
++#     define R300_TX_FORMAT_FL_I32A32             0x1C
++#     define R300_TX_FORMAT_FL_R32G32B32A32       0x1D
++      /* alpha modes, convenience mostly */
++      /* if you have alpha, pick constant appropriate to the
++         number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
++#     define R300_TX_FORMAT_ALPHA_1CH             0x000
++#     define R300_TX_FORMAT_ALPHA_2CH             0x200
++#     define R300_TX_FORMAT_ALPHA_4CH             0x600
++#     define R300_TX_FORMAT_ALPHA_NONE            0xA00
++      /* Swizzling */
++      /* constants */
++#     define R300_TX_FORMAT_X         0
++#     define R300_TX_FORMAT_Y         1
++#     define R300_TX_FORMAT_Z         2
++#     define R300_TX_FORMAT_W         3
++#     define R300_TX_FORMAT_ZERO      4
++#     define R300_TX_FORMAT_ONE       5
++      /* 2.0*Z, everything above 1.0 is set to 0.0 */
++#     define R300_TX_FORMAT_CUT_Z     6
++      /* 2.0*W, everything above 1.0 is set to 0.0 */
++#     define R300_TX_FORMAT_CUT_W     7
++
++#     define R300_TX_FORMAT_B_SHIFT   18
++#     define R300_TX_FORMAT_G_SHIFT   15
++#     define R300_TX_FORMAT_R_SHIFT   12
++#     define R300_TX_FORMAT_A_SHIFT   9
++      /* Convenience macro to take care of layout and swizzling */
++#     define R300_EASY_TX_FORMAT(B, G, R, A, FMT)     (               \
++              ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)          \
++              | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)        \
++              | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)        \
++              | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)        \
++              | (R300_TX_FORMAT_##FMT)                                \
++              )
++      /* These can be ORed with result of R300_EASY_TX_FORMAT()
++         We don't really know what they do. Take values from a
++           constant color ? */
++#     define R300_TX_FORMAT_CONST_X           (1<<5)
++#     define R300_TX_FORMAT_CONST_Y           (2<<5)
++#     define R300_TX_FORMAT_CONST_Z           (4<<5)
++#     define R300_TX_FORMAT_CONST_W           (8<<5)
++
++#     define R300_TX_FORMAT_YUV_MODE          0x00800000
++
++#define R300_TX_PITCH_0                           0x4500 /* obvious missing in gap */
++#define R300_TX_OFFSET_0                    0x4540
++      /* BEGIN: Guess from R200 */
++#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
++#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
++#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
++#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
++#       define R300_TXO_MACRO_TILE               (1 << 2)
++#       define R300_TXO_MICRO_TILE               (1 << 3)
++#       define R300_TXO_OFFSET_MASK              0xffffffe0
++#       define R300_TXO_OFFSET_SHIFT             5
++      /* END: Guess from R200 */
++
++/* 32 bit chroma key */
++#define R300_TX_CHROMA_KEY_0                      0x4580
++/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
++#define R300_TX_BORDER_COLOR_0              0x45C0
++
++/* END: Texture specification */
++
++/* BEGIN: Fragment program instruction set */
++
++/* Fragment programs are written directly into register space.
++ * There are separate instruction streams for texture instructions and ALU
++ * instructions.
++ * In order to synchronize these streams, the program is divided into up
++ * to 4 nodes. Each node begins with a number of TEX operations, followed
++ * by a number of ALU operations.
++ * The first node can have zero TEX ops, all subsequent nodes must have at
++ * least
++ * one TEX ops.
++ * All nodes must have at least one ALU op.
++ *
++ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
++ * 1 node, a value of 3 means 4 nodes.
++ * The total amount of instructions is defined in PFS_CNTL_2. The offsets are
++ * offsets into the respective instruction streams, while *_END points to the
++ * last instruction relative to this offset.
++ */
++#define R300_PFS_CNTL_0                     0x4600
++#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
++#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
++#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
++#define R300_PFS_CNTL_1                     0x4604
++/* There is an unshifted value here which has so far always been equal to the
++ * index of the highest used temporary register.
++ */
++#define R300_PFS_CNTL_2                     0x4608
++#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
++#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
++#       define R300_PFS_CNTL_ALU_END_SHIFT       6
++#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
++#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
++#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
++#       define R300_PFS_CNTL_TEX_END_SHIFT       18
++#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
++
++/* gap */
++
++/* Nodes are stored backwards. The last active node is always stored in
++ * PFS_NODE_3.
++ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
++ * first node is stored in NODE_2, the second node is stored in NODE_3.
++ *
++ * Offsets are relative to the master offset from PFS_CNTL_2.
++ */
++#define R300_PFS_NODE_0                     0x4610
++#define R300_PFS_NODE_1                     0x4614
++#define R300_PFS_NODE_2                     0x4618
++#define R300_PFS_NODE_3                     0x461C
++#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
++#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
++#       define R300_PFS_NODE_ALU_END_SHIFT       6
++#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
++#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
++#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
++#       define R300_PFS_NODE_TEX_END_SHIFT       17
++#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
++#             define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
++#             define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
++
++/* TEX
++ * As far as I can tell, texture instructions cannot write into output
++ * registers directly. A subsequent ALU instruction is always necessary,
++ * even if it's just MAD o0, r0, 1, 0
++ */
++#define R300_PFS_TEXI_0                     0x4620
++#     define R300_FPITX_SRC_SHIFT              0
++#     define R300_FPITX_SRC_MASK               (31 << 0)
++      /* GUESS */
++#     define R300_FPITX_SRC_CONST              (1 << 5)
++#     define R300_FPITX_DST_SHIFT              6
++#     define R300_FPITX_DST_MASK               (31 << 6)
++#     define R300_FPITX_IMAGE_SHIFT            11
++      /* GUESS based on layout and native limits */
++#       define R300_FPITX_IMAGE_MASK             (15 << 11)
++/* Unsure if these are opcodes, or some kind of bitfield, but this is how
++ * they were set when I checked
++ */
++#     define R300_FPITX_OPCODE_SHIFT          15
++#             define R300_FPITX_OP_TEX        1
++#             define R300_FPITX_OP_KIL        2
++#             define R300_FPITX_OP_TXP        3
++#             define R300_FPITX_OP_TXB        4
++#     define R300_FPITX_OPCODE_MASK           (7 << 15)
++
++/* ALU
++ * The ALU instructions register blocks are enumerated according to the order
++ * in which fglrx. I assume there is space for 64 instructions, since
++ * each block has space for a maximum of 64 DWORDs, and this matches reported
++ * native limits.
++ *
++ * The basic functional block seems to be one MAD for each color and alpha,
++ * and an adder that adds all components after the MUL.
++ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
++ *  - DP4: Use OUTC_DP4, OUTA_DP4
++ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
++ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
++ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
++ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
++ *  - FLR: use FRC+MAD
++ *  - XPD: use MAD+MAD
++ *  - SGE, SLT: use MAD+CMP
++ *  - RSQ: use ABS modifier for argument
++ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
++ *    (e.g. RCP) into color register
++ *  - apparently, there's no quick DST operation
++ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
++ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
++ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
++ *
++ * Operand selection
++ * First stage selects three sources from the available registers and
++ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
++ * fglrx sorts the three source fields: Registers before constants,
++ * lower indices before higher indices; I do not know whether this is
++ * necessary.
++ *
++ * fglrx fills unused sources with "read constant 0"
++ * According to specs, you cannot select more than two different constants.
++ *
++ * Second stage selects the operands from the sources. This is defined in
++ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
++ * zero and one.
++ * Swizzling and negation happens in this stage, as well.
++ *
++ * Important: Color and alpha seem to be mostly separate, i.e. their sources
++ * selection appears to be fully independent (the register storage is probably
++ * physically split into a color and an alpha section).
++ * However (because of the apparent physical split), there is some interaction
++ * WRT swizzling. If, for example, you want to load an R component into an
++ * Alpha operand, this R component is taken from a *color* source, not from
++ * an alpha source. The corresponding register doesn't even have to appear in
++ * the alpha sources list. (I hope this all makes sense to you)
++ *
++ * Destination selection
++ * The destination register index is in FPI1 (color) and FPI3 (alpha)
++ * together with enable bits.
++ * There are separate enable bits for writing into temporary registers
++ * (DSTC_REG_* /DSTA_REG) and and program output registers (DSTC_OUTPUT_*
++ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
++ * same index must be used for both).
++ *
++ * Note: There is a special form for LRP
++ *  - Argument order is the same as in ARB_fragment_program.
++ *  - Operation is MAD
++ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
++ *  - Set FPI0/FPI2_SPECIAL_LRP
++ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
++ */
++#define R300_PFS_INSTR1_0                   0x46C0
++#       define R300_FPI1_SRC0C_SHIFT             0
++#       define R300_FPI1_SRC0C_MASK              (31 << 0)
++#       define R300_FPI1_SRC0C_CONST             (1 << 5)
++#       define R300_FPI1_SRC1C_SHIFT             6
++#       define R300_FPI1_SRC1C_MASK              (31 << 6)
++#       define R300_FPI1_SRC1C_CONST             (1 << 11)
++#       define R300_FPI1_SRC2C_SHIFT             12
++#       define R300_FPI1_SRC2C_MASK              (31 << 12)
++#       define R300_FPI1_SRC2C_CONST             (1 << 17)
++#       define R300_FPI1_SRC_MASK                0x0003ffff
++#       define R300_FPI1_DSTC_SHIFT              18
++#       define R300_FPI1_DSTC_MASK               (31 << 18)
++#             define R300_FPI1_DSTC_REG_MASK_SHIFT     23
++#       define R300_FPI1_DSTC_REG_X              (1 << 23)
++#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
++#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
++#             define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT  26
++#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
++#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
++#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
++
++#define R300_PFS_INSTR3_0                   0x47C0
++#       define R300_FPI3_SRC0A_SHIFT             0
++#       define R300_FPI3_SRC0A_MASK              (31 << 0)
++#       define R300_FPI3_SRC0A_CONST             (1 << 5)
++#       define R300_FPI3_SRC1A_SHIFT             6
++#       define R300_FPI3_SRC1A_MASK              (31 << 6)
++#       define R300_FPI3_SRC1A_CONST             (1 << 11)
++#       define R300_FPI3_SRC2A_SHIFT             12
++#       define R300_FPI3_SRC2A_MASK              (31 << 12)
++#       define R300_FPI3_SRC2A_CONST             (1 << 17)
++#       define R300_FPI3_SRC_MASK                0x0003ffff
++#       define R300_FPI3_DSTA_SHIFT              18
++#       define R300_FPI3_DSTA_MASK               (31 << 18)
++#       define R300_FPI3_DSTA_REG                (1 << 23)
++#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
++#             define R300_FPI3_DSTA_DEPTH              (1 << 27)
++
++#define R300_PFS_INSTR0_0                   0x48C0
++#       define R300_FPI0_ARGC_SRC0C_XYZ          0
++#       define R300_FPI0_ARGC_SRC0C_XXX          1
++#       define R300_FPI0_ARGC_SRC0C_YYY          2
++#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
++#       define R300_FPI0_ARGC_SRC1C_XYZ          4
++#       define R300_FPI0_ARGC_SRC1C_XXX          5
++#       define R300_FPI0_ARGC_SRC1C_YYY          6
++#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
++#       define R300_FPI0_ARGC_SRC2C_XYZ          8
++#       define R300_FPI0_ARGC_SRC2C_XXX          9
++#       define R300_FPI0_ARGC_SRC2C_YYY          10
++#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
++#       define R300_FPI0_ARGC_SRC0A              12
++#       define R300_FPI0_ARGC_SRC1A              13
++#       define R300_FPI0_ARGC_SRC2A              14
++#       define R300_FPI0_ARGC_SRC1C_LRP          15
++#       define R300_FPI0_ARGC_ZERO               20
++#       define R300_FPI0_ARGC_ONE                21
++      /* GUESS */
++#       define R300_FPI0_ARGC_HALF               22
++#       define R300_FPI0_ARGC_SRC0C_YZX          23
++#       define R300_FPI0_ARGC_SRC1C_YZX          24
++#       define R300_FPI0_ARGC_SRC2C_YZX          25
++#       define R300_FPI0_ARGC_SRC0C_ZXY          26
++#       define R300_FPI0_ARGC_SRC1C_ZXY          27
++#       define R300_FPI0_ARGC_SRC2C_ZXY          28
++#       define R300_FPI0_ARGC_SRC0CA_WZY         29
++#       define R300_FPI0_ARGC_SRC1CA_WZY         30
++#       define R300_FPI0_ARGC_SRC2CA_WZY         31
++
++#       define R300_FPI0_ARG0C_SHIFT             0
++#       define R300_FPI0_ARG0C_MASK              (31 << 0)
++#       define R300_FPI0_ARG0C_NEG               (1 << 5)
++#       define R300_FPI0_ARG0C_ABS               (1 << 6)
++#       define R300_FPI0_ARG1C_SHIFT             7
++#       define R300_FPI0_ARG1C_MASK              (31 << 7)
++#       define R300_FPI0_ARG1C_NEG               (1 << 12)
++#       define R300_FPI0_ARG1C_ABS               (1 << 13)
++#       define R300_FPI0_ARG2C_SHIFT             14
++#       define R300_FPI0_ARG2C_MASK              (31 << 14)
++#       define R300_FPI0_ARG2C_NEG               (1 << 19)
++#       define R300_FPI0_ARG2C_ABS               (1 << 20)
++#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
++#       define R300_FPI0_OUTC_MAD                (0 << 23)
++#       define R300_FPI0_OUTC_DP3                (1 << 23)
++#       define R300_FPI0_OUTC_DP4                (2 << 23)
++#       define R300_FPI0_OUTC_MIN                (4 << 23)
++#       define R300_FPI0_OUTC_MAX                (5 << 23)
++#       define R300_FPI0_OUTC_CMPH               (7 << 23)
++#       define R300_FPI0_OUTC_CMP                (8 << 23)
++#       define R300_FPI0_OUTC_FRC                (9 << 23)
++#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
++#       define R300_FPI0_OUTC_SAT                (1 << 30)
++#       define R300_FPI0_INSERT_NOP              (1 << 31)
++
++#define R300_PFS_INSTR2_0                   0x49C0
++#       define R300_FPI2_ARGA_SRC0C_X            0
++#       define R300_FPI2_ARGA_SRC0C_Y            1
++#       define R300_FPI2_ARGA_SRC0C_Z            2
++#       define R300_FPI2_ARGA_SRC1C_X            3
++#       define R300_FPI2_ARGA_SRC1C_Y            4
++#       define R300_FPI2_ARGA_SRC1C_Z            5
++#       define R300_FPI2_ARGA_SRC2C_X            6
++#       define R300_FPI2_ARGA_SRC2C_Y            7
++#       define R300_FPI2_ARGA_SRC2C_Z            8
++#       define R300_FPI2_ARGA_SRC0A              9
++#       define R300_FPI2_ARGA_SRC1A              10
++#       define R300_FPI2_ARGA_SRC2A              11
++#       define R300_FPI2_ARGA_SRC1A_LRP          15
++#       define R300_FPI2_ARGA_ZERO               16
++#       define R300_FPI2_ARGA_ONE                17
++      /* GUESS */
++#       define R300_FPI2_ARGA_HALF               18
++#       define R300_FPI2_ARG0A_SHIFT             0
++#       define R300_FPI2_ARG0A_MASK              (31 << 0)
++#       define R300_FPI2_ARG0A_NEG               (1 << 5)
++      /* GUESS */
++#     define R300_FPI2_ARG0A_ABS               (1 << 6)
++#       define R300_FPI2_ARG1A_SHIFT             7
++#       define R300_FPI2_ARG1A_MASK              (31 << 7)
++#       define R300_FPI2_ARG1A_NEG               (1 << 12)
++      /* GUESS */
++#     define R300_FPI2_ARG1A_ABS               (1 << 13)
++#       define R300_FPI2_ARG2A_SHIFT             14
++#       define R300_FPI2_ARG2A_MASK              (31 << 14)
++#       define R300_FPI2_ARG2A_NEG               (1 << 19)
++      /* GUESS */
++#     define R300_FPI2_ARG2A_ABS               (1 << 20)
++#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
++#       define R300_FPI2_OUTA_MAD                (0 << 23)
++#       define R300_FPI2_OUTA_DP4                (1 << 23)
++#       define R300_FPI2_OUTA_MIN                (2 << 23)
++#       define R300_FPI2_OUTA_MAX                (3 << 23)
++#       define R300_FPI2_OUTA_CMP                (6 << 23)
++#       define R300_FPI2_OUTA_FRC                (7 << 23)
++#       define R300_FPI2_OUTA_EX2                (8 << 23)
++#       define R300_FPI2_OUTA_LG2                (9 << 23)
++#       define R300_FPI2_OUTA_RCP                (10 << 23)
++#       define R300_FPI2_OUTA_RSQ                (11 << 23)
++#       define R300_FPI2_OUTA_SAT                (1 << 30)
++#       define R300_FPI2_UNKNOWN_31              (1 << 31)
++/* END: Fragment program instruction set */
++
++/* Fog state and color */
++#define R300_RE_FOG_STATE                   0x4BC0
++#       define R300_FOG_ENABLE                   (1 << 0)
++#     define R300_FOG_MODE_LINEAR              (0 << 1)
++#     define R300_FOG_MODE_EXP                 (1 << 1)
++#     define R300_FOG_MODE_EXP2                (2 << 1)
++#     define R300_FOG_MODE_MASK                (3 << 1)
++#define R300_FOG_COLOR_R                    0x4BC8
++#define R300_FOG_COLOR_G                    0x4BCC
++#define R300_FOG_COLOR_B                    0x4BD0
++
++#define R300_PP_ALPHA_TEST                  0x4BD4
++#       define R300_REF_ALPHA_MASK               0x000000ff
++#       define R300_ALPHA_TEST_FAIL              (0 << 8)
++#       define R300_ALPHA_TEST_LESS              (1 << 8)
++#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
++#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
++#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
++#       define R300_ALPHA_TEST_GREATER           (4 << 8)
++#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
++#       define R300_ALPHA_TEST_PASS              (7 << 8)
++#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
++#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
++
++/* gap */
++
++/* Fragment program parameters in 7.16 floating point */
++#define R300_PFS_PARAM_0_X                  0x4C00
++#define R300_PFS_PARAM_0_Y                  0x4C04
++#define R300_PFS_PARAM_0_Z                  0x4C08
++#define R300_PFS_PARAM_0_W                  0x4C0C
++/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
++#define R300_PFS_PARAM_31_X                 0x4DF0
++#define R300_PFS_PARAM_31_Y                 0x4DF4
++#define R300_PFS_PARAM_31_Z                 0x4DF8
++#define R300_PFS_PARAM_31_W                 0x4DFC
++
++/* Notes:
++ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
++ *   the application
++ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
++ *    are set to the same
++ *   function (both registers are always set up completely in any case)
++ * - Most blend flags are simply copied from R200 and not tested yet
++ */
++#define R300_RB3D_CBLEND                    0x4E04
++#define R300_RB3D_ABLEND                    0x4E08
++/* the following only appear in CBLEND */
++#       define R300_BLEND_ENABLE                     (1 << 0)
++#       define R300_BLEND_UNKNOWN                    (3 << 1)
++#       define R300_BLEND_NO_SEPARATE                (1 << 3)
++/* the following are shared between CBLEND and ABLEND */
++#       define R300_FCN_MASK                         (3  << 12)
++#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
++#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
++#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
++#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
++#       define R300_COMB_FCN_MIN                     (4  << 12)
++#       define R300_COMB_FCN_MAX                     (5  << 12)
++#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
++#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
++#       define R300_BLEND_GL_ZERO                    (32)
++#       define R300_BLEND_GL_ONE                     (33)
++#       define R300_BLEND_GL_SRC_COLOR               (34)
++#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
++#       define R300_BLEND_GL_DST_COLOR               (36)
++#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
++#       define R300_BLEND_GL_SRC_ALPHA               (38)
++#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
++#       define R300_BLEND_GL_DST_ALPHA               (40)
++#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
++#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
++#       define R300_BLEND_GL_CONST_COLOR             (43)
++#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
++#       define R300_BLEND_GL_CONST_ALPHA             (45)
++#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
++#       define R300_BLEND_MASK                       (63)
++#       define R300_SRC_BLEND_SHIFT                  (16)
++#       define R300_DST_BLEND_SHIFT                  (24)
++#define R300_RB3D_BLEND_COLOR               0x4E10
++#define R300_RB3D_COLORMASK                 0x4E0C
++#       define R300_COLORMASK0_B                 (1<<0)
++#       define R300_COLORMASK0_G                 (1<<1)
++#       define R300_COLORMASK0_R                 (1<<2)
++#       define R300_COLORMASK0_A                 (1<<3)
++
++/* gap */
++
++#define R300_RB3D_COLOROFFSET0              0x4E28
++#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
++#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
++#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
++#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
++
++/* gap */
++
++/* Bit 16: Larger tiles
++ * Bit 17: 4x2 tiles
++ * Bit 18: Extremely weird tile like, but some pixels duplicated?
++ */
++#define R300_RB3D_COLORPITCH0               0x4E38
++#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
++#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
++#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
++#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
++#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
++#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
++#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
++#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
++#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
++#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
++#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
++
++#define R300_RB3D_AARESOLVE_CTL             0x4E88
++/* gap */
++
++/* Guess by Vladimir.
++ * Set to 0A before 3D operations, set to 02 afterwards.
++ */
++/*#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C*/
++#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
++#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
++
++/* gap */
++/* There seems to be no "write only" setting, so use Z-test = ALWAYS
++ * for this.
++ * Bit (1<<8) is the "test" bit. so plain write is 6  - vd
++ */
++#define R300_ZB_CNTL                             0x4F00
++#     define R300_STENCIL_ENABLE               (1 << 0)
++#     define R300_Z_ENABLE                     (1 << 1)
++#     define R300_Z_WRITE_ENABLE               (1 << 2)
++#     define R300_Z_SIGNED_COMPARE             (1 << 3)
++#     define R300_STENCIL_FRONT_BACK           (1 << 4)
++
++#define R300_ZB_ZSTENCILCNTL                   0x4f04
++      /* functions */
++#     define R300_ZS_NEVER                    0
++#     define R300_ZS_LESS                     1
++#     define R300_ZS_LEQUAL                   2
++#     define R300_ZS_EQUAL                    3
++#     define R300_ZS_GEQUAL                   4
++#     define R300_ZS_GREATER                  5
++#     define R300_ZS_NOTEQUAL                 6
++#     define R300_ZS_ALWAYS                   7
++#       define R300_ZS_MASK                     7
++      /* operations */
++#     define R300_ZS_KEEP                     0
++#     define R300_ZS_ZERO                     1
++#     define R300_ZS_REPLACE                  2
++#     define R300_ZS_INCR                     3
++#     define R300_ZS_DECR                     4
++#     define R300_ZS_INVERT                   5
++#     define R300_ZS_INCR_WRAP                6
++#     define R300_ZS_DECR_WRAP                7
++#     define R300_Z_FUNC_SHIFT                0
++      /* front and back refer to operations done for front
++         and back faces, i.e. separate stencil function support */
++#     define R300_S_FRONT_FUNC_SHIFT          3
++#     define R300_S_FRONT_SFAIL_OP_SHIFT      6
++#     define R300_S_FRONT_ZPASS_OP_SHIFT      9
++#     define R300_S_FRONT_ZFAIL_OP_SHIFT      12
++#     define R300_S_BACK_FUNC_SHIFT           15
++#     define R300_S_BACK_SFAIL_OP_SHIFT       18
++#     define R300_S_BACK_ZPASS_OP_SHIFT       21
++#     define R300_S_BACK_ZFAIL_OP_SHIFT       24
++
++#define R300_ZB_STENCILREFMASK                        0x4f08
++#     define R300_STENCILREF_SHIFT       0
++#     define R300_STENCILREF_MASK        0x000000ff
++#     define R300_STENCILMASK_SHIFT      8
++#     define R300_STENCILMASK_MASK       0x0000ff00
++#     define R300_STENCILWRITEMASK_SHIFT 16
++#     define R300_STENCILWRITEMASK_MASK  0x00ff0000
++
++/* gap */
++
++#define R300_ZB_FORMAT                             0x4f10
++#     define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
++#     define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
++#     define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
++/* reserved up to (15 << 0) */
++#     define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
++#     define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
++
++#define R300_ZB_ZTOP                             0x4F14
++#     define R300_ZTOP_DISABLE                 (0 << 0)
++#     define R300_ZTOP_ENABLE                  (1 << 0)
++
++/* gap */
++
++#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
++#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1 << 31)
++
++#define R300_ZB_BW_CNTL                     0x4f1c
++#     define R300_HIZ_DISABLE                              (0 << 0)
++#     define R300_HIZ_ENABLE                               (1 << 0)
++#     define R300_HIZ_MIN                                  (0 << 1)
++#     define R300_HIZ_MAX                                  (1 << 1)
++#     define R300_FAST_FILL_DISABLE                        (0 << 2)
++#     define R300_FAST_FILL_ENABLE                         (1 << 2)
++#     define R300_RD_COMP_DISABLE                          (0 << 3)
++#     define R300_RD_COMP_ENABLE                           (1 << 3)
++#     define R300_WR_COMP_DISABLE                          (0 << 4)
++#     define R300_WR_COMP_ENABLE                           (1 << 4)
++#     define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
++#     define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
++#     define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
++#     define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
++
++#     define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
++#     define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
++#     define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
++#     define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
++
++#     define R500_BMASK_ENABLE                             (0 << 10)
++#     define R500_BMASK_DISABLE                            (1 << 10)
++#     define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
++#     define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
++#     define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
++#     define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
++#     define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
++#     define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
++#     define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
++#     define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
++#     define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
++#     define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
++#     define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
++#     define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
++#     define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
++#     define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
++#     define R500_PEQ_PACKING_DISABLE                      (0 << 18)
++#     define R500_PEQ_PACKING_ENABLE                       (1 << 18)
++#     define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
++#     define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
++
++
++/* gap */
++
++/* Z Buffer Address Offset.
++ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
++ */
++#define R300_ZB_DEPTHOFFSET               0x4f20
++
++/* Z Buffer Pitch and Endian Control */
++#define R300_ZB_DEPTHPITCH                0x4f24
++#       define R300_DEPTHPITCH_MASK              0x00003FFC
++#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
++#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
++#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
++#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
++#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
++#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
++#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
++#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
++#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
++
++/* Z Buffer Clear Value */
++#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
++
++#define R300_ZB_ZMASK_OFFSET                   0x4f30
++#define R300_ZB_ZMASK_PITCH                    0x4f34
++#define R300_ZB_ZMASK_WRINDEX                  0x4f38
++#define R300_ZB_ZMASK_DWORD                    0x4f3c
++#define R300_ZB_ZMASK_RDINDEX                  0x4f40
++
++/* Hierarchical Z Memory Offset */
++#define R300_ZB_HIZ_OFFSET                       0x4f44
++
++/* Hierarchical Z Write Index */
++#define R300_ZB_HIZ_WRINDEX                      0x4f48
++
++/* Hierarchical Z Data */
++#define R300_ZB_HIZ_DWORD                        0x4f4c
++
++/* Hierarchical Z Read Index */
++#define R300_ZB_HIZ_RDINDEX                      0x4f50
++
++/* Hierarchical Z Pitch */
++#define R300_ZB_HIZ_PITCH                        0x4f54
++
++/* Z Buffer Z Pass Counter Data */
++#define R300_ZB_ZPASS_DATA                       0x4f58
++
++/* Z Buffer Z Pass Counter Address */
++#define R300_ZB_ZPASS_ADDR                       0x4f5c
++
++/* Depth buffer X and Y coordinate offset */
++#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
++#     define R300_DEPTHX_OFFSET_SHIFT  1
++#     define R300_DEPTHX_OFFSET_MASK   0x000007FE
++#     define R300_DEPTHY_OFFSET_SHIFT  17
++#     define R300_DEPTHY_OFFSET_MASK   0x07FE0000
++
++/* Sets the fifo sizes */
++#define R500_ZB_FIFO_SIZE                        0x4fd0
++#     define R500_OP_FIFO_SIZE_FULL   (0 << 0)
++#     define R500_OP_FIFO_SIZE_HALF   (1 << 0)
++#     define R500_OP_FIFO_SIZE_QUATER (2 << 0)
++#     define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
++
++/* Stencil Reference Value and Mask for backfacing quads */
++/* R300_ZB_STENCILREFMASK handles front face */
++#define R500_ZB_STENCILREFMASK_BF                0x4fd4
++#     define R500_STENCILREF_SHIFT       0
++#     define R500_STENCILREF_MASK        0x000000ff
++#     define R500_STENCILMASK_SHIFT      8
++#     define R500_STENCILMASK_MASK       0x0000ff00
++#     define R500_STENCILWRITEMASK_SHIFT 16
++#     define R500_STENCILWRITEMASK_MASK  0x00ff0000
++
++/* BEGIN: Vertex program instruction set */
++
++/* Every instruction is four dwords long:
++ *  DWORD 0: output and opcode
++ *  DWORD 1: first argument
++ *  DWORD 2: second argument
++ *  DWORD 3: third argument
++ *
++ * Notes:
++ *  - ABS r, a is implemented as MAX r, a, -a
++ *  - MOV is implemented as ADD to zero
++ *  - XPD is implemented as MUL + MAD
++ *  - FLR is implemented as FRC + ADD
++ *  - apparently, fglrx tries to schedule instructions so that there is at
++ *    least one instruction between the write to a temporary and the first
++ *    read from said temporary; however, violations of this scheduling are
++ *    allowed
++ *  - register indices seem to be unrelated with OpenGL aliasing to
++ *    conventional state
++ *  - only one attribute and one parameter can be loaded at a time; however,
++ *    the same attribute/parameter can be used for more than one argument
++ *  - the second software argument for POW is the third hardware argument
++ *    (no idea why)
++ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
++ *
++ * There is some magic surrounding LIT:
++ *   The single argument is replicated across all three inputs, but swizzled:
++ *     First argument: xyzy
++ *     Second argument: xyzx
++ *     Third argument: xyzw
++ *   Whenever the result is used later in the fragment program, fglrx forces
++ *   x and w to be 1.0 in the input selection; I don't know whether this is
++ *   strictly necessary
++ */
++#define R300_VPI_OUT_OP_DOT                     (1 << 0)
++#define R300_VPI_OUT_OP_MUL                     (2 << 0)
++#define R300_VPI_OUT_OP_ADD                     (3 << 0)
++#define R300_VPI_OUT_OP_MAD                     (4 << 0)
++#define R300_VPI_OUT_OP_DST                     (5 << 0)
++#define R300_VPI_OUT_OP_FRC                     (6 << 0)
++#define R300_VPI_OUT_OP_MAX                     (7 << 0)
++#define R300_VPI_OUT_OP_MIN                     (8 << 0)
++#define R300_VPI_OUT_OP_SGE                     (9 << 0)
++#define R300_VPI_OUT_OP_SLT                     (10 << 0)
++      /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
++#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
++#define R300_VPI_OUT_OP_ARL                     (13 << 0)
++#define R300_VPI_OUT_OP_EXP                     (65 << 0)
++#define R300_VPI_OUT_OP_LOG                     (66 << 0)
++      /* Used in fog computations, scalar(scalar) */
++#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
++#define R300_VPI_OUT_OP_LIT                     (68 << 0)
++#define R300_VPI_OUT_OP_POW                     (69 << 0)
++#define R300_VPI_OUT_OP_RCP                     (70 << 0)
++#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
++      /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
++#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
++#define R300_VPI_OUT_OP_EX2                     (75 << 0)
++#define R300_VPI_OUT_OP_LG2                     (76 << 0)
++#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
++      /* all temps, vector(scalar, vector, vector) */
++#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
++
++#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
++#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
++#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
++#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
++
++#define R300_VPI_OUT_REG_INDEX_SHIFT            13
++      /* GUESS based on fglrx native limits */
++#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
++
++#define R300_VPI_OUT_WRITE_X                    (1 << 20)
++#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
++#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
++#define R300_VPI_OUT_WRITE_W                    (1 << 23)
++
++#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
++#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
++#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
++#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
++#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
++
++#define R300_VPI_IN_REG_INDEX_SHIFT             5
++      /* GUESS based on fglrx native limits */
++#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
++
++/* The R300 can select components from the input register arbitrarily.
++ * Use the following constants, shifted by the component shift you
++ * want to select
++ */
++#define R300_VPI_IN_SELECT_X    0
++#define R300_VPI_IN_SELECT_Y    1
++#define R300_VPI_IN_SELECT_Z    2
++#define R300_VPI_IN_SELECT_W    3
++#define R300_VPI_IN_SELECT_ZERO 4
++#define R300_VPI_IN_SELECT_ONE  5
++#define R300_VPI_IN_SELECT_MASK 7
++
++#define R300_VPI_IN_X_SHIFT                     13
++#define R300_VPI_IN_Y_SHIFT                     16
++#define R300_VPI_IN_Z_SHIFT                     19
++#define R300_VPI_IN_W_SHIFT                     22
++
++#define R300_VPI_IN_NEG_X                       (1 << 25)
++#define R300_VPI_IN_NEG_Y                       (1 << 26)
++#define R300_VPI_IN_NEG_Z                       (1 << 27)
++#define R300_VPI_IN_NEG_W                       (1 << 28)
++/* END: Vertex program instruction set */
++
++/* BEGIN: Packet 3 commands */
++
++/* A primitive emission dword. */
++#define R300_PRIM_TYPE_NONE                     (0 << 0)
++#define R300_PRIM_TYPE_POINT                    (1 << 0)
++#define R300_PRIM_TYPE_LINE                     (2 << 0)
++#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
++#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
++#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
++#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
++#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
++#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
++#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
++#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
++      /* GUESS (based on r200) */
++#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
++#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
++#define R300_PRIM_TYPE_QUADS                    (13 << 0)
++#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
++#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
++#define R300_PRIM_TYPE_MASK                     0xF
++#define R300_PRIM_WALK_IND                      (1 << 4)
++#define R300_PRIM_WALK_LIST                     (2 << 4)
++#define R300_PRIM_WALK_RING                     (3 << 4)
++#define R300_PRIM_WALK_MASK                     (3 << 4)
++      /* GUESS (based on r200) */
++#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
++#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
++#define R300_PRIM_NUM_VERTICES_SHIFT            16
++#define R300_PRIM_NUM_VERTICES_MASK             0xffff
++
++/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
++ * Two parameter dwords:
++ * 0. The first parameter appears to be always 0
++ * 1. The second parameter is a standard primitive emission dword.
++ */
++#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
++
++/* Specify the full set of vertex arrays as (address, stride).
++ * The first parameter is the number of vertex arrays specified.
++ * The rest of the command is a variable length list of blocks, where
++ * each block is three dwords long and specifies two arrays.
++ * The first dword of a block is split into two words, the lower significant
++ * word refers to the first array, the more significant word to the second
++ * array in the block.
++ * The low byte of each word contains the size of an array entry in dwords,
++ * the high byte contains the stride of the array.
++ * The second dword of a block contains the pointer to the first array,
++ * the third dword of a block contains the pointer to the second array.
++ * Note that if the total number of arrays is odd, the third dword of
++ * the last block is omitted.
++ */
++#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
++
++#define R300_PACKET3_INDX_BUFFER            0x00003300
++#    define R300_EB_UNK1_SHIFT                      24
++#    define R300_EB_UNK1                    (0x80<<24)
++#    define R300_EB_UNK2                        0x0810
++#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
++#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
++
++/* END: Packet 3 commands */
++
++
++/* Color formats for 2d packets
++ */
++#define R300_CP_COLOR_FORMAT_CI8      2
++#define R300_CP_COLOR_FORMAT_ARGB1555 3
++#define R300_CP_COLOR_FORMAT_RGB565   4
++#define R300_CP_COLOR_FORMAT_ARGB8888 6
++#define R300_CP_COLOR_FORMAT_RGB332   7
++#define R300_CP_COLOR_FORMAT_RGB8     9
++#define R300_CP_COLOR_FORMAT_ARGB4444 15
++
++/*
++ * CP type-3 packets
++ */
++#define R300_CP_CMD_BITBLT_MULTI      0xC0009B00
++
++#define R500_VAP_INDEX_OFFSET         0x208c
++
++#define R500_GA_US_VECTOR_INDEX         0x4250
++#define R500_GA_US_VECTOR_DATA          0x4254
++
++#define R500_RS_IP_0                    0x4074
++#define R500_RS_INST_0                  0x4320
++
++#define R500_US_CONFIG                  0x4600
++
++#define R500_US_FC_CTRL                       0x4624
++#define R500_US_CODE_ADDR             0x4630
++
++#define R500_RB3D_COLOR_CLEAR_VALUE_AR  0x46c0
++#define R500_RB3D_CONSTANT_COLOR_AR     0x4ef8
++
++#endif /* _R300_REG_H */
++
++/* *INDENT-ON* */
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_cp.c git-nokia/drivers/gpu/drm-tungsten/radeon_cp.c
+--- git/drivers/gpu/drm-tungsten/radeon_cp.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_cp.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1771 @@
++/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
++/*
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * Copyright 2007 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++#include "r300_reg.h"
++
++#include "radeon_microcode.h"
++#define RADEON_FIFO_DEBUG     0
++
++static int radeon_do_cleanup_cp(struct drm_device * dev);
++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
++
++static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++      u32 ret;
++      RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
++      ret = RADEON_READ(R520_MC_IND_DATA);
++      RADEON_WRITE(R520_MC_IND_INDEX, 0);
++      return ret;
++}
++
++static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++      u32 ret;
++      RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
++      ret = RADEON_READ(RS480_NB_MC_DATA);
++      RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
++      return ret;
++}
++
++static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++      u32 ret;
++      RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
++      ret = RADEON_READ(RS690_MC_DATA);
++      RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
++      return ret;
++}
++
++static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
++{
++        if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++          return RS690_READ_MCIND(dev_priv, addr);
++      else
++          return RS480_READ_MCIND(dev_priv, addr);
++}
++
++u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
++{
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
++              return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++              return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
++              return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
++      else
++              return RADEON_READ(RADEON_MC_FB_LOCATION);
++}
++
++static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
++{
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
++              R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++              RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
++              R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
++      else
++              RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
++}
++
++static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
++{
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
++              R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++              RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
++      else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
++              R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
++      else
++              RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
++}
++
++static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
++{
++      u32 agp_base_hi = upper_32_bits(agp_base);
++      u32 agp_base_lo = agp_base & 0xffffffff;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
++              R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
++              R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
++      } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
++              RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
++              RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
++      } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
++              R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
++              R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
++              RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
++              RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
++      } else {
++              RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
++              if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
++                      RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
++      }
++}
++
++static int RADEON_READ_PLL(struct drm_device * dev, int addr)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
++      return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
++}
++
++static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
++{
++      RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
++      return RADEON_READ(RADEON_PCIE_DATA);
++}
++
++#if RADEON_FIFO_DEBUG
++static void radeon_status(drm_radeon_private_t * dev_priv)
++{
++      printk("%s:\n", __FUNCTION__);
++      printk("RBBM_STATUS = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
++      printk("CP_RB_RTPR = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
++      printk("CP_RB_WTPR = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
++      printk("AIC_CNTL = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
++      printk("AIC_STAT = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_STAT));
++      printk("AIC_PT_BASE = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
++      printk("TLB_ADDR = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
++      printk("TLB_DATA = 0x%08x\n",
++             (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
++}
++#endif
++
++/* ================================================================
++ * Engine, FIFO control
++ */
++
++static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
++{
++      u32 tmp;
++      int i;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
++              tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
++              tmp |= RADEON_RB3D_DC_FLUSH_ALL;
++              RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
++
++              for (i = 0; i < dev_priv->usec_timeout; i++) {
++                      if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
++                            & RADEON_RB3D_DC_BUSY)) {
++                              return 0;
++                      }
++                      DRM_UDELAY(1);
++              }
++      } else {
++              /* don't flush or purge cache here or lockup */
++              return 0;
++      }
++
++#if RADEON_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      radeon_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
++{
++      int i;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              int slots = (RADEON_READ(RADEON_RBBM_STATUS)
++                           & RADEON_RBBM_FIFOCNT_MASK);
++              if (slots >= entries)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++      DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n",
++               RADEON_READ(RADEON_RBBM_STATUS),
++               RADEON_READ(R300_VAP_CNTL_STATUS));
++
++#if RADEON_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      radeon_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
++{
++      int i, ret;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      ret = radeon_do_wait_for_fifo(dev_priv, 64);
++      if (ret)
++              return ret;
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              if (!(RADEON_READ(RADEON_RBBM_STATUS)
++                    & RADEON_RBBM_ACTIVE)) {
++                      radeon_do_pixcache_flush(dev_priv);
++                      return 0;
++              }
++              DRM_UDELAY(1);
++      }
++      DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n",
++               RADEON_READ(RADEON_RBBM_STATUS),
++               RADEON_READ(R300_VAP_CNTL_STATUS));
++
++#if RADEON_FIFO_DEBUG
++      DRM_ERROR("failed!\n");
++      radeon_status(dev_priv);
++#endif
++      return -EBUSY;
++}
++
++static void radeon_init_pipes(drm_radeon_private_t * dev_priv)
++{
++      uint32_t gb_tile_config, gb_pipe_sel = 0;
++
++      /* RS4xx/RS6xx/R4xx/R5xx */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
++              gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
++              dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
++      } else {
++              /* R3xx */
++              if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
++                  ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
++                      dev_priv->num_gb_pipes = 2;
++              } else {
++                      /* R3Vxx */
++                      dev_priv->num_gb_pipes = 1;
++              }
++      }
++      DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
++
++      gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
++
++      switch(dev_priv->num_gb_pipes) {
++      case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
++      case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
++      case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
++      default:
++      case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
++      }
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
++              RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
++              RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
++      }
++      RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
++      radeon_do_wait_for_idle(dev_priv);
++      RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
++      RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
++                                             R300_DC_AUTOFLUSH_ENABLE |
++                                             R300_DC_DC_DISABLE_IGNORE_PE));
++
++
++}
++
++/* ================================================================
++ * CP control, initialization
++ */
++
++/* Load the microcode for the CP */
++static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
++{
++      int i;
++      DRM_DEBUG("\n");
++
++      radeon_do_wait_for_idle(dev_priv);
++
++      RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
++
++      if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
++          ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
++              DRM_INFO("Loading R100 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R100_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R100_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
++              DRM_INFO("Loading R200 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R200_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R200_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
++              DRM_INFO("Loading R300 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R300_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R300_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
++              DRM_INFO("Loading R400 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R420_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R420_cp_microcode[i][0]);
++              }
++      } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
++              DRM_INFO("Loading RS690 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   RS690_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   RS690_cp_microcode[i][0]);
++              }
++      } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
++                 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
++              DRM_INFO("Loading R500 Microcode\n");
++              for (i = 0; i < 256; i++) {
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
++                                   R520_cp_microcode[i][1]);
++                      RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
++                                   R520_cp_microcode[i][0]);
++              }
++      }
++}
++
++/* Flush any pending commands to the CP.  This should only be used just
++ * prior to a wait for idle, as it informs the engine that the command
++ * stream is ending.
++ */
++static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
++{
++      DRM_DEBUG("\n");
++#if 0
++      u32 tmp;
++
++      tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
++      RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
++#endif
++}
++
++/* Wait for the CP to go idle.
++ */
++int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
++{
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(6);
++
++      RADEON_PURGE_CACHE();
++      RADEON_PURGE_ZCACHE();
++      RADEON_WAIT_UNTIL_IDLE();
++
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      return radeon_do_wait_for_idle(dev_priv);
++}
++
++/* Start the Command Processor.
++ */
++static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
++{
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      radeon_do_wait_for_idle(dev_priv);
++
++      RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
++
++      dev_priv->cp_running = 1;
++
++      BEGIN_RING(8);
++      /* isync can only be written through cp on r5xx write it here */
++      OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
++      OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
++               RADEON_ISYNC_ANY3D_IDLE2D |
++               RADEON_ISYNC_WAIT_IDLEGUI |
++               RADEON_ISYNC_CPSCRATCH_IDLEGUI);
++      RADEON_PURGE_CACHE();
++      RADEON_PURGE_ZCACHE();
++      RADEON_WAIT_UNTIL_IDLE();
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
++}
++
++/* Reset the Command Processor.  This will not flush any pending
++ * commands, so you must wait for the CP command stream to complete
++ * before calling this routine.
++ */
++static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
++{
++      u32 cur_read_ptr;
++      DRM_DEBUG("\n");
++
++      cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
++      RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
++      SET_RING_HEAD(dev_priv, cur_read_ptr);
++      dev_priv->ring.tail = cur_read_ptr;
++}
++
++/* Stop the Command Processor.  This will not flush any pending
++ * commands, so you must flush the command stream and wait for the CP
++ * to go idle before calling this routine.
++ */
++static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
++{
++      DRM_DEBUG("\n");
++
++      RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
++
++      dev_priv->cp_running = 0;
++}
++
++/* Reset the engine.  This will stop the CP if it is running.
++ */
++static int radeon_do_engine_reset(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
++      DRM_DEBUG("\n");
++
++      radeon_do_pixcache_flush(dev_priv);
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
++              /* may need something similar for newer chips */
++              clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
++              mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
++
++              RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
++                                                  RADEON_FORCEON_MCLKA |
++                                                  RADEON_FORCEON_MCLKB |
++                                                  RADEON_FORCEON_YCLKA |
++                                                  RADEON_FORCEON_YCLKB |
++                                                  RADEON_FORCEON_MC |
++                                                  RADEON_FORCEON_AIC));
++      }
++
++      rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
++
++      RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
++                                            RADEON_SOFT_RESET_CP |
++                                            RADEON_SOFT_RESET_HI |
++                                            RADEON_SOFT_RESET_SE |
++                                            RADEON_SOFT_RESET_RE |
++                                            RADEON_SOFT_RESET_PP |
++                                            RADEON_SOFT_RESET_E2 |
++                                            RADEON_SOFT_RESET_RB));
++      RADEON_READ(RADEON_RBBM_SOFT_RESET);
++      RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
++                                            ~(RADEON_SOFT_RESET_CP |
++                                              RADEON_SOFT_RESET_HI |
++                                              RADEON_SOFT_RESET_SE |
++                                              RADEON_SOFT_RESET_RE |
++                                              RADEON_SOFT_RESET_PP |
++                                              RADEON_SOFT_RESET_E2 |
++                                              RADEON_SOFT_RESET_RB)));
++      RADEON_READ(RADEON_RBBM_SOFT_RESET);
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
++              RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
++              RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
++              RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
++      }
++
++      /* setup the raster pipes */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
++          radeon_init_pipes(dev_priv);
++
++      /* Reset the CP ring */
++      radeon_do_cp_reset(dev_priv);
++
++      /* The CP is no longer running after an engine reset */
++      dev_priv->cp_running = 0;
++
++      /* Reset any pending vertex, indirect buffers */
++      radeon_freelist_reset(dev);
++
++      return 0;
++}
++
++static void radeon_cp_init_ring_buffer(struct drm_device * dev,
++                                     drm_radeon_private_t * dev_priv)
++{
++      u32 ring_start, cur_read_ptr;
++      u32 tmp;
++
++      /* Initialize the memory controller. With new memory map, the fb location
++       * is not changed, it should have been properly initialized already. Part
++       * of the problem is that the code below is bogus, assuming the GART is
++       * always appended to the fb which is not necessarily the case
++       */
++      if (!dev_priv->new_memmap)
++              radeon_write_fb_location(dev_priv,
++                           ((dev_priv->gart_vm_start - 1) & 0xffff0000)
++                           | (dev_priv->fb_location >> 16));
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              radeon_write_agp_base(dev_priv, dev->agp->base);
++
++              radeon_write_agp_location(dev_priv,
++                           (((dev_priv->gart_vm_start - 1 +
++                              dev_priv->gart_size) & 0xffff0000) |
++                            (dev_priv->gart_vm_start >> 16)));
++
++              ring_start = (dev_priv->cp_ring->offset
++                            - dev->agp->base
++                            + dev_priv->gart_vm_start);
++      } else
++#endif
++              ring_start = (dev_priv->cp_ring->offset
++                            - (unsigned long)dev->sg->virtual
++                            + dev_priv->gart_vm_start);
++
++      RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
++
++      /* Set the write pointer delay */
++      RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
++
++      /* Initialize the ring buffer's read and write pointers */
++      cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
++      RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
++      SET_RING_HEAD(dev_priv, cur_read_ptr);
++      dev_priv->ring.tail = cur_read_ptr;
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
++                           dev_priv->ring_rptr->offset
++                           - dev->agp->base + dev_priv->gart_vm_start);
++      } else
++#endif
++      {
++              struct drm_sg_mem *entry = dev->sg;
++              unsigned long tmp_ofs, page_ofs;
++
++              tmp_ofs = dev_priv->ring_rptr->offset -
++                              (unsigned long)dev->sg->virtual;
++              page_ofs = tmp_ofs >> PAGE_SHIFT;
++
++              RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
++              DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
++                        (unsigned long)entry->busaddr[page_ofs],
++                        entry->handle + tmp_ofs);
++      }
++
++      /* Set ring buffer size */
++#ifdef __BIG_ENDIAN
++      RADEON_WRITE(RADEON_CP_RB_CNTL,
++                   RADEON_BUF_SWAP_32BIT |
++                   (dev_priv->ring.fetch_size_l2ow << 18) |
++                   (dev_priv->ring.rptr_update_l2qw << 8) |
++                   dev_priv->ring.size_l2qw);
++#else
++      RADEON_WRITE(RADEON_CP_RB_CNTL,
++                   (dev_priv->ring.fetch_size_l2ow << 18) |
++                   (dev_priv->ring.rptr_update_l2qw << 8) |
++                   dev_priv->ring.size_l2qw);
++#endif
++
++      /* Initialize the scratch register pointer.  This will cause
++       * the scratch register values to be written out to memory
++       * whenever they are updated.
++       *
++       * We simply put this behind the ring read pointer, this works
++       * with PCI GART as well as (whatever kind of) AGP GART
++       */
++      RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
++                   + RADEON_SCRATCH_REG_OFFSET);
++
++      dev_priv->scratch = ((__volatile__ u32 *)
++                           dev_priv->ring_rptr->handle +
++                           (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
++
++      RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
++
++      /* Turn on bus mastering */
++      tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
++      RADEON_WRITE(RADEON_BUS_CNTL, tmp);
++
++      dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
++      RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
++
++      dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
++      RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
++                   dev_priv->sarea_priv->last_dispatch);
++
++      dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
++      RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
++
++      radeon_do_wait_for_idle(dev_priv);
++
++      /* Sync everything up */
++      RADEON_WRITE(RADEON_ISYNC_CNTL,
++                   (RADEON_ISYNC_ANY2D_IDLE3D |
++                    RADEON_ISYNC_ANY3D_IDLE2D |
++                    RADEON_ISYNC_WAIT_IDLEGUI |
++                    RADEON_ISYNC_CPSCRATCH_IDLEGUI));
++
++}
++
++static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
++{
++      u32 tmp;
++
++      /* Writeback doesn't seem to work everywhere, test it here and possibly
++       * enable it if it appears to work
++       */
++      DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
++      RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
++
++      for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
++              if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
++                  0xdeadbeef)
++                      break;
++              DRM_UDELAY(1);
++      }
++
++      if (tmp < dev_priv->usec_timeout) {
++              dev_priv->writeback_works = 1;
++              DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
++      } else {
++              dev_priv->writeback_works = 0;
++              DRM_INFO("writeback test failed\n");
++      }
++      if (radeon_no_wb == 1) {
++              dev_priv->writeback_works = 0;
++              DRM_INFO("writeback forced off\n");
++      }
++
++      if (!dev_priv->writeback_works) {
++              /* Disable writeback to avoid unnecessary bus master transfers */
++              RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE);
++              RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
++      }
++}
++
++/* Enable or disable IGP GART on the chip */
++static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
++{
++      u32 temp;
++
++      if (on) {
++              DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
++                       dev_priv->gart_vm_start,
++                       (long)dev_priv->gart_info.bus_addr,
++                       dev_priv->gart_size);
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
++
++              if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
++                      IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
++                                                           RS690_BLOCK_GFX_D3_EN));
++              else
++                      IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
++
++              IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
++                                                             RS480_VA_SIZE_32MB));
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
++              IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
++                                                      RS480_TLB_ENABLE |
++                                                      RS480_GTW_LAC_EN |
++                                                      RS480_1LEVEL_GART));
++
++              temp = dev_priv->gart_info.bus_addr & 0xfffff000;
++              temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
++              IGP_WRITE_MCIND(RS480_GART_BASE, temp);
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
++              IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
++                                                    RS480_REQ_TYPE_SNOOP_DIS));
++
++              radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
++
++              dev_priv->gart_size = 32*1024*1024;
++              temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & 
++                      0xffff0000) | (dev_priv->gart_vm_start >> 16));
++
++              radeon_write_agp_location(dev_priv, temp);
++
++              temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
++              IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
++                                                             RS480_VA_SIZE_32MB));
++
++              do {
++                      temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
++                      if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
++                              break;
++                      DRM_UDELAY(1);
++              } while(1);
++
++              IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
++                              RS480_GART_CACHE_INVALIDATE);
++
++              do {
++                      temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
++                      if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
++                              break;
++                      DRM_UDELAY(1);
++              } while(1);
++
++              IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
++      } else {
++              IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
++      }
++}
++
++static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
++{
++      u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
++      if (on) {
++
++              DRM_DEBUG("programming pcie %08X %08lX %08X\n",
++                        dev_priv->gart_vm_start,
++                        (long)dev_priv->gart_info.bus_addr,
++                        dev_priv->gart_size);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
++                                dev_priv->gart_vm_start);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
++                                dev_priv->gart_info.bus_addr);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
++                                dev_priv->gart_vm_start);
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
++                                dev_priv->gart_vm_start +
++                                dev_priv->gart_size - 1);
++
++              radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
++
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
++                                RADEON_PCIE_TX_GART_EN);
++      } else {
++              RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
++                                tmp & ~RADEON_PCIE_TX_GART_EN);
++      }
++}
++
++/* Enable or disable PCI GART on the chip */
++static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
++{
++      u32 tmp;
++
++      if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
++          (dev_priv->flags & RADEON_IS_IGPGART)) {
++              radeon_set_igpgart(dev_priv, on);
++              return;
++      }
++
++      if (dev_priv->flags & RADEON_IS_PCIE) {
++              radeon_set_pciegart(dev_priv, on);
++              return;
++      }
++
++      tmp = RADEON_READ(RADEON_AIC_CNTL);
++
++      if (on) {
++              RADEON_WRITE(RADEON_AIC_CNTL,
++                           tmp | RADEON_PCIGART_TRANSLATE_EN);
++
++              /* set PCI GART page-table base address
++               */
++              RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
++
++              /* set address range for PCI address translate
++               */
++              RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
++              RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
++                           + dev_priv->gart_size - 1);
++
++              /* Turn off AGP aperture -- is this required for PCI GART?
++               */
++              radeon_write_agp_location(dev_priv, 0xffffffc0);
++              RADEON_WRITE(RADEON_AGP_COMMAND, 0);    /* clear AGP_COMMAND */
++      } else {
++              RADEON_WRITE(RADEON_AIC_CNTL,
++                           tmp & ~RADEON_PCIGART_TRANSLATE_EN);
++      }
++}
++
++static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++
++      /* if we require new memory map but we don't have it fail */
++      if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
++              DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP))
++      {
++              DRM_DEBUG("Forcing AGP card to PCI mode\n");
++              dev_priv->flags &= ~RADEON_IS_AGP;
++      }
++      else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
++               && !init->is_pci)
++      {
++              DRM_DEBUG("Restoring AGP flag\n");
++              dev_priv->flags |= RADEON_IS_AGP;
++      }
++
++      if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
++              DRM_ERROR("PCI GART memory not allocated!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->usec_timeout = init->usec_timeout;
++      if (dev_priv->usec_timeout < 1 ||
++          dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
++              DRM_DEBUG("TIMEOUT problem!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      /* Enable vblank on CRTC1 for older X servers
++       */
++      dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
++
++      dev_priv->do_boxes = 0;
++      dev_priv->cp_mode = init->cp_mode;
++
++      /* We don't support anything other than bus-mastering ring mode,
++       * but the ring can be in either AGP or PCI space for the ring
++       * read pointer.
++       */
++      if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
++          (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
++              DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      switch (init->fb_bpp) {
++      case 16:
++              dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
++              break;
++      case 32:
++      default:
++              dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
++              break;
++      }
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++
++      switch (init->depth_bpp) {
++      case 16:
++              dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
++              break;
++      case 32:
++      default:
++              dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
++              break;
++      }
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      /* Hardware state for depth clears.  Remove this if/when we no
++       * longer clear the depth buffer with a 3D rectangle.  Hard-code
++       * all values to prevent unwanted 3D state from slipping through
++       * and screwing with the clear operation.
++       */
++      dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
++                                         (dev_priv->color_fmt << 10) |
++                                         (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0));
++
++      dev_priv->depth_clear.rb3d_zstencilcntl =
++          (dev_priv->depth_fmt |
++           RADEON_Z_TEST_ALWAYS |
++           RADEON_STENCIL_TEST_ALWAYS |
++           RADEON_STENCIL_S_FAIL_REPLACE |
++           RADEON_STENCIL_ZPASS_REPLACE |
++           RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
++
++      dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
++                                       RADEON_BFACE_SOLID |
++                                       RADEON_FFACE_SOLID |
++                                       RADEON_FLAT_SHADE_VTX_LAST |
++                                       RADEON_DIFFUSE_SHADE_FLAT |
++                                       RADEON_ALPHA_SHADE_FLAT |
++                                       RADEON_SPECULAR_SHADE_FLAT |
++                                       RADEON_FOG_SHADE_FLAT |
++                                       RADEON_VTX_PIX_CENTER_OGL |
++                                       RADEON_ROUND_MODE_TRUNC |
++                                       RADEON_ROUND_PREC_8TH_PIX);
++
++
++      dev_priv->ring_offset = init->ring_offset;
++      dev_priv->ring_rptr_offset = init->ring_rptr_offset;
++      dev_priv->buffers_offset = init->buffers_offset;
++      dev_priv->gart_textures_offset = init->gart_textures_offset;
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
++      if (!dev_priv->cp_ring) {
++              DRM_ERROR("could not find cp ring region!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++      dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
++      if (!dev_priv->ring_rptr) {
++              DRM_ERROR("could not find ring read pointer!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++      dev->agp_buffer_token = init->buffers_offset;
++      dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
++      if (!dev->agp_buffer_map) {
++              DRM_ERROR("could not find dma buffer region!\n");
++              radeon_do_cleanup_cp(dev);
++              return -EINVAL;
++      }
++
++      if (init->gart_textures_offset) {
++              dev_priv->gart_textures =
++                  drm_core_findmap(dev, init->gart_textures_offset);
++              if (!dev_priv->gart_textures) {
++                      DRM_ERROR("could not find GART texture region!\n");
++                      radeon_do_cleanup_cp(dev);
++                      return -EINVAL;
++              }
++      }
++
++      dev_priv->sarea_priv =
++          (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                                  init->sarea_priv_offset);
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              drm_core_ioremap(dev_priv->cp_ring, dev);
++              drm_core_ioremap(dev_priv->ring_rptr, dev);
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev_priv->cp_ring->handle ||
++                  !dev_priv->ring_rptr->handle ||
++                  !dev->agp_buffer_map->handle) {
++                      DRM_ERROR("could not find ioremap agp regions!\n");
++                      radeon_do_cleanup_cp(dev);
++                      return -EINVAL;
++              }
++      } else
++#endif
++      {
++              dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
++              dev_priv->ring_rptr->handle =
++                  (void *)dev_priv->ring_rptr->offset;
++              dev->agp_buffer_map->handle =
++                  (void *)dev->agp_buffer_map->offset;
++
++              DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
++                        dev_priv->cp_ring->handle);
++              DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
++                        dev_priv->ring_rptr->handle);
++              DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
++                        dev->agp_buffer_map->handle);
++      }
++
++      dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
++      dev_priv->fb_size =
++              ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
++              - dev_priv->fb_location;
++
++      dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
++                                      ((dev_priv->front_offset
++                                        + dev_priv->fb_location) >> 10));
++
++      dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
++                                     ((dev_priv->back_offset
++                                       + dev_priv->fb_location) >> 10));
++
++      dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
++                                      ((dev_priv->depth_offset
++                                        + dev_priv->fb_location) >> 10));
++
++      dev_priv->gart_size = init->gart_size;
++
++      /* New let's set the memory map ... */
++      if (dev_priv->new_memmap) {
++              u32 base = 0;
++
++              DRM_INFO("Setting GART location based on new memory map\n");
++
++              /* If using AGP, try to locate the AGP aperture at the same
++               * location in the card and on the bus, though we have to
++               * align it down.
++               */
++#if __OS_HAS_AGP
++              if (dev_priv->flags & RADEON_IS_AGP) {
++                      base = dev->agp->base;
++                      /* Check if valid */
++                      if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
++                          base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
++                              DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
++                                       dev->agp->base);
++                              base = 0;
++                      }
++              }
++#endif
++              /* If not or if AGP is at 0 (Macs), try to put it elsewhere */
++              if (base == 0) {
++                      base = dev_priv->fb_location + dev_priv->fb_size;
++                      if (base < dev_priv->fb_location ||
++                          ((base + dev_priv->gart_size) & 0xfffffffful) < base)
++                              base = dev_priv->fb_location
++                                      - dev_priv->gart_size;
++              }
++              dev_priv->gart_vm_start = base & 0xffc00000u;
++              if (dev_priv->gart_vm_start != base)
++                      DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
++                               base, dev_priv->gart_vm_start);
++      } else {
++              DRM_INFO("Setting GART location based on old memory map\n");
++              dev_priv->gart_vm_start = dev_priv->fb_location +
++                      RADEON_READ(RADEON_CONFIG_APER_SIZE);
++      }
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP)
++              dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
++                                               - dev->agp->base
++                                               + dev_priv->gart_vm_start);
++      else
++#endif
++              dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
++                                      - (unsigned long)dev->sg->virtual
++                                      + dev_priv->gart_vm_start);
++
++      DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
++      DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
++      DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
++                dev_priv->gart_buffers_offset);
++
++      dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
++      dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
++                            + init->ring_size / sizeof(u32));
++      dev_priv->ring.size = init->ring_size;
++      dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
++
++      dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
++      dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
++
++      dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
++      dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
++
++      dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
++
++      dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              /* Turn off PCI GART */
++              radeon_set_pcigart(dev_priv, 0);
++      } else
++#endif
++      {
++              dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
++              /* if we have an offset set from userspace */
++              if (dev_priv->pcigart_offset_set) {
++                      dev_priv->gart_info.bus_addr =
++                          dev_priv->pcigart_offset + dev_priv->fb_location;
++                      dev_priv->gart_info.mapping.offset =
++                          dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
++                      dev_priv->gart_info.mapping.size =
++                          dev_priv->gart_info.table_size;
++
++                      drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
++                      dev_priv->gart_info.addr =
++                          dev_priv->gart_info.mapping.handle;
++
++                      if (dev_priv->flags & RADEON_IS_PCIE)
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
++                      else
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++                      dev_priv->gart_info.gart_table_location =
++                          DRM_ATI_GART_FB;
++
++                      DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
++                                dev_priv->gart_info.addr,
++                                dev_priv->pcigart_offset);
++              } else {
++                      if (dev_priv->flags & RADEON_IS_IGPGART)
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
++                      else
++                              dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++                      dev_priv->gart_info.gart_table_location =
++                          DRM_ATI_GART_MAIN;
++                      dev_priv->gart_info.addr = NULL;
++                      dev_priv->gart_info.bus_addr = 0;
++                      if (dev_priv->flags & RADEON_IS_PCIE) {
++                              DRM_ERROR
++                                  ("Cannot use PCI Express without GART in FB memory\n");
++                              radeon_do_cleanup_cp(dev);
++                              return -EINVAL;
++                      }
++              }
++
++              if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
++                      DRM_ERROR("failed to init PCI GART!\n");
++                      radeon_do_cleanup_cp(dev);
++                      return -ENOMEM;
++              }
++
++              /* Turn on PCI GART */
++              radeon_set_pcigart(dev_priv, 1);
++      }
++
++      /* Start with assuming that writeback doesn't work */
++      dev_priv->writeback_works = 0;
++
++      radeon_cp_load_microcode(dev_priv);
++      radeon_cp_init_ring_buffer(dev, dev_priv);
++
++      dev_priv->last_buf = 0;
++
++      radeon_do_engine_reset(dev);
++      radeon_test_writeback(dev_priv);
++
++      return 0;
++}
++
++static int radeon_do_cleanup_cp(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      /* Make sure interrupts are disabled here because the uninstall ioctl
++       * may not have been called from userspace and after dev_private
++       * is freed, it's too late.
++       */
++      if (dev->irq_enabled)
++              drm_irq_uninstall(dev);
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              if (dev_priv->cp_ring != NULL) {
++                      drm_core_ioremapfree(dev_priv->cp_ring, dev);
++                      dev_priv->cp_ring = NULL;
++              }
++              if (dev_priv->ring_rptr != NULL) {
++                      drm_core_ioremapfree(dev_priv->ring_rptr, dev);
++                      dev_priv->ring_rptr = NULL;
++              }
++              if (dev->agp_buffer_map != NULL) {
++                      drm_core_ioremapfree(dev->agp_buffer_map, dev);
++                      dev->agp_buffer_map = NULL;
++              }
++      } else
++#endif
++      {
++
++              if (dev_priv->gart_info.bus_addr) {
++                      /* Turn off PCI GART */
++                      radeon_set_pcigart(dev_priv, 0);
++                      if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
++                              DRM_ERROR("failed to cleanup PCI GART!\n");
++              }
++
++              if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
++              {
++                      drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
++                      dev_priv->gart_info.addr = 0;
++              }
++      }
++      /* only clear to the start of flags */
++      memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
++
++      return 0;
++}
++
++/* This code will reinit the Radeon CP hardware after a resume from disc.
++ * AFAIK, it would be very difficult to pickle the state at suspend time, so
++ * here we make sure that all Radeon hardware initialisation is re-done without
++ * affecting running applications.
++ *
++ * Charl P. Botha <http://cpbotha.net>
++ */
++static int radeon_do_resume_cp(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv) {
++              DRM_ERROR("Called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("Starting radeon_do_resume_cp()\n");
++
++#if __OS_HAS_AGP
++      if (dev_priv->flags & RADEON_IS_AGP) {
++              /* Turn off PCI GART */
++              radeon_set_pcigart(dev_priv, 0);
++      } else
++#endif
++      {
++              /* Turn on PCI GART */
++              radeon_set_pcigart(dev_priv, 1);
++      }
++
++      radeon_cp_load_microcode(dev_priv);
++      radeon_cp_init_ring_buffer(dev, dev_priv);
++
++      radeon_do_engine_reset(dev);
++      radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
++
++      DRM_DEBUG("radeon_do_resume_cp() complete\n");
++
++      return 0;
++}
++
++int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_init_t *init = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (init->func == RADEON_INIT_R300_CP)
++              r300_init_reg_flags(dev);
++
++      switch (init->func) {
++      case RADEON_INIT_CP:
++      case RADEON_INIT_R200_CP:
++      case RADEON_INIT_R300_CP:
++              return radeon_do_init_cp(dev, init);
++      case RADEON_CLEANUP_CP:
++              return radeon_do_cleanup_cp(dev);
++      }
++
++      return -EINVAL;
++}
++
++int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dev_priv->cp_running) {
++              DRM_DEBUG("while CP running\n");
++              return 0;
++      }
++      if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
++              DRM_DEBUG("called with bogus CP mode (%d)\n",
++                        dev_priv->cp_mode);
++              return 0;
++      }
++
++      radeon_do_cp_start(dev_priv);
++
++      return 0;
++}
++
++/* Stop the CP.  The engine must have been idled before calling this
++ * routine.
++ */
++int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_cp_stop_t *stop = data;
++      int ret;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv->cp_running)
++              return 0;
++
++      /* Flush any pending CP commands.  This ensures any outstanding
++       * commands are exectuted by the engine before we turn it off.
++       */
++      if (stop->flush) {
++              radeon_do_cp_flush(dev_priv);
++      }
++
++      /* If we fail to make the engine go idle, we return an error
++       * code so that the DRM ioctl wrapper can try again.
++       */
++      if (stop->idle) {
++              ret = radeon_do_cp_idle(dev_priv);
++              if (ret)
++                      return ret;
++      }
++
++      /* Finally, we can turn off the CP.  If the engine isn't idle,
++       * we will get some dropped triangles as they won't be fully
++       * rendered before the CP is shut down.
++       */
++      radeon_do_cp_stop(dev_priv);
++
++      /* Reset the engine */
++      radeon_do_engine_reset(dev);
++
++      return 0;
++}
++
++void radeon_do_release(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int i, ret;
++
++      if (dev_priv) {
++              if (dev_priv->cp_running) {
++                      /* Stop the cp */
++                      while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
++                              DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
++#ifdef __linux__
++                              schedule();
++#else
++#if defined(__FreeBSD__) && __FreeBSD_version > 500000
++                              mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel",
++                                     1);
++#else
++                              tsleep(&ret, PZERO, "rdnrel", 1);
++#endif
++#endif
++                      }
++                      radeon_do_cp_stop(dev_priv);
++                      radeon_do_engine_reset(dev);
++              }
++
++              /* Disable *all* interrupts */
++              if (dev_priv->mmio)     /* remove this after permanent addmaps */
++                      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++
++              if (dev_priv->mmio) {   /* remove all surfaces */
++                      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++                              RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
++                              RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
++                                           16 * i, 0);
++                              RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
++                                           16 * i, 0);
++                      }
++              }
++
++              /* Free memory heap structures */
++              radeon_mem_takedown(&(dev_priv->gart_heap));
++              radeon_mem_takedown(&(dev_priv->fb_heap));
++
++              /* deallocate kernel resources */
++              radeon_do_cleanup_cp(dev);
++      }
++}
++
++/* Just reset the CP ring.  Called as part of an X Server engine reset.
++ */
++int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_DEBUG("called before init done\n");
++              return -EINVAL;
++      }
++
++      radeon_do_cp_reset(dev_priv);
++
++      /* The CP is no longer running after an engine reset */
++      dev_priv->cp_running = 0;
++
++      return 0;
++}
++
++int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return radeon_do_cp_idle(dev_priv);
++}
++
++/* Added by Charl P. Botha to call radeon_do_resume_cp().
++ */
++int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++
++      return radeon_do_resume_cp(dev);
++}
++
++int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return radeon_do_engine_reset(dev);
++}
++
++/* ================================================================
++ * Fullscreen mode
++ */
++
++/* KW: Deprecated to say the least:
++ */
++int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      return 0;
++}
++
++/* ================================================================
++ * Freelist management
++ */
++
++/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
++ *   bufs until freelist code is used.  Note this hides a problem with
++ *   the scratch register * (used to keep track of last buffer
++ *   completed) being written to before * the last buffer has actually
++ *   completed rendering.
++ *
++ * KW:  It's also a good way to find free buffers quickly.
++ *
++ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
++ * sleep.  However, bugs in older versions of radeon_accel.c mean that
++ * we essentially have to do this, else old clients will break.
++ *
++ * However, it does leave open a potential deadlock where all the
++ * buffers are held by other clients, which can't release them because
++ * they can't get the lock.
++ */
++
++struct drm_buf *radeon_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv;
++      struct drm_buf *buf;
++      int i, t;
++      int start;
++
++      if (++dev_priv->last_buf >= dma->buf_count)
++              dev_priv->last_buf = 0;
++
++      start = dev_priv->last_buf;
++
++      for (t = 0; t < dev_priv->usec_timeout; t++) {
++              u32 done_age = GET_SCRATCH(1);
++              DRM_DEBUG("done_age = %d\n", done_age);
++              for (i = start; i < dma->buf_count; i++) {
++                      buf = dma->buflist[i];
++                      buf_priv = buf->dev_private;
++                      if (buf->file_priv == NULL || (buf->pending &&
++                                                     buf_priv->age <=
++                                                     done_age)) {
++                              dev_priv->stats.requested_bufs++;
++                              buf->pending = 0;
++                              return buf;
++                      }
++                      start = 0;
++              }
++
++              if (t) {
++                      DRM_UDELAY(1);
++                      dev_priv->stats.freelist_loops++;
++              }
++      }
++
++      DRM_DEBUG("returning NULL!\n");
++      return NULL;
++}
++
++#if 0
++struct drm_buf *radeon_freelist_get(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv;
++      struct drm_buf *buf;
++      int i, t;
++      int start;
++      u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
++
++      if (++dev_priv->last_buf >= dma->buf_count)
++              dev_priv->last_buf = 0;
++
++      start = dev_priv->last_buf;
++      dev_priv->stats.freelist_loops++;
++
++      for (t = 0; t < 2; t++) {
++              for (i = start; i < dma->buf_count; i++) {
++                      buf = dma->buflist[i];
++                      buf_priv = buf->dev_private;
++                      if (buf->file_priv == 0 || (buf->pending &&
++                                                  buf_priv->age <=
++                                                  done_age)) {
++                              dev_priv->stats.requested_bufs++;
++                              buf->pending = 0;
++                              return buf;
++                      }
++              }
++              start = 0;
++      }
++
++      return NULL;
++}
++#endif
++
++void radeon_freelist_reset(struct drm_device * dev)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      dev_priv->last_buf = 0;
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
++              buf_priv->age = 0;
++      }
++}
++
++/* ================================================================
++ * CP command submission
++ */
++
++int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
++{
++      drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
++      int i;
++      u32 last_head = GET_RING_HEAD(dev_priv);
++
++      for (i = 0; i < dev_priv->usec_timeout; i++) {
++              u32 head = GET_RING_HEAD(dev_priv);
++
++              ring->space = (head - ring->tail) * sizeof(u32);
++              if (ring->space <= 0)
++                      ring->space += ring->size;
++              if (ring->space > n)
++                      return 0;
++
++              dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++              if (head != last_head)
++                      i = 0;
++              last_head = head;
++
++              DRM_UDELAY(1);
++      }
++
++      /* FIXME: This return value is ignored in the BEGIN_RING macro! */
++#if RADEON_FIFO_DEBUG
++      radeon_status(dev_priv);
++      DRM_ERROR("failed!\n");
++#endif
++      return -EBUSY;
++}
++
++static int radeon_cp_get_buffers(struct drm_device *dev,
++                               struct drm_file *file_priv,
++                               struct drm_dma * d)
++{
++      int i;
++      struct drm_buf *buf;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = radeon_freelist_get(dev);
++              if (!buf)
++                      return -EBUSY;  /* NOTE: broken client */
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
++                                   sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
++                                   sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      int ret = 0;
++      struct drm_dma *d = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = radeon_cp_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++int radeon_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      drm_radeon_private_t *dev_priv;
++      int ret = 0;
++
++      dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_radeon_private_t));
++      dev->dev_private = (void *)dev_priv;
++      dev_priv->flags = flags;
++
++      switch (flags & RADEON_FAMILY_MASK) {
++      case CHIP_R100:
++      case CHIP_RV200:
++      case CHIP_R200:
++      case CHIP_R300:
++      case CHIP_R350:
++      case CHIP_R420:
++      case CHIP_RV410:
++      case CHIP_RV515:
++      case CHIP_R520:
++      case CHIP_RV570:
++      case CHIP_R580:
++              dev_priv->flags |= RADEON_HAS_HIERZ;
++              break;
++      default:
++              /* all other chips have no hierarchical z buffer */
++              break;
++      }
++
++      dev_priv->chip_family = flags & RADEON_FAMILY_MASK;
++      if (drm_device_is_agp(dev))
++              dev_priv->flags |= RADEON_IS_AGP;
++      else if (drm_device_is_pcie(dev))
++              dev_priv->flags |= RADEON_IS_PCIE;
++      else
++              dev_priv->flags |= RADEON_IS_PCI;
++
++      DRM_DEBUG("%s card detected\n",
++                ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
++      return ret;
++}
++
++/* Create mappings for registers and framebuffer so userland doesn't necessarily
++ * have to find them.
++ */
++int radeon_driver_firstopen(struct drm_device *dev)
++{
++      int ret;
++      drm_local_map_t *map;
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
++
++      ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
++                       drm_get_resource_len(dev, 2), _DRM_REGISTERS,
++                       _DRM_READ_ONLY, &dev_priv->mmio);
++      if (ret != 0)
++              return ret;
++
++      dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
++      ret = drm_addmap(dev, dev_priv->fb_aper_offset,
++                       drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
++                       _DRM_WRITE_COMBINING, &map);
++      if (ret != 0)
++              return ret;
++
++      return 0;
++}
++
++int radeon_driver_unload(struct drm_device *dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      DRM_DEBUG("\n");
++      drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++
++      dev->dev_private = NULL;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drm.h git-nokia/drivers/gpu/drm-tungsten/radeon_drm.h
+--- git/drivers/gpu/drm-tungsten/radeon_drm.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drm.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,750 @@
++/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
++ *
++ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++#ifndef __RADEON_DRM_H__
++#define __RADEON_DRM_H__
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the X server file (radeon_sarea.h)
++ */
++#ifndef __RADEON_SAREA_DEFINES__
++#define __RADEON_SAREA_DEFINES__
++
++/* Old style state flags, required for sarea interface (1.1 and 1.2
++ * clears) and 1.2 drm_vertex2 ioctl.
++ */
++#define RADEON_UPLOAD_CONTEXT         0x00000001
++#define RADEON_UPLOAD_VERTFMT         0x00000002
++#define RADEON_UPLOAD_LINE            0x00000004
++#define RADEON_UPLOAD_BUMPMAP         0x00000008
++#define RADEON_UPLOAD_MASKS           0x00000010
++#define RADEON_UPLOAD_VIEWPORT                0x00000020
++#define RADEON_UPLOAD_SETUP           0x00000040
++#define RADEON_UPLOAD_TCL             0x00000080
++#define RADEON_UPLOAD_MISC            0x00000100
++#define RADEON_UPLOAD_TEX0            0x00000200
++#define RADEON_UPLOAD_TEX1            0x00000400
++#define RADEON_UPLOAD_TEX2            0x00000800
++#define RADEON_UPLOAD_TEX0IMAGES      0x00001000
++#define RADEON_UPLOAD_TEX1IMAGES      0x00002000
++#define RADEON_UPLOAD_TEX2IMAGES      0x00004000
++#define RADEON_UPLOAD_CLIPRECTS               0x00008000      /* handled client-side */
++#define RADEON_REQUIRE_QUIESCENCE     0x00010000
++#define RADEON_UPLOAD_ZBIAS           0x00020000      /* version 1.2 and newer */
++#define RADEON_UPLOAD_ALL             0x003effff
++#define RADEON_UPLOAD_CONTEXT_ALL       0x003e01ff
++
++/* New style per-packet identifiers for use in cmd_buffer ioctl with
++ * the RADEON_EMIT_PACKET command.  Comments relate new packets to old
++ * state bits and the packet size:
++ */
++#define RADEON_EMIT_PP_MISC                         0 /* context/7 */
++#define RADEON_EMIT_PP_CNTL                         1 /* context/3 */
++#define RADEON_EMIT_RB3D_COLORPITCH                 2 /* context/1 */
++#define RADEON_EMIT_RE_LINE_PATTERN                 3 /* line/2 */
++#define RADEON_EMIT_SE_LINE_WIDTH                   4 /* line/1 */
++#define RADEON_EMIT_PP_LUM_MATRIX                   5 /* bumpmap/1 */
++#define RADEON_EMIT_PP_ROT_MATRIX_0                 6 /* bumpmap/2 */
++#define RADEON_EMIT_RB3D_STENCILREFMASK             7 /* masks/3 */
++#define RADEON_EMIT_SE_VPORT_XSCALE                 8 /* viewport/6 */
++#define RADEON_EMIT_SE_CNTL                         9 /* setup/2 */
++#define RADEON_EMIT_SE_CNTL_STATUS                  10        /* setup/1 */
++#define RADEON_EMIT_RE_MISC                         11        /* misc/1 */
++#define RADEON_EMIT_PP_TXFILTER_0                   12        /* tex0/6 */
++#define RADEON_EMIT_PP_BORDER_COLOR_0               13        /* tex0/1 */
++#define RADEON_EMIT_PP_TXFILTER_1                   14        /* tex1/6 */
++#define RADEON_EMIT_PP_BORDER_COLOR_1               15        /* tex1/1 */
++#define RADEON_EMIT_PP_TXFILTER_2                   16        /* tex2/6 */
++#define RADEON_EMIT_PP_BORDER_COLOR_2               17        /* tex2/1 */
++#define RADEON_EMIT_SE_ZBIAS_FACTOR                 18        /* zbias/2 */
++#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT           19        /* tcl/11 */
++#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED   20        /* material/17 */
++#define R200_EMIT_PP_TXCBLEND_0                     21        /* tex0/4 */
++#define R200_EMIT_PP_TXCBLEND_1                     22        /* tex1/4 */
++#define R200_EMIT_PP_TXCBLEND_2                     23        /* tex2/4 */
++#define R200_EMIT_PP_TXCBLEND_3                     24        /* tex3/4 */
++#define R200_EMIT_PP_TXCBLEND_4                     25        /* tex4/4 */
++#define R200_EMIT_PP_TXCBLEND_5                     26        /* tex5/4 */
++#define R200_EMIT_PP_TXCBLEND_6                     27        /* /4 */
++#define R200_EMIT_PP_TXCBLEND_7                     28        /* /4 */
++#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0             29        /* tcl/7 */
++#define R200_EMIT_TFACTOR_0                         30        /* tf/7 */
++#define R200_EMIT_VTX_FMT_0                         31        /* vtx/5 */
++#define R200_EMIT_VAP_CTL                           32        /* vap/1 */
++#define R200_EMIT_MATRIX_SELECT_0                   33        /* msl/5 */
++#define R200_EMIT_TEX_PROC_CTL_2                    34        /* tcg/5 */
++#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL            35        /* tcl/1 */
++#define R200_EMIT_PP_TXFILTER_0                     36        /* tex0/6 */
++#define R200_EMIT_PP_TXFILTER_1                     37        /* tex1/6 */
++#define R200_EMIT_PP_TXFILTER_2                     38        /* tex2/6 */
++#define R200_EMIT_PP_TXFILTER_3                     39        /* tex3/6 */
++#define R200_EMIT_PP_TXFILTER_4                     40        /* tex4/6 */
++#define R200_EMIT_PP_TXFILTER_5                     41        /* tex5/6 */
++#define R200_EMIT_PP_TXOFFSET_0                     42        /* tex0/1 */
++#define R200_EMIT_PP_TXOFFSET_1                     43        /* tex1/1 */
++#define R200_EMIT_PP_TXOFFSET_2                     44        /* tex2/1 */
++#define R200_EMIT_PP_TXOFFSET_3                     45        /* tex3/1 */
++#define R200_EMIT_PP_TXOFFSET_4                     46        /* tex4/1 */
++#define R200_EMIT_PP_TXOFFSET_5                     47        /* tex5/1 */
++#define R200_EMIT_VTE_CNTL                          48        /* vte/1 */
++#define R200_EMIT_OUTPUT_VTX_COMP_SEL               49        /* vtx/1 */
++#define R200_EMIT_PP_TAM_DEBUG3                     50        /* tam/1 */
++#define R200_EMIT_PP_CNTL_X                         51        /* cst/1 */
++#define R200_EMIT_RB3D_DEPTHXY_OFFSET               52        /* cst/1 */
++#define R200_EMIT_RE_AUX_SCISSOR_CNTL               53        /* cst/1 */
++#define R200_EMIT_RE_SCISSOR_TL_0                   54        /* cst/2 */
++#define R200_EMIT_RE_SCISSOR_TL_1                   55        /* cst/2 */
++#define R200_EMIT_RE_SCISSOR_TL_2                   56        /* cst/2 */
++#define R200_EMIT_SE_VAP_CNTL_STATUS                57        /* cst/1 */
++#define R200_EMIT_SE_VTX_STATE_CNTL                 58        /* cst/1 */
++#define R200_EMIT_RE_POINTSIZE                      59        /* cst/1 */
++#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0       60        /* cst/4 */
++#define R200_EMIT_PP_CUBIC_FACES_0                  61
++#define R200_EMIT_PP_CUBIC_OFFSETS_0                62
++#define R200_EMIT_PP_CUBIC_FACES_1                  63
++#define R200_EMIT_PP_CUBIC_OFFSETS_1                64
++#define R200_EMIT_PP_CUBIC_FACES_2                  65
++#define R200_EMIT_PP_CUBIC_OFFSETS_2                66
++#define R200_EMIT_PP_CUBIC_FACES_3                  67
++#define R200_EMIT_PP_CUBIC_OFFSETS_3                68
++#define R200_EMIT_PP_CUBIC_FACES_4                  69
++#define R200_EMIT_PP_CUBIC_OFFSETS_4                70
++#define R200_EMIT_PP_CUBIC_FACES_5                  71
++#define R200_EMIT_PP_CUBIC_OFFSETS_5                72
++#define RADEON_EMIT_PP_TEX_SIZE_0                   73
++#define RADEON_EMIT_PP_TEX_SIZE_1                   74
++#define RADEON_EMIT_PP_TEX_SIZE_2                   75
++#define R200_EMIT_RB3D_BLENDCOLOR                   76
++#define R200_EMIT_TCL_POINT_SPRITE_CNTL             77
++#define RADEON_EMIT_PP_CUBIC_FACES_0                78
++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0             79
++#define RADEON_EMIT_PP_CUBIC_FACES_1                80
++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1             81
++#define RADEON_EMIT_PP_CUBIC_FACES_2                82
++#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2             83
++#define R200_EMIT_PP_TRI_PERF_CNTL                  84
++#define R200_EMIT_PP_AFS_0                          85
++#define R200_EMIT_PP_AFS_1                          86
++#define R200_EMIT_ATF_TFACTOR                       87
++#define R200_EMIT_PP_TXCTLALL_0                     88
++#define R200_EMIT_PP_TXCTLALL_1                     89
++#define R200_EMIT_PP_TXCTLALL_2                     90
++#define R200_EMIT_PP_TXCTLALL_3                     91
++#define R200_EMIT_PP_TXCTLALL_4                     92
++#define R200_EMIT_PP_TXCTLALL_5                     93
++#define R200_EMIT_VAP_PVS_CNTL                      94
++#define RADEON_MAX_STATE_PACKETS                    95
++
++/* Commands understood by cmd_buffer ioctl.  More can be added but
++ * obviously these can't be removed or changed:
++ */
++#define RADEON_CMD_PACKET      1      /* emit one of the register packets above */
++#define RADEON_CMD_SCALARS     2      /* emit scalar data */
++#define RADEON_CMD_VECTORS     3      /* emit vector data */
++#define RADEON_CMD_DMA_DISCARD 4      /* discard current dma buf */
++#define RADEON_CMD_PACKET3     5      /* emit hw packet */
++#define RADEON_CMD_PACKET3_CLIP 6     /* emit hw packet wrapped in cliprects */
++#define RADEON_CMD_SCALARS2     7     /* r200 stopgap */
++#define RADEON_CMD_WAIT         8     /* emit hw wait commands -- note:
++                                       *  doesn't make the cpu wait, just
++                                       *  the graphics hardware */
++#define RADEON_CMD_VECLINEAR  9       /* another r200 stopgap */
++
++typedef union {
++      int i;
++      struct {
++              unsigned char cmd_type, pad0, pad1, pad2;
++      } header;
++      struct {
++              unsigned char cmd_type, packet_id, pad0, pad1;
++      } packet;
++      struct {
++              unsigned char cmd_type, offset, stride, count;
++      } scalars;
++      struct {
++              unsigned char cmd_type, offset, stride, count;
++      } vectors;
++      struct {
++              unsigned char cmd_type, addr_lo, addr_hi, count;
++      } veclinear;
++      struct {
++              unsigned char cmd_type, buf_idx, pad0, pad1;
++      } dma;
++      struct {
++              unsigned char cmd_type, flags, pad0, pad1;
++      } wait;
++} drm_radeon_cmd_header_t;
++
++#define RADEON_WAIT_2D  0x1
++#define RADEON_WAIT_3D  0x2
++
++/* Allowed parameters for R300_CMD_PACKET3
++ */
++#define R300_CMD_PACKET3_CLEAR                0
++#define R300_CMD_PACKET3_RAW          1
++
++/* Commands understood by cmd_buffer ioctl for R300.
++ * The interface has not been stabilized, so some of these may be removed
++ * and eventually reordered before stabilization.
++ */
++#define R300_CMD_PACKET0              1
++#define R300_CMD_VPU                  2       /* emit vertex program upload */
++#define R300_CMD_PACKET3              3       /* emit a packet3 */
++#define R300_CMD_END3D                        4       /* emit sequence ending 3d rendering */
++#define R300_CMD_CP_DELAY             5
++#define R300_CMD_DMA_DISCARD          6
++#define R300_CMD_WAIT                 7
++#     define R300_WAIT_2D             0x1
++#     define R300_WAIT_3D             0x2
++/* these two defines are DOING IT WRONG - however
++ * we have userspace which relies on using these.
++ * The wait interface is backwards compat new 
++ * code should use the NEW_WAIT defines below
++ * THESE ARE NOT BIT FIELDS
++ */
++#     define R300_WAIT_2D_CLEAN       0x3
++#     define R300_WAIT_3D_CLEAN       0x4
++
++#     define R300_NEW_WAIT_2D_3D      0x3
++#     define R300_NEW_WAIT_2D_2D_CLEAN        0x4
++#     define R300_NEW_WAIT_3D_3D_CLEAN        0x6
++#     define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN    0x8
++
++#define R300_CMD_SCRATCH              8
++#define R300_CMD_R500FP                 9
++
++typedef union {
++      unsigned int u;
++      struct {
++              unsigned char cmd_type, pad0, pad1, pad2;
++      } header;
++      struct {
++              unsigned char cmd_type, count, reglo, reghi;
++      } packet0;
++      struct {
++              unsigned char cmd_type, count, adrlo, adrhi;
++      } vpu;
++      struct {
++              unsigned char cmd_type, packet, pad0, pad1;
++      } packet3;
++      struct {
++              unsigned char cmd_type, packet;
++              unsigned short count;   /* amount of packet2 to emit */
++      } delay;
++      struct {
++              unsigned char cmd_type, buf_idx, pad0, pad1;
++      } dma;
++      struct {
++              unsigned char cmd_type, flags, pad0, pad1;
++      } wait;
++      struct {
++              unsigned char cmd_type, reg, n_bufs, flags;
++      } scratch;
++      struct {
++              unsigned char cmd_type, count, adrlo, adrhi_flags;
++      } r500fp;
++} drm_r300_cmd_header_t;
++
++#define RADEON_FRONT                  0x1
++#define RADEON_BACK                   0x2
++#define RADEON_DEPTH                  0x4
++#define RADEON_STENCIL                        0x8
++#define RADEON_CLEAR_FASTZ            0x80000000
++#define RADEON_USE_HIERZ              0x40000000
++#define RADEON_USE_COMP_ZBUF          0x20000000
++
++#define R500FP_CONSTANT_TYPE  (1 << 1)
++#define R500FP_CONSTANT_CLAMP (1 << 2)
++
++/* Primitive types
++ */
++#define RADEON_POINTS                 0x1
++#define RADEON_LINES                  0x2
++#define RADEON_LINE_STRIP             0x3
++#define RADEON_TRIANGLES              0x4
++#define RADEON_TRIANGLE_FAN           0x5
++#define RADEON_TRIANGLE_STRIP         0x6
++
++/* Vertex/indirect buffer size
++ */
++#define RADEON_BUFFER_SIZE            65536
++
++/* Byte offsets for indirect buffer data
++ */
++#define RADEON_INDEX_PRIM_OFFSET      20
++
++#define RADEON_SCRATCH_REG_OFFSET     32
++
++#define RADEON_NR_SAREA_CLIPRECTS     12
++
++/* There are 2 heaps (local/GART).  Each region within a heap is a
++ * minimum of 64k, and there are at most 64 of them per heap.
++ */
++#define RADEON_LOCAL_TEX_HEAP         0
++#define RADEON_GART_TEX_HEAP          1
++#define RADEON_NR_TEX_HEAPS           2
++#define RADEON_NR_TEX_REGIONS         64
++#define RADEON_LOG_TEX_GRANULARITY    16
++
++#define RADEON_MAX_TEXTURE_LEVELS     12
++#define RADEON_MAX_TEXTURE_UNITS      3
++
++#define RADEON_MAX_SURFACES           8
++
++/* Blits have strict offset rules.  All blit offset must be aligned on
++ * a 1K-byte boundary.
++ */
++#define RADEON_OFFSET_SHIFT             10
++#define RADEON_OFFSET_ALIGN             (1 << RADEON_OFFSET_SHIFT)
++#define RADEON_OFFSET_MASK              (RADEON_OFFSET_ALIGN - 1)
++
++#endif                                /* __RADEON_SAREA_DEFINES__ */
++
++typedef struct {
++      unsigned int red;
++      unsigned int green;
++      unsigned int blue;
++      unsigned int alpha;
++} radeon_color_regs_t;
++
++typedef struct {
++      /* Context state */
++      unsigned int pp_misc;   /* 0x1c14 */
++      unsigned int pp_fog_color;
++      unsigned int re_solid_color;
++      unsigned int rb3d_blendcntl;
++      unsigned int rb3d_depthoffset;
++      unsigned int rb3d_depthpitch;
++      unsigned int rb3d_zstencilcntl;
++
++      unsigned int pp_cntl;   /* 0x1c38 */
++      unsigned int rb3d_cntl;
++      unsigned int rb3d_coloroffset;
++      unsigned int re_width_height;
++      unsigned int rb3d_colorpitch;
++      unsigned int se_cntl;
++
++      /* Vertex format state */
++      unsigned int se_coord_fmt;      /* 0x1c50 */
++
++      /* Line state */
++      unsigned int re_line_pattern;   /* 0x1cd0 */
++      unsigned int re_line_state;
++
++      unsigned int se_line_width;     /* 0x1db8 */
++
++      /* Bumpmap state */
++      unsigned int pp_lum_matrix;     /* 0x1d00 */
++
++      unsigned int pp_rot_matrix_0;   /* 0x1d58 */
++      unsigned int pp_rot_matrix_1;
++
++      /* Mask state */
++      unsigned int rb3d_stencilrefmask;       /* 0x1d7c */
++      unsigned int rb3d_ropcntl;
++      unsigned int rb3d_planemask;
++
++      /* Viewport state */
++      unsigned int se_vport_xscale;   /* 0x1d98 */
++      unsigned int se_vport_xoffset;
++      unsigned int se_vport_yscale;
++      unsigned int se_vport_yoffset;
++      unsigned int se_vport_zscale;
++      unsigned int se_vport_zoffset;
++
++      /* Setup state */
++      unsigned int se_cntl_status;    /* 0x2140 */
++
++      /* Misc state */
++      unsigned int re_top_left;       /* 0x26c0 */
++      unsigned int re_misc;
++} drm_radeon_context_regs_t;
++
++typedef struct {
++      /* Zbias state */
++      unsigned int se_zbias_factor;   /* 0x1dac */
++      unsigned int se_zbias_constant;
++} drm_radeon_context2_regs_t;
++
++/* Setup registers for each texture unit
++ */
++typedef struct {
++      unsigned int pp_txfilter;
++      unsigned int pp_txformat;
++      unsigned int pp_txoffset;
++      unsigned int pp_txcblend;
++      unsigned int pp_txablend;
++      unsigned int pp_tfactor;
++      unsigned int pp_border_color;
++} drm_radeon_texture_regs_t;
++
++typedef struct {
++      unsigned int start;
++      unsigned int finish;
++      unsigned int prim:8;
++      unsigned int stateidx:8;
++      unsigned int numverts:16;       /* overloaded as offset/64 for elt prims */
++      unsigned int vc_format; /* vertex format */
++} drm_radeon_prim_t;
++
++typedef struct {
++      drm_radeon_context_regs_t context;
++      drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
++      drm_radeon_context2_regs_t context2;
++      unsigned int dirty;
++} drm_radeon_state_t;
++
++typedef struct {
++      /* The channel for communication of state information to the
++       * kernel on firing a vertex buffer with either of the
++       * obsoleted vertex/index ioctls.
++       */
++      drm_radeon_context_regs_t context_state;
++      drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
++      unsigned int dirty;
++      unsigned int vertsize;
++      unsigned int vc_format;
++
++      /* The current cliprects, or a subset thereof.
++       */
++      struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
++      unsigned int nbox;
++
++      /* Counters for client-side throttling of rendering clients.
++       */
++      unsigned int last_frame;
++      unsigned int last_dispatch;
++      unsigned int last_clear;
++
++      struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
++                                                     1];
++      unsigned int tex_age[RADEON_NR_TEX_HEAPS];
++      int ctx_owner;
++      int pfState;            /* number of 3d windows (0,1,2ormore) */
++      int pfCurrentPage;      /* which buffer is being displayed? */
++      int crtc2_base;         /* CRTC2 frame offset */
++      int tiling_enabled;     /* set by drm, read by 2d + 3d clients */
++} drm_radeon_sarea_t;
++
++/* WARNING: If you change any of these defines, make sure to change the
++ * defines in the Xserver file (xf86drmRadeon.h)
++ *
++ * KW: actually it's illegal to change any of this (backwards compatibility).
++ */
++
++/* Radeon specific ioctls
++ * The device specific ioctl range is 0x40 to 0x79.
++ */
++#define DRM_RADEON_CP_INIT    0x00
++#define DRM_RADEON_CP_START   0x01
++#define DRM_RADEON_CP_STOP    0x02
++#define DRM_RADEON_CP_RESET   0x03
++#define DRM_RADEON_CP_IDLE    0x04
++#define DRM_RADEON_RESET      0x05
++#define DRM_RADEON_FULLSCREEN 0x06
++#define DRM_RADEON_SWAP       0x07
++#define DRM_RADEON_CLEAR      0x08
++#define DRM_RADEON_VERTEX     0x09
++#define DRM_RADEON_INDICES    0x0A
++#define DRM_RADEON_NOT_USED
++#define DRM_RADEON_STIPPLE    0x0C
++#define DRM_RADEON_INDIRECT   0x0D
++#define DRM_RADEON_TEXTURE    0x0E
++#define DRM_RADEON_VERTEX2    0x0F
++#define DRM_RADEON_CMDBUF     0x10
++#define DRM_RADEON_GETPARAM   0x11
++#define DRM_RADEON_FLIP       0x12
++#define DRM_RADEON_ALLOC      0x13
++#define DRM_RADEON_FREE       0x14
++#define DRM_RADEON_INIT_HEAP  0x15
++#define DRM_RADEON_IRQ_EMIT   0x16
++#define DRM_RADEON_IRQ_WAIT   0x17
++#define DRM_RADEON_CP_RESUME  0x18
++#define DRM_RADEON_SETPARAM   0x19
++#define DRM_RADEON_SURF_ALLOC 0x1a
++#define DRM_RADEON_SURF_FREE  0x1b
++
++#define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
++#define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
++#define DRM_IOCTL_RADEON_CP_STOP    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
++#define DRM_IOCTL_RADEON_CP_RESET   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
++#define DRM_IOCTL_RADEON_CP_IDLE    DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
++#define DRM_IOCTL_RADEON_RESET      DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_RESET)
++#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
++#define DRM_IOCTL_RADEON_SWAP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_SWAP)
++#define DRM_IOCTL_RADEON_CLEAR      DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
++#define DRM_IOCTL_RADEON_VERTEX     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
++#define DRM_IOCTL_RADEON_INDICES    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
++#define DRM_IOCTL_RADEON_STIPPLE    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
++#define DRM_IOCTL_RADEON_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
++#define DRM_IOCTL_RADEON_TEXTURE    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
++#define DRM_IOCTL_RADEON_VERTEX2    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
++#define DRM_IOCTL_RADEON_CMDBUF     DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
++#define DRM_IOCTL_RADEON_GETPARAM   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
++#define DRM_IOCTL_RADEON_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_FLIP)
++#define DRM_IOCTL_RADEON_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
++#define DRM_IOCTL_RADEON_FREE       DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
++#define DRM_IOCTL_RADEON_INIT_HEAP  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
++#define DRM_IOCTL_RADEON_IRQ_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
++#define DRM_IOCTL_RADEON_IRQ_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
++#define DRM_IOCTL_RADEON_CP_RESUME  DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
++#define DRM_IOCTL_RADEON_SETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
++#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
++#define DRM_IOCTL_RADEON_SURF_FREE  DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
++
++typedef struct drm_radeon_init {
++      enum {
++              RADEON_INIT_CP = 0x01,
++              RADEON_CLEANUP_CP = 0x02,
++              RADEON_INIT_R200_CP = 0x03,
++              RADEON_INIT_R300_CP = 0x04
++      } func;
++      unsigned long sarea_priv_offset;
++      int is_pci; /* for overriding only */
++      int cp_mode;
++      int gart_size;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      unsigned long fb_offset DEPRECATED;     /* deprecated, driver asks hardware */
++      unsigned long mmio_offset DEPRECATED;   /* deprecated, driver asks hardware */
++      unsigned long ring_offset;
++      unsigned long ring_rptr_offset;
++      unsigned long buffers_offset;
++      unsigned long gart_textures_offset;
++} drm_radeon_init_t;
++
++typedef struct drm_radeon_cp_stop {
++      int flush;
++      int idle;
++} drm_radeon_cp_stop_t;
++
++typedef struct drm_radeon_fullscreen {
++      enum {
++              RADEON_INIT_FULLSCREEN = 0x01,
++              RADEON_CLEANUP_FULLSCREEN = 0x02
++      } func;
++} drm_radeon_fullscreen_t;
++
++#define CLEAR_X1      0
++#define CLEAR_Y1      1
++#define CLEAR_X2      2
++#define CLEAR_Y2      3
++#define CLEAR_DEPTH   4
++
++typedef union drm_radeon_clear_rect {
++      float f[5];
++      unsigned int ui[5];
++} drm_radeon_clear_rect_t;
++
++typedef struct drm_radeon_clear {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;        /* misnamed field:  should be stencil */
++      drm_radeon_clear_rect_t __user *depth_boxes;
++} drm_radeon_clear_t;
++
++typedef struct drm_radeon_vertex {
++      int prim;
++      int idx;                /* Index of vertex buffer */
++      int count;              /* Number of vertices in buffer */
++      int discard;            /* Client finished with buffer? */
++} drm_radeon_vertex_t;
++
++typedef struct drm_radeon_indices {
++      int prim;
++      int idx;
++      int start;
++      int end;
++      int discard;            /* Client finished with buffer? */
++} drm_radeon_indices_t;
++
++/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
++ *      - allows multiple primitives and state changes in a single ioctl
++ *      - supports driver change to emit native primitives
++ */
++typedef struct drm_radeon_vertex2 {
++      int idx;                /* Index of vertex buffer */
++      int discard;            /* Client finished with buffer? */
++      int nr_states;
++      drm_radeon_state_t __user *state;
++      int nr_prims;
++      drm_radeon_prim_t __user *prim;
++} drm_radeon_vertex2_t;
++
++/* v1.3 - obsoletes drm_radeon_vertex2
++ *      - allows arbitarily large cliprect list
++ *      - allows updating of tcl packet, vector and scalar state
++ *      - allows memory-efficient description of state updates
++ *      - allows state to be emitted without a primitive
++ *           (for clears, ctx switches)
++ *      - allows more than one dma buffer to be referenced per ioctl
++ *      - supports tcl driver
++ *      - may be extended in future versions with new cmd types, packets
++ */
++typedef struct drm_radeon_cmd_buffer {
++      int bufsz;
++      char __user *buf;
++      int nbox;
++      struct drm_clip_rect __user *boxes;
++} drm_radeon_cmd_buffer_t;
++
++typedef struct drm_radeon_tex_image {
++      unsigned int x, y;      /* Blit coordinates */
++      unsigned int width, height;
++      const void __user *data;
++} drm_radeon_tex_image_t;
++
++typedef struct drm_radeon_texture {
++      unsigned int offset;
++      int pitch;
++      int format;
++      int width;              /* Texture image coordinates */
++      int height;
++      drm_radeon_tex_image_t __user *image;
++} drm_radeon_texture_t;
++
++typedef struct drm_radeon_stipple {
++      unsigned int __user *mask;
++} drm_radeon_stipple_t;
++
++typedef struct drm_radeon_indirect {
++      int idx;
++      int start;
++      int end;
++      int discard;
++} drm_radeon_indirect_t;
++
++/* enum for card type parameters */
++#define RADEON_CARD_PCI 0
++#define RADEON_CARD_AGP 1
++#define RADEON_CARD_PCIE 2
++
++/* 1.3: An ioctl to get parameters that aren't available to the 3d
++ * client any other way.
++ */
++#define RADEON_PARAM_GART_BUFFER_OFFSET    1  /* card offset of 1st GART buffer */
++#define RADEON_PARAM_LAST_FRAME            2
++#define RADEON_PARAM_LAST_DISPATCH         3
++#define RADEON_PARAM_LAST_CLEAR            4
++/* Added with DRM version 1.6. */
++#define RADEON_PARAM_IRQ_NR                5
++#define RADEON_PARAM_GART_BASE             6  /* card offset of GART base */
++/* Added with DRM version 1.8. */
++#define RADEON_PARAM_REGISTER_HANDLE       7  /* for drmMap() */
++#define RADEON_PARAM_STATUS_HANDLE         8
++#define RADEON_PARAM_SAREA_HANDLE          9
++#define RADEON_PARAM_GART_TEX_HANDLE       10
++#define RADEON_PARAM_SCRATCH_OFFSET        11
++#define RADEON_PARAM_CARD_TYPE             12
++#define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */
++#define RADEON_PARAM_FB_LOCATION           14   /* FB location */
++#define RADEON_PARAM_NUM_GB_PIPES          15   /* num GB pipes */
++
++typedef struct drm_radeon_getparam {
++      int param;
++      void __user *value;
++} drm_radeon_getparam_t;
++
++/* 1.6: Set up a memory manager for regions of shared memory:
++ */
++#define RADEON_MEM_REGION_GART 1
++#define RADEON_MEM_REGION_FB   2
++
++typedef struct drm_radeon_mem_alloc {
++      int region;
++      int alignment;
++      int size;
++      int __user *region_offset;      /* offset from start of fb or GART */
++} drm_radeon_mem_alloc_t;
++
++typedef struct drm_radeon_mem_free {
++      int region;
++      int region_offset;
++} drm_radeon_mem_free_t;
++
++typedef struct drm_radeon_mem_init_heap {
++      int region;
++      int size;
++      int start;
++} drm_radeon_mem_init_heap_t;
++
++/* 1.6: Userspace can request & wait on irq's:
++ */
++typedef struct drm_radeon_irq_emit {
++      int __user *irq_seq;
++} drm_radeon_irq_emit_t;
++
++typedef struct drm_radeon_irq_wait {
++      int irq_seq;
++} drm_radeon_irq_wait_t;
++
++/* 1.10: Clients tell the DRM where they think the framebuffer is located in
++ * the card's address space, via a new generic ioctl to set parameters
++ */
++
++typedef struct drm_radeon_setparam {
++      unsigned int param;
++      int64_t value;
++} drm_radeon_setparam_t;
++
++#define RADEON_SETPARAM_FB_LOCATION    1      /* determined framebuffer location */
++#define RADEON_SETPARAM_SWITCH_TILING  2      /* enable/disable color tiling */
++#define RADEON_SETPARAM_PCIGART_LOCATION 3    /* PCI Gart Location */
++
++#define RADEON_SETPARAM_NEW_MEMMAP 4          /* Use new memory map */
++#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5    /* PCI GART Table Size */
++#define RADEON_SETPARAM_VBLANK_CRTC 6           /* VBLANK CRTC */
++/* 1.14: Clients can allocate/free a surface
++ */
++typedef struct drm_radeon_surface_alloc {
++      unsigned int address;
++      unsigned int size;
++      unsigned int flags;
++} drm_radeon_surface_alloc_t;
++
++typedef struct drm_radeon_surface_free {
++      unsigned int address;
++} drm_radeon_surface_free_t;
++
++#define       DRM_RADEON_VBLANK_CRTC1         1
++#define       DRM_RADEON_VBLANK_CRTC2         2
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drv.c git-nokia/drivers/gpu/drm-tungsten/radeon_drv.c
+--- git/drivers/gpu/drm-tungsten/radeon_drv.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drv.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,157 @@
++/**
++ * \file radeon_drv.c
++ * ATI Radeon driver
++ *
++ * \author Gareth Hughes <gareth@valinux.com>
++ */
++
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++#include "drm_pciids.h"
++
++int radeon_no_wb;
++
++MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
++module_param_named(no_wb, radeon_no_wb, int, 0444);
++
++static int dri_library_name(struct drm_device * dev, char * buf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int family = dev_priv->flags & RADEON_FAMILY_MASK;
++
++      return snprintf(buf, PAGE_SIZE, "%s\n",
++              (family < CHIP_R200) ? "radeon" :
++              ((family < CHIP_R300) ? "r200" :
++              "r300"));
++}
++
++static int radeon_suspend(struct drm_device *dev, pm_message_t state)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      /* Disable *all* interrupts */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++      return 0;
++}
++
++static int radeon_resume(struct drm_device *dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      /* Restore interrupt registers */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
++      return 0;
++}
++
++static struct pci_device_id pciidlist[] = {
++      radeon_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
++          DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
++      .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
++      .load = radeon_driver_load,
++      .firstopen = radeon_driver_firstopen,
++      .open = radeon_driver_open,
++      .preclose = radeon_driver_preclose,
++      .postclose = radeon_driver_postclose,
++      .lastclose = radeon_driver_lastclose,
++      .unload = radeon_driver_unload,
++      .suspend = radeon_suspend,
++      .resume = radeon_resume,
++      .get_vblank_counter = radeon_get_vblank_counter,
++      .enable_vblank = radeon_enable_vblank,
++      .disable_vblank = radeon_disable_vblank,
++      .dri_library_name = dri_library_name,
++      .irq_preinstall = radeon_driver_irq_preinstall,
++      .irq_postinstall = radeon_driver_irq_postinstall,
++      .irq_uninstall = radeon_driver_irq_uninstall,
++      .irq_handler = radeon_driver_irq_handler,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = radeon_ioctls,
++      .dma_ioctl = radeon_cp_buffers,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = radeon_compat_ioctl,
++#endif
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init radeon_init(void)
++{
++      driver.num_ioctls = radeon_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit radeon_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(radeon_init);
++module_exit(radeon_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_drv.h git-nokia/drivers/gpu/drm-tungsten/radeon_drv.h
+--- git/drivers/gpu/drm-tungsten/radeon_drv.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_drv.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1443 @@
++/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Kevin E. Martin <martin@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __RADEON_DRV_H__
++#define __RADEON_DRV_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "Gareth Hughes, Keith Whitwell, others."
++
++#define DRIVER_NAME           "radeon"
++#define DRIVER_DESC           "ATI Radeon"
++#define DRIVER_DATE           "20080613"
++
++/* Interface history:
++ *
++ * 1.1 - ??
++ * 1.2 - Add vertex2 ioctl (keith)
++ *     - Add stencil capability to clear ioctl (gareth, keith)
++ *     - Increase MAX_TEXTURE_LEVELS (brian)
++ * 1.3 - Add cmdbuf ioctl (keith)
++ *     - Add support for new radeon packets (keith)
++ *     - Add getparam ioctl (keith)
++ *     - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
++ * 1.4 - Add scratch registers to get_param ioctl.
++ * 1.5 - Add r200 packets to cmdbuf ioctl
++ *     - Add r200 function to init ioctl
++ *     - Add 'scalar2' instruction to cmdbuf
++ * 1.6 - Add static GART memory manager
++ *       Add irq handler (won't be turned on unless X server knows to)
++ *       Add irq ioctls and irq_active getparam.
++ *       Add wait command for cmdbuf ioctl
++ *       Add GART offset query for getparam
++ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
++ *       and R200_PP_CUBIC_OFFSET_F1_[0..5].
++ *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
++ *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
++ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
++ *       Add 'GET' queries for starting additional clients on different VT's.
++ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
++ *       Add texture rectangle support for r100.
++ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
++ *       clients use to tell the DRM where they think the framebuffer is
++ *       located in the card's address space
++ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
++ *       and GL_EXT_blend_[func|equation]_separate on r200
++ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
++ *       (No 3D support yet - just microcode loading).
++ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
++ *     - Add hyperz support, add hyperz flags to clear ioctl.
++ * 1.14- Add support for color tiling
++ *     - Add R100/R200 surface allocation/free support
++ * 1.15- Add support for texture micro tiling
++ *     - Add support for r100 cube maps
++ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
++ *       texture filtering on r200
++ * 1.17- Add initial support for R300 (3D).
++ * 1.18- Add support for GL_ATI_fragment_shader, new packets
++ *       R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
++ *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
++ *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
++ * 1.19- Add support for gart table in FB memory and PCIE r300
++ * 1.20- Add support for r300 texrect
++ * 1.21- Add support for card type getparam
++ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
++ * 1.23- Add new radeon memory map work from benh
++ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
++ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
++ *       new packet type)
++ * 1.26- Add support for variable size PCI(E) gart aperture
++ * 1.27- Add support for IGP GART
++ * 1.28- Add support for VBL on CRTC2
++ * 1.29- R500 3D cmd buffer support
++ */
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          29
++#define DRIVER_PATCHLEVEL     0
++
++/*
++ * Radeon chip families
++ */
++enum radeon_family {
++      CHIP_R100,
++      CHIP_RV100,
++      CHIP_RS100,
++      CHIP_RV200,
++      CHIP_RS200,
++      CHIP_R200,
++      CHIP_RV250,
++      CHIP_RS300,
++      CHIP_RV280,
++      CHIP_R300,
++      CHIP_R350,
++      CHIP_RV350,
++      CHIP_RV380,
++      CHIP_R420,
++      CHIP_RV410,
++      CHIP_RS400,
++      CHIP_RS480,
++      CHIP_RS690,
++      CHIP_RV515,
++      CHIP_R520,
++      CHIP_RV530,
++      CHIP_RV560,
++      CHIP_RV570,
++      CHIP_R580,
++      CHIP_LAST,
++};
++
++/*
++ * Chip flags
++ */
++enum radeon_chip_flags {
++      RADEON_FAMILY_MASK = 0x0000ffffUL,
++      RADEON_FLAGS_MASK = 0xffff0000UL,
++      RADEON_IS_MOBILITY = 0x00010000UL,
++      RADEON_IS_IGP = 0x00020000UL,
++      RADEON_SINGLE_CRTC = 0x00040000UL,
++      RADEON_IS_AGP = 0x00080000UL,
++      RADEON_HAS_HIERZ = 0x00100000UL,
++      RADEON_IS_PCIE = 0x00200000UL,
++      RADEON_NEW_MEMMAP = 0x00400000UL,
++      RADEON_IS_PCI = 0x00800000UL,
++      RADEON_IS_IGPGART = 0x01000000UL,
++};
++
++#define GET_RING_HEAD(dev_priv)       (dev_priv->writeback_works ? \
++        DRM_READ32(  (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
++#define SET_RING_HEAD(dev_priv,val)   DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
++
++typedef struct drm_radeon_freelist {
++      unsigned int age;
++      struct drm_buf *buf;
++      struct drm_radeon_freelist *next;
++      struct drm_radeon_freelist *prev;
++} drm_radeon_freelist_t;
++
++typedef struct drm_radeon_ring_buffer {
++      u32 *start;
++      u32 *end;
++      int size; /* Double Words */
++      int size_l2qw; /* log2 Quad Words */
++
++      int rptr_update; /* Double Words */
++      int rptr_update_l2qw; /* log2 Quad Words */
++
++      int fetch_size; /* Double Words */
++      int fetch_size_l2ow; /* log2 Oct Words */
++
++      u32 tail;
++      u32 tail_mask;
++      int space;
++
++      int high_mark;
++} drm_radeon_ring_buffer_t;
++
++typedef struct drm_radeon_depth_clear_t {
++      u32 rb3d_cntl;
++      u32 rb3d_zstencilcntl;
++      u32 se_cntl;
++} drm_radeon_depth_clear_t;
++
++struct drm_radeon_driver_file_fields {
++      int64_t radeon_fb_delta;
++};
++
++struct mem_block {
++      struct mem_block *next;
++      struct mem_block *prev;
++      int start;
++      int size;
++      struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++};
++
++struct radeon_surface {
++      int refcount;
++      u32 lower;
++      u32 upper;
++      u32 flags;
++};
++
++struct radeon_virt_surface {
++      int surface_index;
++      u32 lower;
++      u32 upper;
++      u32 flags;
++      struct drm_file *file_priv;
++};
++
++#define RADEON_FLUSH_EMITED   (1 < 0)
++#define RADEON_PURGE_EMITED   (1 < 1)
++
++typedef struct drm_radeon_private {
++
++      drm_radeon_ring_buffer_t ring;
++      drm_radeon_sarea_t *sarea_priv;
++
++      u32 fb_location;
++      u32 fb_size;
++      int new_memmap;
++
++      int gart_size;
++      u32 gart_vm_start;
++      unsigned long gart_buffers_offset;
++
++      int cp_mode;
++      int cp_running;
++
++      drm_radeon_freelist_t *head;
++      drm_radeon_freelist_t *tail;
++      int last_buf;
++      volatile u32 *scratch;
++      int writeback_works;
++
++      int usec_timeout;
++
++      struct {
++              u32 boxes;
++              int freelist_timeouts;
++              int freelist_loops;
++              int requested_bufs;
++              int last_frame_reads;
++              int last_clear_reads;
++              int clears;
++              int texture_uploads;
++      } stats;
++
++      int do_boxes;
++      int page_flipping;
++
++      u32 color_fmt;
++      unsigned int front_offset;
++      unsigned int front_pitch;
++      unsigned int back_offset;
++      unsigned int back_pitch;
++
++      u32 depth_fmt;
++      unsigned int depth_offset;
++      unsigned int depth_pitch;
++
++      u32 front_pitch_offset;
++      u32 back_pitch_offset;
++      u32 depth_pitch_offset;
++
++      drm_radeon_depth_clear_t depth_clear;
++
++      unsigned long ring_offset;
++      unsigned long ring_rptr_offset;
++      unsigned long buffers_offset;
++      unsigned long gart_textures_offset;
++
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *cp_ring;
++      drm_local_map_t *ring_rptr;
++      drm_local_map_t *gart_textures;
++
++      struct mem_block *gart_heap;
++      struct mem_block *fb_heap;
++
++      /* SW interrupt */
++      wait_queue_head_t swi_queue;
++      atomic_t swi_emitted;
++      int vblank_crtc;
++      uint32_t irq_enable_reg;
++      int irq_enabled;
++      uint32_t r500_disp_irq_reg;
++
++      struct radeon_surface surfaces[RADEON_MAX_SURFACES];
++      struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
++
++      unsigned long pcigart_offset;
++      unsigned int pcigart_offset_set;
++      struct drm_ati_pcigart_info gart_info;
++
++      u32 scratch_ages[5];
++
++      unsigned int crtc_last_cnt;
++      unsigned int crtc2_last_cnt;
++
++      /* starting from here on, data is preserved across an open */
++      uint32_t flags;         /* see radeon_chip_flags */
++      unsigned long fb_aper_offset;
++
++      int num_gb_pipes;
++      int track_flush;
++      uint32_t chip_family; /* extract from flags */
++} drm_radeon_private_t;
++
++typedef struct drm_radeon_buf_priv {
++      u32 age;
++} drm_radeon_buf_priv_t;
++
++typedef struct drm_radeon_kcmd_buffer {
++      int bufsz;
++      char *buf;
++      int nbox;
++      struct drm_clip_rect __user *boxes;
++} drm_radeon_kcmd_buffer_t;
++
++extern int radeon_no_wb;
++extern struct drm_ioctl_desc radeon_ioctls[];
++extern int radeon_max_ioctl;
++
++/* Check whether the given hardware address is inside the framebuffer or the
++ * GART area.
++ */
++static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
++                                        u64 off)
++{
++      u32 fb_start = dev_priv->fb_location;
++      u32 fb_end = fb_start + dev_priv->fb_size - 1;
++      u32 gart_start = dev_priv->gart_vm_start;
++      u32 gart_end = gart_start + dev_priv->gart_size - 1;
++
++      return ((off >= fb_start && off <= fb_end) ||
++              (off >= gart_start && off <= gart_end));
++}
++
++                              /* radeon_cp.c */
++extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
++
++extern void radeon_freelist_reset(struct drm_device * dev);
++extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
++
++extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
++
++extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
++
++extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern void radeon_mem_takedown(struct mem_block **heap);
++extern void radeon_mem_release(struct drm_file *file_priv,
++                             struct mem_block *heap);
++
++                              /* radeon_irq.c */
++extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
++extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
++
++extern void radeon_do_release(struct drm_device * dev);
++extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
++extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
++extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
++extern void radeon_driver_irq_preinstall(struct drm_device * dev);
++extern int radeon_driver_irq_postinstall(struct drm_device * dev);
++extern void radeon_driver_irq_uninstall(struct drm_device * dev);
++extern int radeon_vblank_crtc_get(struct drm_device *dev);
++extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
++
++extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
++extern int radeon_driver_unload(struct drm_device *dev);
++extern int radeon_driver_firstopen(struct drm_device *dev);
++extern void radeon_driver_preclose(struct drm_device * dev,
++                                 struct drm_file *file_priv);
++extern void radeon_driver_postclose(struct drm_device * dev,
++                                  struct drm_file *file_priv);
++extern void radeon_driver_lastclose(struct drm_device * dev);
++extern int radeon_driver_open(struct drm_device * dev,
++                            struct drm_file * file_priv);
++extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
++                                       unsigned long arg);
++
++/* r300_cmdbuf.c */
++extern void r300_init_reg_flags(struct drm_device *dev);
++
++extern int r300_do_cp_cmdbuf(struct drm_device *dev,
++                           struct drm_file *file_priv,
++                           drm_radeon_kcmd_buffer_t *cmdbuf);
++
++/* Flags for stats.boxes
++ */
++#define RADEON_BOX_DMA_IDLE      0x1
++#define RADEON_BOX_RING_FULL     0x2
++#define RADEON_BOX_FLIP          0x4
++#define RADEON_BOX_WAIT_IDLE     0x8
++#define RADEON_BOX_TEXTURE_LOAD  0x10
++
++/* Register definitions, register access macros and drmAddMap constants
++ * for Radeon kernel driver.
++ */
++#define RADEON_AGP_COMMAND            0x0f60
++#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060  /* offset in PCI config */
++#       define RADEON_AGP_ENABLE            (1<<8)
++#define RADEON_AUX_SCISSOR_CNTL               0x26f0
++#     define RADEON_EXCLUSIVE_SCISSOR_0       (1 << 24)
++#     define RADEON_EXCLUSIVE_SCISSOR_1       (1 << 25)
++#     define RADEON_EXCLUSIVE_SCISSOR_2       (1 << 26)
++#     define RADEON_SCISSOR_0_ENABLE          (1 << 28)
++#     define RADEON_SCISSOR_1_ENABLE          (1 << 29)
++#     define RADEON_SCISSOR_2_ENABLE          (1 << 30)
++
++#define RADEON_BUS_CNTL                       0x0030
++#     define RADEON_BUS_MASTER_DIS            (1 << 6)
++
++#define RADEON_CLOCK_CNTL_DATA                0x000c
++#     define RADEON_PLL_WR_EN                 (1 << 7)
++#define RADEON_CLOCK_CNTL_INDEX               0x0008
++#define RADEON_CONFIG_APER_SIZE               0x0108
++#define RADEON_CONFIG_MEMSIZE           0x00f8
++#define RADEON_CRTC_OFFSET            0x0224
++#define RADEON_CRTC_OFFSET_CNTL               0x0228
++#     define RADEON_CRTC_TILE_EN              (1 << 15)
++#     define RADEON_CRTC_OFFSET_FLIP_CNTL     (1 << 16)
++#define RADEON_CRTC2_OFFSET           0x0324
++#define RADEON_CRTC2_OFFSET_CNTL      0x0328
++
++#define RADEON_PCIE_INDEX               0x0030
++#define RADEON_PCIE_DATA                0x0034
++#define RADEON_PCIE_TX_GART_CNTL      0x10
++#     define RADEON_PCIE_TX_GART_EN           (1 << 0)
++#     define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
++#     define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
++#     define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
++#     define RADEON_PCIE_TX_GART_MODE_32_128_CACHE    (0 << 3)
++#     define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE   (1 << 3)
++#     define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
++#     define RADEON_PCIE_TX_GART_INVALIDATE_TLB       (1 << 8)
++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
++#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
++#define RADEON_PCIE_TX_GART_BASE      0x13
++#define RADEON_PCIE_TX_GART_START_LO  0x14
++#define RADEON_PCIE_TX_GART_START_HI  0x15
++#define RADEON_PCIE_TX_GART_END_LO    0x16
++#define RADEON_PCIE_TX_GART_END_HI    0x17
++
++#define RS480_NB_MC_INDEX               0x168
++#     define RS480_NB_MC_IND_WR_EN    (1 << 8)
++#define RS480_NB_MC_DATA                0x16c
++
++#define RS690_MC_INDEX                  0x78
++#   define RS690_MC_INDEX_MASK          0x1ff
++#   define RS690_MC_INDEX_WR_EN         (1 << 9)
++#   define RS690_MC_INDEX_WR_ACK        0x7f
++#define RS690_MC_DATA                   0x7c
++
++/* MC indirect registers */
++#define RS480_MC_MISC_CNTL              0x18
++#     define RS480_DISABLE_GTW        (1 << 1)
++/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
++#     define RS480_GART_INDEX_REG_EN  (1 << 12)
++#     define RS690_BLOCK_GFX_D3_EN    (1 << 14)
++#define RS480_K8_FB_LOCATION            0x1e
++#define RS480_GART_FEATURE_ID           0x2b
++#     define RS480_HANG_EN            (1 << 11)
++#     define RS480_TLB_ENABLE         (1 << 18)
++#     define RS480_P2P_ENABLE         (1 << 19)
++#     define RS480_GTW_LAC_EN         (1 << 25)
++#     define RS480_2LEVEL_GART        (0 << 30)
++#     define RS480_1LEVEL_GART        (1 << 30)
++#     define RS480_PDC_EN             (1 << 31)
++#define RS480_GART_BASE                 0x2c
++#define RS480_GART_CACHE_CNTRL          0x2e
++#     define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
++#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
++#     define RS480_GART_EN            (1 << 0)
++#     define RS480_VA_SIZE_32MB       (0 << 1)
++#     define RS480_VA_SIZE_64MB       (1 << 1)
++#     define RS480_VA_SIZE_128MB      (2 << 1)
++#     define RS480_VA_SIZE_256MB      (3 << 1)
++#     define RS480_VA_SIZE_512MB      (4 << 1)
++#     define RS480_VA_SIZE_1GB        (5 << 1)
++#     define RS480_VA_SIZE_2GB        (6 << 1)
++#define RS480_AGP_MODE_CNTL             0x39
++#     define RS480_POST_GART_Q_SIZE   (1 << 18)
++#     define RS480_NONGART_SNOOP      (1 << 19)
++#     define RS480_AGP_RD_BUF_SIZE    (1 << 20)
++#     define RS480_REQ_TYPE_SNOOP_SHIFT 22
++#     define RS480_REQ_TYPE_SNOOP_MASK  0x3
++#     define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
++#define RS480_MC_MISC_UMA_CNTL          0x5f
++#define RS480_MC_MCLK_CNTL              0x7a
++#define RS480_MC_UMA_DUALCH_CNTL        0x86
++
++#define RS690_MC_FB_LOCATION            0x100
++#define RS690_MC_AGP_LOCATION           0x101
++#define RS690_MC_AGP_BASE               0x102
++#define RS690_MC_AGP_BASE_2             0x103
++
++#define R520_MC_IND_INDEX 0x70
++#define R520_MC_IND_WR_EN (1 << 24)
++#define R520_MC_IND_DATA  0x74
++
++#define RV515_MC_FB_LOCATION 0x01
++#define RV515_MC_AGP_LOCATION 0x02
++#define RV515_MC_AGP_BASE     0x03
++#define RV515_MC_AGP_BASE_2   0x04
++
++#define R520_MC_FB_LOCATION 0x04
++#define R520_MC_AGP_LOCATION 0x05
++#define R520_MC_AGP_BASE     0x06
++#define R520_MC_AGP_BASE_2   0x07
++
++#define RADEON_MPP_TB_CONFIG          0x01c0
++#define RADEON_MEM_CNTL                       0x0140
++#define RADEON_MEM_SDRAM_MODE_REG     0x0158
++#define RADEON_AGP_BASE_2             0x015c /* r200+ only */
++#define RS480_AGP_BASE_2              0x0164
++#define RADEON_AGP_BASE                       0x0170
++
++/* pipe config regs */
++#define R400_GB_PIPE_SELECT             0x402c
++#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
++#define R500_SU_REG_DEST                0x42c8
++#define R300_GB_TILE_CONFIG             0x4018
++#       define R300_ENABLE_TILING       (1 << 0)
++#       define R300_PIPE_COUNT_RV350    (0 << 1)
++#       define R300_PIPE_COUNT_R300     (3 << 1)
++#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
++#       define R300_PIPE_COUNT_R420     (7 << 1)
++#       define R300_TILE_SIZE_8         (0 << 4)
++#       define R300_TILE_SIZE_16        (1 << 4)
++#       define R300_TILE_SIZE_32        (2 << 4)
++#       define R300_SUBPIXEL_1_12       (0 << 16)
++#       define R300_SUBPIXEL_1_16       (1 << 16)
++#define R300_DST_PIPE_CONFIG            0x170c
++#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
++#define R300_RB2D_DSTCACHE_MODE         0x3428
++#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
++#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
++
++#define RADEON_RB3D_COLOROFFSET               0x1c40
++#define RADEON_RB3D_COLORPITCH                0x1c48
++
++#define       RADEON_SRC_X_Y                  0x1590
++
++#define RADEON_DP_GUI_MASTER_CNTL     0x146c
++#     define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
++#     define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
++#     define RADEON_GMC_BRUSH_SOLID_COLOR     (13 << 4)
++#     define RADEON_GMC_BRUSH_NONE            (15 << 4)
++#     define RADEON_GMC_DST_16BPP             (4 << 8)
++#     define RADEON_GMC_DST_24BPP             (5 << 8)
++#     define RADEON_GMC_DST_32BPP             (6 << 8)
++#     define RADEON_GMC_DST_DATATYPE_SHIFT    8
++#     define RADEON_GMC_SRC_DATATYPE_COLOR    (3 << 12)
++#     define RADEON_DP_SRC_SOURCE_MEMORY      (2 << 24)
++#     define RADEON_DP_SRC_SOURCE_HOST_DATA   (3 << 24)
++#     define RADEON_GMC_CLR_CMP_CNTL_DIS      (1 << 28)
++#     define RADEON_GMC_WR_MSK_DIS            (1 << 30)
++#     define RADEON_ROP3_S                    0x00cc0000
++#     define RADEON_ROP3_P                    0x00f00000
++#define RADEON_DP_WRITE_MASK          0x16cc
++#define RADEON_SRC_PITCH_OFFSET               0x1428
++#define RADEON_DST_PITCH_OFFSET               0x142c
++#define RADEON_DST_PITCH_OFFSET_C     0x1c80
++#     define RADEON_DST_TILE_LINEAR           (0 << 30)
++#     define RADEON_DST_TILE_MACRO            (1 << 30)
++#     define RADEON_DST_TILE_MICRO            (2 << 30)
++#     define RADEON_DST_TILE_BOTH             (3 << 30)
++
++#define RADEON_SCRATCH_REG0           0x15e0
++#define RADEON_SCRATCH_REG1           0x15e4
++#define RADEON_SCRATCH_REG2           0x15e8
++#define RADEON_SCRATCH_REG3           0x15ec
++#define RADEON_SCRATCH_REG4           0x15f0
++#define RADEON_SCRATCH_REG5           0x15f4
++#define RADEON_SCRATCH_UMSK           0x0770
++#define RADEON_SCRATCH_ADDR           0x0774
++
++#define RADEON_SCRATCHOFF( x )                (RADEON_SCRATCH_REG_OFFSET + 4*(x))
++
++#define GET_SCRATCH( x )      (dev_priv->writeback_works                      \
++                              ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
++                              : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
++
++#define RADEON_CRTC_CRNT_FRAME 0x0214
++#define RADEON_CRTC2_CRNT_FRAME 0x0314
++
++#define RADEON_CRTC_STATUS            0x005c
++#define RADEON_CRTC2_STATUS           0x03fc
++
++#define RADEON_GEN_INT_CNTL           0x0040
++#     define RADEON_CRTC_VBLANK_MASK          (1 << 0)
++#     define RADEON_CRTC2_VBLANK_MASK         (1 << 9)
++#     define RADEON_GUI_IDLE_INT_ENABLE       (1 << 19)
++#     define RADEON_SW_INT_ENABLE             (1 << 25)
++
++#define RADEON_GEN_INT_STATUS         0x0044
++#     define RADEON_CRTC_VBLANK_STAT          (1 << 0)
++#     define RADEON_CRTC_VBLANK_STAT_ACK      (1 << 0)
++#     define RADEON_CRTC2_VBLANK_STAT         (1 << 9)
++#     define RADEON_CRTC2_VBLANK_STAT_ACK     (1 << 9)
++#     define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
++#     define RADEON_SW_INT_TEST               (1 << 25)
++#     define RADEON_SW_INT_TEST_ACK           (1 << 25)
++#     define RADEON_SW_INT_FIRE               (1 << 26)
++#       define R500_DISPLAY_INT_STATUS          (1 << 0)
++
++
++#define RADEON_HOST_PATH_CNTL         0x0130
++#     define RADEON_HDP_SOFT_RESET            (1 << 26)
++#     define RADEON_HDP_WC_TIMEOUT_MASK       (7 << 28)
++#     define RADEON_HDP_WC_TIMEOUT_28BCLK     (7 << 28)
++
++#define RADEON_ISYNC_CNTL             0x1724
++#     define RADEON_ISYNC_ANY2D_IDLE3D        (1 << 0)
++#     define RADEON_ISYNC_ANY3D_IDLE2D        (1 << 1)
++#     define RADEON_ISYNC_TRIG2D_IDLE3D       (1 << 2)
++#     define RADEON_ISYNC_TRIG3D_IDLE2D       (1 << 3)
++#     define RADEON_ISYNC_WAIT_IDLEGUI        (1 << 4)
++#     define RADEON_ISYNC_CPSCRATCH_IDLEGUI   (1 << 5)
++
++#define RADEON_RBBM_GUICNTL           0x172c
++#     define RADEON_HOST_DATA_SWAP_NONE       (0 << 0)
++#     define RADEON_HOST_DATA_SWAP_16BIT      (1 << 0)
++#     define RADEON_HOST_DATA_SWAP_32BIT      (2 << 0)
++#     define RADEON_HOST_DATA_SWAP_HDW        (3 << 0)
++
++#define RADEON_MC_AGP_LOCATION                0x014c
++#define RADEON_MC_FB_LOCATION         0x0148
++#define RADEON_MCLK_CNTL              0x0012
++#     define RADEON_FORCEON_MCLKA             (1 << 16)
++#     define RADEON_FORCEON_MCLKB             (1 << 17)
++#     define RADEON_FORCEON_YCLKA             (1 << 18)
++#     define RADEON_FORCEON_YCLKB             (1 << 19)
++#     define RADEON_FORCEON_MC                (1 << 20)
++#     define RADEON_FORCEON_AIC               (1 << 21)
++
++#define RADEON_PP_BORDER_COLOR_0      0x1d40
++#define RADEON_PP_BORDER_COLOR_1      0x1d44
++#define RADEON_PP_BORDER_COLOR_2      0x1d48
++#define RADEON_PP_CNTL                        0x1c38
++#     define RADEON_SCISSOR_ENABLE            (1 <<  1)
++#define RADEON_PP_LUM_MATRIX          0x1d00
++#define RADEON_PP_MISC                        0x1c14
++#define RADEON_PP_ROT_MATRIX_0                0x1d58
++#define RADEON_PP_TXFILTER_0          0x1c54
++#define RADEON_PP_TXOFFSET_0          0x1c5c
++#define RADEON_PP_TXFILTER_1          0x1c6c
++#define RADEON_PP_TXFILTER_2          0x1c84
++
++#define R300_RB2D_DSTCACHE_CTLSTAT    0x342c /* use R300_DSTCACHE_CTLSTAT */
++#define R300_DSTCACHE_CTLSTAT         0x1714
++#     define R300_RB2D_DC_FLUSH               (3 << 0)
++#     define R300_RB2D_DC_FREE                (3 << 2)
++#     define R300_RB2D_DC_FLUSH_ALL           0xf
++#     define R300_RB2D_DC_BUSY                (1 << 31)
++#define RADEON_RB3D_CNTL              0x1c3c
++#     define RADEON_ALPHA_BLEND_ENABLE        (1 << 0)
++#     define RADEON_PLANE_MASK_ENABLE         (1 << 1)
++#     define RADEON_DITHER_ENABLE             (1 << 2)
++#     define RADEON_ROUND_ENABLE              (1 << 3)
++#     define RADEON_SCALE_DITHER_ENABLE       (1 << 4)
++#     define RADEON_DITHER_INIT               (1 << 5)
++#     define RADEON_ROP_ENABLE                (1 << 6)
++#     define RADEON_STENCIL_ENABLE            (1 << 7)
++#     define RADEON_Z_ENABLE                  (1 << 8)
++#     define RADEON_ZBLOCK16                  (1 << 15)
++#define RADEON_RB3D_DEPTHOFFSET               0x1c24
++#define RADEON_RB3D_DEPTHCLEARVALUE   0x3230
++#define RADEON_RB3D_DEPTHPITCH                0x1c28
++#define RADEON_RB3D_PLANEMASK         0x1d84
++#define RADEON_RB3D_STENCILREFMASK    0x1d7c
++#define RADEON_RB3D_ZCACHE_MODE               0x3250
++#define RADEON_RB3D_ZCACHE_CTLSTAT    0x3254
++#     define RADEON_RB3D_ZC_FLUSH             (1 << 0)
++#     define RADEON_RB3D_ZC_FREE              (1 << 2)
++#     define RADEON_RB3D_ZC_FLUSH_ALL         0x5
++#     define RADEON_RB3D_ZC_BUSY              (1 << 31)
++#define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
++#     define R300_ZC_FLUSH                    (1 << 0)
++#     define R300_ZC_FREE                     (1 << 1)
++#     define R300_ZC_BUSY                     (1 << 31)
++#define RADEON_RB3D_DSTCACHE_CTLSTAT            0x325c
++#     define RADEON_RB3D_DC_FLUSH             (3 << 0)
++#     define RADEON_RB3D_DC_FREE              (3 << 2)
++#     define RADEON_RB3D_DC_FLUSH_ALL         0xf
++#     define RADEON_RB3D_DC_BUSY              (1 << 31)
++#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
++#     define R300_RB3D_DC_FLUSH               (2 << 0)
++#     define R300_RB3D_DC_FREE                (2 << 2)
++#     define R300_RB3D_DC_FINISH              (1 << 4)
++#define RADEON_RB3D_ZSTENCILCNTL      0x1c2c
++#     define RADEON_Z_TEST_MASK               (7 << 4)
++#     define RADEON_Z_TEST_ALWAYS             (7 << 4)
++#     define RADEON_Z_HIERARCHY_ENABLE        (1 << 8)
++#     define RADEON_STENCIL_TEST_ALWAYS       (7 << 12)
++#     define RADEON_STENCIL_S_FAIL_REPLACE    (2 << 16)
++#     define RADEON_STENCIL_ZPASS_REPLACE     (2 << 20)
++#     define RADEON_STENCIL_ZFAIL_REPLACE     (2 << 24)
++#     define RADEON_Z_COMPRESSION_ENABLE      (1 << 28)
++#     define RADEON_FORCE_Z_DIRTY             (1 << 29)
++#     define RADEON_Z_WRITE_ENABLE            (1 << 30)
++#     define RADEON_Z_DECOMPRESSION_ENABLE    (1 << 31)
++#define RADEON_RBBM_SOFT_RESET                0x00f0
++#     define RADEON_SOFT_RESET_CP             (1 <<  0)
++#     define RADEON_SOFT_RESET_HI             (1 <<  1)
++#     define RADEON_SOFT_RESET_SE             (1 <<  2)
++#     define RADEON_SOFT_RESET_RE             (1 <<  3)
++#     define RADEON_SOFT_RESET_PP             (1 <<  4)
++#     define RADEON_SOFT_RESET_E2             (1 <<  5)
++#     define RADEON_SOFT_RESET_RB             (1 <<  6)
++#     define RADEON_SOFT_RESET_HDP            (1 <<  7)
++/*
++ *   6:0  Available slots in the FIFO
++ *   8    Host Interface active
++ *   9    CP request active
++ *   10   FIFO request active
++ *   11   Host Interface retry active
++ *   12   CP retry active
++ *   13   FIFO retry active
++ *   14   FIFO pipeline busy
++ *   15   Event engine busy
++ *   16   CP command stream busy
++ *   17   2D engine busy
++ *   18   2D portion of render backend busy
++ *   20   3D setup engine busy
++ *   26   GA engine busy
++ *   27   CBA 2D engine busy
++ *   31   2D engine busy or 3D engine busy or FIFO not empty or CP busy or
++ *           command stream queue not empty or Ring Buffer not empty
++ */
++#define RADEON_RBBM_STATUS            0x0e40
++/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register.  */
++/* #define RADEON_RBBM_STATUS         0x1740 */
++/* bits 6:0 are dword slots available in the cmd fifo */
++#     define RADEON_RBBM_FIFOCNT_MASK         0x007f
++#     define RADEON_HIRQ_ON_RBB       (1 <<  8)
++#     define RADEON_CPRQ_ON_RBB       (1 <<  9)
++#     define RADEON_CFRQ_ON_RBB       (1 << 10)
++#     define RADEON_HIRQ_IN_RTBUF     (1 << 11)
++#     define RADEON_CPRQ_IN_RTBUF     (1 << 12)
++#     define RADEON_CFRQ_IN_RTBUF     (1 << 13)
++#     define RADEON_PIPE_BUSY         (1 << 14)
++#     define RADEON_ENG_EV_BUSY       (1 << 15)
++#     define RADEON_CP_CMDSTRM_BUSY   (1 << 16)
++#     define RADEON_E2_BUSY           (1 << 17)
++#     define RADEON_RB2D_BUSY         (1 << 18)
++#     define RADEON_RB3D_BUSY         (1 << 19) /* not used on r300 */
++#     define RADEON_VAP_BUSY          (1 << 20)
++#     define RADEON_RE_BUSY           (1 << 21) /* not used on r300 */
++#     define RADEON_TAM_BUSY          (1 << 22) /* not used on r300 */
++#     define RADEON_TDM_BUSY          (1 << 23) /* not used on r300 */
++#     define RADEON_PB_BUSY           (1 << 24) /* not used on r300 */
++#     define RADEON_TIM_BUSY          (1 << 25) /* not used on r300 */
++#     define RADEON_GA_BUSY           (1 << 26)
++#     define RADEON_CBA2D_BUSY        (1 << 27)
++#     define RADEON_RBBM_ACTIVE       (1 << 31)
++#define RADEON_RE_LINE_PATTERN                0x1cd0
++#define RADEON_RE_MISC                        0x26c4
++#define RADEON_RE_TOP_LEFT            0x26c0
++#define RADEON_RE_WIDTH_HEIGHT                0x1c44
++#define RADEON_RE_STIPPLE_ADDR                0x1cc8
++#define RADEON_RE_STIPPLE_DATA                0x1ccc
++
++#define RADEON_SCISSOR_TL_0           0x1cd8
++#define RADEON_SCISSOR_BR_0           0x1cdc
++#define RADEON_SCISSOR_TL_1           0x1ce0
++#define RADEON_SCISSOR_BR_1           0x1ce4
++#define RADEON_SCISSOR_TL_2           0x1ce8
++#define RADEON_SCISSOR_BR_2           0x1cec
++#define RADEON_SE_COORD_FMT           0x1c50
++#define RADEON_SE_CNTL                        0x1c4c
++#     define RADEON_FFACE_CULL_CW             (0 << 0)
++#     define RADEON_BFACE_SOLID               (3 << 1)
++#     define RADEON_FFACE_SOLID               (3 << 3)
++#     define RADEON_FLAT_SHADE_VTX_LAST       (3 << 6)
++#     define RADEON_DIFFUSE_SHADE_FLAT        (1 << 8)
++#     define RADEON_DIFFUSE_SHADE_GOURAUD     (2 << 8)
++#     define RADEON_ALPHA_SHADE_FLAT          (1 << 10)
++#     define RADEON_ALPHA_SHADE_GOURAUD       (2 << 10)
++#     define RADEON_SPECULAR_SHADE_FLAT       (1 << 12)
++#     define RADEON_SPECULAR_SHADE_GOURAUD    (2 << 12)
++#     define RADEON_FOG_SHADE_FLAT            (1 << 14)
++#     define RADEON_FOG_SHADE_GOURAUD         (2 << 14)
++#     define RADEON_VPORT_XY_XFORM_ENABLE     (1 << 24)
++#     define RADEON_VPORT_Z_XFORM_ENABLE      (1 << 25)
++#     define RADEON_VTX_PIX_CENTER_OGL        (1 << 27)
++#     define RADEON_ROUND_MODE_TRUNC          (0 << 28)
++#     define RADEON_ROUND_PREC_8TH_PIX        (1 << 30)
++#define RADEON_SE_CNTL_STATUS         0x2140
++#define RADEON_SE_LINE_WIDTH          0x1db8
++#define RADEON_SE_VPORT_XSCALE                0x1d98
++#define RADEON_SE_ZBIAS_FACTOR                0x1db0
++#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
++#define RADEON_SE_TCL_OUTPUT_VTX_FMT         0x2254
++#define RADEON_SE_TCL_VECTOR_INDX_REG        0x2200
++#       define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT  16
++#       define RADEON_VEC_INDX_DWORD_COUNT_SHIFT     28
++#define RADEON_SE_TCL_VECTOR_DATA_REG       0x2204
++#define RADEON_SE_TCL_SCALAR_INDX_REG       0x2208
++#       define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT  16
++#define RADEON_SE_TCL_SCALAR_DATA_REG       0x220C
++#define RADEON_SURFACE_ACCESS_FLAGS   0x0bf8
++#define RADEON_SURFACE_ACCESS_CLR     0x0bfc
++#define RADEON_SURFACE_CNTL           0x0b00
++#     define RADEON_SURF_TRANSLATION_DIS      (1 << 8)
++#     define RADEON_NONSURF_AP0_SWP_MASK      (3 << 20)
++#     define RADEON_NONSURF_AP0_SWP_LITTLE    (0 << 20)
++#     define RADEON_NONSURF_AP0_SWP_BIG16     (1 << 20)
++#     define RADEON_NONSURF_AP0_SWP_BIG32     (2 << 20)
++#     define RADEON_NONSURF_AP1_SWP_MASK      (3 << 22)
++#     define RADEON_NONSURF_AP1_SWP_LITTLE    (0 << 22)
++#     define RADEON_NONSURF_AP1_SWP_BIG16     (1 << 22)
++#     define RADEON_NONSURF_AP1_SWP_BIG32     (2 << 22)
++#define RADEON_SURFACE0_INFO          0x0b0c
++#     define RADEON_SURF_PITCHSEL_MASK        (0x1ff << 0)
++#     define RADEON_SURF_TILE_MODE_MASK       (3 << 16)
++#     define RADEON_SURF_TILE_MODE_MACRO      (0 << 16)
++#     define RADEON_SURF_TILE_MODE_MICRO      (1 << 16)
++#     define RADEON_SURF_TILE_MODE_32BIT_Z    (2 << 16)
++#     define RADEON_SURF_TILE_MODE_16BIT_Z    (3 << 16)
++#define RADEON_SURFACE0_LOWER_BOUND   0x0b04
++#define RADEON_SURFACE0_UPPER_BOUND   0x0b08
++#     define RADEON_SURF_ADDRESS_FIXED_MASK   (0x3ff << 0)
++#define RADEON_SURFACE1_INFO          0x0b1c
++#define RADEON_SURFACE1_LOWER_BOUND   0x0b14
++#define RADEON_SURFACE1_UPPER_BOUND   0x0b18
++#define RADEON_SURFACE2_INFO          0x0b2c
++#define RADEON_SURFACE2_LOWER_BOUND   0x0b24
++#define RADEON_SURFACE2_UPPER_BOUND   0x0b28
++#define RADEON_SURFACE3_INFO          0x0b3c
++#define RADEON_SURFACE3_LOWER_BOUND   0x0b34
++#define RADEON_SURFACE3_UPPER_BOUND   0x0b38
++#define RADEON_SURFACE4_INFO          0x0b4c
++#define RADEON_SURFACE4_LOWER_BOUND   0x0b44
++#define RADEON_SURFACE4_UPPER_BOUND   0x0b48
++#define RADEON_SURFACE5_INFO          0x0b5c
++#define RADEON_SURFACE5_LOWER_BOUND   0x0b54
++#define RADEON_SURFACE5_UPPER_BOUND   0x0b58
++#define RADEON_SURFACE6_INFO          0x0b6c
++#define RADEON_SURFACE6_LOWER_BOUND   0x0b64
++#define RADEON_SURFACE6_UPPER_BOUND   0x0b68
++#define RADEON_SURFACE7_INFO          0x0b7c
++#define RADEON_SURFACE7_LOWER_BOUND   0x0b74
++#define RADEON_SURFACE7_UPPER_BOUND   0x0b78
++#define RADEON_SW_SEMAPHORE           0x013c
++
++#define RADEON_WAIT_UNTIL             0x1720
++#     define RADEON_WAIT_CRTC_PFLIP           (1 << 0)
++#     define RADEON_WAIT_2D_IDLE              (1 << 14)
++#     define RADEON_WAIT_3D_IDLE              (1 << 15)
++#     define RADEON_WAIT_2D_IDLECLEAN         (1 << 16)
++#     define RADEON_WAIT_3D_IDLECLEAN         (1 << 17)
++#     define RADEON_WAIT_HOST_IDLECLEAN       (1 << 18)
++
++#define RADEON_RB3D_ZMASKOFFSET               0x3234
++#define RADEON_RB3D_ZSTENCILCNTL      0x1c2c
++#     define RADEON_DEPTH_FORMAT_16BIT_INT_Z  (0 << 0)
++#     define RADEON_DEPTH_FORMAT_24BIT_INT_Z  (2 << 0)
++
++/* CP registers */
++#define RADEON_CP_ME_RAM_ADDR         0x07d4
++#define RADEON_CP_ME_RAM_RADDR                0x07d8
++#define RADEON_CP_ME_RAM_DATAH                0x07dc
++#define RADEON_CP_ME_RAM_DATAL                0x07e0
++
++#define RADEON_CP_RB_BASE             0x0700
++#define RADEON_CP_RB_CNTL             0x0704
++#     define RADEON_BUF_SWAP_32BIT            (2 << 16)
++#     define RADEON_RB_NO_UPDATE              (1 << 27)
++#define RADEON_CP_RB_RPTR_ADDR                0x070c
++#define RADEON_CP_RB_RPTR             0x0710
++#define RADEON_CP_RB_WPTR             0x0714
++
++#define RADEON_CP_RB_WPTR_DELAY               0x0718
++#     define RADEON_PRE_WRITE_TIMER_SHIFT     0
++#     define RADEON_PRE_WRITE_LIMIT_SHIFT     23
++
++#define RADEON_CP_IB_BASE             0x0738
++
++#define RADEON_CP_CSQ_CNTL            0x0740
++#     define RADEON_CSQ_CNT_PRIMARY_MASK      (0xff << 0)
++#     define RADEON_CSQ_PRIDIS_INDDIS         (0 << 28)
++#     define RADEON_CSQ_PRIPIO_INDDIS         (1 << 28)
++#     define RADEON_CSQ_PRIBM_INDDIS          (2 << 28)
++#     define RADEON_CSQ_PRIPIO_INDBM          (3 << 28)
++#     define RADEON_CSQ_PRIBM_INDBM           (4 << 28)
++#     define RADEON_CSQ_PRIPIO_INDPIO         (15 << 28)
++
++#define RADEON_AIC_CNTL                       0x01d0
++#     define RADEON_PCIGART_TRANSLATE_EN      (1 << 0)
++#define RADEON_AIC_STAT                       0x01d4
++#define RADEON_AIC_PT_BASE            0x01d8
++#define RADEON_AIC_LO_ADDR            0x01dc
++#define RADEON_AIC_HI_ADDR            0x01e0
++#define RADEON_AIC_TLB_ADDR           0x01e4
++#define RADEON_AIC_TLB_DATA           0x01e8
++
++/* CP command packets */
++#define RADEON_CP_PACKET0             0x00000000
++#     define RADEON_ONE_REG_WR                (1 << 15)
++#define RADEON_CP_PACKET1             0x40000000
++#define RADEON_CP_PACKET2             0x80000000
++#define RADEON_CP_PACKET3             0xC0000000
++#       define RADEON_CP_NOP                    0x00001000
++#       define RADEON_CP_NEXT_CHAR              0x00001900
++#       define RADEON_CP_PLY_NEXTSCAN           0x00001D00
++#       define RADEON_CP_SET_SCISSORS           0x00001E00
++             /* GEN_INDX_PRIM is unsupported starting with R300 */
++#     define RADEON_3D_RNDR_GEN_INDX_PRIM     0x00002300
++#     define RADEON_WAIT_FOR_IDLE             0x00002600
++#     define RADEON_3D_DRAW_VBUF              0x00002800
++#     define RADEON_3D_DRAW_IMMD              0x00002900
++#     define RADEON_3D_DRAW_INDX              0x00002A00
++#       define RADEON_CP_LOAD_PALETTE           0x00002C00
++#     define RADEON_3D_LOAD_VBPNTR            0x00002F00
++#     define RADEON_MPEG_IDCT_MACROBLOCK      0x00003000
++#     define RADEON_MPEG_IDCT_MACROBLOCK_REV  0x00003100
++#     define RADEON_3D_CLEAR_ZMASK            0x00003200
++#     define RADEON_CP_INDX_BUFFER            0x00003300
++#       define RADEON_CP_3D_DRAW_VBUF_2         0x00003400
++#       define RADEON_CP_3D_DRAW_IMMD_2         0x00003500
++#       define RADEON_CP_3D_DRAW_INDX_2         0x00003600
++#     define RADEON_3D_CLEAR_HIZ              0x00003700
++#       define RADEON_CP_3D_CLEAR_CMASK         0x00003802
++#     define RADEON_CNTL_HOSTDATA_BLT         0x00009400
++#     define RADEON_CNTL_PAINT_MULTI          0x00009A00
++#     define RADEON_CNTL_BITBLT_MULTI         0x00009B00
++#     define RADEON_CNTL_SET_SCISSORS         0xC0001E00
++
++#define RADEON_CP_PACKET_MASK         0xC0000000
++#define RADEON_CP_PACKET_COUNT_MASK   0x3fff0000
++#define RADEON_CP_PACKET0_REG_MASK    0x000007ff
++#define RADEON_CP_PACKET1_REG0_MASK   0x000007ff
++#define RADEON_CP_PACKET1_REG1_MASK   0x003ff800
++
++#define RADEON_VTX_Z_PRESENT                  (1 << 31)
++#define RADEON_VTX_PKCOLOR_PRESENT            (1 << 3)
++
++#define RADEON_PRIM_TYPE_NONE                 (0 << 0)
++#define RADEON_PRIM_TYPE_POINT                        (1 << 0)
++#define RADEON_PRIM_TYPE_LINE                 (2 << 0)
++#define RADEON_PRIM_TYPE_LINE_STRIP           (3 << 0)
++#define RADEON_PRIM_TYPE_TRI_LIST             (4 << 0)
++#define RADEON_PRIM_TYPE_TRI_FAN              (5 << 0)
++#define RADEON_PRIM_TYPE_TRI_STRIP            (6 << 0)
++#define RADEON_PRIM_TYPE_TRI_TYPE2            (7 << 0)
++#define RADEON_PRIM_TYPE_RECT_LIST            (8 << 0)
++#define RADEON_PRIM_TYPE_3VRT_POINT_LIST      (9 << 0)
++#define RADEON_PRIM_TYPE_3VRT_LINE_LIST               (10 << 0)
++#define RADEON_PRIM_TYPE_MASK                   0xf
++#define RADEON_PRIM_WALK_IND                  (1 << 4)
++#define RADEON_PRIM_WALK_LIST                 (2 << 4)
++#define RADEON_PRIM_WALK_RING                 (3 << 4)
++#define RADEON_COLOR_ORDER_BGRA                       (0 << 6)
++#define RADEON_COLOR_ORDER_RGBA                       (1 << 6)
++#define RADEON_MAOS_ENABLE                    (1 << 7)
++#define RADEON_VTX_FMT_R128_MODE              (0 << 8)
++#define RADEON_VTX_FMT_RADEON_MODE            (1 << 8)
++#define RADEON_NUM_VERTICES_SHIFT             16
++
++#define RADEON_COLOR_FORMAT_CI8               2
++#define RADEON_COLOR_FORMAT_ARGB1555  3
++#define RADEON_COLOR_FORMAT_RGB565    4
++#define RADEON_COLOR_FORMAT_ARGB8888  6
++#define RADEON_COLOR_FORMAT_RGB332    7
++#define RADEON_COLOR_FORMAT_RGB8      9
++#define RADEON_COLOR_FORMAT_ARGB4444  15
++
++#define RADEON_TXFORMAT_I8            0
++#define RADEON_TXFORMAT_AI88          1
++#define RADEON_TXFORMAT_RGB332                2
++#define RADEON_TXFORMAT_ARGB1555      3
++#define RADEON_TXFORMAT_RGB565                4
++#define RADEON_TXFORMAT_ARGB4444      5
++#define RADEON_TXFORMAT_ARGB8888      6
++#define RADEON_TXFORMAT_RGBA8888      7
++#define RADEON_TXFORMAT_Y8            8
++#define RADEON_TXFORMAT_VYUY422         10
++#define RADEON_TXFORMAT_YVYU422         11
++#define RADEON_TXFORMAT_DXT1            12
++#define RADEON_TXFORMAT_DXT23           14
++#define RADEON_TXFORMAT_DXT45           15
++
++#define R200_PP_TXCBLEND_0                0x2f00
++#define R200_PP_TXCBLEND_1                0x2f10
++#define R200_PP_TXCBLEND_2                0x2f20
++#define R200_PP_TXCBLEND_3                0x2f30
++#define R200_PP_TXCBLEND_4                0x2f40
++#define R200_PP_TXCBLEND_5                0x2f50
++#define R200_PP_TXCBLEND_6                0x2f60
++#define R200_PP_TXCBLEND_7                0x2f70
++#define R200_SE_TCL_LIGHT_MODEL_CTL_0     0x2268
++#define R200_PP_TFACTOR_0                 0x2ee0
++#define R200_SE_VTX_FMT_0                 0x2088
++#define R200_SE_VAP_CNTL                  0x2080
++#define R200_SE_TCL_MATRIX_SEL_0          0x2230
++#define R200_SE_TCL_TEX_PROC_CTL_2        0x22a8
++#define R200_SE_TCL_UCP_VERT_BLEND_CTL    0x22c0
++#define R200_PP_TXFILTER_5                0x2ca0
++#define R200_PP_TXFILTER_4                0x2c80
++#define R200_PP_TXFILTER_3                0x2c60
++#define R200_PP_TXFILTER_2                0x2c40
++#define R200_PP_TXFILTER_1                0x2c20
++#define R200_PP_TXFILTER_0                0x2c00
++#define R200_PP_TXOFFSET_5                0x2d78
++#define R200_PP_TXOFFSET_4                0x2d60
++#define R200_PP_TXOFFSET_3                0x2d48
++#define R200_PP_TXOFFSET_2                0x2d30
++#define R200_PP_TXOFFSET_1                0x2d18
++#define R200_PP_TXOFFSET_0                0x2d00
++
++#define R200_PP_CUBIC_FACES_0             0x2c18
++#define R200_PP_CUBIC_FACES_1             0x2c38
++#define R200_PP_CUBIC_FACES_2             0x2c58
++#define R200_PP_CUBIC_FACES_3             0x2c78
++#define R200_PP_CUBIC_FACES_4             0x2c98
++#define R200_PP_CUBIC_FACES_5             0x2cb8
++#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
++#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
++#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
++#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
++#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
++#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
++#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
++#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
++#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
++#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
++#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
++#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
++#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
++#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
++#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
++#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
++#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
++#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
++#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
++#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
++#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
++#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
++#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
++#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
++#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
++#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
++#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
++#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
++#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
++#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
++
++#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
++#define R200_SE_VTE_CNTL                  0x20b0
++#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL   0x2250
++#define R200_PP_TAM_DEBUG3                0x2d9c
++#define R200_PP_CNTL_X                    0x2cc4
++#define R200_SE_VAP_CNTL_STATUS           0x2140
++#define R200_RE_SCISSOR_TL_0              0x1cd8
++#define R200_RE_SCISSOR_TL_1              0x1ce0
++#define R200_RE_SCISSOR_TL_2              0x1ce8
++#define R200_RB3D_DEPTHXY_OFFSET          0x1d60
++#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
++#define R200_SE_VTX_STATE_CNTL            0x2180
++#define R200_RE_POINTSIZE                 0x2648
++#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
++
++#define RADEON_PP_TEX_SIZE_0                0x1d04    /* NPOT */
++#define RADEON_PP_TEX_SIZE_1                0x1d0c
++#define RADEON_PP_TEX_SIZE_2                0x1d14
++
++#define RADEON_PP_CUBIC_FACES_0             0x1d24
++#define RADEON_PP_CUBIC_FACES_1             0x1d28
++#define RADEON_PP_CUBIC_FACES_2             0x1d2c
++#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0    /* bits [31:5] */
++#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
++#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
++
++#define RADEON_SE_TCL_STATE_FLUSH           0x2284
++
++#define SE_VAP_CNTL__TCL_ENA_MASK                          0x00000001
++#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK                   0x00010000
++#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT                 0x00000012
++#define SE_VTE_CNTL__VTX_XY_FMT_MASK                       0x00000100
++#define SE_VTE_CNTL__VTX_Z_FMT_MASK                        0x00000200
++#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK                  0x00000001
++#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK                  0x00000002
++#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT               0x0000000b
++#define R200_3D_DRAW_IMMD_2      0xC0003500
++#define R200_SE_VTX_FMT_1                 0x208c
++#define R200_RE_CNTL                      0x1c50
++
++#define R200_RB3D_BLENDCOLOR              0x3218
++
++#define R200_SE_TCL_POINT_SPRITE_CNTL     0x22c4
++
++#define R200_PP_TRI_PERF                  0x2cf8
++
++#define R200_PP_AFS_0                     0x2f80
++#define R200_PP_AFS_1                     0x2f00 /* same as txcblend_0 */
++
++#define R200_VAP_PVS_CNTL_1               0x22D0
++
++/* MPEG settings from VHA code */
++#define RADEON_VHA_SETTO16_1                       0x2694
++#define RADEON_VHA_SETTO16_2                       0x2680
++#define RADEON_VHA_SETTO0_1                        0x1840
++#define RADEON_VHA_FB_OFFSET                       0x19e4
++#define RADEON_VHA_SETTO1AND70S                    0x19d8
++#define RADEON_VHA_DST_PITCH                       0x1408
++
++// set as reference header
++#define RADEON_VHA_BACKFRAME0_OFF_Y              0x1840
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y        0x1844
++#define RADEON_VHA_BACKFRAME0_OFF_U              0x1848
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U        0x184c
++#define RADOEN_VHA_BACKFRAME0_OFF_V              0x1850
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V        0x1854
++#define RADEON_VHA_FORWFRAME0_OFF_Y              0x1858
++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y        0x185c
++#define RADEON_VHA_FORWFRAME0_OFF_U              0x1860
++#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U        0x1864
++#define RADEON_VHA_FORWFRAME0_OFF_V              0x1868
++#define RADEON_VHA_FORWFRAME0_OFF_PITCH_V        0x1880
++#define RADEON_VHA_BACKFRAME0_OFF_Y_2            0x1884
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2      0x1888
++#define RADEON_VHA_BACKFRAME0_OFF_U_2            0x188c
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2      0x1890
++#define RADEON_VHA_BACKFRAME0_OFF_V_2            0x1894
++#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2      0x1898
++
++#define R500_D1CRTC_STATUS 0x609c
++#define R500_D2CRTC_STATUS 0x689c
++#define R500_CRTC_V_BLANK (1<<0)
++
++#define R500_D1CRTC_FRAME_COUNT 0x60a4
++#define R500_D2CRTC_FRAME_COUNT 0x68a4
++
++#define R500_D1MODE_V_COUNTER 0x6530
++#define R500_D2MODE_V_COUNTER 0x6d30
++
++#define R500_D1MODE_VBLANK_STATUS 0x6534
++#define R500_D2MODE_VBLANK_STATUS 0x6d34
++#define R500_VBLANK_OCCURED (1<<0)
++#define R500_VBLANK_ACK     (1<<4)
++#define R500_VBLANK_STAT    (1<<12)
++#define R500_VBLANK_INT     (1<<16)
++
++#define R500_DxMODE_INT_MASK 0x6540
++#define R500_D1MODE_INT_MASK (1<<0)
++#define R500_D2MODE_INT_MASK (1<<8)
++
++#define R500_DISP_INTERRUPT_STATUS 0x7edc
++#define R500_D1_VBLANK_INTERRUPT (1 << 4)
++#define R500_D2_VBLANK_INTERRUPT (1 << 5)
++
++/* Constants */
++#define RADEON_MAX_USEC_TIMEOUT               100000  /* 100 ms */
++
++#define RADEON_LAST_FRAME_REG         RADEON_SCRATCH_REG0
++#define RADEON_LAST_DISPATCH_REG      RADEON_SCRATCH_REG1
++#define RADEON_LAST_CLEAR_REG         RADEON_SCRATCH_REG2
++#define RADEON_LAST_SWI_REG           RADEON_SCRATCH_REG3
++#define RADEON_LAST_DISPATCH          1
++
++#define RADEON_MAX_VB_AGE             0x7fffffff
++#define RADEON_MAX_VB_VERTS           (0xffff)
++
++#define RADEON_RING_HIGH_MARK         128
++
++#define RADEON_PCIGART_TABLE_SIZE      (32*1024)
++
++#define RADEON_READ(reg)    DRM_READ32(  dev_priv->mmio, (reg) )
++#define RADEON_WRITE(reg,val)  DRM_WRITE32( dev_priv->mmio, (reg), (val) )
++#define RADEON_READ8(reg)     DRM_READ8(  dev_priv->mmio, (reg) )
++#define RADEON_WRITE8(reg,val)        DRM_WRITE8( dev_priv->mmio, (reg), (val) )
++
++#define RADEON_WRITE_PLL( addr, val )                                 \
++do {                                                                  \
++      RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX,                         \
++                     ((addr) & 0x1f) | RADEON_PLL_WR_EN );            \
++      RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) );                  \
++} while (0)
++
++#define RADEON_WRITE_PCIE( addr, val )                                        \
++do {                                                                  \
++      RADEON_WRITE8( RADEON_PCIE_INDEX,                               \
++                      ((addr) & 0xff));                               \
++      RADEON_WRITE( RADEON_PCIE_DATA, (val) );                        \
++} while (0)
++
++#define R500_WRITE_MCIND( addr, val )                                 \
++do {                                                          \
++      RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));    \
++      RADEON_WRITE(R520_MC_IND_DATA, (val));                  \
++      RADEON_WRITE(R520_MC_IND_INDEX, 0);     \
++} while (0)
++
++#define RS480_WRITE_MCIND( addr, val )                                \
++do {                                                                  \
++      RADEON_WRITE( RS480_NB_MC_INDEX,                                \
++                      ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);       \
++      RADEON_WRITE( RS480_NB_MC_DATA, (val) );                        \
++      RADEON_WRITE( RS480_NB_MC_INDEX, 0xff );                        \
++} while (0)
++
++#define RS690_WRITE_MCIND( addr, val )                                        \
++do {                                                          \
++      RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));    \
++      RADEON_WRITE(RS690_MC_DATA, val);                       \
++      RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);    \
++} while (0)
++
++#define IGP_WRITE_MCIND( addr, val )                          \
++do {                                                                  \
++        if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
++              RS690_WRITE_MCIND( addr, val );                         \
++      else                                                            \
++              RS480_WRITE_MCIND( addr, val );                         \
++} while (0)
++
++#define CP_PACKET0( reg, n )                                          \
++      (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
++#define CP_PACKET0_TABLE( reg, n )                                    \
++      (RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
++#define CP_PACKET1( reg0, reg1 )                                      \
++      (RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
++#define CP_PACKET2()                                                  \
++      (RADEON_CP_PACKET2)
++#define CP_PACKET3( pkt, n )                                          \
++      (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
++
++/* ================================================================
++ * Engine control helper macros
++ */
++
++#define RADEON_WAIT_UNTIL_2D_IDLE() do {                              \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |                           \
++                 RADEON_WAIT_HOST_IDLECLEAN) );                       \
++} while (0)
++
++#define RADEON_WAIT_UNTIL_3D_IDLE() do {                              \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |                           \
++                 RADEON_WAIT_HOST_IDLECLEAN) );                       \
++} while (0)
++
++#define RADEON_WAIT_UNTIL_IDLE() do {                                 \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |                           \
++                 RADEON_WAIT_3D_IDLECLEAN |                           \
++                 RADEON_WAIT_HOST_IDLECLEAN) );                       \
++} while (0)
++
++#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {                         \
++      OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );                 \
++      OUT_RING( RADEON_WAIT_CRTC_PFLIP );                             \
++} while (0)
++
++#define RADEON_FLUSH_CACHE() do {                                     \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));  \
++              OUT_RING(RADEON_RB3D_DC_FLUSH);                         \
++      } else {                                                        \
++              OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
++              OUT_RING(R300_RB3D_DC_FLUSH);                           \
++        }                                                               \
++} while (0)
++
++#define RADEON_PURGE_CACHE() do {                                     \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
++              OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);   \
++      } else {                                                        \
++              OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));    \
++              OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE );      \
++        }                                                               \
++} while (0)
++
++#define RADEON_FLUSH_ZCACHE() do {                                    \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
++              OUT_RING( RADEON_RB3D_ZC_FLUSH );                       \
++      } else {                                                        \
++              OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) );    \
++              OUT_RING( R300_ZC_FLUSH );                              \
++        }                                                               \
++} while (0)
++
++#define RADEON_PURGE_ZCACHE() do {                                    \
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {     \
++              OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));    \
++              OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);   \
++      } else {                                                        \
++              OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));        \
++              OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);                 \
++        }                                                               \
++} while (0)
++
++/* ================================================================
++ * Misc helper macros
++ */
++
++/* Perfbox functionality only.
++ */
++#define RING_SPACE_TEST_WITH_RETURN( dev_priv )                               \
++do {                                                                  \
++      if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) {           \
++              u32 head = GET_RING_HEAD( dev_priv );                   \
++              if (head == dev_priv->ring.tail)                        \
++                      dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE;   \
++      }                                                               \
++} while (0)
++
++#define VB_AGE_TEST_WITH_RETURN( dev_priv )                           \
++do {                                                                  \
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;          \
++      if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {         \
++              int __ret = radeon_do_cp_idle( dev_priv );              \
++              if ( __ret ) return __ret;                              \
++              sarea_priv->last_dispatch = 0;                          \
++              radeon_freelist_reset( dev );                           \
++      }                                                               \
++} while (0)
++
++#define RADEON_DISPATCH_AGE( age ) do {                                       \
++      OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) );          \
++      OUT_RING( age );                                                \
++} while (0)
++
++#define RADEON_FRAME_AGE( age ) do {                                  \
++      OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) );             \
++      OUT_RING( age );                                                \
++} while (0)
++
++#define RADEON_CLEAR_AGE( age ) do {                                  \
++      OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) );             \
++      OUT_RING( age );                                                \
++} while (0)
++
++/* ================================================================
++ * Ring control
++ */
++
++#define RADEON_VERBOSE        0
++
++#define RING_LOCALS   int write, _nr; unsigned int mask; u32 *ring;
++
++#define BEGIN_RING( n ) do {                                          \
++      if ( RADEON_VERBOSE ) {                                         \
++              DRM_INFO( "BEGIN_RING( %d )\n", (n));                   \
++      }                                                               \
++      if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {              \
++              COMMIT_RING();                                          \
++              radeon_wait_ring( dev_priv, (n) * sizeof(u32) );        \
++      }                                                               \
++      _nr = n; dev_priv->ring.space -= (n) * sizeof(u32);             \
++      ring = dev_priv->ring.start;                                    \
++      write = dev_priv->ring.tail;                                    \
++      mask = dev_priv->ring.tail_mask;                                \
++} while (0)
++
++#define ADVANCE_RING() do {                                           \
++      if ( RADEON_VERBOSE ) {                                         \
++              DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
++                        write, dev_priv->ring.tail );                 \
++      }                                                               \
++      if (((dev_priv->ring.tail + _nr) & mask) != write) {            \
++              DRM_ERROR(                                              \
++                      "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",        \
++                      ((dev_priv->ring.tail + _nr) & mask),           \
++                      write, __LINE__);                                               \
++      } else                                                          \
++              dev_priv->ring.tail = write;                            \
++} while (0)
++
++#define COMMIT_RING() do {                                            \
++      /* Flush writes to ring */                                      \
++      DRM_MEMORYBARRIER();                                            \
++      GET_RING_HEAD( dev_priv );                                      \
++      RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail );         \
++      /* read from PCI bus to ensure correct posting */               \
++      RADEON_READ( RADEON_CP_RB_RPTR );                               \
++} while (0)
++
++#define OUT_RING( x ) do {                                            \
++      if ( RADEON_VERBOSE ) {                                         \
++              DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",            \
++                         (unsigned int)(x), write );                  \
++      }                                                               \
++      ring[write++] = (x);                                            \
++      write &= mask;                                                  \
++} while (0)
++
++#define OUT_RING_REG( reg, val ) do {                                 \
++      OUT_RING( CP_PACKET0( reg, 0 ) );                               \
++      OUT_RING( val );                                                \
++} while (0)
++
++#define OUT_RING_TABLE( tab, sz ) do {                                \
++      int _size = (sz);                                       \
++      int *_tab = (int *)(tab);                               \
++                                                              \
++      if (write + _size > mask) {                             \
++              int _i = (mask+1) - write;                      \
++              _size -= _i;                                    \
++              while (_i > 0) {                                \
++                      *(int *)(ring + write) = *_tab++;       \
++                      write++;                                \
++                      _i--;                                   \
++              }                                               \
++              write = 0;                                      \
++              _tab += _i;                                     \
++      }                                                       \
++      while (_size > 0) {                                     \
++              *(ring + write) = *_tab++;                      \
++              write++;                                        \
++              _size--;                                        \
++      }                                                       \
++      write &= mask;                                          \
++} while (0)
++
++#endif                                /* __RADEON_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_ioc32.c git-nokia/drivers/gpu/drm-tungsten/radeon_ioc32.c
+--- git/drivers/gpu/drm-tungsten/radeon_ioc32.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_ioc32.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,424 @@
++/**
++ * \file radeon_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the Radeon DRM.
++ *
++ * \author Paul Mackerras <paulus@samba.org>
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++typedef struct drm_radeon_init32 {
++      int func;
++      u32 sarea_priv_offset;
++      int is_pci;
++      int cp_mode;
++      int gart_size;
++      int ring_size;
++      int usec_timeout;
++
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      u32 fb_offset;
++      u32 mmio_offset;
++      u32 ring_offset;
++      u32 ring_rptr_offset;
++      u32 buffers_offset;
++      u32 gart_textures_offset;
++} drm_radeon_init32_t;
++
++static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
++                               unsigned long arg)
++{
++      drm_radeon_init32_t init32;
++      drm_radeon_init_t __user *init;
++
++      if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
++              return -EFAULT;
++
++      init = compat_alloc_user_space(sizeof(*init));
++      if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
++          || __put_user(init32.func, &init->func)
++          || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
++          || __put_user(init32.is_pci, &init->is_pci)
++          || __put_user(init32.cp_mode, &init->cp_mode)
++          || __put_user(init32.gart_size, &init->gart_size)
++          || __put_user(init32.ring_size, &init->ring_size)
++          || __put_user(init32.usec_timeout, &init->usec_timeout)
++          || __put_user(init32.fb_bpp, &init->fb_bpp)
++          || __put_user(init32.front_offset, &init->front_offset)
++          || __put_user(init32.front_pitch, &init->front_pitch)
++          || __put_user(init32.back_offset, &init->back_offset)
++          || __put_user(init32.back_pitch, &init->back_pitch)
++          || __put_user(init32.depth_bpp, &init->depth_bpp)
++          || __put_user(init32.depth_offset, &init->depth_offset)
++          || __put_user(init32.depth_pitch, &init->depth_pitch)
++          || __put_user(init32.fb_offset, &init->fb_offset)
++          || __put_user(init32.mmio_offset, &init->mmio_offset)
++          || __put_user(init32.ring_offset, &init->ring_offset)
++          || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
++          || __put_user(init32.buffers_offset, &init->buffers_offset)
++          || __put_user(init32.gart_textures_offset,
++                        &init->gart_textures_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init);
++}
++
++typedef struct drm_radeon_clear32 {
++      unsigned int flags;
++      unsigned int clear_color;
++      unsigned int clear_depth;
++      unsigned int color_mask;
++      unsigned int depth_mask;   /* misnamed field:  should be stencil */
++      u32          depth_boxes;
++} drm_radeon_clear32_t;
++
++static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_radeon_clear32_t clr32;
++      drm_radeon_clear_t __user *clr;
++
++      if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
++              return -EFAULT;
++
++      clr = compat_alloc_user_space(sizeof(*clr));
++      if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
++          || __put_user(clr32.flags, &clr->flags)
++          || __put_user(clr32.clear_color, &clr->clear_color)
++          || __put_user(clr32.clear_depth, &clr->clear_depth)
++          || __put_user(clr32.color_mask, &clr->color_mask)
++          || __put_user(clr32.depth_mask, &clr->depth_mask)
++          || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
++                        &clr->depth_boxes))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr);
++}
++
++typedef struct drm_radeon_stipple32 {
++      u32 mask;
++} drm_radeon_stipple32_t;
++
++static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_radeon_stipple32_t __user *argp = (void __user *)arg;
++      drm_radeon_stipple_t __user *request;
++      u32 mask;
++
++      if (get_user(mask, &argp->mask))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user((unsigned int __user *)(unsigned long) mask,
++                        &request->mask))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request);
++}
++
++typedef struct drm_radeon_tex_image32 {
++      unsigned int x, y;              /* Blit coordinates */
++      unsigned int width, height;
++      u32 data;
++} drm_radeon_tex_image32_t;
++
++typedef struct drm_radeon_texture32 {
++      unsigned int offset;
++      int pitch;
++      int format;
++      int width;                      /* Texture image coordinates */
++      int height;
++      u32 image;
++} drm_radeon_texture32_t;
++
++static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_radeon_texture32_t req32;
++      drm_radeon_texture_t __user *request;
++      drm_radeon_tex_image32_t img32;
++      drm_radeon_tex_image_t __user *image;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++      if (req32.image == 0)
++              return -EINVAL;
++      if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
++                         sizeof(img32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
++      if (!access_ok(VERIFY_WRITE, request,
++                     sizeof(*request) + sizeof(*image)))
++              return -EFAULT;
++      image = (drm_radeon_tex_image_t __user *) (request + 1);
++
++      if (__put_user(req32.offset, &request->offset)
++          || __put_user(req32.pitch, &request->pitch)
++          || __put_user(req32.format, &request->format)
++          || __put_user(req32.width, &request->width)
++          || __put_user(req32.height, &request->height)
++          || __put_user(image, &request->image)
++          || __put_user(img32.x, &image->x)
++          || __put_user(img32.y, &image->y)
++          || __put_user(img32.width, &image->width)
++          || __put_user(img32.height, &image->height)
++          || __put_user((const void __user *)(unsigned long)img32.data,
++                        &image->data))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request);
++}
++
++typedef struct drm_radeon_vertex2_32 {
++      int idx;                        /* Index of vertex buffer */
++      int discard;                    /* Client finished with buffer? */
++      int nr_states;
++      u32 state;
++      int nr_prims;
++      u32 prim;
++} drm_radeon_vertex2_32_t;
++
++static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
++                                  unsigned long arg)
++{
++      drm_radeon_vertex2_32_t req32;
++      drm_radeon_vertex2_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.idx, &request->idx)
++          || __put_user(req32.discard, &request->discard)
++          || __put_user(req32.nr_states, &request->nr_states)
++          || __put_user((void __user *)(unsigned long)req32.state,
++                        &request->state)
++          || __put_user(req32.nr_prims, &request->nr_prims)
++          || __put_user((void __user *)(unsigned long)req32.prim,
++                        &request->prim))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request);
++}
++
++typedef struct drm_radeon_cmd_buffer32 {
++      int bufsz;
++      u32 buf;
++      int nbox;
++      u32 boxes;
++} drm_radeon_cmd_buffer32_t;
++
++static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
++                                 unsigned long arg)
++{
++      drm_radeon_cmd_buffer32_t req32;
++      drm_radeon_cmd_buffer_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.bufsz, &request->bufsz)
++          || __put_user((void __user *)(unsigned long)req32.buf,
++                        &request->buf)
++          || __put_user(req32.nbox, &request->nbox)
++          || __put_user((void __user *)(unsigned long)req32.boxes,
++                        &request->boxes))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request);
++}
++
++typedef struct drm_radeon_getparam32 {
++      int param;
++      u32 value;
++} drm_radeon_getparam32_t;
++
++static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
++                                   unsigned long arg)
++{
++      drm_radeon_getparam32_t req32;
++      drm_radeon_getparam_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.param, &request->param)
++          || __put_user((void __user *)(unsigned long)req32.value,
++                        &request->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request);
++}
++
++typedef struct drm_radeon_mem_alloc32 {
++      int region;
++      int alignment;
++      int size;
++      u32 region_offset;      /* offset from start of fb or GART */
++} drm_radeon_mem_alloc32_t;
++
++static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
++                                 unsigned long arg)
++{
++      drm_radeon_mem_alloc32_t req32;
++      drm_radeon_mem_alloc_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.region, &request->region)
++          || __put_user(req32.alignment, &request->alignment)
++          || __put_user(req32.size, &request->size)
++          || __put_user((int __user *)(unsigned long)req32.region_offset,
++                        &request->region_offset))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_ALLOC, (unsigned long) request);
++}
++
++typedef struct drm_radeon_irq_emit32 {
++      u32 irq_seq;
++} drm_radeon_irq_emit32_t;
++
++static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
++                                unsigned long arg)
++{
++      drm_radeon_irq_emit32_t req32;
++      drm_radeon_irq_emit_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user((int __user *)(unsigned long)req32.irq_seq,
++                        &request->irq_seq))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request);
++}
++
++/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
++#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
++typedef struct drm_radeon_setparam32 {
++      int param;
++      u64 value;
++} __attribute__((packed)) drm_radeon_setparam32_t;
++
++static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
++                                   unsigned long arg)
++{
++      drm_radeon_setparam32_t req32;
++      drm_radeon_setparam_t __user *request;
++
++      if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
++              return -EFAULT;
++
++      request = compat_alloc_user_space(sizeof(*request));
++      if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
++          || __put_user(req32.param, &request->param)
++          || __put_user((void __user *)(unsigned long)req32.value,
++                        &request->value))
++              return -EFAULT;
++
++      return drm_ioctl(file->f_dentry->d_inode, file,
++                       DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
++}
++#else
++#define compat_radeon_cp_setparam NULL
++#endif /* X86_64 || IA64 */
++
++drm_ioctl_compat_t *radeon_compat_ioctls[] = {
++      [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
++      [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
++      [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
++      [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
++      [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
++      [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
++      [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
++      [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
++      [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
++      [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++      unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
++              fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();          /* XXX for now */
++      if (fn != NULL)
++              ret = (*fn)(filp, cmd, arg);
++      else
++              ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_irq.c git-nokia/drivers/gpu/drm-tungsten/radeon_irq.c
+--- git/drivers/gpu/drm-tungsten/radeon_irq.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_irq.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,390 @@
++/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Michel Dänzer <michel@daenzer.net>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (state)
++              dev_priv->irq_enable_reg |= mask;
++      else
++              dev_priv->irq_enable_reg &= ~mask;
++
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
++}
++
++static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (state)
++              dev_priv->r500_disp_irq_reg |= mask;
++      else
++              dev_priv->r500_disp_irq_reg &= ~mask;
++
++      RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
++}
++
++int radeon_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              switch (crtc) {
++              case 0:
++                      r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
++                      break;
++              case 1:
++                      r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
++                      break;
++              default:
++                      DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                                crtc);
++                      return -EINVAL; /* negative errno per kernel convention */
++              }
++      } else {
++              switch (crtc) {
++              case 0:
++                      radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
++                      break;
++              case 1:
++                      radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
++                      break;
++              default:
++                      DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
++                                crtc);
++                      return -EINVAL; /* negative errno per kernel convention */
++              }
++      }
++
++      return 0;
++}
++
++void radeon_disable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              switch (crtc) {
++              case 0:
++                      r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
++                      break;
++              case 1:
++                      r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
++                      break;
++              default:
++                      DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                                crtc);
++                      break;
++              }
++      } else {
++              switch (crtc) {
++              case 0:
++                      radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
++                      break;
++              case 1:
++                      radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
++                      break;
++              default:
++                      DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
++                                crtc);
++                      break;
++              }
++      }
++}
++
++static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, u32 *r500_disp_int)
++{
++      u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
++      u32 irq_mask = RADEON_SW_INT_TEST;
++
++      *r500_disp_int = 0;
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              /* vbl interrupts in a different place */
++
++              if (irqs & R500_DISPLAY_INT_STATUS) {
++                      /* if a display interrupt */
++                      u32 disp_irq;
++
++                      disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
++
++                      *r500_disp_int = disp_irq;
++                      if (disp_irq & R500_D1_VBLANK_INTERRUPT) {
++                              RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
++                      }
++                      if (disp_irq & R500_D2_VBLANK_INTERRUPT) {
++                              RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
++                      }
++              }
++              irq_mask |= R500_DISPLAY_INT_STATUS;
++      } else
++              irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
++
++      irqs &= irq_mask;
++
++      if (irqs)
++              RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
++      
++      return irqs;
++}
++
++/* Interrupts - Used for device synchronization and flushing in the
++ * following circumstances:
++ *
++ * - Exclusive FB access with hw idle:
++ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
++ *
++ * - Frame throttling, NV_fence:
++ *    - Drop marker irq's into command stream ahead of time.
++ *    - Wait on irq's with lock *not held*
++ *    - Check each for termination condition
++ *
++ * - Internally in cp_getbuffer, etc:
++ *    - as above, but wait with lock held???
++ *
++ * NOTE: These functions are misleadingly named -- the irq's aren't
++ * tied to dma at all, this is just a hangover from dri prehistory.
++ */
++
++irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      u32 stat;
++      u32 r500_disp_int;
++
++      /* Only consider the bits we're interested in - others could be used
++       * outside the DRM
++       */
++      stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
++      if (!stat)
++              return IRQ_NONE;
++
++      stat &= dev_priv->irq_enable_reg;
++
++      /* SW interrupt */
++      if (stat & RADEON_SW_INT_TEST)
++              DRM_WAKEUP(&dev_priv->swi_queue);
++
++      /* VBLANK interrupt */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
++                      drm_handle_vblank(dev, 0);
++              if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
++                      drm_handle_vblank(dev, 1);
++      } else {
++              if (stat & RADEON_CRTC_VBLANK_STAT)
++                      drm_handle_vblank(dev, 0);
++              if (stat & RADEON_CRTC2_VBLANK_STAT)
++                      drm_handle_vblank(dev, 1);
++      }
++      return IRQ_HANDLED;
++}
++
++static int radeon_emit_irq(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      unsigned int ret;
++      RING_LOCALS;
++
++      atomic_inc(&dev_priv->swi_emitted);
++      ret = atomic_read(&dev_priv->swi_emitted);
++
++      BEGIN_RING(4);
++      OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
++      OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      return ret;
++}
++
++static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      int ret = 0;
++
++      if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
++              return 0;
++
++      dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++
++      DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
++                  RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
++
++      return ret;
++}
++
++u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (crtc < 0 || crtc > 1) {
++              DRM_ERROR("Invalid crtc %d\n", crtc);
++              return -EINVAL;
++      }
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
++              if (crtc == 0)
++                      return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
++              else
++                      return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
++      } else {
++              if (crtc == 0)
++                      return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
++              else
++                      return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
++      }
++}
++
++/* Needs the lock as it touches the ring.
++ */
++int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_irq_emit_t *emit = data;
++      int result;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      result = radeon_emit_irq(dev);
++
++      if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++/* Doesn't need the hardware lock.
++ */
++int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_irq_wait_t *irqwait = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      return radeon_wait_irq(dev, irqwait->irq_seq);
++}
++
++/* drm_dma.h hooks
++*/
++void radeon_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      u32 dummy;
++
++      /* Disable *all* interrupts */
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++
++      /* Clear bits if they're already high */
++      radeon_acknowledge_irqs(dev_priv, &dummy);
++}
++
++int radeon_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      int ret;
++
++      atomic_set(&dev_priv->swi_emitted, 0);
++      DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
++
++      ret = drm_vblank_init(dev, 2);
++      if (ret)
++              return ret;
++
++      dev->max_vblank_count = 0x001fffff;
++
++      radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
++
++      return 0;
++}
++
++void radeon_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv =
++          (drm_radeon_private_t *) dev->dev_private;
++      if (!dev_priv)
++              return;
++
++      dev_priv->irq_enabled = 0;
++
++      if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
++              RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
++      /* Disable *all* interrupts */
++      RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
++}
++
++
++int radeon_vblank_crtc_get(struct drm_device *dev)
++{
++      drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
++
++      return dev_priv->vblank_crtc;
++}
++
++int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
++{
++      drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
++      if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
++              DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
++              return -EINVAL;
++      }
++      dev_priv->vblank_crtc = (unsigned int)value;
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_mem.c git-nokia/drivers/gpu/drm-tungsten/radeon_mem.c
+--- git/drivers/gpu/drm-tungsten/radeon_mem.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_mem.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,302 @@
++/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++/* Very simple allocator for GART memory, working on a static range
++ * already mapped into each client's address space.
++ */
++
++static struct mem_block *split_block(struct mem_block *p, int start, int size,
++                                   struct drm_file *file_priv)
++{
++      /* Maybe cut off the start of an existing block */
++      if (start > p->start) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start;
++              newblock->size = p->size - (start - p->start);
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size -= newblock->size;
++              p = newblock;
++      }
++
++      /* Maybe cut off the end of an existing block */
++      if (size < p->size) {
++              struct mem_block *newblock =
++                  drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
++              if (!newblock)
++                      goto out;
++              newblock->start = start + size;
++              newblock->size = p->size - size;
++              newblock->file_priv = NULL;
++              newblock->next = p->next;
++              newblock->prev = p;
++              p->next->prev = newblock;
++              p->next = newblock;
++              p->size = size;
++      }
++
++      out:
++      /* Our block is in the middle */
++      p->file_priv = file_priv;
++      return p;
++}
++
++static struct mem_block *alloc_block(struct mem_block *heap, int size,
++                                   int align2, struct drm_file *file_priv)
++{
++      struct mem_block *p;
++      int mask = (1 << align2) - 1;
++
++      list_for_each(p, heap) {
++              int start = (p->start + mask) & ~mask;
++              if (p->file_priv == NULL && start + size <= p->start + p->size)
++                      return split_block(p, start, size, file_priv);
++      }
++
++      return NULL;
++}
++
++static struct mem_block *find_block(struct mem_block *heap, int start)
++{
++      struct mem_block *p;
++
++      list_for_each(p, heap)
++              if (p->start == start)
++                      return p;
++
++      return NULL;
++}
++
++static void free_block(struct mem_block *p)
++{
++      p->file_priv = NULL;
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      if (p->next->file_priv == NULL) {
++              struct mem_block *q = p->next;
++              p->size += q->size;
++              p->next = q->next;
++              p->next->prev = p;
++              drm_free(q, sizeof(*q), DRM_MEM_BUFS);
++      }
++
++      if (p->prev->file_priv == NULL) {
++              struct mem_block *q = p->prev;
++              q->size += p->size;
++              q->next = p->next;
++              q->next->prev = q;
++              drm_free(p, sizeof(*q), DRM_MEM_BUFS);
++      }
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++static int init_heap(struct mem_block **heap, int start, int size)
++{
++      struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
++
++      if (!blocks)
++              return -ENOMEM;
++
++      *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
++      if (!*heap) {
++              drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
++              return -ENOMEM;
++      }
++
++      blocks->start = start;
++      blocks->size = size;
++      blocks->file_priv = NULL;
++      blocks->next = blocks->prev = *heap;
++
++      memset(*heap, 0, sizeof(**heap));
++      (*heap)->file_priv = (struct drm_file *) - 1;
++      (*heap)->next = (*heap)->prev = blocks;
++      return 0;
++}
++
++/* Free all blocks associated with the releasing file.
++ */
++void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
++{
++      struct mem_block *p;
++
++      if (!heap || !heap->next)
++              return;
++
++      list_for_each(p, heap) {
++              if (p->file_priv == file_priv)
++                      p->file_priv = NULL;
++      }
++
++      /* Assumes a single contiguous range.  Needs a special file_priv in
++       * 'heap' to stop it being subsumed.
++       */
++      list_for_each(p, heap) {
++              while (p->file_priv == NULL && p->next->file_priv == NULL) {
++                      struct mem_block *q = p->next;
++                      p->size += q->size;
++                      p->next = q->next;
++                      p->next->prev = p;
++                      drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++              }
++      }
++}
++
++/* Shutdown.
++ */
++void radeon_mem_takedown(struct mem_block **heap)
++{
++      struct mem_block *p;
++
++      if (!*heap)
++              return;
++
++      for (p = (*heap)->next; p != *heap;) {
++              struct mem_block *q = p;
++              p = p->next;
++              drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
++      }
++
++      drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
++      *heap = NULL;
++}
++
++/* IOCTL HANDLERS */
++
++static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
++{
++      switch (region) {
++      case RADEON_MEM_REGION_GART:
++              return &dev_priv->gart_heap;
++      case RADEON_MEM_REGION_FB:
++              return &dev_priv->fb_heap;
++      default:
++              return NULL;
++      }
++}
++
++int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_mem_alloc_t *alloc = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, alloc->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      /* Make things easier on ourselves: all allocations at least
++       * 4k aligned.
++       */
++      if (alloc->alignment < 12)
++              alloc->alignment = 12;
++
++      block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
++
++      if (!block)
++              return -ENOMEM;
++
++      if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
++                           sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_mem_free_t *memfree = data;
++      struct mem_block *block, **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, memfree->region);
++      if (!heap || !*heap)
++              return -EFAULT;
++
++      block = find_block(*heap, memfree->region_offset);
++      if (!block)
++              return -EFAULT;
++
++      if (block->file_priv != file_priv)
++              return -EPERM;
++
++      free_block(block);
++      return 0;
++}
++
++int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_mem_init_heap_t *initheap = data;
++      struct mem_block **heap;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      heap = get_heap(dev_priv, initheap->region);
++      if (!heap)
++              return -EFAULT;
++
++      if (*heap) {
++              DRM_ERROR("heap already initialized?");
++              return -EFAULT;
++      }
++
++      return init_heap(heap, initheap->start, initheap->size);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_microcode.h git-nokia/drivers/gpu/drm-tungsten/radeon_microcode.h
+--- git/drivers/gpu/drm-tungsten/radeon_microcode.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_microcode.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1844 @@
++/*
++ * Copyright 2007 Advanced Micro Devices, Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef RADEON_MICROCODE_H
++#define RADEON_MICROCODE_H
++
++/* production radeon ucode r1xx-r6xx */
++static const u32 R100_cp_microcode[][2]={
++    { 0x21007000, 0000000000 },
++    { 0x20007000, 0000000000 },
++    { 0x000000b4, 0x00000004 },
++    { 0x000000b8, 0x00000004 },
++    { 0x6f5b4d4c, 0000000000 },
++    { 0x4c4c427f, 0000000000 },
++    { 0x5b568a92, 0000000000 },
++    { 0x4ca09c6d, 0000000000 },
++    { 0xad4c4c4c, 0000000000 },
++    { 0x4ce1af3d, 0000000000 },
++    { 0xd8afafaf, 0000000000 },
++    { 0xd64c4cdc, 0000000000 },
++    { 0x4cd10d10, 0000000000 },
++    { 0x000f0000, 0x00000016 },
++    { 0x362f242d, 0000000000 },
++    { 0x00000012, 0x00000004 },
++    { 0x000f0000, 0x00000016 },
++    { 0x362f282d, 0000000000 },
++    { 0x000380e7, 0x00000002 },
++    { 0x04002c97, 0x00000002 },
++    { 0x000f0001, 0x00000016 },
++    { 0x333a3730, 0000000000 },
++    { 0x000077ef, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000021, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000021, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000021, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00000017, 0x00000004 },
++    { 0x0003802b, 0x00000002 },
++    { 0x040067e0, 0x00000002 },
++    { 0x00000017, 0x00000004 },
++    { 0x000077e0, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x000037e1, 0x00000002 },
++    { 0x040067e1, 0x00000006 },
++    { 0x000077e0, 0x00000002 },
++    { 0x000077e1, 0x00000002 },
++    { 0x000077e1, 0x00000006 },
++    { 0xffffffff, 0000000000 },
++    { 0x10000000, 0000000000 },
++    { 0x0003802b, 0x00000002 },
++    { 0x040067e0, 0x00000006 },
++    { 0x00007675, 0x00000002 },
++    { 0x00007676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0000002f, 0x00000018 },
++    { 0x0000002f, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x00000030, 0x00000018 },
++    { 0x00000030, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x01605000, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00098000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x64c0603e, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00080000, 0x00000016 },
++    { 0000000000, 0000000000 },
++    { 0x0400251d, 0x00000002 },
++    { 0x00007580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x04002580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x00000049, 0x00000004 },
++    { 0x00005000, 0000000000 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x00019000, 0x00000002 },
++    { 0x00011055, 0x00000014 },
++    { 0x00000055, 0x00000012 },
++    { 0x0400250f, 0x00000002 },
++    { 0x0000504f, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00007565, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x00000058, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x01e655b4, 0x00000002 },
++    { 0x4401b0e4, 0x00000002 },
++    { 0x01c110e4, 0x00000002 },
++    { 0x26667066, 0x00000018 },
++    { 0x040c2565, 0x00000002 },
++    { 0x00000066, 0x00000018 },
++    { 0x04002564, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x0000005d, 0x00000004 },
++    { 0x00401069, 0x00000008 },
++    { 0x00101000, 0x00000002 },
++    { 0x000d80ff, 0x00000002 },
++    { 0x0080006c, 0x00000008 },
++    { 0x000f9000, 0x00000002 },
++    { 0x000e00ff, 0x00000002 },
++    { 0000000000, 0x00000006 },
++    { 0x0000008f, 0x00000018 },
++    { 0x0000005b, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00007576, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00009000, 0x00000002 },
++    { 0x00041000, 0x00000002 },
++    { 0x0c00350e, 0x00000002 },
++    { 0x00049000, 0x00000002 },
++    { 0x00051000, 0x00000002 },
++    { 0x01e785f8, 0x00000002 },
++    { 0x00200000, 0x00000002 },
++    { 0x0060007e, 0x0000000c },
++    { 0x00007563, 0x00000002 },
++    { 0x006075f0, 0x00000021 },
++    { 0x20007073, 0x00000004 },
++    { 0x00005073, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00007576, 0x00000002 },
++    { 0x00007577, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x0000750f, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00600083, 0x0000000c },
++    { 0x006075f0, 0x00000021 },
++    { 0x000075f8, 0x00000002 },
++    { 0x00000083, 0x00000004 },
++    { 0x000a750e, 0x00000002 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x0020750f, 0x00000002 },
++    { 0x00600086, 0x00000004 },
++    { 0x00007570, 0x00000002 },
++    { 0x00007571, 0x00000002 },
++    { 0x00007572, 0x00000006 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00005000, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00007568, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000095, 0x0000000c },
++    { 0x00058000, 0x00000002 },
++    { 0x0c607562, 0x00000002 },
++    { 0x00000097, 0x00000004 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x00600096, 0x00000004 },
++    { 0x400070e5, 0000000000 },
++    { 0x000380e6, 0x00000002 },
++    { 0x040025c5, 0x00000002 },
++    { 0x000380e5, 0x00000002 },
++    { 0x000000a8, 0x0000001c },
++    { 0x000650aa, 0x00000018 },
++    { 0x040025bb, 0x00000002 },
++    { 0x000610ab, 0x00000018 },
++    { 0x040075bc, 0000000000 },
++    { 0x000075bb, 0x00000002 },
++    { 0x000075bc, 0000000000 },
++    { 0x00090000, 0x00000006 },
++    { 0x00090000, 0x00000002 },
++    { 0x000d8002, 0x00000006 },
++    { 0x00007832, 0x00000002 },
++    { 0x00005000, 0x00000002 },
++    { 0x000380e7, 0x00000002 },
++    { 0x04002c97, 0x00000002 },
++    { 0x00007820, 0x00000002 },
++    { 0x00007821, 0x00000002 },
++    { 0x00007800, 0000000000 },
++    { 0x01200000, 0x00000002 },
++    { 0x20077000, 0x00000002 },
++    { 0x01200000, 0x00000002 },
++    { 0x20007000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x0120751b, 0x00000002 },
++    { 0x8040750a, 0x00000002 },
++    { 0x8040750b, 0x00000002 },
++    { 0x00110000, 0x00000002 },
++    { 0x000380e5, 0x00000002 },
++    { 0x000000c6, 0x0000001c },
++    { 0x000610ab, 0x00000018 },
++    { 0x844075bd, 0x00000002 },
++    { 0x000610aa, 0x00000018 },
++    { 0x840075bb, 0x00000002 },
++    { 0x000610ab, 0x00000018 },
++    { 0x844075bc, 0x00000002 },
++    { 0x000000c9, 0x00000004 },
++    { 0x804075bd, 0x00000002 },
++    { 0x800075bb, 0x00000002 },
++    { 0x804075bc, 0x00000002 },
++    { 0x00108000, 0x00000002 },
++    { 0x01400000, 0x00000002 },
++    { 0x006000cd, 0x0000000c },
++    { 0x20c07000, 0x00000020 },
++    { 0x000000cf, 0x00000012 },
++    { 0x00800000, 0x00000006 },
++    { 0x0080751d, 0x00000006 },
++    { 0000000000, 0000000000 },
++    { 0x0000775c, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00661000, 0x00000002 },
++    { 0x0460275d, 0x00000020 },
++    { 0x00004000, 0000000000 },
++    { 0x01e00830, 0x00000002 },
++    { 0x21007000, 0000000000 },
++    { 0x6464614d, 0000000000 },
++    { 0x69687420, 0000000000 },
++    { 0x00000073, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x00005000, 0x00000002 },
++    { 0x000380d0, 0x00000002 },
++    { 0x040025e0, 0x00000002 },
++    { 0x000075e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000380e0, 0x00000002 },
++    { 0x04002394, 0x00000002 },
++    { 0x00005000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x00000008, 0000000000 },
++    { 0x00000004, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R200_cp_microcode[][2]={
++    { 0x21007000, 0000000000 },
++    { 0x20007000, 0000000000 },
++    { 0x000000bf, 0x00000004 },
++    { 0x000000c3, 0x00000004 },
++    { 0x7a685e5d, 0000000000 },
++    { 0x5d5d5588, 0000000000 },
++    { 0x68659197, 0000000000 },
++    { 0x5da19f78, 0000000000 },
++    { 0x5d5d5d5d, 0000000000 },
++    { 0x5dee5d50, 0000000000 },
++    { 0xf2acacac, 0000000000 },
++    { 0xe75df9e9, 0000000000 },
++    { 0xb1dd0e11, 0000000000 },
++    { 0xe2afafaf, 0000000000 },
++    { 0x000f0000, 0x00000016 },
++    { 0x452f232d, 0000000000 },
++    { 0x00000013, 0x00000004 },
++    { 0x000f0000, 0x00000016 },
++    { 0x452f272d, 0000000000 },
++    { 0x000f0001, 0x00000016 },
++    { 0x3e4d4a37, 0000000000 },
++    { 0x000077ef, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000020, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000020, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00061000, 0x00000002 },
++    { 0x00000020, 0x0000001a },
++    { 0x00004000, 0x0000001e },
++    { 0x00000016, 0x00000004 },
++    { 0x0003802a, 0x00000002 },
++    { 0x040067e0, 0x00000002 },
++    { 0x00000016, 0x00000004 },
++    { 0x000077e0, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x000037e1, 0x00000002 },
++    { 0x040067e1, 0x00000006 },
++    { 0x000077e0, 0x00000002 },
++    { 0x000077e1, 0x00000002 },
++    { 0x000077e1, 0x00000006 },
++    { 0xffffffff, 0000000000 },
++    { 0x10000000, 0000000000 },
++    { 0x07f007f0, 0000000000 },
++    { 0x0003802a, 0x00000002 },
++    { 0x040067e0, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007675, 0x00000002 },
++    { 0x00007676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802b, 0x00000002 },
++    { 0x04002676, 0x00000002 },
++    { 0x00007677, 0x00000002 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0003802c, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002741, 0x00000002 },
++    { 0x04002743, 0x00000002 },
++    { 0x00007678, 0x00000006 },
++    { 0x0000002f, 0x00000018 },
++    { 0x0000002f, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x00000037, 0x00000018 },
++    { 0x00000037, 0x00000018 },
++    { 0000000000, 0x00000006 },
++    { 0x01605000, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00098000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x64c06051, 0x00000004 },
++    { 0x00080000, 0x00000016 },
++    { 0000000000, 0000000000 },
++    { 0x0400251d, 0x00000002 },
++    { 0x00007580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x04002580, 0x00000002 },
++    { 0x00067581, 0x00000002 },
++    { 0x0000005a, 0x00000004 },
++    { 0x00005000, 0000000000 },
++    { 0x00061000, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x00019000, 0x00000002 },
++    { 0x00011064, 0x00000014 },
++    { 0x00000064, 0x00000012 },
++    { 0x0400250f, 0x00000002 },
++    { 0x0000505e, 0x00000004 },
++    { 0x00007565, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x00000065, 0x00000004 },
++    { 0x01e655b4, 0x00000002 },
++    { 0x4401b0f0, 0x00000002 },
++    { 0x01c110f0, 0x00000002 },
++    { 0x26667071, 0x00000018 },
++    { 0x040c2565, 0x00000002 },
++    { 0x00000071, 0x00000018 },
++    { 0x04002564, 0x00000002 },
++    { 0x00007566, 0x00000002 },
++    { 0x00000068, 0x00000004 },
++    { 0x00401074, 0x00000008 },
++    { 0x00101000, 0x00000002 },
++    { 0x000d80ff, 0x00000002 },
++    { 0x00800077, 0x00000008 },
++    { 0x000f9000, 0x00000002 },
++    { 0x000e00ff, 0x00000002 },
++    { 0000000000, 0x00000006 },
++    { 0x00000094, 0x00000018 },
++    { 0x00000068, 0x00000004 },
++    { 0x00007576, 0x00000002 },
++    { 0x00065000, 0x00000002 },
++    { 0x00009000, 0x00000002 },
++    { 0x00041000, 0x00000002 },
++    { 0x0c00350e, 0x00000002 },
++    { 0x00049000, 0x00000002 },
++    { 0x00051000, 0x00000002 },
++    { 0x01e785f8, 0x00000002 },
++    { 0x00200000, 0x00000002 },
++    { 0x00600087, 0x0000000c },
++    { 0x00007563, 0x00000002 },
++    { 0x006075f0, 0x00000021 },
++    { 0x2000707c, 0x00000004 },
++    { 0x0000507c, 0x00000004 },
++    { 0x00007576, 0x00000002 },
++    { 0x00007577, 0x00000002 },
++    { 0x0000750e, 0x00000002 },
++    { 0x0000750f, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x0060008a, 0x0000000c },
++    { 0x006075f0, 0x00000021 },
++    { 0x000075f8, 0x00000002 },
++    { 0x0000008a, 0x00000004 },
++    { 0x000a750e, 0x00000002 },
++    { 0x0020750f, 0x00000002 },
++    { 0x0060008d, 0x00000004 },
++    { 0x00007570, 0x00000002 },
++    { 0x00007571, 0x00000002 },
++    { 0x00007572, 0x00000006 },
++    { 0x00005000, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00007568, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x00000098, 0x0000000c },
++    { 0x00058000, 0x00000002 },
++    { 0x0c607562, 0x00000002 },
++    { 0x0000009a, 0x00000004 },
++    { 0x00600099, 0x00000004 },
++    { 0x400070f1, 0000000000 },
++    { 0x000380f1, 0x00000002 },
++    { 0x000000a7, 0x0000001c },
++    { 0x000650a9, 0x00000018 },
++    { 0x040025bb, 0x00000002 },
++    { 0x000610aa, 0x00000018 },
++    { 0x040075bc, 0000000000 },
++    { 0x000075bb, 0x00000002 },
++    { 0x000075bc, 0000000000 },
++    { 0x00090000, 0x00000006 },
++    { 0x00090000, 0x00000002 },
++    { 0x000d8002, 0x00000006 },
++    { 0x00005000, 0x00000002 },
++    { 0x00007821, 0x00000002 },
++    { 0x00007800, 0000000000 },
++    { 0x00007821, 0x00000002 },
++    { 0x00007800, 0000000000 },
++    { 0x01665000, 0x00000002 },
++    { 0x000a0000, 0x00000002 },
++    { 0x000671cc, 0x00000002 },
++    { 0x0286f1cd, 0x00000002 },
++    { 0x000000b7, 0x00000010 },
++    { 0x21007000, 0000000000 },
++    { 0x000000be, 0x0000001c },
++    { 0x00065000, 0x00000002 },
++    { 0x000a0000, 0x00000002 },
++    { 0x00061000, 0x00000002 },
++    { 0x000b0000, 0x00000002 },
++    { 0x38067000, 0x00000002 },
++    { 0x000a00ba, 0x00000004 },
++    { 0x20007000, 0000000000 },
++    { 0x01200000, 0x00000002 },
++    { 0x20077000, 0x00000002 },
++    { 0x01200000, 0x00000002 },
++    { 0x20007000, 0000000000 },
++    { 0x00061000, 0x00000002 },
++    { 0x0120751b, 0x00000002 },
++    { 0x8040750a, 0x00000002 },
++    { 0x8040750b, 0x00000002 },
++    { 0x00110000, 0x00000002 },
++    { 0x000380f1, 0x00000002 },
++    { 0x000000d1, 0x0000001c },
++    { 0x000610aa, 0x00000018 },
++    { 0x844075bd, 0x00000002 },
++    { 0x000610a9, 0x00000018 },
++    { 0x840075bb, 0x00000002 },
++    { 0x000610aa, 0x00000018 },
++    { 0x844075bc, 0x00000002 },
++    { 0x000000d4, 0x00000004 },
++    { 0x804075bd, 0x00000002 },
++    { 0x800075bb, 0x00000002 },
++    { 0x804075bc, 0x00000002 },
++    { 0x00108000, 0x00000002 },
++    { 0x01400000, 0x00000002 },
++    { 0x006000d8, 0x0000000c },
++    { 0x20c07000, 0x00000020 },
++    { 0x000000da, 0x00000012 },
++    { 0x00800000, 0x00000006 },
++    { 0x0080751d, 0x00000006 },
++    { 0x000025bb, 0x00000002 },
++    { 0x000040d4, 0x00000004 },
++    { 0x0000775c, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00661000, 0x00000002 },
++    { 0x0460275d, 0x00000020 },
++    { 0x00004000, 0000000000 },
++    { 0x00007999, 0x00000002 },
++    { 0x00a05000, 0x00000002 },
++    { 0x00661000, 0x00000002 },
++    { 0x0460299b, 0x00000020 },
++    { 0x00004000, 0000000000 },
++    { 0x01e00830, 0x00000002 },
++    { 0x21007000, 0000000000 },
++    { 0x00005000, 0x00000002 },
++    { 0x00038056, 0x00000002 },
++    { 0x040025e0, 0x00000002 },
++    { 0x000075e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000380ed, 0x00000002 },
++    { 0x04007394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000078c4, 0x00000002 },
++    { 0x000078c5, 0x00000002 },
++    { 0x000078c6, 0x00000002 },
++    { 0x00007924, 0x00000002 },
++    { 0x00007925, 0x00000002 },
++    { 0x00007926, 0x00000002 },
++    { 0x000000f2, 0x00000004 },
++    { 0x00007924, 0x00000002 },
++    { 0x00007925, 0x00000002 },
++    { 0x00007926, 0x00000002 },
++    { 0x000000f9, 0x00000004 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R300_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x000000ae, 0x00000008 },
++    { 0x000000b2, 0x00000008 },
++    { 0x67554b4a, 0000000000 },
++    { 0x4a4a4475, 0000000000 },
++    { 0x55527d83, 0000000000 },
++    { 0x4a8c8b65, 0000000000 },
++    { 0x4aef4af6, 0000000000 },
++    { 0x4ae14a4a, 0000000000 },
++    { 0xe4979797, 0000000000 },
++    { 0xdb4aebdd, 0000000000 },
++    { 0x9ccc4a4a, 0000000000 },
++    { 0xd1989898, 0000000000 },
++    { 0x4a0f9ad6, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000080, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00012000, 0x00000004 },
++    { 0x00082000, 0x00000004 },
++    { 0x1800650e, 0x00000004 },
++    { 0x00092000, 0x00000004 },
++    { 0x000a2000, 0x00000004 },
++    { 0x000f0000, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x00000074, 0x00000018 },
++    { 0x0000e563, 0x00000004 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0000a069, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000077, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000077, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0007a, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000084, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000086, 0x00000008 },
++    { 0x00c00085, 0x00000008 },
++    { 0x000700e3, 0x00000004 },
++    { 0x00000092, 0x00000038 },
++    { 0x000ca094, 0x00000030 },
++    { 0x080045bb, 0x00000004 },
++    { 0x000c2095, 0x00000030 },
++    { 0x0800e5bc, 0000000000 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x00120000, 0x0000000c },
++    { 0x00120000, 0x00000004 },
++    { 0x001b0002, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x000000a4, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x000000a1, 0x00000008 },
++    { 0x000000a6, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x000000ad, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x001400a9, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700e3, 0x00000004 },
++    { 0x000000c0, 0x00000038 },
++    { 0x000c2095, 0x00000030 },
++    { 0x0880e5bd, 0x00000005 },
++    { 0x000c2094, 0x00000030 },
++    { 0x0800e5bb, 0x00000005 },
++    { 0x000c2095, 0x00000030 },
++    { 0x0880e5bc, 0x00000005 },
++    { 0x000000c3, 0x00000008 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000c7, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000c9, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080c3, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700e0, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000e4, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000eb, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000f3, 0x00000034 },
++    { 0x000000f0, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R420_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x00000099, 0x00000008 },
++    { 0x0000009d, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0xd9d3dff6, 0000000000 },
++    { 0x4ac54a4a, 0000000000 },
++    { 0xc8828282, 0000000000 },
++    { 0xbf4acfc1, 0000000000 },
++    { 0x87b04a4a, 0000000000 },
++    { 0xb5838383, 0000000000 },
++    { 0x4a0f85ba, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700c7, 0x00000004 },
++    { 0x00000080, 0x00000038 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x0000008f, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x0000008c, 0x00000008 },
++    { 0x00000091, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x00000098, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x00140094, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700c7, 0x00000004 },
++    { 0x000000a4, 0x00000038 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000ab, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000ad, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080a7, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700c4, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000c8, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000cf, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000d7, 0x00000034 },
++    { 0x000000d4, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0x0000e1cc, 0x00000004 },
++    { 0x0500e1cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000de, 0x00000034 },
++    { 0x000000da, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x0019e1cc, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x0500a000, 0x00000004 },
++    { 0x080041cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 RS600_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x000000a0, 0x00000008 },
++    { 0x000000a4, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0x4ae74af6, 0000000000 },
++    { 0x4ad34a4a, 0000000000 },
++    { 0xd6898989, 0000000000 },
++    { 0xcd4addcf, 0000000000 },
++    { 0x8ebe4ae2, 0000000000 },
++    { 0xc38a8a8a, 0000000000 },
++    { 0x4a0f8cc8, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700d5, 0x00000004 },
++    { 0x00000084, 0x00000038 },
++    { 0x000ca086, 0x00000030 },
++    { 0x080045bb, 0x00000004 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0800e5bc, 0000000000 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x00120000, 0x0000000c },
++    { 0x00120000, 0x00000004 },
++    { 0x001b0002, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x00000096, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x00000093, 0x00000008 },
++    { 0x00000098, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000009f, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x0014009b, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700d5, 0x00000004 },
++    { 0x000000b2, 0x00000038 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bd, 0x00000005 },
++    { 0x000c2086, 0x00000030 },
++    { 0x0800e5bb, 0x00000005 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bc, 0x00000005 },
++    { 0x000000b5, 0x00000008 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000b9, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000bb, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080b5, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700d2, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000d6, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000dd, 0x00000008 },
++    { 0x00e00116, 0000000000 },
++    { 0x000700e1, 0x00000004 },
++    { 0x0800401c, 0x00000004 },
++    { 0x200050e7, 0x00000004 },
++    { 0x0000e01d, 0x00000004 },
++    { 0x000000e4, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000eb, 0x00000034 },
++    { 0x000000e8, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 RS690_cp_microcode[][2]={
++    { 0x000000dd, 0x00000008 },
++    { 0x000000df, 0x00000008 },
++    { 0x000000a0, 0x00000008 },
++    { 0x000000a4, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0x4ad74af6, 0000000000 },
++    { 0x4ac94a4a, 0000000000 },
++    { 0xcc898989, 0000000000 },
++    { 0xc34ad3c5, 0000000000 },
++    { 0x8e4a4a4a, 0000000000 },
++    { 0x4a8a8a8a, 0000000000 },
++    { 0x4a0f8c4a, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000f041, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000f184, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000f185, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000f186, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000f187, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700cb, 0x00000004 },
++    { 0x00000084, 0x00000038 },
++    { 0x000ca086, 0x00000030 },
++    { 0x080045bb, 0x00000004 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0800e5bc, 0000000000 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x00120000, 0x0000000c },
++    { 0x00120000, 0x00000004 },
++    { 0x001b0002, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x00000096, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x00000093, 0x00000008 },
++    { 0x00000098, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000009f, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x0014009b, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x00100000, 0x0000002c },
++    { 0x00004000, 0000000000 },
++    { 0x080045c8, 0x00000004 },
++    { 0x00240005, 0x00000004 },
++    { 0x08004d0b, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700cb, 0x00000004 },
++    { 0x000000b7, 0x00000038 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bd, 0x00000005 },
++    { 0x000c2086, 0x00000030 },
++    { 0x0800e5bb, 0x00000005 },
++    { 0x000c2087, 0x00000030 },
++    { 0x0880e5bc, 0x00000005 },
++    { 0x000000ba, 0x00000008 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000be, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000c0, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080ba, 0x00000008 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700c8, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000cc, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000d3, 0x00000008 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000db, 0x00000034 },
++    { 0x000000d8, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0x000000e1, 0x00000030 },
++    { 0x4200e000, 0000000000 },
++    { 0x000000e1, 0x00000030 },
++    { 0x4000e000, 0000000000 },
++    { 0x0025001b, 0x00000004 },
++    { 0x00230000, 0x00000004 },
++    { 0x00250005, 0x00000004 },
++    { 0x000000e6, 0x00000034 },
++    { 0000000000, 0x0000000c },
++    { 0x00244000, 0x00000004 },
++    { 0x080045c8, 0x00000004 },
++    { 0x00240005, 0x00000004 },
++    { 0x08004d0b, 0x0000000c },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++static const u32 R520_cp_microcode[][2]={
++    { 0x4200e000, 0000000000 },
++    { 0x4000e000, 0000000000 },
++    { 0x00000099, 0x00000008 },
++    { 0x0000009d, 0x00000008 },
++    { 0x4a554b4a, 0000000000 },
++    { 0x4a4a4467, 0000000000 },
++    { 0x55526f75, 0000000000 },
++    { 0x4a7e7d65, 0000000000 },
++    { 0xe0dae6f6, 0000000000 },
++    { 0x4ac54a4a, 0000000000 },
++    { 0xc8828282, 0000000000 },
++    { 0xbf4acfc1, 0000000000 },
++    { 0x87b04ad5, 0000000000 },
++    { 0xb5838383, 0000000000 },
++    { 0x4a0f85ba, 0000000000 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000d0012, 0x00000038 },
++    { 0x0000e8b4, 0x00000004 },
++    { 0x000d0014, 0x00000038 },
++    { 0x0000e8b6, 0x00000004 },
++    { 0x000d0016, 0x00000038 },
++    { 0x0000e854, 0x00000004 },
++    { 0x000d0018, 0x00000038 },
++    { 0x0000e855, 0x00000004 },
++    { 0x000d001a, 0x00000038 },
++    { 0x0000e856, 0x00000004 },
++    { 0x000d001c, 0x00000038 },
++    { 0x0000e857, 0x00000004 },
++    { 0x000d001e, 0x00000038 },
++    { 0x0000e824, 0x00000004 },
++    { 0x000d0020, 0x00000038 },
++    { 0x0000e825, 0x00000004 },
++    { 0x000d0022, 0x00000038 },
++    { 0x0000e830, 0x00000004 },
++    { 0x000d0024, 0x00000038 },
++    { 0x0000f0c0, 0x00000004 },
++    { 0x000d0026, 0x00000038 },
++    { 0x0000f0c1, 0x00000004 },
++    { 0x000d0028, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d002a, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d002c, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d002e, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d0030, 0x00000038 },
++    { 0x0000e000, 0x00000004 },
++    { 0x000d0032, 0x00000038 },
++    { 0x0000f180, 0x00000004 },
++    { 0x000d0034, 0x00000038 },
++    { 0x0000f393, 0x00000004 },
++    { 0x000d0036, 0x00000038 },
++    { 0x0000f38a, 0x00000004 },
++    { 0x000d0038, 0x00000038 },
++    { 0x0000f38e, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000043, 0x00000018 },
++    { 0x00cce800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x08004800, 0x00000004 },
++    { 0x0000003a, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x2000451d, 0x00000004 },
++    { 0x0000e580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x08004580, 0x00000004 },
++    { 0x000ce581, 0x00000004 },
++    { 0x00000047, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x00032000, 0x00000004 },
++    { 0x00022051, 0x00000028 },
++    { 0x00000051, 0x00000024 },
++    { 0x0800450f, 0x00000004 },
++    { 0x0000a04b, 0x00000008 },
++    { 0x0000e565, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000052, 0x00000008 },
++    { 0x03cca5b4, 0x00000004 },
++    { 0x05432000, 0x00000004 },
++    { 0x00022000, 0x00000004 },
++    { 0x4ccce05e, 0x00000030 },
++    { 0x08274565, 0x00000004 },
++    { 0x0000005e, 0x00000030 },
++    { 0x08004564, 0x00000004 },
++    { 0x0000e566, 0x00000004 },
++    { 0x00000055, 0x00000008 },
++    { 0x00802061, 0x00000010 },
++    { 0x00202000, 0x00000004 },
++    { 0x001b00ff, 0x00000004 },
++    { 0x01000064, 0x00000010 },
++    { 0x001f2000, 0x00000004 },
++    { 0x001c00ff, 0x00000004 },
++    { 0000000000, 0x0000000c },
++    { 0x00000072, 0x00000030 },
++    { 0x00000055, 0x00000008 },
++    { 0x0000e576, 0x00000004 },
++    { 0x0000e577, 0x00000004 },
++    { 0x0000e50e, 0x00000004 },
++    { 0x0000e50f, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00000069, 0x00000018 },
++    { 0x00c0e5f9, 0x000000c2 },
++    { 0x00000069, 0x00000008 },
++    { 0x0014e50e, 0x00000004 },
++    { 0x0040e50f, 0x00000004 },
++    { 0x00c0006c, 0x00000008 },
++    { 0x0000e570, 0x00000004 },
++    { 0x0000e571, 0x00000004 },
++    { 0x0000e572, 0x0000000c },
++    { 0x0000a000, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x0000e568, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00000076, 0x00000018 },
++    { 0x000b0000, 0x00000004 },
++    { 0x18c0e562, 0x00000004 },
++    { 0x00000078, 0x00000008 },
++    { 0x00c00077, 0x00000008 },
++    { 0x000700c7, 0x00000004 },
++    { 0x00000080, 0x00000038 },
++    { 0x0000e5bb, 0x00000004 },
++    { 0x0000e5bc, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e800, 0000000000 },
++    { 0x0000e821, 0x00000004 },
++    { 0x0000e82e, 0000000000 },
++    { 0x02cca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000ce1cc, 0x00000004 },
++    { 0x050de1cd, 0x00000004 },
++    { 0x00400000, 0x00000004 },
++    { 0x0000008f, 0x00000018 },
++    { 0x00c0a000, 0x00000004 },
++    { 0x0000008c, 0x00000008 },
++    { 0x00000091, 0x00000020 },
++    { 0x4200e000, 0000000000 },
++    { 0x00000098, 0x00000038 },
++    { 0x000ca000, 0x00000004 },
++    { 0x00140000, 0x00000004 },
++    { 0x000c2000, 0x00000004 },
++    { 0x00160000, 0x00000004 },
++    { 0x700ce000, 0x00000004 },
++    { 0x00140094, 0x00000008 },
++    { 0x4000e000, 0000000000 },
++    { 0x02400000, 0x00000004 },
++    { 0x400ee000, 0x00000004 },
++    { 0x02400000, 0x00000004 },
++    { 0x4000e000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x0240e51b, 0x00000004 },
++    { 0x0080e50a, 0x00000005 },
++    { 0x0080e50b, 0x00000005 },
++    { 0x00220000, 0x00000004 },
++    { 0x000700c7, 0x00000004 },
++    { 0x000000a4, 0x00000038 },
++    { 0x0080e5bd, 0x00000005 },
++    { 0x0000e5bb, 0x00000005 },
++    { 0x0080e5bc, 0x00000005 },
++    { 0x00210000, 0x00000004 },
++    { 0x02800000, 0x00000004 },
++    { 0x00c000ab, 0x00000018 },
++    { 0x4180e000, 0x00000040 },
++    { 0x000000ad, 0x00000024 },
++    { 0x01000000, 0x0000000c },
++    { 0x0100e51d, 0x0000000c },
++    { 0x000045bb, 0x00000004 },
++    { 0x000080a7, 0x00000008 },
++    { 0x0000f3ce, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053cf, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f3d2, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c053d3, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x0000f39d, 0x00000004 },
++    { 0x0140a000, 0x00000004 },
++    { 0x00cc2000, 0x00000004 },
++    { 0x08c0539e, 0x00000040 },
++    { 0x00008000, 0000000000 },
++    { 0x03c00830, 0x00000004 },
++    { 0x4200e000, 0000000000 },
++    { 0x0000a000, 0x00000004 },
++    { 0x200045e0, 0x00000004 },
++    { 0x0000e5e1, 0000000000 },
++    { 0x00000001, 0000000000 },
++    { 0x000700c4, 0x00000004 },
++    { 0x0800e394, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x0000e8c4, 0x00000004 },
++    { 0x0000e8c5, 0x00000004 },
++    { 0x0000e8c6, 0x00000004 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000c8, 0x00000008 },
++    { 0x0000e928, 0x00000004 },
++    { 0x0000e929, 0x00000004 },
++    { 0x0000e92a, 0x00000004 },
++    { 0x000000cf, 0x00000008 },
++    { 0xdeadbeef, 0000000000 },
++    { 0x00000116, 0000000000 },
++    { 0x000700d3, 0x00000004 },
++    { 0x080050e7, 0x00000004 },
++    { 0x000700d4, 0x00000004 },
++    { 0x0800401c, 0x00000004 },
++    { 0x0000e01d, 0000000000 },
++    { 0x02c02000, 0x00000004 },
++    { 0x00060000, 0x00000004 },
++    { 0x000000de, 0x00000034 },
++    { 0x000000db, 0x00000008 },
++    { 0x00008000, 0x00000004 },
++    { 0xc000e000, 0000000000 },
++    { 0x0000e1cc, 0x00000004 },
++    { 0x0500e1cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000e5, 0x00000034 },
++    { 0x000000e1, 0x00000008 },
++    { 0x0000a000, 0000000000 },
++    { 0x0019e1cc, 0x00000004 },
++    { 0x001b0001, 0x00000004 },
++    { 0x0500a000, 0x00000004 },
++    { 0x080041cd, 0x00000004 },
++    { 0x000ca000, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0x000c2000, 0x00000004 },
++    { 0x001d0018, 0x00000004 },
++    { 0x001a0001, 0x00000004 },
++    { 0x000000fb, 0x00000034 },
++    { 0x0000004a, 0x00000008 },
++    { 0x0500a04a, 0x00000008 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++    { 0000000000, 0000000000 },
++};
++
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/radeon_state.c git-nokia/drivers/gpu/drm-tungsten/radeon_state.c
+--- git/drivers/gpu/drm-tungsten/radeon_state.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/radeon_state.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,3263 @@
++/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ *    Kevin E. Martin <martin@valinux.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "radeon_drm.h"
++#include "radeon_drv.h"
++
++/* ================================================================
++ * Helper functions for client state checking and fixup
++ */
++
++static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
++                                                  dev_priv,
++                                                  struct drm_file *file_priv,
++                                                  u32 * offset)
++{
++      u64 off = *offset;
++      u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
++      struct drm_radeon_driver_file_fields *radeon_priv;
++
++      /* Hrm ... the story of the offset ... So this function converts
++       * the various ideas of what userland clients might have for an
++       * offset in the card address space into an offset into the card
++       * address space :) So with a sane client, it should just keep
++       * the value intact and just do some boundary checking. However,
++       * not all clients are sane. Some older clients pass us 0 based
++       * offsets relative to the start of the framebuffer and some may
++       * assume the AGP aperture it appended to the framebuffer, so we
++       * try to detect those cases and fix them up.
++       *
++       * Note: It might be a good idea here to make sure the offset lands
++       * in some "allowed" area to protect things like the PCIE GART...
++       */
++
++      /* First, the best case, the offset already lands in either the
++       * framebuffer or the GART mapped space
++       */
++      if (radeon_check_offset(dev_priv, off))
++              return 0;
++
++      /* Ok, that didn't happen... now check if we have a zero based
++       * offset that fits in the framebuffer + gart space, apply the
++       * magic offset we get from SETPARAM or calculated from fb_location
++       */
++      if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
++              radeon_priv = file_priv->driver_priv;
++              off += radeon_priv->radeon_fb_delta;
++      }
++
++      /* Finally, assume we aimed at a GART offset if beyond the fb */
++      if (off > fb_end)
++              off = off - fb_end - 1 + dev_priv->gart_vm_start;
++
++      /* Now recheck and fail if out of bounds */
++      if (radeon_check_offset(dev_priv, off)) {
++              DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
++              *offset = off;
++              return 0;
++      }
++      return -EINVAL;
++}
++
++static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
++                                                   dev_priv,
++                                                   struct drm_file *file_priv,
++                                                   int id, u32 *data)
++{
++      switch (id) {
++
++      case RADEON_EMIT_PP_MISC:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                  &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
++                      DRM_ERROR("Invalid depth buffer offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_EMIT_PP_CNTL:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                  &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
++                      DRM_ERROR("Invalid colour buffer offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case R200_EMIT_PP_TXOFFSET_0:
++      case R200_EMIT_PP_TXOFFSET_1:
++      case R200_EMIT_PP_TXOFFSET_2:
++      case R200_EMIT_PP_TXOFFSET_3:
++      case R200_EMIT_PP_TXOFFSET_4:
++      case R200_EMIT_PP_TXOFFSET_5:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &data[0])) {
++                      DRM_ERROR("Invalid R200 texture offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_EMIT_PP_TXFILTER_0:
++      case RADEON_EMIT_PP_TXFILTER_1:
++      case RADEON_EMIT_PP_TXFILTER_2:
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                  &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
++                      DRM_ERROR("Invalid R100 texture offset\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case R200_EMIT_PP_CUBIC_OFFSETS_0:
++      case R200_EMIT_PP_CUBIC_OFFSETS_1:
++      case R200_EMIT_PP_CUBIC_OFFSETS_2:
++      case R200_EMIT_PP_CUBIC_OFFSETS_3:
++      case R200_EMIT_PP_CUBIC_OFFSETS_4:
++      case R200_EMIT_PP_CUBIC_OFFSETS_5:{
++                      int i;
++                      for (i = 0; i < 5; i++) {
++                              if (radeon_check_and_fixup_offset(dev_priv,
++                                                                file_priv,
++                                                                &data[i])) {
++                                      DRM_ERROR
++                                          ("Invalid R200 cubic texture offset\n");
++                                      return -EINVAL;
++                              }
++                      }
++                      break;
++              }
++
++      case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
++      case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
++      case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
++                      int i;
++                      for (i = 0; i < 5; i++) {
++                              if (radeon_check_and_fixup_offset(dev_priv,
++                                                                file_priv,
++                                                                &data[i])) {
++                                      DRM_ERROR
++                                          ("Invalid R100 cubic texture offset\n");
++                                      return -EINVAL;
++                              }
++                      }
++              }
++              break;
++
++      case R200_EMIT_VAP_CTL: {
++                      RING_LOCALS;
++                      BEGIN_RING(2);
++                      OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
++                      ADVANCE_RING();
++              }
++              break;
++
++      case RADEON_EMIT_RB3D_COLORPITCH:
++      case RADEON_EMIT_RE_LINE_PATTERN:
++      case RADEON_EMIT_SE_LINE_WIDTH:
++      case RADEON_EMIT_PP_LUM_MATRIX:
++      case RADEON_EMIT_PP_ROT_MATRIX_0:
++      case RADEON_EMIT_RB3D_STENCILREFMASK:
++      case RADEON_EMIT_SE_VPORT_XSCALE:
++      case RADEON_EMIT_SE_CNTL:
++      case RADEON_EMIT_SE_CNTL_STATUS:
++      case RADEON_EMIT_RE_MISC:
++      case RADEON_EMIT_PP_BORDER_COLOR_0:
++      case RADEON_EMIT_PP_BORDER_COLOR_1:
++      case RADEON_EMIT_PP_BORDER_COLOR_2:
++      case RADEON_EMIT_SE_ZBIAS_FACTOR:
++      case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
++      case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
++      case R200_EMIT_PP_TXCBLEND_0:
++      case R200_EMIT_PP_TXCBLEND_1:
++      case R200_EMIT_PP_TXCBLEND_2:
++      case R200_EMIT_PP_TXCBLEND_3:
++      case R200_EMIT_PP_TXCBLEND_4:
++      case R200_EMIT_PP_TXCBLEND_5:
++      case R200_EMIT_PP_TXCBLEND_6:
++      case R200_EMIT_PP_TXCBLEND_7:
++      case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
++      case R200_EMIT_TFACTOR_0:
++      case R200_EMIT_VTX_FMT_0:
++      case R200_EMIT_MATRIX_SELECT_0:
++      case R200_EMIT_TEX_PROC_CTL_2:
++      case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
++      case R200_EMIT_PP_TXFILTER_0:
++      case R200_EMIT_PP_TXFILTER_1:
++      case R200_EMIT_PP_TXFILTER_2:
++      case R200_EMIT_PP_TXFILTER_3:
++      case R200_EMIT_PP_TXFILTER_4:
++      case R200_EMIT_PP_TXFILTER_5:
++      case R200_EMIT_VTE_CNTL:
++      case R200_EMIT_OUTPUT_VTX_COMP_SEL:
++      case R200_EMIT_PP_TAM_DEBUG3:
++      case R200_EMIT_PP_CNTL_X:
++      case R200_EMIT_RB3D_DEPTHXY_OFFSET:
++      case R200_EMIT_RE_AUX_SCISSOR_CNTL:
++      case R200_EMIT_RE_SCISSOR_TL_0:
++      case R200_EMIT_RE_SCISSOR_TL_1:
++      case R200_EMIT_RE_SCISSOR_TL_2:
++      case R200_EMIT_SE_VAP_CNTL_STATUS:
++      case R200_EMIT_SE_VTX_STATE_CNTL:
++      case R200_EMIT_RE_POINTSIZE:
++      case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
++      case R200_EMIT_PP_CUBIC_FACES_0:
++      case R200_EMIT_PP_CUBIC_FACES_1:
++      case R200_EMIT_PP_CUBIC_FACES_2:
++      case R200_EMIT_PP_CUBIC_FACES_3:
++      case R200_EMIT_PP_CUBIC_FACES_4:
++      case R200_EMIT_PP_CUBIC_FACES_5:
++      case RADEON_EMIT_PP_TEX_SIZE_0:
++      case RADEON_EMIT_PP_TEX_SIZE_1:
++      case RADEON_EMIT_PP_TEX_SIZE_2:
++      case R200_EMIT_RB3D_BLENDCOLOR:
++      case R200_EMIT_TCL_POINT_SPRITE_CNTL:
++      case RADEON_EMIT_PP_CUBIC_FACES_0:
++      case RADEON_EMIT_PP_CUBIC_FACES_1:
++      case RADEON_EMIT_PP_CUBIC_FACES_2:
++      case R200_EMIT_PP_TRI_PERF_CNTL:
++      case R200_EMIT_PP_AFS_0:
++      case R200_EMIT_PP_AFS_1:
++      case R200_EMIT_ATF_TFACTOR:
++      case R200_EMIT_PP_TXCTLALL_0:
++      case R200_EMIT_PP_TXCTLALL_1:
++      case R200_EMIT_PP_TXCTLALL_2:
++      case R200_EMIT_PP_TXCTLALL_3:
++      case R200_EMIT_PP_TXCTLALL_4:
++      case R200_EMIT_PP_TXCTLALL_5:
++      case R200_EMIT_VAP_PVS_CNTL:
++              /* These packets don't contain memory offsets */
++              break;
++
++      default:
++              DRM_ERROR("Unknown state packet ID %d\n", id);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
++                                                   dev_priv,
++                                                   struct drm_file *file_priv,
++                                                   drm_radeon_kcmd_buffer_t *
++                                                   cmdbuf,
++                                                   unsigned int *cmdsz)
++{
++      u32 *cmd = (u32 *) cmdbuf->buf;
++      u32 offset, narrays;
++      int count, i, k;
++
++      *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
++
++      if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
++              DRM_ERROR("Not a type 3 packet\n");
++              return -EINVAL;
++      }
++
++      if (4 * *cmdsz > cmdbuf->bufsz) {
++              DRM_ERROR("Packet size larger than size of data provided\n");
++              return -EINVAL;
++      }
++
++      switch(cmd[0] & 0xff00) {
++      /* XXX Are there old drivers needing other packets? */
++
++      case RADEON_3D_DRAW_IMMD:
++      case RADEON_3D_DRAW_VBUF:
++      case RADEON_3D_DRAW_INDX:
++      case RADEON_WAIT_FOR_IDLE:
++      case RADEON_CP_NOP:
++      case RADEON_3D_CLEAR_ZMASK:
++/*    case RADEON_CP_NEXT_CHAR:
++      case RADEON_CP_PLY_NEXTSCAN:
++      case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
++              /* these packets are safe */
++              break;
++
++      case RADEON_CP_3D_DRAW_IMMD_2:
++      case RADEON_CP_3D_DRAW_VBUF_2:
++      case RADEON_CP_3D_DRAW_INDX_2:
++      case RADEON_3D_CLEAR_HIZ:
++              /* safe but r200 only */
++              if ((dev_priv->chip_family < CHIP_R200) ||
++                  (dev_priv->chip_family > CHIP_RV280)) {
++                      DRM_ERROR("Invalid 3d packet for non r200-class chip\n");
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_3D_LOAD_VBPNTR:
++              count = (cmd[0] >> 16) & 0x3fff;
++
++              if (count > 18) { /* 12 arrays max */
++                      DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
++                                count);
++                      return -EINVAL;
++              }
++
++              /* carefully check packet contents */
++              narrays = cmd[1] & ~0xc000;
++              k = 0;
++              i = 2;
++              while ((k < narrays) && (i < (count + 2))) {
++                      i++;            /* skip attribute field */
++                      if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                        &cmd[i])) {
++                              DRM_ERROR
++                                  ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
++                                   k, i);
++                              return -EINVAL;
++                      }
++                      k++;
++                      i++;
++                      if (k == narrays)
++                              break;
++                      /* have one more to process, they come in pairs */
++                      if (radeon_check_and_fixup_offset(dev_priv,
++                                                        file_priv, &cmd[i]))
++                      {
++                              DRM_ERROR
++                                  ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
++                                   k, i);
++                              return -EINVAL;
++                      }
++                      k++;
++                      i++;
++              }
++              /* do the counts match what we expect ? */
++              if ((k != narrays) || (i != (count + 2))) {
++                      DRM_ERROR
++                          ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
++                            k, i, narrays, count + 1);
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_3D_RNDR_GEN_INDX_PRIM:
++              if (dev_priv->chip_family > CHIP_RS200) {
++                      DRM_ERROR("Invalid 3d packet for non-r100-class chip\n");
++                      return -EINVAL;
++              }
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
++                              DRM_ERROR("Invalid rndr_gen_indx offset\n");
++                              return -EINVAL;
++              }
++              break;
++
++      case RADEON_CP_INDX_BUFFER:
++              /* safe but r200 only */
++              if ((dev_priv->chip_family < CHIP_R200) ||
++                  (dev_priv->chip_family > CHIP_RV280)) {
++                      DRM_ERROR("Invalid 3d packet for non-r200-class chip\n");
++                      return -EINVAL;
++              }
++              if ((cmd[1] & 0x8000ffff) != 0x80000810) {
++                      DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
++                      return -EINVAL;
++              }
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
++                      DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
++                      return -EINVAL;
++              }
++              break;
++
++      case RADEON_CNTL_HOSTDATA_BLT:
++      case RADEON_CNTL_PAINT_MULTI:
++      case RADEON_CNTL_BITBLT_MULTI:
++              /* MSB of opcode: next DWORD GUI_CNTL */
++              if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
++                            | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[2] << 10;
++                      if (radeon_check_and_fixup_offset
++                          (dev_priv, file_priv, &offset)) {
++                              DRM_ERROR("Invalid first packet offset\n");
++                              return -EINVAL;
++                      }
++                      cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
++              }
++
++              if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
++                  (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
++                      offset = cmd[3] << 10;
++                      if (radeon_check_and_fixup_offset
++                          (dev_priv, file_priv, &offset)) {
++                              DRM_ERROR("Invalid second packet offset\n");
++                              return -EINVAL;
++                      }
++                      cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
++              }
++              break;
++
++      default:
++              DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* ================================================================
++ * CP hardware state programming functions
++ */
++
++static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
++                                           struct drm_clip_rect * box)
++{
++      RING_LOCALS;
++
++      DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
++                box->x1, box->y1, box->x2, box->y2);
++
++      BEGIN_RING(4);
++      OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
++      OUT_RING((box->y1 << 16) | box->x1);
++      OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
++      OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
++      ADVANCE_RING();
++}
++
++/* Emit 1.1 state
++ */
++static int radeon_emit_state(drm_radeon_private_t * dev_priv,
++                           struct drm_file *file_priv,
++                           drm_radeon_context_regs_t * ctx,
++                           drm_radeon_texture_regs_t * tex,
++                           unsigned int dirty)
++{
++      RING_LOCALS;
++      DRM_DEBUG("dirty=0x%08x\n", dirty);
++
++      if (dirty & RADEON_UPLOAD_CONTEXT) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &ctx->rb3d_depthoffset)) {
++                      DRM_ERROR("Invalid depth buffer offset\n");
++                      return -EINVAL;
++              }
++
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &ctx->rb3d_coloroffset)) {
++                      DRM_ERROR("Invalid depth buffer offset\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(14);
++              OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
++              OUT_RING(ctx->pp_misc);
++              OUT_RING(ctx->pp_fog_color);
++              OUT_RING(ctx->re_solid_color);
++              OUT_RING(ctx->rb3d_blendcntl);
++              OUT_RING(ctx->rb3d_depthoffset);
++              OUT_RING(ctx->rb3d_depthpitch);
++              OUT_RING(ctx->rb3d_zstencilcntl);
++              OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
++              OUT_RING(ctx->pp_cntl);
++              OUT_RING(ctx->rb3d_cntl);
++              OUT_RING(ctx->rb3d_coloroffset);
++              OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
++              OUT_RING(ctx->rb3d_colorpitch);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_VERTFMT) {
++              BEGIN_RING(2);
++              OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
++              OUT_RING(ctx->se_coord_fmt);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_LINE) {
++              BEGIN_RING(5);
++              OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
++              OUT_RING(ctx->re_line_pattern);
++              OUT_RING(ctx->re_line_state);
++              OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
++              OUT_RING(ctx->se_line_width);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_BUMPMAP) {
++              BEGIN_RING(5);
++              OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
++              OUT_RING(ctx->pp_lum_matrix);
++              OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
++              OUT_RING(ctx->pp_rot_matrix_0);
++              OUT_RING(ctx->pp_rot_matrix_1);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_MASKS) {
++              BEGIN_RING(4);
++              OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
++              OUT_RING(ctx->rb3d_stencilrefmask);
++              OUT_RING(ctx->rb3d_ropcntl);
++              OUT_RING(ctx->rb3d_planemask);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_VIEWPORT) {
++              BEGIN_RING(7);
++              OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
++              OUT_RING(ctx->se_vport_xscale);
++              OUT_RING(ctx->se_vport_xoffset);
++              OUT_RING(ctx->se_vport_yscale);
++              OUT_RING(ctx->se_vport_yoffset);
++              OUT_RING(ctx->se_vport_zscale);
++              OUT_RING(ctx->se_vport_zoffset);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_SETUP) {
++              BEGIN_RING(4);
++              OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
++              OUT_RING(ctx->se_cntl);
++              OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
++              OUT_RING(ctx->se_cntl_status);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_MISC) {
++              BEGIN_RING(2);
++              OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
++              OUT_RING(ctx->re_misc);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_TEX0) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &tex[0].pp_txoffset)) {
++                      DRM_ERROR("Invalid texture offset for unit 0\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
++              OUT_RING(tex[0].pp_txfilter);
++              OUT_RING(tex[0].pp_txformat);
++              OUT_RING(tex[0].pp_txoffset);
++              OUT_RING(tex[0].pp_txcblend);
++              OUT_RING(tex[0].pp_txablend);
++              OUT_RING(tex[0].pp_tfactor);
++              OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
++              OUT_RING(tex[0].pp_border_color);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_TEX1) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &tex[1].pp_txoffset)) {
++                      DRM_ERROR("Invalid texture offset for unit 1\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
++              OUT_RING(tex[1].pp_txfilter);
++              OUT_RING(tex[1].pp_txformat);
++              OUT_RING(tex[1].pp_txoffset);
++              OUT_RING(tex[1].pp_txcblend);
++              OUT_RING(tex[1].pp_txablend);
++              OUT_RING(tex[1].pp_tfactor);
++              OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
++              OUT_RING(tex[1].pp_border_color);
++              ADVANCE_RING();
++      }
++
++      if (dirty & RADEON_UPLOAD_TEX2) {
++              if (radeon_check_and_fixup_offset(dev_priv, file_priv,
++                                                &tex[2].pp_txoffset)) {
++                      DRM_ERROR("Invalid texture offset for unit 2\n");
++                      return -EINVAL;
++              }
++
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
++              OUT_RING(tex[2].pp_txfilter);
++              OUT_RING(tex[2].pp_txformat);
++              OUT_RING(tex[2].pp_txoffset);
++              OUT_RING(tex[2].pp_txcblend);
++              OUT_RING(tex[2].pp_txablend);
++              OUT_RING(tex[2].pp_tfactor);
++              OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
++              OUT_RING(tex[2].pp_border_color);
++              ADVANCE_RING();
++      }
++
++      return 0;
++}
++
++/* Emit 1.2 state
++ */
++static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
++                            struct drm_file *file_priv,
++                            drm_radeon_state_t * state)
++{
++      RING_LOCALS;
++
++      if (state->dirty & RADEON_UPLOAD_ZBIAS) {
++              BEGIN_RING(3);
++              OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
++              OUT_RING(state->context2.se_zbias_factor);
++              OUT_RING(state->context2.se_zbias_constant);
++              ADVANCE_RING();
++      }
++
++      return radeon_emit_state(dev_priv, file_priv, &state->context,
++                               state->tex, state->dirty);
++}
++
++/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
++ * 1.3 cmdbuffers allow all previous state to be updated as well as
++ * the tcl scalar and vector areas.
++ */
++static struct {
++      int start;
++      int len;
++      const char *name;
++} packet[RADEON_MAX_STATE_PACKETS] = {
++      {RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
++      {RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
++      {RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
++      {RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
++      {RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
++      {RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
++      {RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
++      {RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
++      {RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
++      {RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
++      {RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
++      {RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
++      {RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
++      {RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
++      {RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
++      {RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
++      {RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
++      {RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
++      {RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
++      {RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
++      {RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
++                  "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
++      {R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
++      {R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
++      {R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
++      {R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
++      {R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
++      {R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
++      {R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
++      {R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
++      {R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
++      {R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
++      {R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
++      {R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
++      {R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
++      {R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
++      {R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
++      {R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
++      {R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
++      {R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
++      {R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
++      {R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
++      {R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
++      {R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
++      {R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
++      {R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
++      {R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
++      {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
++      {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
++      {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
++      {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
++       "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
++      {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
++      {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
++      {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
++      {R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
++      {R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
++      {R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
++      {R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
++      {R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
++      {R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
++      {R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
++      {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
++                  "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
++      {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},    /* 61 */
++      {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
++      {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
++      {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
++      {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
++      {R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
++      {R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
++      {R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
++      {R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
++      {R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
++      {R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
++      {R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
++      {RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
++      {RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
++      {RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
++      {R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
++      {R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
++      {RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
++      {RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
++      {RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
++      {RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
++      {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
++      {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
++      {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
++      {R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
++      {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
++      {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
++      {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
++      {R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
++      {R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
++      {R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
++      {R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
++      {R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
++      {R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
++};
++
++/* ================================================================
++ * Performance monitoring functions
++ */
++
++static void radeon_clear_box(drm_radeon_private_t * dev_priv,
++                           int x, int y, int w, int h, int r, int g, int b)
++{
++      u32 color;
++      RING_LOCALS;
++
++      x += dev_priv->sarea_priv->boxes[0].x1;
++      y += dev_priv->sarea_priv->boxes[0].y1;
++
++      switch (dev_priv->color_fmt) {
++      case RADEON_COLOR_FORMAT_RGB565:
++              color = (((r & 0xf8) << 8) |
++                       ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
++              break;
++      case RADEON_COLOR_FORMAT_ARGB8888:
++      default:
++              color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
++              break;
++      }
++
++      BEGIN_RING(4);
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
++      OUT_RING(0xffffffff);
++      ADVANCE_RING();
++
++      BEGIN_RING(6);
++
++      OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
++      OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++               RADEON_GMC_BRUSH_SOLID_COLOR |
++               (dev_priv->color_fmt << 8) |
++               RADEON_GMC_SRC_DATATYPE_COLOR |
++               RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
++
++      if (dev_priv->sarea_priv->pfCurrentPage == 1) {
++              OUT_RING(dev_priv->front_pitch_offset);
++      } else {
++              OUT_RING(dev_priv->back_pitch_offset);
++      }
++
++      OUT_RING(color);
++
++      OUT_RING((x << 16) | y);
++      OUT_RING((w << 16) | h);
++
++      ADVANCE_RING();
++}
++
++static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
++{
++      /* Collapse various things into a wait flag -- trying to
++       * guess if userspace slept -- better just to have them tell us.
++       */
++      if (dev_priv->stats.last_frame_reads > 1 ||
++          dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
++              dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++      }
++
++      if (dev_priv->stats.freelist_loops) {
++              dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
++      }
++
++      /* Purple box for page flipping
++       */
++      if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
++              radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
++
++      /* Red box if we have to wait for idle at any point
++       */
++      if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
++              radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
++
++      /* Blue box: lost context?
++       */
++
++      /* Yellow box for texture swaps
++       */
++      if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
++              radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
++
++      /* Green box if hardware never idles (as far as we can tell)
++       */
++      if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
++              radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
++
++      /* Draw bars indicating number of buffers allocated
++       * (not a great measure, easily confused)
++       */
++      if (dev_priv->stats.requested_bufs) {
++              if (dev_priv->stats.requested_bufs > 100)
++                      dev_priv->stats.requested_bufs = 100;
++
++              radeon_clear_box(dev_priv, 4, 16,
++                               dev_priv->stats.requested_bufs, 4,
++                               196, 128, 128);
++      }
++
++      memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
++
++}
++
++/* ================================================================
++ * CP command dispatch functions
++ */
++
++static void radeon_cp_dispatch_clear(struct drm_device * dev,
++                                   drm_radeon_clear_t * clear,
++                                   drm_radeon_clear_rect_t * depth_boxes)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      unsigned int flags = clear->flags;
++      u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("flags = 0x%x\n", flags);
++
++      dev_priv->stats.clears++;
++
++      if (dev_priv->sarea_priv->pfCurrentPage == 1) {
++              unsigned int tmp = flags;
++
++              flags &= ~(RADEON_FRONT | RADEON_BACK);
++              if (tmp & RADEON_FRONT)
++                      flags |= RADEON_BACK;
++              if (tmp & RADEON_BACK)
++                      flags |= RADEON_FRONT;
++      }
++
++      if (flags & (RADEON_FRONT | RADEON_BACK)) {
++
++              BEGIN_RING(4);
++
++              /* Ensure the 3D stream is idle before doing a
++               * 2D fill to clear the front or back buffer.
++               */
++              RADEON_WAIT_UNTIL_3D_IDLE();
++
++              OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
++              OUT_RING(clear->color_mask);
++
++              ADVANCE_RING();
++
++              /* Make sure we restore the 3D state next time.
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              for (i = 0; i < nbox; i++) {
++                      int x = pbox[i].x1;
++                      int y = pbox[i].y1;
++                      int w = pbox[i].x2 - x;
++                      int h = pbox[i].y2 - y;
++
++                      DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
++                                x, y, w, h, flags);
++
++                      if (flags & RADEON_FRONT) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CP_PACKET3
++                                       (RADEON_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                                       RADEON_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->
++                                        color_fmt << 8) |
++                                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                                       RADEON_ROP3_P |
++                                       RADEON_GMC_CLR_CMP_CNTL_DIS);
++
++                              OUT_RING(dev_priv->front_pitch_offset);
++                              OUT_RING(clear->clear_color);
++
++                              OUT_RING((x << 16) | y);
++                              OUT_RING((w << 16) | h);
++
++                              ADVANCE_RING();
++                      }
++
++                      if (flags & RADEON_BACK) {
++                              BEGIN_RING(6);
++
++                              OUT_RING(CP_PACKET3
++                                       (RADEON_CNTL_PAINT_MULTI, 4));
++                              OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                                       RADEON_GMC_BRUSH_SOLID_COLOR |
++                                       (dev_priv->
++                                        color_fmt << 8) |
++                                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                                       RADEON_ROP3_P |
++                                       RADEON_GMC_CLR_CMP_CNTL_DIS);
++
++                              OUT_RING(dev_priv->back_pitch_offset);
++                              OUT_RING(clear->clear_color);
++
++                              OUT_RING((x << 16) | y);
++                              OUT_RING((w << 16) | h);
++
++                              ADVANCE_RING();
++                      }
++              }
++      }
++
++      /* hyper z clear */
++      /* no docs available, based on reverse engineering by Stephane Marchesin */
++      if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
++          && (flags & RADEON_CLEAR_FASTZ)) {
++
++              int i;
++              int depthpixperline =
++                  dev_priv->depth_fmt ==
++                  RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
++                                                     2) : (dev_priv->
++                                                           depth_pitch / 4);
++
++              u32 clearmask;
++
++              u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
++                  ((clear->depth_mask & 0xff) << 24);
++
++              /* Make sure we restore the 3D state next time.
++               * we haven't touched any "normal" state - still need this?
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              if ((dev_priv->flags & RADEON_HAS_HIERZ)
++                  && (flags & RADEON_USE_HIERZ)) {
++                      /* FIXME : reverse engineer that for Rx00 cards */
++                      /* FIXME : the mask supposedly contains low-res z values. So can't set
++                         just to the max (0xff? or actually 0x3fff?), need to take z clear
++                         value into account? */
++                      /* pattern seems to work for r100, though get slight
++                         rendering errors with glxgears. If hierz is not enabled for r100,
++                         only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
++                         other ones are ignored, and the same clear mask can be used. That's
++                         very different behaviour than R200 which needs different clear mask
++                         and different number of tiles to clear if hierz is enabled or not !?!
++                       */
++                      clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
++              } else {
++                      /* clear mask : chooses the clearing pattern.
++                         rv250: could be used to clear only parts of macrotiles
++                         (but that would get really complicated...)?
++                         bit 0 and 1 (either or both of them ?!?!) are used to
++                         not clear tile (or maybe one of the bits indicates if the tile is
++                         compressed or not), bit 2 and 3 to not clear tile 1,...,.
++                         Pattern is as follows:
++                         | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
++                         bits -------------------------------------------------
++                         | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
++                         rv100: clearmask covers 2x8 4x1 tiles, but one clear still
++                         covers 256 pixels ?!?
++                       */
++                      clearmask = 0x0;
++              }
++
++              BEGIN_RING(8);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++              OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
++                           tempRB3D_DEPTHCLEARVALUE);
++              /* what offset is this exactly ? */
++              OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
++              /* need ctlstat, otherwise get some strange black flickering */
++              OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
++                           RADEON_RB3D_ZC_FLUSH_ALL);
++              ADVANCE_RING();
++
++              for (i = 0; i < nbox; i++) {
++                      int tileoffset, nrtilesx, nrtilesy, j;
++                      /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
++                      if ((dev_priv->flags & RADEON_HAS_HIERZ)
++                          && (dev_priv->chip_family < CHIP_R200)) {
++                              /* FIXME : figure this out for r200 (when hierz is enabled). Or
++                                 maybe r200 actually doesn't need to put the low-res z value into
++                                 the tile cache like r100, but just needs to clear the hi-level z-buffer?
++                                 Works for R100, both with hierz and without.
++                                 R100 seems to operate on 2x1 8x8 tiles, but...
++                                 odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
++                                 problematic with resolutions which are not 64 pix aligned? */
++                              tileoffset =
++                                  ((pbox[i].y1 >> 3) * depthpixperline +
++                                   pbox[i].x1) >> 6;
++                              nrtilesx =
++                                  ((pbox[i].x2 & ~63) -
++                                   (pbox[i].x1 & ~63)) >> 4;
++                              nrtilesy =
++                                  (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
++                              for (j = 0; j <= nrtilesy; j++) {
++                                      BEGIN_RING(4);
++                                      OUT_RING(CP_PACKET3
++                                               (RADEON_3D_CLEAR_ZMASK, 2));
++                                      /* first tile */
++                                      OUT_RING(tileoffset * 8);
++                                      /* the number of tiles to clear */
++                                      OUT_RING(nrtilesx + 4);
++                                      /* clear mask : chooses the clearing pattern. */
++                                      OUT_RING(clearmask);
++                                      ADVANCE_RING();
++                                      tileoffset += depthpixperline >> 6;
++                              }
++                      } else if ((dev_priv->chip_family >= CHIP_R200) &&
++                                 (dev_priv->chip_family <= CHIP_RV280)) {
++                              /* works for rv250. */
++                              /* find first macro tile (8x2 4x4 z-pixels on rv250) */
++                              tileoffset =
++                                  ((pbox[i].y1 >> 3) * depthpixperline +
++                                   pbox[i].x1) >> 5;
++                              nrtilesx =
++                                  (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
++                              nrtilesy =
++                                  (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
++                              for (j = 0; j <= nrtilesy; j++) {
++                                      BEGIN_RING(4);
++                                      OUT_RING(CP_PACKET3
++                                               (RADEON_3D_CLEAR_ZMASK, 2));
++                                      /* first tile */
++                                      /* judging by the first tile offset needed, could possibly
++                                         directly address/clear 4x4 tiles instead of 8x2 * 4x4
++                                         macro tiles, though would still need clear mask for
++                                         right/bottom if truly 4x4 granularity is desired ? */
++                                      OUT_RING(tileoffset * 16);
++                                      /* the number of tiles to clear */
++                                      OUT_RING(nrtilesx + 1);
++                                      /* clear mask : chooses the clearing pattern. */
++                                      OUT_RING(clearmask);
++                                      ADVANCE_RING();
++                                      tileoffset += depthpixperline >> 5;
++                              }
++                      } else {        /* rv 100 */
++                              /* rv100 might not need 64 pix alignment, who knows */
++                              /* offsets are, hmm, weird */
++                              tileoffset =
++                                  ((pbox[i].y1 >> 4) * depthpixperline +
++                                   pbox[i].x1) >> 6;
++                              nrtilesx =
++                                  ((pbox[i].x2 & ~63) -
++                                   (pbox[i].x1 & ~63)) >> 4;
++                              nrtilesy =
++                                  (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
++                              for (j = 0; j <= nrtilesy; j++) {
++                                      BEGIN_RING(4);
++                                      OUT_RING(CP_PACKET3
++                                               (RADEON_3D_CLEAR_ZMASK, 2));
++                                      OUT_RING(tileoffset * 128);
++                                      /* the number of tiles to clear */
++                                      OUT_RING(nrtilesx + 4);
++                                      /* clear mask : chooses the clearing pattern. */
++                                      OUT_RING(clearmask);
++                                      ADVANCE_RING();
++                                      tileoffset += depthpixperline >> 6;
++                              }
++                      }
++              }
++
++              /* TODO don't always clear all hi-level z tiles */
++              if ((dev_priv->flags & RADEON_HAS_HIERZ)
++                  && ((dev_priv->chip_family >= CHIP_R200) &&
++                      (dev_priv->chip_family <= CHIP_RV280))
++                  && (flags & RADEON_USE_HIERZ))
++                      /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
++                      /* FIXME : the mask supposedly contains low-res z values. So can't set
++                         just to the max (0xff? or actually 0x3fff?), need to take z clear
++                         value into account? */
++              {
++                      BEGIN_RING(4);
++                      OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
++                      OUT_RING(0x0);  /* First tile */
++                      OUT_RING(0x3cc0);
++                      OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
++                      ADVANCE_RING();
++              }
++      }
++
++      /* We have to clear the depth and/or stencil buffers by
++       * rendering a quad into just those buffers.  Thus, we have to
++       * make sure the 3D engine is configured correctly.
++       */
++      else if ((dev_priv->chip_family >= CHIP_R200) &&
++               (dev_priv->chip_family <= CHIP_RV280) &&
++               (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
++
++              int tempPP_CNTL;
++              int tempRE_CNTL;
++              int tempRB3D_CNTL;
++              int tempRB3D_ZSTENCILCNTL;
++              int tempRB3D_STENCILREFMASK;
++              int tempRB3D_PLANEMASK;
++              int tempSE_CNTL;
++              int tempSE_VTE_CNTL;
++              int tempSE_VTX_FMT_0;
++              int tempSE_VTX_FMT_1;
++              int tempSE_VAP_CNTL;
++              int tempRE_AUX_SCISSOR_CNTL;
++
++              tempPP_CNTL = 0;
++              tempRE_CNTL = 0;
++
++              tempRB3D_CNTL = depth_clear->rb3d_cntl;
++
++              tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
++              tempRB3D_STENCILREFMASK = 0x0;
++
++              tempSE_CNTL = depth_clear->se_cntl;
++
++              /* Disable TCL */
++
++              tempSE_VAP_CNTL = (     /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK |  */
++                                        (0x9 <<
++                                         SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
++
++              tempRB3D_PLANEMASK = 0x0;
++
++              tempRE_AUX_SCISSOR_CNTL = 0x0;
++
++              tempSE_VTE_CNTL =
++                  SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
++
++              /* Vertex format (X, Y, Z, W) */
++              tempSE_VTX_FMT_0 =
++                  SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
++                  SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
++              tempSE_VTX_FMT_1 = 0x0;
++
++              /*
++               * Depth buffer specific enables
++               */
++              if (flags & RADEON_DEPTH) {
++                      /* Enable depth buffer */
++                      tempRB3D_CNTL |= RADEON_Z_ENABLE;
++              } else {
++                      /* Disable depth buffer */
++                      tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
++              }
++
++              /*
++               * Stencil buffer specific enables
++               */
++              if (flags & RADEON_STENCIL) {
++                      tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
++                      tempRB3D_STENCILREFMASK = clear->depth_mask;
++              } else {
++                      tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
++                      tempRB3D_STENCILREFMASK = 0x00000000;
++              }
++
++              if (flags & RADEON_USE_COMP_ZBUF) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
++                          RADEON_Z_DECOMPRESSION_ENABLE;
++              }
++              if (flags & RADEON_USE_HIERZ) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
++              }
++
++              BEGIN_RING(26);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++
++              OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
++              OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
++              OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
++              OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
++              OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
++                           tempRB3D_STENCILREFMASK);
++              OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
++              OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
++              OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
++              OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
++              OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
++              OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
++              OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
++              ADVANCE_RING();
++
++              /* Make sure we restore the 3D state next time.
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              for (i = 0; i < nbox; i++) {
++
++                      /* Funny that this should be required --
++                       *  sets top-left?
++                       */
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++
++                      BEGIN_RING(14);
++                      OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
++                      OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
++                                RADEON_PRIM_WALK_RING |
++                                (3 << RADEON_NUM_VERTICES_SHIFT)));
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x3f800000);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x3f800000);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x3f800000);
++                      ADVANCE_RING();
++              }
++      } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
++
++              int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
++
++              rb3d_cntl = depth_clear->rb3d_cntl;
++
++              if (flags & RADEON_DEPTH) {
++                      rb3d_cntl |= RADEON_Z_ENABLE;
++              } else {
++                      rb3d_cntl &= ~RADEON_Z_ENABLE;
++              }
++
++              if (flags & RADEON_STENCIL) {
++                      rb3d_cntl |= RADEON_STENCIL_ENABLE;
++                      rb3d_stencilrefmask = clear->depth_mask;        /* misnamed field */
++              } else {
++                      rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
++                      rb3d_stencilrefmask = 0x00000000;
++              }
++
++              if (flags & RADEON_USE_COMP_ZBUF) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
++                          RADEON_Z_DECOMPRESSION_ENABLE;
++              }
++              if (flags & RADEON_USE_HIERZ) {
++                      tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
++              }
++
++              BEGIN_RING(13);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++
++              OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
++              OUT_RING(0x00000000);
++              OUT_RING(rb3d_cntl);
++
++              OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
++              OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
++              OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
++              OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
++              ADVANCE_RING();
++
++              /* Make sure we restore the 3D state next time.
++               */
++              dev_priv->sarea_priv->ctx_owner = 0;
++
++              for (i = 0; i < nbox; i++) {
++
++                      /* Funny that this should be required --
++                       *  sets top-left?
++                       */
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++
++                      BEGIN_RING(15);
++
++                      OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
++                      OUT_RING(RADEON_VTX_Z_PRESENT |
++                               RADEON_VTX_PKCOLOR_PRESENT);
++                      OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
++                                RADEON_PRIM_WALK_RING |
++                                RADEON_MAOS_ENABLE |
++                                RADEON_VTX_FMT_RADEON_MODE |
++                                (3 << RADEON_NUM_VERTICES_SHIFT)));
++
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x0);
++
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x0);
++
++                      OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
++                      OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
++                      OUT_RING(0x0);
++
++                      ADVANCE_RING();
++              }
++      }
++
++      /* Increment the clear counter.  The client-side 3D driver must
++       * wait on this value before performing the clear ioctl.  We
++       * need this because the card's so damned fast...
++       */
++      dev_priv->sarea_priv->last_clear++;
++
++      BEGIN_RING(4);
++
++      RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
++      RADEON_WAIT_UNTIL_IDLE();
++
++      ADVANCE_RING();
++}
++
++static void radeon_cp_dispatch_swap(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int nbox = sarea_priv->nbox;
++      struct drm_clip_rect *pbox = sarea_priv->boxes;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      /* Do some trivial performance monitoring...
++       */
++      if (dev_priv->do_boxes)
++              radeon_cp_performance_boxes(dev_priv);
++
++      /* Wait for the 3D stream to idle before dispatching the bitblt.
++       * This will prevent data corruption between the two streams.
++       */
++      BEGIN_RING(2);
++
++      RADEON_WAIT_UNTIL_3D_IDLE();
++
++      ADVANCE_RING();
++
++      for (i = 0; i < nbox; i++) {
++              int x = pbox[i].x1;
++              int y = pbox[i].y1;
++              int w = pbox[i].x2 - x;
++              int h = pbox[i].y2 - y;
++
++              DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
++
++              BEGIN_RING(9);
++
++              OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
++              OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_BRUSH_NONE |
++                       (dev_priv->color_fmt << 8) |
++                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                       RADEON_ROP3_S |
++                       RADEON_DP_SRC_SOURCE_MEMORY |
++                       RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
++
++              /* Make this work even if front & back are flipped:
++               */
++              OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
++              if (dev_priv->sarea_priv->pfCurrentPage == 0) {
++                      OUT_RING(dev_priv->back_pitch_offset);
++                      OUT_RING(dev_priv->front_pitch_offset);
++              } else {
++                      OUT_RING(dev_priv->front_pitch_offset);
++                      OUT_RING(dev_priv->back_pitch_offset);
++              }
++
++              OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
++              OUT_RING((x << 16) | y);
++              OUT_RING((x << 16) | y);
++              OUT_RING((w << 16) | h);
++
++              ADVANCE_RING();
++      }
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++
++      BEGIN_RING(4);
++
++      RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
++      RADEON_WAIT_UNTIL_2D_IDLE();
++
++      ADVANCE_RING();
++}
++
++/* Emit a page flip: repoint both CRTC scanout base registers at the
++ * buffer that is currently hidden (chosen from pfCurrentPage), then
++ * toggle pfCurrentPage and bump last_frame so clients can throttle
++ * on the emitted frame age.
++ */
++static void radeon_cp_dispatch_flip(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
++      /* Page 1 currently visible -> flip to front; otherwise to back. */
++      int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
++          ? dev_priv->front_offset : dev_priv->back_offset;
++      RING_LOCALS;
++      DRM_DEBUG("pfCurrentPage=%d\n",
++                dev_priv->sarea_priv->pfCurrentPage);
++
++      /* Do some trivial performance monitoring...
++       */
++      if (dev_priv->do_boxes) {
++              dev_priv->stats.boxes |= RADEON_BOX_FLIP;
++              radeon_cp_performance_boxes(dev_priv);
++      }
++
++      /* Update the frame offsets for both CRTCs
++       */
++      BEGIN_RING(6);
++
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      /* Scanout start = frame origin scaled by pitch/bpp, 8-byte aligned,
++       * plus the offset of the buffer being flipped to.
++       */
++      OUT_RING_REG(RADEON_CRTC_OFFSET,
++                   ((sarea->frame.y * dev_priv->front_pitch +
++                     sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
++                   + offset);
++      OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
++                   + offset);
++
++      ADVANCE_RING();
++
++      /* Increment the frame counter.  The client-side 3D driver must
++       * throttle the framerate by waiting for this value before
++       * performing the swapbuffer ioctl.
++       */
++      dev_priv->sarea_priv->last_frame++;
++      /* Toggle which page is considered visible (0 <-> 1). */
++      dev_priv->sarea_priv->pfCurrentPage =
++              1 - dev_priv->sarea_priv->pfCurrentPage;
++
++      BEGIN_RING(2);
++
++      RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
++
++      ADVANCE_RING();
++}
++
++/* Validate a vertex count against the primitive type.  Returns nonzero
++ * (i.e. "bad") when nr vertices cannot form complete primitives of the
++ * given type: points need >= 1, line lists an even nonzero count, tri
++ * lists a nonzero multiple of 3, strips/fans their minimum counts.
++ * Unknown primitive types are always rejected.
++ */
++static int bad_prim_vertex_nr(int primitive, int nr)
++{
++      switch (primitive & RADEON_PRIM_TYPE_MASK) {
++      case RADEON_PRIM_TYPE_NONE:
++      case RADEON_PRIM_TYPE_POINT:
++              return nr < 1;
++      case RADEON_PRIM_TYPE_LINE:
++              return (nr & 1) || nr == 0;
++      case RADEON_PRIM_TYPE_LINE_STRIP:
++              return nr < 2;
++      case RADEON_PRIM_TYPE_TRI_LIST:
++      case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
++      case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
++      case RADEON_PRIM_TYPE_RECT_LIST:
++              return nr % 3 || nr == 0;
++      case RADEON_PRIM_TYPE_TRI_FAN:
++      case RADEON_PRIM_TYPE_TRI_STRIP:
++              return nr < 3;
++      default:
++              return 1;
++      }
++}
++
++/* Description of one TCL primitive to dispatch from a vertex/index
++ * buffer.  start/finish are byte offsets into the buffer.
++ */
++typedef struct {
++      unsigned int start;     /* first byte of the primitive data */
++      unsigned int finish;    /* byte offset one past the end */
++      unsigned int prim;      /* RADEON_PRIM_TYPE_* plus flag bits */
++      unsigned int numverts;  /* number of vertices to render */
++      unsigned int offset;    /* vertex-array offset (indexed prims) */
++      unsigned int vc_format; /* vertex format word for the CP packet */
++} drm_radeon_tcl_prim_t;
++
++/* Emit rendering commands for a non-indexed vertex buffer, once per
++ * cliprect in the SAREA (and at least once even with zero cliprects,
++ * since the loop is do/while).  Rejects prim/vertex-count combinations
++ * that bad_prim_vertex_nr() flags as invalid.
++ */
++static void radeon_cp_dispatch_vertex(struct drm_device * dev,
++                                    struct drm_buf * buf,
++                                    drm_radeon_tcl_prim_t * prim)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      /* GPU-visible address of the vertex data within the GART buffers. */
++      int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
++      int numverts = (int)prim->numverts;
++      int nbox = sarea_priv->nbox;
++      int i = 0;
++      RING_LOCALS;
++
++      DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
++                prim->prim,
++                prim->vc_format, prim->start, prim->finish, prim->numverts);
++
++      if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
++              DRM_ERROR("bad prim %x numverts %d\n",
++                        prim->prim, prim->numverts);
++              return;
++      }
++
++      do {
++              /* Emit the next cliprect */
++              if (i < nbox) {
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++              }
++
++              /* Emit the vertex buffer rendering commands */
++              BEGIN_RING(5);
++
++              OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
++              OUT_RING(offset);
++              OUT_RING(numverts);
++              OUT_RING(prim->vc_format);
++              OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
++                       RADEON_COLOR_ORDER_RGBA |
++                       RADEON_VTX_FMT_RADEON_MODE |
++                       (numverts << RADEON_NUM_VERTICES_SHIFT));
++
++              ADVANCE_RING();
++
++              i++;
++      } while (i < nbox);
++}
++
++/* Mark a DMA buffer as pending and stamp it with a new dispatch age.
++ * The age write emitted to the ring lets the freelist know when the
++ * GPU has finished with the buffer so it can be reclaimed.
++ */
++static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
++      RING_LOCALS;
++
++      buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
++
++      /* Emit the vertex buffer age */
++      BEGIN_RING(2);
++      RADEON_DISPATCH_AGE(buf_priv->age);
++      ADVANCE_RING();
++
++      buf->pending = 1;
++      buf->used = 0;
++}
++
++/* Fire off the byte range [start, end) of a DMA buffer as an indirect
++ * command buffer.  Does nothing for an empty range.  The dword count is
++ * rounded up, and an odd count is padded with a Type-2 NOP packet
++ * because the CP requires an even number of dwords.
++ */
++static void radeon_cp_dispatch_indirect(struct drm_device * dev,
++                                      struct drm_buf * buf, int start, int end)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++      DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
++
++      if (start != end) {
++              int offset = (dev_priv->gart_buffers_offset
++                            + buf->offset + start);
++              /* Round the byte length up to whole dwords. */
++              int dwords = (end - start + 3) / sizeof(u32);
++
++              /* Indirect buffer data must be an even number of
++               * dwords, so if we've been given an odd number we must
++               * pad the data with a Type-2 CP packet.
++               */
++              if (dwords & 1) {
++                      u32 *data = (u32 *)
++                          ((char *)dev->agp_buffer_map->handle
++                           + buf->offset + start);
++                      data[dwords++] = RADEON_CP_PACKET2;
++              }
++
++              /* Fire off the indirect buffer */
++              BEGIN_RING(3);
++
++              OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
++              OUT_RING(offset);
++              OUT_RING(dwords);
++
++              ADVANCE_RING();
++      }
++}
++
++/* Dispatch an indexed primitive.  The CP packet header and draw state
++ * are written directly into the element buffer in front of the u16
++ * index data, then the whole span is fired as an indirect buffer once
++ * per cliprect (at least once, since the loop is do/while).
++ */
++static void radeon_cp_dispatch_indices(struct drm_device * dev,
++                                     struct drm_buf * elt_buf,
++                                     drm_radeon_tcl_prim_t * prim)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      int offset = dev_priv->gart_buffers_offset + prim->offset;
++      u32 *data;
++      int dwords;
++      int i = 0;
++      /* Index data begins after the packet header we build below. */
++      int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
++      int count = (prim->finish - start) / sizeof(u16);
++      int nbox = sarea_priv->nbox;
++
++      DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
++                prim->prim,
++                prim->vc_format,
++                prim->start, prim->finish, prim->offset, prim->numverts);
++
++      if (bad_prim_vertex_nr(prim->prim, count)) {
++              DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
++              return;
++      }
++
++      /* The range must be non-empty and start 8-byte aligned. */
++      if (start >= prim->finish || (prim->start & 0x7)) {
++              DRM_ERROR("buffer prim %d\n", prim->prim);
++              return;
++      }
++
++      dwords = (prim->finish - prim->start + 3) / sizeof(u32);
++
++      data = (u32 *) ((char *)dev->agp_buffer_map->handle +
++                      elt_buf->offset + prim->start);
++
++      /* Build the draw packet in place, ahead of the indices. */
++      data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
++      data[1] = offset;
++      data[2] = prim->numverts;
++      data[3] = prim->vc_format;
++      data[4] = (prim->prim |
++                 RADEON_PRIM_WALK_IND |
++                 RADEON_COLOR_ORDER_RGBA |
++                 RADEON_VTX_FMT_RADEON_MODE |
++                 (count << RADEON_NUM_VERTICES_SHIFT));
++
++      do {
++              if (i < nbox)
++                      radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
++
++              radeon_cp_dispatch_indirect(dev, elt_buf,
++                                          prim->start, prim->finish);
++
++              i++;
++      } while (i < nbox);
++
++}
++
++/* Largest texture payload moved per pass; bigger uploads are split. */
++#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
++
++/* Upload a texture image from user space into card memory via host
++ * blits.  Validates the destination offset, translates the texture
++ * format to a blit color format, copies the user data into a DMA
++ * buffer (handling micro-tiled layouts and narrow-texture padding
++ * specially), and emits a BITBLT_MULTI packet.  Uploads larger than
++ * RADEON_MAX_TEXTURE_SIZE are performed in multiple passes, updating
++ * the user-visible image descriptor between passes.  Returns 0 on
++ * success, -EINVAL/-EFAULT on bad input, or -EAGAIN when no DMA
++ * buffer is available (after copying progress back to user space).
++ */
++static int radeon_cp_dispatch_texture(struct drm_device * dev,
++                                    struct drm_file *file_priv,
++                                    drm_radeon_texture_t * tex,
++                                    drm_radeon_tex_image_t * image)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_buf *buf;
++      u32 format;
++      u32 *buffer;
++      const u8 __user *data;
++      int size, dwords, tex_width, blit_width, spitch;
++      u32 height;
++      int i;
++      u32 texpitch, microtile;
++      u32 offset, byte_offset;
++      RING_LOCALS;
++
++      if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
++              DRM_ERROR("Invalid destination offset\n");
++              return -EINVAL;
++      }
++
++      dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
++
++      /* Flush the pixel cache.  This ensures no pixel data gets mixed
++       * up with the texture data from the host data blit, otherwise
++       * part of the texture image may be corrupted.
++       */
++      BEGIN_RING(4);
++      RADEON_FLUSH_CACHE();
++      RADEON_WAIT_UNTIL_IDLE();
++      ADVANCE_RING();
++
++      /* The compiler won't optimize away a division by a variable,
++       * even if the only legal values are powers of two.  Thus, we'll
++       * use a shift instead.
++       */
++      /* Map texture format -> blit color format and bytes-per-row. */
++      switch (tex->format) {
++      case RADEON_TXFORMAT_ARGB8888:
++      case RADEON_TXFORMAT_RGBA8888:
++              format = RADEON_COLOR_FORMAT_ARGB8888;
++              tex_width = tex->width * 4;
++              blit_width = image->width * 4;
++              break;
++      case RADEON_TXFORMAT_AI88:
++      case RADEON_TXFORMAT_ARGB1555:
++      case RADEON_TXFORMAT_RGB565:
++      case RADEON_TXFORMAT_ARGB4444:
++      case RADEON_TXFORMAT_VYUY422:
++      case RADEON_TXFORMAT_YVYU422:
++              format = RADEON_COLOR_FORMAT_RGB565;
++              tex_width = tex->width * 2;
++              blit_width = image->width * 2;
++              break;
++      case RADEON_TXFORMAT_I8:
++      case RADEON_TXFORMAT_RGB332:
++              format = RADEON_COLOR_FORMAT_CI8;
++              tex_width = tex->width * 1;
++              blit_width = image->width * 1;
++              break;
++      default:
++              DRM_ERROR("invalid texture format %d\n", tex->format);
++              return -EINVAL;
++      }
++      /* Source pitch in 64-byte units; 0 is only legal for 1-row blits. */
++      spitch = blit_width >> 6;
++      if (spitch == 0 && image->height > 1)
++              return -EINVAL;
++
++      texpitch = tex->pitch;
++      if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
++              microtile = 1;
++              if (tex_width < 64) {
++                      texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
++                      /* we got tiled coordinates, untile them */
++                      image->x *= 2;
++              }
++      } else
++              microtile = 0;
++
++      /* this might fail for zero-sized uploads - are those illegal? */
++      if (!radeon_check_offset(dev_priv, tex->offset + image->height *
++                              blit_width - 1)) {
++              DRM_ERROR("Invalid final destination offset\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
++
++      do {
++              DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
++                        tex->offset >> 10, tex->pitch, tex->format,
++                        image->x, image->y, image->width, image->height);
++
++              /* Make a copy of some parameters in case we have to
++               * update them for a multi-pass texture blit.
++               */
++              height = image->height;
++              data = (const u8 __user *)image->data;
++
++              size = height * blit_width;
++
++              if (size > RADEON_MAX_TEXTURE_SIZE) {
++                      /* Too big for one pass: clamp to whole rows. */
++                      height = RADEON_MAX_TEXTURE_SIZE / blit_width;
++                      size = height * blit_width;
++              } else if (size < 4 && size > 0) {
++                      size = 4;
++              } else if (size == 0) {
++                      return 0;
++              }
++
++              buf = radeon_freelist_get(dev);
++              /* NOTE(review): the "if (0 && ...)" deliberately disables
++               * the idle-and-retry path; this matches the upstream
++               * driver — confirm before ever re-enabling it.
++               */
++              if (0 && !buf) {
++                      radeon_do_cp_idle(dev_priv);
++                      buf = radeon_freelist_get(dev);
++              }
++              if (!buf) {
++                      DRM_DEBUG("EAGAIN\n");
++                      /* Report progress so user space can retry. */
++                      if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
++                              return -EFAULT;
++                      return -EAGAIN;
++              }
++
++              /* Dispatch the indirect buffer.
++               */
++              buffer =
++                  (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
++              dwords = size / 4;
++
++/* Copy _width bytes from user space into the DMA buffer, failing the
++ * whole ioctl with -EFAULT on a bad user pointer. */
++#define RADEON_COPY_MT(_buf, _data, _width) \
++      do { \
++              if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
++                      DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
++                      return -EFAULT; \
++              } \
++      } while(0)
++
++              if (microtile) {
++                      /* texture micro tiling in use, minimum texture width is thus 16 bytes.
++                         however, we cannot use blitter directly for texture width < 64 bytes,
++                         since minimum tex pitch is 64 bytes and we need this to match
++                         the texture width, otherwise the blitter will tile it wrong.
++                         Thus, tiling manually in this case. Additionally, need to special
++                         case tex height = 1, since our actual image will have height 2
++                         and we need to ensure we don't read beyond the texture size
++                         from user space. */
++                      if (tex->height == 1) {
++                              if (tex_width >= 64 || tex_width <= 16) {
++                                      RADEON_COPY_MT(buffer, data,
++                                              (int)(tex_width * sizeof(u32)));
++                              } else if (tex_width == 32) {
++                                      RADEON_COPY_MT(buffer, data, 16);
++                                      RADEON_COPY_MT(buffer + 8,
++                                                     data + 16, 16);
++                              }
++                      } else if (tex_width >= 64 || tex_width == 16) {
++                              RADEON_COPY_MT(buffer, data,
++                                             (int)(dwords * sizeof(u32)));
++                      } else if (tex_width < 16) {
++                              /* Pad each scanline out to 16 bytes. */
++                              for (i = 0; i < tex->height; i++) {
++                                      RADEON_COPY_MT(buffer, data, tex_width);
++                                      buffer += 4;
++                                      data += tex_width;
++                              }
++                      } else if (tex_width == 32) {
++                              /* TODO: make sure this works when not fitting in one buffer
++                                 (i.e. 32bytes x 2048...) */
++                              for (i = 0; i < tex->height; i += 2) {
++                                      RADEON_COPY_MT(buffer, data, 16);
++                                      data += 16;
++                                      RADEON_COPY_MT(buffer + 8, data, 16);
++                                      data += 16;
++                                      RADEON_COPY_MT(buffer + 4, data, 16);
++                                      data += 16;
++                                      RADEON_COPY_MT(buffer + 12, data, 16);
++                                      data += 16;
++                                      buffer += 16;
++                              }
++                      }
++              } else {
++                      if (tex_width >= 32) {
++                              /* Texture image width is larger than the minimum, so we
++                               * can upload it directly.
++                               */
++                              RADEON_COPY_MT(buffer, data,
++                                             (int)(dwords * sizeof(u32)));
++                      } else {
++                              /* Texture image width is less than the minimum, so we
++                               * need to pad out each image scanline to the minimum
++                               * width.
++                               */
++                              for (i = 0; i < tex->height; i++) {
++                                      RADEON_COPY_MT(buffer, data, tex_width);
++                                      buffer += 8;
++                                      data += tex_width;
++                              }
++                      }
++              }
++
++#undef RADEON_COPY_MT
++              /* Destination y is split: multiples of 2048 go into the
++               * byte offset, the remainder into the blit coordinate.
++               */
++              byte_offset = (image->y & ~2047) * blit_width;
++              buf->file_priv = file_priv;
++              buf->used = size;
++              offset = dev_priv->gart_buffers_offset + buf->offset;
++              BEGIN_RING(9);
++              OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
++              OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_DST_PITCH_OFFSET_CNTL |
++                       RADEON_GMC_BRUSH_NONE |
++                       (format << 8) |
++                       RADEON_GMC_SRC_DATATYPE_COLOR |
++                       RADEON_ROP3_S |
++                       RADEON_DP_SRC_SOURCE_MEMORY |
++                       RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
++              OUT_RING((spitch << 22) | (offset >> 10));
++              OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
++              OUT_RING(0);
++              OUT_RING((image->x << 16) | (image->y % 2048));
++              OUT_RING((image->width << 16) | height);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++              ADVANCE_RING();
++              COMMIT_RING();
++
++              radeon_cp_discard_buffer(dev, buf);
++
++              /* Update the input parameters for next time */
++              image->y += height;
++              image->height -= height;
++              image->data = (const u8 __user *)image->data + size;
++      } while (image->height > 0);
++
++      /* Flush the pixel cache after the blit completes.  This ensures
++       * the texture data is written out to memory before rendering
++       * continues.
++       */
++      BEGIN_RING(4);
++      RADEON_FLUSH_CACHE();
++      RADEON_WAIT_UNTIL_2D_IDLE();
++      ADVANCE_RING();
++      COMMIT_RING();
++
++      return 0;
++}
++
++/* Upload a 32x32 polygon stipple pattern: reset the stipple address
++ * register to 0, then stream all 32 pattern dwords into the stipple
++ * data register with a single register-table packet.
++ */
++static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      int i;
++      RING_LOCALS;
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(35);
++
++      OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
++      OUT_RING(0x00000000);
++
++      OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
++      for (i = 0; i < 32; i++) {
++              OUT_RING(stipple[i]);
++      }
++
++      ADVANCE_RING();
++}
++
++/* Write the cached state of one hardware surface (flags and address
++ * bounds) into its SURFACE*_INFO/LOWER/UPPER register triplet.  The CP
++ * is idled first since these are direct MMIO writes; a no-op when the
++ * MMIO map is not set up.
++ */
++static void radeon_apply_surface_regs(int surf_index,
++                                    drm_radeon_private_t *dev_priv)
++{
++      if (!dev_priv->mmio)
++              return;
++
++      radeon_do_cp_idle(dev_priv);
++
++      /* Each surface owns a 16-byte stride of register space. */
++      RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
++                   dev_priv->surfaces[surf_index].flags);
++      RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
++                   dev_priv->surfaces[surf_index].lower);
++      RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
++                   dev_priv->surfaces[surf_index].upper);
++}
++
++/* Allocates a virtual surface
++ * doesn't always allocate a real surface, will stretch an existing
++ * surface when possible.
++ *
++ * Note that refcount can be at most 2, since during a free refcount=3
++ * might mean we have to allocate a new surface which might not always
++ * be available.
++ * For example : we allocate three contiguous surfaces ABC. If B is
++ * freed, we suddenly need two surfaces to store A and C, which might
++ * not always be available.
++ *
++ * Returns the index of the virtual-surface slot used, or -1 on any
++ * failure (bad range/flags, overlap, or no free slots).
++ */
++static int alloc_surface(drm_radeon_surface_alloc_t *new,
++                       drm_radeon_private_t *dev_priv,
++                       struct drm_file *file_priv)
++{
++      struct radeon_virt_surface *s;
++      int i;
++      int virt_surface_index;
++      uint32_t new_upper, new_lower;
++
++      new_lower = new->address;
++      new_upper = new_lower + new->size - 1;
++
++      /* sanity check: non-empty, flags set, and both bounds aligned to
++       * the hardware's fixed address granularity. */
++      if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
++          ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
++           RADEON_SURF_ADDRESS_FIXED_MASK)
++          || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
++              return -1;
++
++      /* make sure there is no overlap with existing surfaces */
++      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++              if ((dev_priv->surfaces[i].refcount != 0) &&
++                  (((new_lower >= dev_priv->surfaces[i].lower) &&
++                    (new_lower < dev_priv->surfaces[i].upper)) ||
++                   ((new_lower < dev_priv->surfaces[i].lower) &&
++                    (new_upper > dev_priv->surfaces[i].lower)))) {
++                      return -1;
++              }
++      }
++
++      /* find a virtual surface */
++      for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
++              if (dev_priv->virt_surfaces[i].file_priv == 0)
++                      break;
++      if (i == 2 * RADEON_MAX_SURFACES) {
++              return -1;
++      }
++      virt_surface_index = i;
++
++      /* try to reuse an existing surface */
++      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++              /* extend before: new range ends exactly where surface i
++               * begins, with matching flags. */
++              if ((dev_priv->surfaces[i].refcount == 1) &&
++                  (new->flags == dev_priv->surfaces[i].flags) &&
++                  (new_upper + 1 == dev_priv->surfaces[i].lower)) {
++                      s = &(dev_priv->virt_surfaces[virt_surface_index]);
++                      s->surface_index = i;
++                      s->lower = new_lower;
++                      s->upper = new_upper;
++                      s->flags = new->flags;
++                      s->file_priv = file_priv;
++                      dev_priv->surfaces[i].refcount++;
++                      dev_priv->surfaces[i].lower = s->lower;
++                      radeon_apply_surface_regs(s->surface_index, dev_priv);
++                      return virt_surface_index;
++              }
++
++              /* extend after: new range starts right past surface i's
++               * end, with matching flags. */
++              if ((dev_priv->surfaces[i].refcount == 1) &&
++                  (new->flags == dev_priv->surfaces[i].flags) &&
++                  (new_lower == dev_priv->surfaces[i].upper + 1)) {
++                      s = &(dev_priv->virt_surfaces[virt_surface_index]);
++                      s->surface_index = i;
++                      s->lower = new_lower;
++                      s->upper = new_upper;
++                      s->flags = new->flags;
++                      s->file_priv = file_priv;
++                      dev_priv->surfaces[i].refcount++;
++                      dev_priv->surfaces[i].upper = s->upper;
++                      radeon_apply_surface_regs(s->surface_index, dev_priv);
++                      return virt_surface_index;
++              }
++      }
++
++      /* okay, we need a new one */
++      for (i = 0; i < RADEON_MAX_SURFACES; i++) {
++              if (dev_priv->surfaces[i].refcount == 0) {
++                      s = &(dev_priv->virt_surfaces[virt_surface_index]);
++                      s->surface_index = i;
++                      s->lower = new_lower;
++                      s->upper = new_upper;
++                      s->flags = new->flags;
++                      s->file_priv = file_priv;
++                      dev_priv->surfaces[i].refcount = 1;
++                      dev_priv->surfaces[i].lower = s->lower;
++                      dev_priv->surfaces[i].upper = s->upper;
++                      dev_priv->surfaces[i].flags = s->flags;
++                      radeon_apply_surface_regs(s->surface_index, dev_priv);
++                      return virt_surface_index;
++              }
++      }
++
++      /* we didn't find anything */
++      return -1;
++}
++
++/* Release the virtual surface owned by file_priv whose lower bound is
++ * `lower`.  Shrinks the backing real surface away from the freed range,
++ * drops its refcount (clearing flags when it hits zero), and rewrites
++ * the surface registers.  Returns 0 on success, 1 if no matching
++ * virtual surface was found.
++ */
++static int free_surface(struct drm_file *file_priv,
++                      drm_radeon_private_t * dev_priv,
++                      int lower)
++{
++      struct radeon_virt_surface *s;
++      int i;
++      /* find the virtual surface */
++      for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
++              s = &(dev_priv->virt_surfaces[i]);
++              if (s->file_priv) {
++                      if ((lower == s->lower) && (file_priv == s->file_priv))
++                      {
++                              /* Trim the real surface's bounds off the
++                               * freed range on whichever side matches. */
++                              if (dev_priv->surfaces[s->surface_index].
++                                  lower == s->lower)
++                                      dev_priv->surfaces[s->surface_index].
++                                          lower = s->upper;
++
++                              if (dev_priv->surfaces[s->surface_index].
++                                  upper == s->upper)
++                                      dev_priv->surfaces[s->surface_index].
++                                          upper = s->lower;
++
++                              dev_priv->surfaces[s->surface_index].refcount--;
++                              if (dev_priv->surfaces[s->surface_index].
++                                  refcount == 0)
++                                      dev_priv->surfaces[s->surface_index].
++                                          flags = 0;
++                              s->file_priv = NULL;
++                              radeon_apply_surface_regs(s->surface_index,
++                                                        dev_priv);
++                              return 0;
++                      }
++              }
++      }
++      return 1;
++}
++
++/* Free every virtual surface still owned by file_priv — used when a
++ * client goes away so its surface reservations don't leak.
++ */
++static void radeon_surfaces_release(struct drm_file *file_priv,
++                                  drm_radeon_private_t * dev_priv)
++{
++      int i;
++      for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
++              if (dev_priv->virt_surfaces[i].file_priv == file_priv)
++                      free_surface(file_priv, dev_priv,
++                                   dev_priv->virt_surfaces[i].lower);
++      }
++}
++
++/* ================================================================
++ * IOCTL functions
++ */
++
++/* SURFACE_ALLOC ioctl: thin wrapper over alloc_surface().  Note the
++ * allocated index is not returned to user space; the caller later
++ * frees by address via SURFACE_FREE.
++ */
++static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_surface_alloc_t *alloc = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (alloc_surface(alloc, dev_priv, file_priv) == -1)
++              return -EINVAL;
++      else
++              return 0;
++}
++
++/* SURFACE_FREE ioctl: free the caller's virtual surface identified by
++ * its start address.  -EINVAL if the driver is uninitialized or no
++ * matching surface is owned by this file.
++ */
++static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_surface_free_t *memfree = data;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (free_surface(file_priv, dev_priv, memfree->address))
++              return -EINVAL;
++      else
++              return 0;
++}
++
++/* CLEAR ioctl: copy the caller's per-cliprect depth clear rects from
++ * user space (count clamped to RADEON_NR_SAREA_CLIPRECTS) and dispatch
++ * the clear.  Requires the HW lock and ring space.
++ */
++static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      drm_radeon_clear_t *clear = data;
++      drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
++
++      if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
++                             sarea_priv->nbox * sizeof(depth_boxes[0])))
++              return -EFAULT;
++
++      radeon_cp_dispatch_clear(dev, clear, depth_boxes);
++
++      COMMIT_RING();
++      return 0;
++}
++
++/* Not sure why this isn't set all the time:
++ */
++/* Enable page flipping: set the FLIP_CNTL bit in both CRTC offset
++ * control registers via the ring, mark the driver as flipping, and
++ * normalize pfCurrentPage to 0 unless it is already 1.  Always
++ * returns 0.
++ */
++static int radeon_do_init_pageflip(struct drm_device * dev)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      DRM_DEBUG("\n");
++
++      BEGIN_RING(6);
++      RADEON_WAIT_UNTIL_3D_IDLE();
++      OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
++      OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
++               RADEON_CRTC_OFFSET_FLIP_CNTL);
++      OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
++      OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
++               RADEON_CRTC_OFFSET_FLIP_CNTL);
++      ADVANCE_RING();
++
++      dev_priv->page_flipping = 1;
++
++      if (dev_priv->sarea_priv->pfCurrentPage != 1)
++              dev_priv->sarea_priv->pfCurrentPage = 0;
++
++      return 0;
++}
++
++/* Swapping and flipping are different operations, need different ioctls.
++ * They can & should be intermixed to support multiple 3d windows.
++ */
++/* FLIP ioctl: lazily enable page flipping on first use, then dispatch
++ * a flip.  Requires the HW lock and ring space.
++ */
++static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (!dev_priv->page_flipping)
++              radeon_do_init_pageflip(dev);
++
++      radeon_cp_dispatch_flip(dev);
++
++      COMMIT_RING();
++      return 0;
++}
++
++/* SWAP ioctl: blit back buffer to front for each SAREA cliprect (count
++ * clamped to RADEON_NR_SAREA_CLIPRECTS) and clear the 3D context owner
++ * so state is re-emitted.  Requires the HW lock and ring space.
++ */
++static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
++              sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
++
++      radeon_cp_dispatch_swap(dev);
++      dev_priv->sarea_priv->ctx_owner = 0;
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_vertex_t *vertex = data;
++      drm_radeon_tcl_prim_t prim;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      sarea_priv = dev_priv->sarea_priv;
++
++      DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
++                DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        vertex->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
++              DRM_ERROR("buffer prim %d\n", vertex->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[vertex->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", vertex->idx);
++              return -EINVAL;
++      }
++
++      /* Build up a prim_t record:
++       */
++      if (vertex->count) {
++              buf->used = vertex->count;      /* not used? */
++
++              if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
++                      if (radeon_emit_state(dev_priv, file_priv,
++                                            &sarea_priv->context_state,
++                                            sarea_priv->tex_state,
++                                            sarea_priv->dirty)) {
++                              DRM_ERROR("radeon_emit_state failed\n");
++                              return -EINVAL;
++                      }
++
++                      sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
++                                             RADEON_UPLOAD_TEX1IMAGES |
++                                             RADEON_UPLOAD_TEX2IMAGES |
++                                             RADEON_REQUIRE_QUIESCENCE);
++              }
++
++              prim.start = 0;
++              prim.finish = vertex->count;    /* unused */
++              prim.prim = vertex->prim;
++              prim.numverts = vertex->count;
++              prim.vc_format = dev_priv->sarea_priv->vc_format;
++
++              radeon_cp_dispatch_vertex(dev, buf, &prim);
++      }
++
++      if (vertex->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_indices_t *elts = data;
++      drm_radeon_tcl_prim_t prim;
++      int count;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++      sarea_priv = dev_priv->sarea_priv;
++
++      DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
++                DRM_CURRENTPID, elts->idx, elts->start, elts->end,
++                elts->discard);
++
++      if (elts->idx < 0 || elts->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        elts->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++      if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
++              DRM_ERROR("buffer prim %d\n", elts->prim);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[elts->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", elts->idx);
++              return -EINVAL;
++      }
++
++      count = (elts->end - elts->start) / sizeof(u16);
++      elts->start -= RADEON_INDEX_PRIM_OFFSET;
++
++      if (elts->start & 0x7) {
++              DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
++              return -EINVAL;
++      }
++      if (elts->start < buf->used) {
++              DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
++              return -EINVAL;
++      }
++
++      buf->used = elts->end;
++
++      if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
++              if (radeon_emit_state(dev_priv, file_priv,
++                                    &sarea_priv->context_state,
++                                    sarea_priv->tex_state,
++                                    sarea_priv->dirty)) {
++                      DRM_ERROR("radeon_emit_state failed\n");
++                      return -EINVAL;
++              }
++
++              sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
++                                     RADEON_UPLOAD_TEX1IMAGES |
++                                     RADEON_UPLOAD_TEX2IMAGES |
++                                     RADEON_REQUIRE_QUIESCENCE);
++      }
++
++      /* Build up a prim_t record:
++       */
++      prim.start = elts->start;
++      prim.finish = elts->end;
++      prim.prim = elts->prim;
++      prim.offset = 0;        /* offset from start of dma buffers */
++      prim.numverts = RADEON_MAX_VB_VERTS;    /* duh */
++      prim.vc_format = dev_priv->sarea_priv->vc_format;
++
++      radeon_cp_dispatch_indices(dev, buf, &prim);
++      if (elts->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_texture_t *tex = data;
++      drm_radeon_tex_image_t image;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (tex->image == NULL) {
++              DRM_ERROR("null texture image!\n");
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_FROM_USER(&image,
++                             (drm_radeon_tex_image_t __user *) tex->image,
++                             sizeof(image)))
++              return -EFAULT;
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
++
++      return ret;
++}
++
++static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_stipple_t *stipple = data;
++      u32 mask[32];
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
++              return -EFAULT;
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++
++      radeon_cp_dispatch_stipple(dev, mask);
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_indirect_t *indirect = data;
++      RING_LOCALS;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
++                indirect->idx, indirect->start, indirect->end,
++                indirect->discard);
++
++      if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        indirect->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      buf = dma->buflist[indirect->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", indirect->idx);
++              return -EINVAL;
++      }
++
++      if (indirect->start < buf->used) {
++              DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
++                        indirect->start, buf->used);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf->used = indirect->end;
++
++      /* Wait for the 3D stream to idle before the indirect buffer
++       * containing 2D acceleration commands is processed.
++       */
++      BEGIN_RING(2);
++
++      RADEON_WAIT_UNTIL_3D_IDLE();
++
++      ADVANCE_RING();
++
++      /* Dispatch the indirect buffer full of commands from the
++       * X server.  This is insecure and is thus only available to
++       * privileged clients.
++       */
++      radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
++      if (indirect->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_sarea_t *sarea_priv;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_radeon_vertex2_t *vertex = data;
++      int i;
++      unsigned char laststate;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      sarea_priv = dev_priv->sarea_priv;
++
++      DRM_DEBUG("pid=%d index=%d discard=%d\n",
++                DRM_CURRENTPID, vertex->idx, vertex->discard);
++
++      if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
++              DRM_ERROR("buffer index %d (of %d max)\n",
++                        vertex->idx, dma->buf_count - 1);
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      buf = dma->buflist[vertex->idx];
++
++      if (buf->file_priv != file_priv) {
++              DRM_ERROR("process %d using buffer owned by %p\n",
++                        DRM_CURRENTPID, buf->file_priv);
++              return -EINVAL;
++      }
++
++      if (buf->pending) {
++              DRM_ERROR("sending pending buffer %d\n", vertex->idx);
++              return -EINVAL;
++      }
++
++      if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
++              return -EINVAL;
++
++      for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
++              drm_radeon_prim_t prim;
++              drm_radeon_tcl_prim_t tclprim;
++
++              if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
++                      return -EFAULT;
++
++              if (prim.stateidx != laststate) {
++                      drm_radeon_state_t state;
++
++                      if (DRM_COPY_FROM_USER(&state,
++                                             &vertex->state[prim.stateidx],
++                                             sizeof(state)))
++                              return -EFAULT;
++
++                      if (radeon_emit_state2(dev_priv, file_priv, &state)) {
++                              DRM_ERROR("radeon_emit_state2 failed\n");
++                              return -EINVAL;
++                      }
++
++                      laststate = prim.stateidx;
++              }
++
++              tclprim.start = prim.start;
++              tclprim.finish = prim.finish;
++              tclprim.prim = prim.prim;
++              tclprim.vc_format = prim.vc_format;
++
++              if (prim.prim & RADEON_PRIM_WALK_IND) {
++                      tclprim.offset = prim.numverts * 64;
++                      tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
++
++                      radeon_cp_dispatch_indices(dev, buf, &tclprim);
++              } else {
++                      tclprim.numverts = prim.numverts;
++                      tclprim.offset = 0;     /* not used */
++
++                      radeon_cp_dispatch_vertex(dev, buf, &tclprim);
++              }
++
++              if (sarea_priv->nbox == 1)
++                      sarea_priv->nbox = 0;
++      }
++
++      if (vertex->discard) {
++              radeon_cp_discard_buffer(dev, buf);
++      }
++
++      COMMIT_RING();
++      return 0;
++}
++
++static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
++                             struct drm_file *file_priv,
++                             drm_radeon_cmd_header_t header,
++                             drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int id = (int)header.packet.packet_id;
++      int sz, reg;
++      int *data = (int *)cmdbuf->buf;
++      RING_LOCALS;
++
++      if (id >= RADEON_MAX_STATE_PACKETS)
++              return -EINVAL;
++
++      sz = packet[id].len;
++      reg = packet[id].start;
++
++      if (sz * sizeof(int) > cmdbuf->bufsz) {
++              DRM_ERROR("Packet size provided larger than data provided\n");
++              return -EINVAL;
++      }
++
++      if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
++              DRM_ERROR("Packet verification failed\n");
++              return -EINVAL;
++      }
++
++      BEGIN_RING(sz + 1);
++      OUT_RING(CP_PACKET0(reg, (sz - 1)));
++      OUT_RING_TABLE(data, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
++                                        drm_radeon_cmd_header_t header,
++                                        drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.scalars.count;
++      int start = header.scalars.offset;
++      int stride = header.scalars.stride;
++      RING_LOCALS;
++
++      BEGIN_RING(3 + sz);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
++      OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++/* God this is ugly
++ */
++static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
++                                         drm_radeon_cmd_header_t header,
++                                         drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.scalars.count;
++      int start = ((unsigned int)header.scalars.offset) + 0x100;
++      int stride = header.scalars.stride;
++      RING_LOCALS;
++
++      BEGIN_RING(3 + sz);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
++      OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
++                                        drm_radeon_cmd_header_t header,
++                                        drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.vectors.count;
++      int start = header.vectors.offset;
++      int stride = header.vectors.stride;
++      RING_LOCALS;
++
++      BEGIN_RING(5 + sz);
++      OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
++      OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
++                                        drm_radeon_cmd_header_t header,
++                                        drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      int sz = header.veclinear.count * 4;
++      int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
++      RING_LOCALS;
++
++      if (!sz)
++              return 0;
++      if (sz * 4 > cmdbuf->bufsz)
++              return -EINVAL;
++
++      BEGIN_RING(5 + sz);
++      OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
++      OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
++      OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
++      OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
++      OUT_RING_TABLE(cmdbuf->buf, sz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += sz * sizeof(int);
++      cmdbuf->bufsz -= sz * sizeof(int);
++      return 0;
++}
++
++static int radeon_emit_packet3(struct drm_device * dev,
++                             struct drm_file *file_priv,
++                             drm_radeon_kcmd_buffer_t *cmdbuf)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      unsigned int cmdsz;
++      int ret;
++      RING_LOCALS;
++
++      DRM_DEBUG("\n");
++
++      if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
++                                                cmdbuf, &cmdsz))) {
++              DRM_ERROR("Packet verification failed\n");
++              return ret;
++      }
++
++      BEGIN_RING(cmdsz);
++      OUT_RING_TABLE(cmdbuf->buf, cmdsz);
++      ADVANCE_RING();
++
++      cmdbuf->buf += cmdsz * 4;
++      cmdbuf->bufsz -= cmdsz * 4;
++      return 0;
++}
++
++static int radeon_emit_packet3_cliprect(struct drm_device *dev,
++                                      struct drm_file *file_priv,
++                                      drm_radeon_kcmd_buffer_t *cmdbuf,
++                                      int orig_nbox)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_clip_rect box;
++      unsigned int cmdsz;
++      int ret;
++      struct drm_clip_rect __user *boxes = cmdbuf->boxes;
++      int i = 0;
++      RING_LOCALS;
++
++      DRM_DEBUG("\n");
++
++      if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
++                                                cmdbuf, &cmdsz))) {
++              DRM_ERROR("Packet verification failed\n");
++              return ret;
++      }
++
++      if (!orig_nbox)
++              goto out;
++
++      do {
++              if (i < cmdbuf->nbox) {
++                      if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
++                              return -EFAULT;
++                      /* FIXME The second and subsequent times round
++                       * this loop, send a WAIT_UNTIL_3D_IDLE before
++                       * calling emit_clip_rect(). This fixes a
++                       * lockup on fast machines when sending
++                       * several cliprects with a cmdbuf, as when
++                       * waving a 2D window over a 3D
++                       * window. Something in the commands from user
++                       * space seems to hang the card when they're
++                       * sent several times in a row. That would be
++                       * the correct place to fix it but this works
++                       * around it until I can figure that out - Tim
++                       * Smith */
++                      if (i) {
++                              BEGIN_RING(2);
++                              RADEON_WAIT_UNTIL_3D_IDLE();
++                              ADVANCE_RING();
++                      }
++                      radeon_emit_clip_rect(dev_priv, &box);
++              }
++
++              BEGIN_RING(cmdsz);
++              OUT_RING_TABLE(cmdbuf->buf, cmdsz);
++              ADVANCE_RING();
++
++      } while (++i < cmdbuf->nbox);
++      if (cmdbuf->nbox == 1)
++              cmdbuf->nbox = 0;
++
++      out:
++      cmdbuf->buf += cmdsz * 4;
++      cmdbuf->bufsz -= cmdsz * 4;
++      return 0;
++}
++
++static int radeon_emit_wait(struct drm_device * dev, int flags)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      RING_LOCALS;
++
++      DRM_DEBUG("%x\n", flags);
++      switch (flags) {
++      case RADEON_WAIT_2D:
++              BEGIN_RING(2);
++              RADEON_WAIT_UNTIL_2D_IDLE();
++              ADVANCE_RING();
++              break;
++      case RADEON_WAIT_3D:
++              BEGIN_RING(2);
++              RADEON_WAIT_UNTIL_3D_IDLE();
++              ADVANCE_RING();
++              break;
++      case RADEON_WAIT_2D | RADEON_WAIT_3D:
++              BEGIN_RING(2);
++              RADEON_WAIT_UNTIL_IDLE();
++              ADVANCE_RING();
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf = NULL;
++      int idx;
++      drm_radeon_kcmd_buffer_t *cmdbuf = data;
++      drm_radeon_cmd_header_t header;
++      int orig_nbox, orig_bufsz;
++      char *kbuf = NULL;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      RING_SPACE_TEST_WITH_RETURN(dev_priv);
++      VB_AGE_TEST_WITH_RETURN(dev_priv);
++
++      if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
++              return -EINVAL;
++      }
++
++      /* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
++       * races between checking values and using those values in other code,
++       * and simply to avoid a lot of function calls to copy in data.
++       */
++      orig_bufsz = cmdbuf->bufsz;
++      if (orig_bufsz != 0) {
++              kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
++              if (kbuf == NULL)
++                      return -ENOMEM;
++              if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
++                                     cmdbuf->bufsz)) {
++                      drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++                      return -EFAULT;
++              }
++              cmdbuf->buf = kbuf;
++      }
++
++      orig_nbox = cmdbuf->nbox;
++
++      if (dev_priv->chip_family >= CHIP_R300) {
++              int temp;
++              temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
++
++              if (orig_bufsz != 0)
++                      drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++
++              return temp;
++      }
++
++      /* microcode_version != r300 */
++      while (cmdbuf->bufsz >= sizeof(header)) {
++
++              header.i = *(int *)cmdbuf->buf;
++              cmdbuf->buf += sizeof(header);
++              cmdbuf->bufsz -= sizeof(header);
++
++              switch (header.header.cmd_type) {
++              case RADEON_CMD_PACKET:
++                      DRM_DEBUG("RADEON_CMD_PACKET\n");
++                      if (radeon_emit_packets
++                          (dev_priv, file_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_packets failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_SCALARS:
++                      DRM_DEBUG("RADEON_CMD_SCALARS\n");
++                      if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_scalars failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_VECTORS:
++                      DRM_DEBUG("RADEON_CMD_VECTORS\n");
++                      if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_vectors failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_DMA_DISCARD:
++                      DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
++                      idx = header.dma.buf_idx;
++                      if (idx < 0 || idx >= dma->buf_count) {
++                              DRM_ERROR("buffer index %d (of %d max)\n",
++                                        idx, dma->buf_count - 1);
++                              goto err;
++                      }
++
++                      buf = dma->buflist[idx];
++                      if (buf->file_priv != file_priv || buf->pending) {
++                              DRM_ERROR("bad buffer %p %p %d\n",
++                                        buf->file_priv, file_priv,
++                                        buf->pending);
++                              goto err;
++                      }
++
++                      radeon_cp_discard_buffer(dev, buf);
++                      break;
++
++              case RADEON_CMD_PACKET3:
++                      DRM_DEBUG("RADEON_CMD_PACKET3\n");
++                      if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_packet3 failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_PACKET3_CLIP:
++                      DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
++                      if (radeon_emit_packet3_cliprect
++                          (dev, file_priv, cmdbuf, orig_nbox)) {
++                              DRM_ERROR("radeon_emit_packet3_clip failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_SCALARS2:
++                      DRM_DEBUG("RADEON_CMD_SCALARS2\n");
++                      if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_scalars2 failed\n");
++                              goto err;
++                      }
++                      break;
++
++              case RADEON_CMD_WAIT:
++                      DRM_DEBUG("RADEON_CMD_WAIT\n");
++                      if (radeon_emit_wait(dev, header.wait.flags)) {
++                              DRM_ERROR("radeon_emit_wait failed\n");
++                              goto err;
++                      }
++                      break;
++              case RADEON_CMD_VECLINEAR:
++                      DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
++                      if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
++                              DRM_ERROR("radeon_emit_veclinear failed\n");
++                              goto err;
++                      }
++                      break;
++
++              default:
++                      DRM_ERROR("bad cmd_type %d at %p\n",
++                                header.header.cmd_type,
++                                cmdbuf->buf - sizeof(header));
++                      goto err;
++              }
++      }
++
++      if (orig_bufsz != 0)
++              drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++
++      DRM_DEBUG("DONE\n");
++      COMMIT_RING();
++      return 0;
++
++      err:
++      if (orig_bufsz != 0)
++              drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
++      return -EINVAL;
++}
++
++static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_getparam_t *param = data;
++      int value;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
++
++      switch (param->param) {
++      case RADEON_PARAM_GART_BUFFER_OFFSET:
++              value = dev_priv->gart_buffers_offset;
++              break;
++      case RADEON_PARAM_LAST_FRAME:
++              dev_priv->stats.last_frame_reads++;
++              value = GET_SCRATCH(0);
++              break;
++      case RADEON_PARAM_LAST_DISPATCH:
++              value = GET_SCRATCH(1);
++              break;
++      case RADEON_PARAM_LAST_CLEAR:
++              dev_priv->stats.last_clear_reads++;
++              value = GET_SCRATCH(2);
++              break;
++      case RADEON_PARAM_IRQ_NR:
++              value = dev->irq;
++              break;
++      case RADEON_PARAM_GART_BASE:
++              value = dev_priv->gart_vm_start;
++              break;
++      case RADEON_PARAM_REGISTER_HANDLE:
++              value = dev_priv->mmio->offset;
++              break;
++      case RADEON_PARAM_STATUS_HANDLE:
++              value = dev_priv->ring_rptr_offset;
++              break;
++#ifndef __LP64__
++              /*
++               * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
++               * pointer which can't fit into an int-sized variable.  According to
++               * Michel Dänzer, the ioctl() is only used on embedded platforms, so
++               * not supporting it shouldn't be a problem.  If the same functionality
++               * is needed on 64-bit platforms, a new ioctl() would have to be added,
++               * so backwards-compatibility for the embedded platforms can be
++               * maintained.  --davidm 4-Feb-2004.
++               */
++      case RADEON_PARAM_SAREA_HANDLE:
++              /* The lock is the first dword in the sarea. */
++              value = (long)dev->lock.hw_lock;
++              break;
++#endif
++      case RADEON_PARAM_GART_TEX_HANDLE:
++              value = dev_priv->gart_textures_offset;
++              break;
++      case RADEON_PARAM_SCRATCH_OFFSET:
++              if (!dev_priv->writeback_works)
++                      return -EINVAL;
++              value = RADEON_SCRATCH_REG_OFFSET;
++              break;
++
++      case RADEON_PARAM_CARD_TYPE:
++              if (dev_priv->flags & RADEON_IS_PCIE)
++                      value = RADEON_CARD_PCIE;
++              else if (dev_priv->flags & RADEON_IS_AGP)
++                      value = RADEON_CARD_AGP;
++              else
++                      value = RADEON_CARD_PCI;
++              break;
++      case RADEON_PARAM_VBLANK_CRTC:
++              value = radeon_vblank_crtc_get(dev);
++              break;
++      case RADEON_PARAM_FB_LOCATION:
++              value = radeon_read_fb_location(dev_priv);
++              break;
++      case RADEON_PARAM_NUM_GB_PIPES:
++              value = dev_priv->num_gb_pipes;
++              break;
++      default:
++              DRM_DEBUG( "Invalid parameter %d\n", param->param );
++              return -EINVAL;
++      }
++
++      if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
++              DRM_ERROR("copy_to_user\n");
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      drm_radeon_setparam_t *sp = data;
++      struct drm_radeon_driver_file_fields *radeon_priv;
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      switch (sp->param) {
++      case RADEON_SETPARAM_FB_LOCATION:
++              radeon_priv = file_priv->driver_priv;
++              radeon_priv->radeon_fb_delta = dev_priv->fb_location -
++                  sp->value;
++              break;
++      case RADEON_SETPARAM_SWITCH_TILING:
++              if (sp->value == 0) {
++                      DRM_DEBUG("color tiling disabled\n");
++                      dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
++                      dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
++                      if (dev_priv->sarea_priv)
++                              dev_priv->sarea_priv->tiling_enabled = 0;
++              } else if (sp->value == 1) {
++                      DRM_DEBUG("color tiling enabled\n");
++                      dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
++                      dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
++                      if (dev_priv->sarea_priv)
++                              dev_priv->sarea_priv->tiling_enabled = 1;
++              }
++              break;
++      case RADEON_SETPARAM_PCIGART_LOCATION:
++              dev_priv->pcigart_offset = sp->value;
++              dev_priv->pcigart_offset_set = 1;
++              break;
++      case RADEON_SETPARAM_NEW_MEMMAP:
++              dev_priv->new_memmap = sp->value;
++              break;
++      case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
++              dev_priv->gart_info.table_size = sp->value;
++              if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
++                      dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
++              break;
++      case RADEON_SETPARAM_VBLANK_CRTC:
++              return radeon_vblank_crtc_set(dev, sp->value);
++              break;
++      default:
++              DRM_DEBUG("Invalid parameter %d\n", sp->param);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++/* When a client dies:
++ *    - Check for and clean up flipped page state
++ *    - Free any alloced GART memory.
++ *    - Free any alloced radeon surfaces.
++ *
++ * DRM infrastructure takes care of reclaiming dma buffers.
++ */
++void radeon_driver_preclose(struct drm_device *dev,
++                          struct drm_file *file_priv)
++{
++      if (dev->dev_private) {
++              drm_radeon_private_t *dev_priv = dev->dev_private;
++              dev_priv->page_flipping = 0;
++              radeon_mem_release(file_priv, dev_priv->gart_heap);
++              radeon_mem_release(file_priv, dev_priv->fb_heap);
++              radeon_surfaces_release(file_priv, dev_priv);
++      }
++}
++
++void radeon_driver_lastclose(struct drm_device *dev)
++{
++      if (dev->dev_private) {
++              drm_radeon_private_t *dev_priv = dev->dev_private;
++
++              if (dev_priv->sarea_priv &&
++                  dev_priv->sarea_priv->pfCurrentPage != 0)
++                      radeon_cp_dispatch_flip(dev);
++      }
++
++      radeon_do_release(dev);
++}
++
++int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++      drm_radeon_private_t *dev_priv = dev->dev_private;
++      struct drm_radeon_driver_file_fields *radeon_priv;
++
++      DRM_DEBUG("\n");
++      radeon_priv =
++          (struct drm_radeon_driver_file_fields *)
++          drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
++
++      if (!radeon_priv)
++              return -ENOMEM;
++
++      file_priv->driver_priv = radeon_priv;
++
++      if (dev_priv)
++              radeon_priv->radeon_fb_delta = dev_priv->fb_location;
++      else
++              radeon_priv->radeon_fb_delta = 0;
++      return 0;
++}
++
++void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_radeon_driver_file_fields *radeon_priv =
++          file_priv->driver_priv;
++
++      drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
++}
++
++struct drm_ioctl_desc radeon_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
++};
++
++int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_bci.c git-nokia/drivers/gpu/drm-tungsten/savage_bci.c
+--- git/drivers/gpu/drm-tungsten/savage_bci.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_bci.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1092 @@
++/* savage_bci.c -- BCI support for Savage
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "savage_drm.h"
++#include "savage_drv.h"
++
++/* Need a long timeout for shadow status updates can take a while
++ * and so can waiting for events when the queue is full. */
++#define SAVAGE_DEFAULT_USEC_TIMEOUT   1000000 /* 1s */
++#define SAVAGE_EVENT_USEC_TIMEOUT     5000000 /* 5s */
++#define SAVAGE_FREELIST_DEBUG         0
++
++static int savage_do_cleanup_bci(struct drm_device *dev);
++
++static int
++savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      uint32_t mask = dev_priv->status_used_mask;
++      uint32_t threshold = dev_priv->bci_threshold_hi;
++      uint32_t status;
++      int i;
++
++#if SAVAGE_BCI_DEBUG
++      if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
++              DRM_ERROR("Trying to emit %d words "
++                        "(more than guaranteed space in COB)\n", n);
++#endif
++
++      for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++              DRM_MEMORYBARRIER();
++              status = dev_priv->status_ptr[0];
++              if ((status & mask) < threshold)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
++#endif
++      return -EBUSY;
++}
++
++static int
++savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++              status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
++              if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x\n", status);
++#endif
++      return -EBUSY;
++}
++
++static int
++savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
++              status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
++              if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x\n", status);
++#endif
++      return -EBUSY;
++}
++
++/*
++ * Waiting for events.
++ *
++ * The BIOSresets the event tag to 0 on mode changes. Therefore we
++ * never emit 0 to the event tag. If we find a 0 event tag we know the
++ * BIOS stomped on it and return success assuming that the BIOS waited
++ * for engine idle.
++ *
++ * Note: if the Xserver uses the event tag it has to follow the same
++ * rule. Otherwise there may be glitches every 2^16 events.
++ */
++static int
++savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
++{
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
++              DRM_MEMORYBARRIER();
++              status = dev_priv->status_ptr[1];
++              if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
++                  (status & 0xffff) == 0)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
++#endif
++
++      return -EBUSY;
++}
++
++static int
++savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
++{
++      uint32_t status;
++      int i;
++
++      for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
++              status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
++              if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
++                  (status & 0xffff) == 0)
++                      return 0;
++              DRM_UDELAY(1);
++      }
++
++#if SAVAGE_BCI_DEBUG
++      DRM_ERROR("failed!\n");
++      DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
++#endif
++
++      return -EBUSY;
++}
++
++uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
++                             unsigned int flags)
++{
++      uint16_t count;
++      BCI_LOCALS;
++
++      if (dev_priv->status_ptr) {
++              /* coordinate with Xserver */
++              count = dev_priv->status_ptr[1023];
++              if (count < dev_priv->event_counter)
++                      dev_priv->event_wrap++;
++      } else {
++              count = dev_priv->event_counter;
++      }
++      count = (count + 1) & 0xffff;
++      if (count == 0) {
++              count++; /* See the comment above savage_wait_event_*. */
++              dev_priv->event_wrap++;
++      }
++      dev_priv->event_counter = count;
++      if (dev_priv->status_ptr)
++              dev_priv->status_ptr[1023] = (uint32_t)count;
++
++      if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
++              unsigned int wait_cmd = BCI_CMD_WAIT;
++              if ((flags & SAVAGE_WAIT_2D))
++                      wait_cmd |= BCI_CMD_WAIT_2D;
++              if ((flags & SAVAGE_WAIT_3D))
++                      wait_cmd |= BCI_CMD_WAIT_3D;
++              BEGIN_BCI(2);
++              BCI_WRITE(wait_cmd);
++      } else {
++              BEGIN_BCI(1);
++      }
++      BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
++
++      return count;
++}
++
++/*
++ * Freelist management
++ */
++static int savage_freelist_init(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *buf;
++      drm_savage_buf_priv_t *entry;
++      int i;
++      DRM_DEBUG("count=%d\n", dma->buf_count);
++
++      dev_priv->head.next = &dev_priv->tail;
++      dev_priv->head.prev = NULL;
++      dev_priv->head.buf = NULL;
++
++      dev_priv->tail.next = NULL;
++      dev_priv->tail.prev = &dev_priv->head;
++      dev_priv->tail.buf = NULL;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              buf = dma->buflist[i];
++              entry = buf->dev_private;
++
++              SET_AGE(&entry->age, 0, 0);
++              entry->buf = buf;
++
++              entry->next = dev_priv->head.next;
++              entry->prev = &dev_priv->head;
++              dev_priv->head.next->prev = entry;
++              dev_priv->head.next = entry;
++      }
++
++      return 0;
++}
++
++static struct drm_buf *savage_freelist_get(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
++      uint16_t event;
++      unsigned int wrap;
++      DRM_DEBUG("\n");
++
++      UPDATE_EVENT_COUNTER();
++      if (dev_priv->status_ptr)
++              event = dev_priv->status_ptr[1] & 0xffff;
++      else
++              event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++      wrap = dev_priv->event_wrap;
++      if (event > dev_priv->event_counter)
++              wrap--; /* hardware hasn't passed the last wrap yet */
++
++      DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
++      DRM_DEBUG("   head=0x%04x %d\n", event, wrap);
++
++      if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
++              drm_savage_buf_priv_t *next = tail->next;
++              drm_savage_buf_priv_t *prev = tail->prev;
++              prev->next = next;
++              next->prev = prev;
++              tail->next = tail->prev = NULL;
++              return tail->buf;
++      }
++
++      DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
++      return NULL;
++}
++
++void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
++
++      DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
++
++      if (entry->next != NULL || entry->prev != NULL) {
++              DRM_ERROR("entry already on freelist.\n");
++              return;
++      }
++
++      prev = &dev_priv->head;
++      next = prev->next;
++      prev->next = entry;
++      next->prev = entry;
++      entry->prev = prev;
++      entry->next = next;
++}
++
++/*
++ * Command DMA
++ */
++static int savage_dma_init(drm_savage_private_t *dev_priv)
++{
++      unsigned int i;
++
++      dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
++              (SAVAGE_DMA_PAGE_SIZE*4);
++      dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
++                                      dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
++      if (dev_priv->dma_pages == NULL)
++              return -ENOMEM;
++
++      for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
++              SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
++              dev_priv->dma_pages[i].used = 0;
++              dev_priv->dma_pages[i].flushed = 0;
++      }
++      SET_AGE(&dev_priv->last_dma_age, 0, 0);
++
++      dev_priv->first_dma_page = 0;
++      dev_priv->current_dma_page = 0;
++
++      return 0;
++}
++
++void savage_dma_reset(drm_savage_private_t *dev_priv)
++{
++      uint16_t event;
++      unsigned int wrap, i;
++      event = savage_bci_emit_event(dev_priv, 0);
++      wrap = dev_priv->event_wrap;
++      for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
++              SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
++              dev_priv->dma_pages[i].used = 0;
++              dev_priv->dma_pages[i].flushed = 0;
++      }
++      SET_AGE(&dev_priv->last_dma_age, event, wrap);
++      dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
++}
++
++void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
++{
++      uint16_t event;
++      unsigned int wrap;
++
++      /* Faked DMA buffer pages don't age. */
++      if (dev_priv->cmd_dma == &dev_priv->fake_dma)
++              return;
++
++      UPDATE_EVENT_COUNTER();
++      if (dev_priv->status_ptr)
++              event = dev_priv->status_ptr[1] & 0xffff;
++      else
++              event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++      wrap = dev_priv->event_wrap;
++      if (event > dev_priv->event_counter)
++              wrap--; /* hardware hasn't passed the last wrap yet */
++
++      if (dev_priv->dma_pages[page].age.wrap > wrap ||
++          (dev_priv->dma_pages[page].age.wrap == wrap &&
++           dev_priv->dma_pages[page].age.event > event)) {
++              if (dev_priv->wait_evnt(dev_priv,
++                                      dev_priv->dma_pages[page].age.event)
++                  < 0)
++                      DRM_ERROR("wait_evnt failed!\n");
++      }
++}
++
++uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
++{
++      unsigned int cur = dev_priv->current_dma_page;
++      unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
++              dev_priv->dma_pages[cur].used;
++      unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
++              SAVAGE_DMA_PAGE_SIZE;
++      uint32_t *dma_ptr;
++      unsigned int i;
++
++      DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
++                cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
++
++      if (cur + nr_pages < dev_priv->nr_dma_pages) {
++              dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++                  cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
++              if (n < rest)
++                      rest = n;
++              dev_priv->dma_pages[cur].used += rest;
++              n -= rest;
++              cur++;
++      } else {
++              dev_priv->dma_flush(dev_priv);
++              nr_pages =
++                  (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
++              for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
++                      dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
++                      dev_priv->dma_pages[i].used = 0;
++                      dev_priv->dma_pages[i].flushed = 0;
++              }
++              dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
++              dev_priv->first_dma_page = cur = 0;
++      }
++      for (i = cur; nr_pages > 0; ++i, --nr_pages) {
++#if SAVAGE_DMA_DEBUG
++              if (dev_priv->dma_pages[i].used) {
++                      DRM_ERROR("unflushed page %u: used=%u\n",
++                                i, dev_priv->dma_pages[i].used);
++              }
++#endif
++              if (n > SAVAGE_DMA_PAGE_SIZE)
++                      dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
++              else
++                      dev_priv->dma_pages[i].used = n;
++              n -= SAVAGE_DMA_PAGE_SIZE;
++      }
++      dev_priv->current_dma_page = --i;
++
++      DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
++                i, dev_priv->dma_pages[i].used, n);
++
++      savage_dma_wait(dev_priv, dev_priv->current_dma_page);
++
++      return dma_ptr;
++}
++
++static void savage_dma_flush(drm_savage_private_t *dev_priv)
++{
++      unsigned int first = dev_priv->first_dma_page;
++      unsigned int cur = dev_priv->current_dma_page;
++      uint16_t event;
++      unsigned int wrap, pad, align, len, i;
++      unsigned long phys_addr;
++      BCI_LOCALS;
++
++      if (first == cur &&
++          dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
++              return;
++
++      /* pad length to multiples of 2 entries
++       * align start of next DMA block to multiles of 8 entries */
++      pad = -dev_priv->dma_pages[cur].used & 1;
++      align = -(dev_priv->dma_pages[cur].used + pad) & 7;
++
++      DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
++                "pad=%u, align=%u\n",
++                first, cur, dev_priv->dma_pages[first].flushed,
++                dev_priv->dma_pages[cur].used, pad, align);
++
++      /* pad with noops */
++      if (pad) {
++              uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++                  cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
++              dev_priv->dma_pages[cur].used += pad;
++              while (pad != 0) {
++                      *dma_ptr++ = BCI_CMD_WAIT;
++                      pad--;
++              }
++      }
++
++      DRM_MEMORYBARRIER();
++
++      /* do flush ... */
++      phys_addr = dev_priv->cmd_dma->offset +
++              (first * SAVAGE_DMA_PAGE_SIZE +
++               dev_priv->dma_pages[first].flushed) * 4;
++      len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
++          dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
++
++      DRM_DEBUG("phys_addr=%lx, len=%u\n",
++                phys_addr | dev_priv->dma_type, len);
++
++      BEGIN_BCI(3);
++      BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
++      BCI_WRITE(phys_addr | dev_priv->dma_type);
++      BCI_DMA(len);
++
++      /* fix alignment of the start of the next block */
++      dev_priv->dma_pages[cur].used += align;
++
++      /* age DMA pages */
++      event = savage_bci_emit_event(dev_priv, 0);
++      wrap = dev_priv->event_wrap;
++      for (i = first; i < cur; ++i) {
++              SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
++              dev_priv->dma_pages[i].used = 0;
++              dev_priv->dma_pages[i].flushed = 0;
++      }
++      /* age the current page only when it's full */
++      if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
++              SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
++              dev_priv->dma_pages[cur].used = 0;
++              dev_priv->dma_pages[cur].flushed = 0;
++              /* advance to next page */
++              cur++;
++              if (cur == dev_priv->nr_dma_pages)
++                      cur = 0;
++              dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
++      } else {
++              dev_priv->first_dma_page = cur;
++              dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
++      }
++      SET_AGE(&dev_priv->last_dma_age, event, wrap);
++
++      DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
++                dev_priv->dma_pages[cur].used,
++                dev_priv->dma_pages[cur].flushed);
++}
++
++static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
++{
++      unsigned int i, j;
++      BCI_LOCALS;
++
++      if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
++          dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
++              return;
++
++      DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
++                dev_priv->first_dma_page, dev_priv->current_dma_page,
++                dev_priv->dma_pages[dev_priv->current_dma_page].used);
++
++      for (i = dev_priv->first_dma_page;
++           i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
++           ++i) {
++              uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
++                      i * SAVAGE_DMA_PAGE_SIZE;
++#if SAVAGE_DMA_DEBUG
++              /* Sanity check: all pages except the last one must be full. */
++              if (i < dev_priv->current_dma_page &&
++                  dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
++                      DRM_ERROR("partial DMA page %u: used=%u",
++                                i, dev_priv->dma_pages[i].used);
++              }
++#endif
++              BEGIN_BCI(dev_priv->dma_pages[i].used);
++              for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
++                      BCI_WRITE(dma_ptr[j]);
++              }
++              dev_priv->dma_pages[i].used = 0;
++      }
++
++      /* reset to first page */
++      dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
++}
++
++int savage_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++      drm_savage_private_t *dev_priv;
++
++      dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      memset(dev_priv, 0, sizeof(drm_savage_private_t));
++      dev->dev_private = (void *)dev_priv;
++
++      dev_priv->chipset = (enum savage_family)chipset;
++
++      return 0;
++}
++
++/*
++ * Initalize mappings. On Savage4 and SavageIX the alignment
++ * and size of the aperture is not suitable for automatic MTRR setup
++ * in drm_addmap. Therefore we add them manually before the maps are
++ * initialized, and tear them down on last close.
++ */
++int savage_driver_firstopen(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      unsigned long mmio_base, fb_base, fb_size, aperture_base;
++      /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
++       * in case we decide we need information on the BAR for BSD in the
++       * future.
++       */
++      unsigned int fb_rsrc, aper_rsrc;
++      int ret = 0;
++
++      dev_priv->mtrr[0].handle = -1;
++      dev_priv->mtrr[1].handle = -1;
++      dev_priv->mtrr[2].handle = -1;
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              fb_rsrc = 0;
++              fb_base = drm_get_resource_start(dev, 0);
++              fb_size = SAVAGE_FB_SIZE_S3;
++              mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
++              aper_rsrc = 0;
++              aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
++              /* this should always be true */
++              if (drm_get_resource_len(dev, 0) == 0x08000000) {
++                      /* Don't make MMIO write-cobining! We need 3
++                       * MTRRs. */
++                      dev_priv->mtrr[0].base = fb_base;
++                      dev_priv->mtrr[0].size = 0x01000000;
++                      dev_priv->mtrr[0].handle =
++                          drm_mtrr_add(dev_priv->mtrr[0].base,
++                                       dev_priv->mtrr[0].size, DRM_MTRR_WC);
++                      dev_priv->mtrr[1].base = fb_base + 0x02000000;
++                      dev_priv->mtrr[1].size = 0x02000000;
++                      dev_priv->mtrr[1].handle =
++                          drm_mtrr_add(dev_priv->mtrr[1].base,
++                                       dev_priv->mtrr[1].size, DRM_MTRR_WC);
++                      dev_priv->mtrr[2].base = fb_base + 0x04000000;
++                      dev_priv->mtrr[2].size = 0x04000000;
++                      dev_priv->mtrr[2].handle =
++                          drm_mtrr_add(dev_priv->mtrr[2].base,
++                                       dev_priv->mtrr[2].size, DRM_MTRR_WC);
++              } else {
++                      DRM_ERROR("strange pci_resource_len %08lx\n",
++                                drm_get_resource_len(dev, 0));
++              }
++      } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
++                 dev_priv->chipset != S3_SAVAGE2000) {
++              mmio_base = drm_get_resource_start(dev, 0);
++              fb_rsrc = 1;
++              fb_base = drm_get_resource_start(dev, 1);
++              fb_size = SAVAGE_FB_SIZE_S4;
++              aper_rsrc = 1;
++              aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
++              /* this should always be true */
++              if (drm_get_resource_len(dev, 1) == 0x08000000) {
++                      /* Can use one MTRR to cover both fb and
++                       * aperture. */
++                      dev_priv->mtrr[0].base = fb_base;
++                      dev_priv->mtrr[0].size = 0x08000000;
++                      dev_priv->mtrr[0].handle =
++                          drm_mtrr_add(dev_priv->mtrr[0].base,
++                                       dev_priv->mtrr[0].size, DRM_MTRR_WC);
++              } else {
++                      DRM_ERROR("strange pci_resource_len %08lx\n",
++                                drm_get_resource_len(dev, 1));
++              }
++      } else {
++              mmio_base = drm_get_resource_start(dev, 0);
++              fb_rsrc = 1;
++              fb_base = drm_get_resource_start(dev, 1);
++              fb_size = drm_get_resource_len(dev, 1);
++              aper_rsrc = 2;
++              aperture_base = drm_get_resource_start(dev, 2);
++              /* Automatic MTRR setup will do the right thing. */
++      }
++
++      ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
++                       _DRM_READ_ONLY, &dev_priv->mmio);
++      if (ret)
++              return ret;
++
++      ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
++                       _DRM_WRITE_COMBINING, &dev_priv->fb);
++      if (ret)
++              return ret;
++
++      ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
++                       _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
++                       &dev_priv->aperture);
++      if (ret)
++              return ret;
++
++      return ret;
++}
++
++/*
++ * Delete MTRRs and free device-private data.
++ */
++void savage_driver_lastclose(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      for (i = 0; i < 3; ++i)
++              if (dev_priv->mtrr[i].handle >= 0)
++                      drm_mtrr_del(dev_priv->mtrr[i].handle,
++                                   dev_priv->mtrr[i].base,
++                                   dev_priv->mtrr[i].size, DRM_MTRR_WC);
++}
++
++int savage_driver_unload(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++
++      drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
++
++      return 0;
++}
++
++static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++
++      if (init->fb_bpp != 16 && init->fb_bpp != 32) {
++              DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
++              return -EINVAL;
++      }
++      if (init->depth_bpp != 16 && init->depth_bpp != 32) {
++              DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
++              return -EINVAL;
++      }
++      if (init->dma_type != SAVAGE_DMA_AGP &&
++          init->dma_type != SAVAGE_DMA_PCI) {
++              DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
++              return -EINVAL;
++      }
++
++      dev_priv->cob_size = init->cob_size;
++      dev_priv->bci_threshold_lo = init->bci_threshold_lo;
++      dev_priv->bci_threshold_hi = init->bci_threshold_hi;
++      dev_priv->dma_type = init->dma_type;
++
++      dev_priv->fb_bpp = init->fb_bpp;
++      dev_priv->front_offset = init->front_offset;
++      dev_priv->front_pitch = init->front_pitch;
++      dev_priv->back_offset = init->back_offset;
++      dev_priv->back_pitch = init->back_pitch;
++      dev_priv->depth_bpp = init->depth_bpp;
++      dev_priv->depth_offset = init->depth_offset;
++      dev_priv->depth_pitch = init->depth_pitch;
++
++      dev_priv->texture_offset = init->texture_offset;
++      dev_priv->texture_size = init->texture_size;
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              savage_do_cleanup_bci(dev);
++              return -EINVAL;
++      }
++      if (init->status_offset != 0) {
++              dev_priv->status = drm_core_findmap(dev, init->status_offset);
++              if (!dev_priv->status) {
++                      DRM_ERROR("could not find shadow status region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++      } else {
++              dev_priv->status = NULL;
++      }
++      if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
++              dev->agp_buffer_token = init->buffers_offset;
++              dev->agp_buffer_map = drm_core_findmap(dev,
++                                                     init->buffers_offset);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("could not find DMA buffer region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              drm_core_ioremap(dev->agp_buffer_map, dev);
++              if (!dev->agp_buffer_map) {
++                      DRM_ERROR("failed to ioremap DMA buffer region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -ENOMEM;
++              }
++      }
++      if (init->agp_textures_offset) {
++              dev_priv->agp_textures =
++                      drm_core_findmap(dev, init->agp_textures_offset);
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("could not find agp texture region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++      } else {
++              dev_priv->agp_textures = NULL;
++      }
++
++      if (init->cmd_dma_offset) {
++              if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      DRM_ERROR("command DMA not supported on "
++                                "Savage3D/MX/IX.\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              if (dev->dma && dev->dma->buflist) {
++                      DRM_ERROR("command and vertex DMA not supported "
++                                "at the same time.\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
++              if (!dev_priv->cmd_dma) {
++                      DRM_ERROR("could not find command DMA region!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++              if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
++                      if (dev_priv->cmd_dma->type != _DRM_AGP) {
++                              DRM_ERROR("AGP command DMA region is not a "
++                                        "_DRM_AGP map!\n");
++                              savage_do_cleanup_bci(dev);
++                              return -EINVAL;
++                      }
++                      drm_core_ioremap(dev_priv->cmd_dma, dev);
++                      if (!dev_priv->cmd_dma->handle) {
++                              DRM_ERROR("failed to ioremap command "
++                                        "DMA region!\n");
++                              savage_do_cleanup_bci(dev);
++                              return -ENOMEM;
++                      }
++              } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
++                      DRM_ERROR("PCI command DMA region is not a "
++                                "_DRM_CONSISTENT map!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -EINVAL;
++              }
++      } else {
++              dev_priv->cmd_dma = NULL;
++      }
++
++      dev_priv->dma_flush = savage_dma_flush;
++      if (!dev_priv->cmd_dma) {
++              DRM_DEBUG("falling back to faked command DMA.\n");
++              dev_priv->fake_dma.offset = 0;
++              dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
++              dev_priv->fake_dma.type = _DRM_SHM;
++              dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
++                                                    DRM_MEM_DRIVER);
++              if (!dev_priv->fake_dma.handle) {
++                      DRM_ERROR("could not allocate faked DMA buffer!\n");
++                      savage_do_cleanup_bci(dev);
++                      return -ENOMEM;
++              }
++              dev_priv->cmd_dma = &dev_priv->fake_dma;
++              dev_priv->dma_flush = savage_fake_dma_flush;
++      }
++
++      dev_priv->sarea_priv =
++              (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
++                                     init->sarea_priv_offset);
++
++      /* setup bitmap descriptors */
++      {
++              unsigned int color_tile_format;
++              unsigned int depth_tile_format;
++              unsigned int front_stride, back_stride, depth_stride;
++              if (dev_priv->chipset <= S3_SAVAGE4) {
++                      color_tile_format = dev_priv->fb_bpp == 16 ?
++                              SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
++                      depth_tile_format = dev_priv->depth_bpp == 16 ?
++                              SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
++              } else {
++                      color_tile_format = SAVAGE_BD_TILE_DEST;
++                      depth_tile_format = SAVAGE_BD_TILE_DEST;
++              }
++              front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
++              back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
++              depth_stride =
++                  dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
++
++              dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
++                      (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
++                      (color_tile_format << SAVAGE_BD_TILE_SHIFT);
++
++              dev_priv-> back_bd =  back_stride | SAVAGE_BD_BW_DISABLE |
++                      (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
++                      (color_tile_format << SAVAGE_BD_TILE_SHIFT);
++
++              dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
++                      (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
++                      (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
++      }
++
++      /* setup status and bci ptr */
++      dev_priv->event_counter = 0;
++      dev_priv->event_wrap = 0;
++      dev_priv->bci_ptr = (volatile uint32_t *)
++          ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
++      } else {
++              dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
++      }
++      if (dev_priv->status != NULL) {
++              dev_priv->status_ptr =
++                      (volatile uint32_t *)dev_priv->status->handle;
++              dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
++              dev_priv->wait_evnt = savage_bci_wait_event_shadow;
++              dev_priv->status_ptr[1023] = dev_priv->event_counter;
++      } else {
++              dev_priv->status_ptr = NULL;
++              if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
++              } else {
++                      dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
++              }
++              dev_priv->wait_evnt = savage_bci_wait_event_reg;
++      }
++
++      /* cliprect functions */
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
++              dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
++      else
++              dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
++
++      if (savage_freelist_init(dev) < 0) {
++              DRM_ERROR("could not initialize freelist\n");
++              savage_do_cleanup_bci(dev);
++              return -ENOMEM;
++      }
++
++      if (savage_dma_init(dev_priv) < 0) {
++              DRM_ERROR("could not initialize command DMA\n");
++              savage_do_cleanup_bci(dev);
++              return -ENOMEM;
++      }
++
++      return 0;
++}
++
++static int savage_do_cleanup_bci(struct drm_device *dev)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++
++      if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
++              if (dev_priv->fake_dma.handle)
++                      drm_free(dev_priv->fake_dma.handle,
++                               SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
++      } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
++                 dev_priv->cmd_dma->type == _DRM_AGP &&
++                 dev_priv->dma_type == SAVAGE_DMA_AGP)
++              drm_core_ioremapfree(dev_priv->cmd_dma, dev);
++
++      if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
++          dev->agp_buffer_map && dev->agp_buffer_map->handle) {
++              drm_core_ioremapfree(dev->agp_buffer_map, dev);
++              /* make sure the next instance (which may be running
++               * in PCI mode) doesn't try to use an old
++               * agp_buffer_map. */
++              dev->agp_buffer_map = NULL;
++      }
++
++      if (dev_priv->dma_pages)
++              drm_free(dev_priv->dma_pages,
++                       sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
++                       DRM_MEM_DRIVER);
++
++      return 0;
++}
++
++static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_init_t *init = data;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      switch (init->func) {
++      case SAVAGE_INIT_BCI:
++              return savage_do_init_bci(dev, init);
++      case SAVAGE_CLEANUP_BCI:
++              return savage_do_cleanup_bci(dev);
++      }
++
++      return -EINVAL;
++}
++
++static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_event_emit_t *event = data;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      event->count = savage_bci_emit_event(dev_priv, event->flags);
++      event->count |= dev_priv->event_wrap << 16;
++
++      return 0;
++}
++
++static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      drm_savage_event_wait_t *event = data;
++      unsigned int event_e, hw_e;
++      unsigned int event_w, hw_w;
++
++      DRM_DEBUG("\n");
++
++      UPDATE_EVENT_COUNTER();
++      if (dev_priv->status_ptr)
++              hw_e = dev_priv->status_ptr[1] & 0xffff;
++      else
++              hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
++      hw_w = dev_priv->event_wrap;
++      if (hw_e > dev_priv->event_counter)
++              hw_w--; /* hardware hasn't passed the last wrap yet */
++
++      event_e = event->count & 0xffff;
++      event_w = event->count >> 16;
++
++      /* Don't need to wait if
++       * - event counter wrapped since the event was emitted or
++       * - the hardware has advanced up to or over the event to wait for.
++       */
++      if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
++              return 0;
++      else
++              return dev_priv->wait_evnt(dev_priv, event_e);
++}
++
++/*
++ * DMA buffer management
++ */
++
++static int savage_bci_get_buffers(struct drm_device *dev,
++                                struct drm_file *file_priv,
++                                struct drm_dma *d)
++{
++      struct drm_buf *buf;
++      int i;
++
++      for (i = d->granted_count; i < d->request_count; i++) {
++              buf = savage_freelist_get(dev);
++              if (!buf)
++                      return -EAGAIN;
++
++              buf->file_priv = file_priv;
++
++              if (DRM_COPY_TO_USER(&d->request_indices[i],
++                                   &buf->idx, sizeof(buf->idx)))
++                      return -EFAULT;
++              if (DRM_COPY_TO_USER(&d->request_sizes[i],
++                                   &buf->total, sizeof(buf->total)))
++                      return -EFAULT;
++
++              d->granted_count++;
++      }
++      return 0;
++}
++
++int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_dma *d = data;
++      int ret = 0;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      /* Please don't send us buffers.
++       */
++      if (d->send_count != 0) {
++              DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
++                        DRM_CURRENTPID, d->send_count);
++              return -EINVAL;
++      }
++
++      /* We'll send you buffers.
++       */
++      if (d->request_count < 0 || d->request_count > dma->buf_count) {
++              DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
++                        DRM_CURRENTPID, d->request_count, dma->buf_count);
++              return -EINVAL;
++      }
++
++      d->granted_count = 0;
++
++      if (d->request_count) {
++              ret = savage_bci_get_buffers(dev, file_priv, d);
++      }
++
++      return ret;
++}
++
++void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
++{
++      struct drm_device_dma *dma = dev->dma;
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      int i;
++
++      if (!dma)
++              return;
++      if (!dev_priv)
++              return;
++      if (!dma->buflist)
++              return;
++
++      for (i = 0; i < dma->buf_count; i++) {
++              struct drm_buf *buf = dma->buflist[i];
++              drm_savage_buf_priv_t *buf_priv = buf->dev_private;
++
++              if (buf->file_priv == file_priv && buf_priv &&
++                  buf_priv->next == NULL && buf_priv->prev == NULL) {
++                      uint16_t event;
++                      DRM_DEBUG("reclaimed from client\n");
++                      event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
++                      SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
++                      savage_freelist_put(dev, buf);
++              }
++      }
++
++      drm_core_reclaim_buffers(dev, file_priv);
++}
++
++struct drm_ioctl_desc savage_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
++};
++
++int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drm.h git-nokia/drivers/gpu/drm-tungsten/savage_drm.h
+--- git/drivers/gpu/drm-tungsten/savage_drm.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drm.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,209 @@
++/* savage_drm.h -- Public header for the savage driver
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __SAVAGE_DRM_H__
++#define __SAVAGE_DRM_H__
++
++#ifndef __SAVAGE_SAREA_DEFINES__
++#define __SAVAGE_SAREA_DEFINES__
++
++/* 2 heaps (1 for card, 1 for agp), each divided into upto 128
++ * regions, subject to a minimum region size of (1<<16) == 64k.
++ *
++ * Clients may subdivide regions internally, but when sharing between
++ * clients, the region size is the minimum granularity.
++ */
++
++#define SAVAGE_CARD_HEAP              0
++#define SAVAGE_AGP_HEAP                       1
++#define SAVAGE_NR_TEX_HEAPS           2
++#define SAVAGE_NR_TEX_REGIONS         16
++#define SAVAGE_LOG_MIN_TEX_REGION_SIZE        16
++
++#endif /* __SAVAGE_SAREA_DEFINES__ */
++
++typedef struct _drm_savage_sarea {
++      /* LRU lists for texture memory in agp space and on the card.
++       */
++      struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
++      unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
++
++      /* Mechanism to validate card state.
++       */
++      int ctxOwner;
++} drm_savage_sarea_t, *drm_savage_sarea_ptr;
++
++/* Savage-specific ioctls
++ */
++#define DRM_SAVAGE_BCI_INIT           0x00
++#define DRM_SAVAGE_BCI_CMDBUF           0x01
++#define DRM_SAVAGE_BCI_EVENT_EMIT     0x02
++#define DRM_SAVAGE_BCI_EVENT_WAIT     0x03
++
++#define DRM_IOCTL_SAVAGE_INIT         DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
++#define DRM_IOCTL_SAVAGE_CMDBUF               DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
++#define DRM_IOCTL_SAVAGE_EVENT_EMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
++#define DRM_IOCTL_SAVAGE_EVENT_WAIT   DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
++
++#define SAVAGE_DMA_PCI        1
++#define SAVAGE_DMA_AGP        3
++typedef struct drm_savage_init {
++      enum {
++              SAVAGE_INIT_BCI = 1,
++              SAVAGE_CLEANUP_BCI = 2
++      } func;
++      unsigned int sarea_priv_offset;
++
++      /* some parameters */
++      unsigned int cob_size;
++      unsigned int bci_threshold_lo, bci_threshold_hi;
++      unsigned int dma_type;
++
++      /* frame buffer layout */
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      /* local textures */
++      unsigned int texture_offset;
++      unsigned int texture_size;
++
++      /* physical locations of non-permanent maps */
++      unsigned long status_offset;
++      unsigned long buffers_offset;
++      unsigned long agp_textures_offset;
++      unsigned long cmd_dma_offset;
++} drm_savage_init_t;
++
++typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
++typedef struct drm_savage_cmdbuf {
++                              /* command buffer in client's address space */
++      drm_savage_cmd_header_t __user *cmd_addr;
++      unsigned int size;      /* size of the command buffer in 64bit units */
++
++      unsigned int dma_idx;   /* DMA buffer index to use */
++      int discard;            /* discard DMA buffer when done */
++                              /* vertex buffer in client's address space */
++      unsigned int __user *vb_addr;
++      unsigned int vb_size;   /* size of client vertex buffer in bytes */
++      unsigned int vb_stride; /* stride of vertices in 32bit words */
++                              /* boxes in client's address space */
++      struct drm_clip_rect __user *box_addr;
++      unsigned int nbox;      /* number of clipping boxes */
++} drm_savage_cmdbuf_t;
++
++#define SAVAGE_WAIT_2D  0x1 /* wait for 2D idle before updating event tag */
++#define SAVAGE_WAIT_3D  0x2 /* wait for 3D idle before updating event tag */
++#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
++typedef struct drm_savage_event {
++      unsigned int count;
++      unsigned int flags;
++} drm_savage_event_emit_t, drm_savage_event_wait_t;
++
++/* Commands for the cmdbuf ioctl
++ */
++#define SAVAGE_CMD_STATE      0  /* a range of state registers */
++#define SAVAGE_CMD_DMA_PRIM   1  /* vertices from DMA buffer */
++#define SAVAGE_CMD_VB_PRIM    2  /* vertices from client vertex buffer */
++#define SAVAGE_CMD_DMA_IDX    3  /* indexed vertices from DMA buffer */
++#define SAVAGE_CMD_VB_IDX     4  /* indexed vertices client vertex buffer */
++#define SAVAGE_CMD_CLEAR      5  /* clear buffers */
++#define SAVAGE_CMD_SWAP               6  /* swap buffers */
++
++/* Primitive types
++*/
++#define SAVAGE_PRIM_TRILIST   0  /* triangle list */
++#define SAVAGE_PRIM_TRISTRIP  1  /* triangle strip */
++#define SAVAGE_PRIM_TRIFAN    2  /* triangle fan */
++#define SAVAGE_PRIM_TRILIST_201       3  /* reorder verts for correct flat
++                                  * shading on s3d */
++
++/* Skip flags (vertex format)
++ */
++#define SAVAGE_SKIP_Z         0x01
++#define SAVAGE_SKIP_W         0x02
++#define SAVAGE_SKIP_C0                0x04
++#define SAVAGE_SKIP_C1                0x08
++#define SAVAGE_SKIP_S0                0x10
++#define SAVAGE_SKIP_T0                0x20
++#define SAVAGE_SKIP_ST0               0x30
++#define SAVAGE_SKIP_S1                0x40
++#define SAVAGE_SKIP_T1                0x80
++#define SAVAGE_SKIP_ST1               0xc0
++#define SAVAGE_SKIP_ALL_S3D   0x3f
++#define SAVAGE_SKIP_ALL_S4    0xff
++
++/* Buffer names for clear command
++ */
++#define SAVAGE_FRONT          0x1
++#define SAVAGE_BACK           0x2
++#define SAVAGE_DEPTH          0x4
++
++/* 64-bit command header
++ */
++union drm_savage_cmd_header {
++      struct {
++              unsigned char cmd;      /* command */
++              unsigned char pad0;
++              unsigned short pad1;
++              unsigned short pad2;
++              unsigned short pad3;
++      } cmd; /* generic */
++      struct {
++              unsigned char cmd;
++              unsigned char global;   /* need idle engine? */
++              unsigned short count;   /* number of consecutive registers */
++              unsigned short start;   /* first register */
++              unsigned short pad3;
++      } state; /* SAVAGE_CMD_STATE */
++      struct {
++              unsigned char cmd;
++              unsigned char prim;     /* primitive type */
++              unsigned short skip;    /* vertex format (skip flags) */
++              unsigned short count;   /* number of vertices */
++              unsigned short start;   /* first vertex in DMA/vertex buffer */
++      } prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
++      struct {
++              unsigned char cmd;
++              unsigned char prim;
++              unsigned short skip;
++              unsigned short count;   /* number of indices that follow */
++              unsigned short pad3;
++      } idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
++      struct {
++              unsigned char cmd;
++              unsigned char pad0;
++              unsigned short pad1;
++              unsigned int flags;
++      } clear0; /* SAVAGE_CMD_CLEAR */
++      struct {
++              unsigned int mask;
++              unsigned int value;
++      } clear1; /* SAVAGE_CMD_CLEAR data */
++};
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drv.c git-nokia/drivers/gpu/drm-tungsten/savage_drv.c
+--- git/drivers/gpu/drm-tungsten/savage_drv.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drv.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,96 @@
++/* savage_drv.c -- Savage driver for Linux
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "savage_drm.h"
++#include "savage_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      savage_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR |
++          DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
++      .dev_priv_size = sizeof(drm_savage_buf_priv_t),
++      .load = savage_driver_load,
++      .firstopen = savage_driver_firstopen,
++      .lastclose = savage_driver_lastclose,
++      .unload = savage_driver_unload,
++      .reclaim_buffers = savage_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = savage_ioctls,
++      .dma_ioctl = savage_bci_buffers,
++      .fops = {
++              .owner   = THIS_MODULE,
++              .open    = drm_open,
++              .release = drm_release,
++              .ioctl   = drm_ioctl,
++              .mmap    = drm_mmap,
++              .poll = drm_poll,
++              .fasync  = drm_fasync,
++      },
++      .pci_driver = {
++              .name          = DRIVER_NAME,
++              .id_table      = pciidlist,
++              .probe         = probe,
++              .remove        = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init savage_init(void)
++{
++      driver.num_ioctls = savage_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit savage_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(savage_init);
++module_exit(savage_exit);
++
++MODULE_AUTHOR( DRIVER_AUTHOR );
++MODULE_DESCRIPTION( DRIVER_DESC );
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_drv.h git-nokia/drivers/gpu/drm-tungsten/savage_drv.h
+--- git/drivers/gpu/drm-tungsten/savage_drv.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_drv.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,575 @@
++/* savage_drv.h -- Private header for the savage driver */
++/*
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __SAVAGE_DRV_H__
++#define __SAVAGE_DRV_H__
++
++#define DRIVER_AUTHOR "Felix Kuehling"
++
++#define DRIVER_NAME   "savage"
++#define DRIVER_DESC   "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
++#define DRIVER_DATE   "20050313"
++
++#define DRIVER_MAJOR          2
++#define DRIVER_MINOR          4
++#define DRIVER_PATCHLEVEL     1
++/* Interface history:
++ *
++ * 1.x   The DRM driver from the VIA/S3 code drop, basically a dummy
++ * 2.0   The first real DRM
++ * 2.1   Scissors registers managed by the DRM, 3D operations clipped by
++ *       cliprects of the cmdbuf ioctl
++ * 2.2   Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
++ * 2.3   Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
++ *       wide and thus very long lived (unlikely to ever wrap). The size
++ *       in the struct was 32 bits before, but only 16 bits were used
++ * 2.4   Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
++ *       actually used
++ */
++
++typedef struct drm_savage_age {
++      uint16_t event;
++      unsigned int wrap;
++} drm_savage_age_t;
++
++typedef struct drm_savage_buf_priv {
++      struct drm_savage_buf_priv *next;
++      struct drm_savage_buf_priv *prev;
++      drm_savage_age_t age;
++      struct drm_buf *buf;
++} drm_savage_buf_priv_t;
++
++typedef struct drm_savage_dma_page {
++      drm_savage_age_t age;
++      unsigned int used, flushed;
++} drm_savage_dma_page_t;
++#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
++/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
++ * size of 16kbytes or 4k entries. Minimum requirement would be
++ * 10kbytes for 255 40-byte vertices in one drawing command. */
++#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
++
++/* interesting bits of hardware state that are saved in dev_priv */
++typedef union {
++      struct drm_savage_common_state {
++              uint32_t vbaddr;
++      } common;
++      struct {
++              unsigned char pad[sizeof(struct drm_savage_common_state)];
++              uint32_t texctrl, texaddr;
++              uint32_t scstart, new_scstart;
++              uint32_t scend, new_scend;
++      } s3d;
++      struct {
++              unsigned char pad[sizeof(struct drm_savage_common_state)];
++              uint32_t texdescr, texaddr0, texaddr1;
++              uint32_t drawctrl0, new_drawctrl0;
++              uint32_t drawctrl1, new_drawctrl1;
++      } s4;
++} drm_savage_state_t;
++
++/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
++enum savage_family {
++      S3_UNKNOWN = 0,
++      S3_SAVAGE3D,
++      S3_SAVAGE_MX,
++      S3_SAVAGE4,
++      S3_PROSAVAGE,
++      S3_TWISTER,
++      S3_PROSAVAGEDDR,
++      S3_SUPERSAVAGE,
++      S3_SAVAGE2000,
++      S3_LAST
++};
++
++extern struct drm_ioctl_desc savage_ioctls[];
++extern int savage_max_ioctl;
++
++#define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
++
++#define S3_SAVAGE4_SERIES(chip)  ((chip==S3_SAVAGE4)            \
++                                  || (chip==S3_PROSAVAGE)       \
++                                  || (chip==S3_TWISTER)         \
++                                  || (chip==S3_PROSAVAGEDDR))
++
++#define       S3_SAVAGE_MOBILE_SERIES(chip)   ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
++
++#define S3_SAVAGE_SERIES(chip)    ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
++
++#define S3_MOBILE_TWISTER_SERIES(chip)   ((chip==S3_TWISTER)    \
++                                          ||(chip==S3_PROSAVAGEDDR))
++
++/* flags */
++#define SAVAGE_IS_AGP 1
++
++typedef struct drm_savage_private {
++      drm_savage_sarea_t *sarea_priv;
++
++      drm_savage_buf_priv_t head, tail;
++
++      /* who am I? */
++      enum savage_family chipset;
++
++      unsigned int cob_size;
++      unsigned int bci_threshold_lo, bci_threshold_hi;
++      unsigned int dma_type;
++
++      /* frame buffer layout */
++      unsigned int fb_bpp;
++      unsigned int front_offset, front_pitch;
++      unsigned int back_offset, back_pitch;
++      unsigned int depth_bpp;
++      unsigned int depth_offset, depth_pitch;
++
++      /* bitmap descriptors for swap and clear */
++      unsigned int front_bd, back_bd, depth_bd;
++
++      /* local textures */
++      unsigned int texture_offset;
++      unsigned int texture_size;
++
++      /* memory regions in physical memory */
++      drm_local_map_t *sarea;
++      drm_local_map_t *mmio;
++      drm_local_map_t *fb;
++      drm_local_map_t *aperture;
++      drm_local_map_t *status;
++      drm_local_map_t *agp_textures;
++      drm_local_map_t *cmd_dma;
++      drm_local_map_t fake_dma;
++
++      struct {
++              int handle;
++              unsigned long base, size;
++      } mtrr[3];
++
++      /* BCI and status-related stuff */
++      volatile uint32_t *status_ptr, *bci_ptr;
++      uint32_t status_used_mask;
++      uint16_t event_counter;
++      unsigned int event_wrap;
++
++      /* Savage4 command DMA */
++      drm_savage_dma_page_t *dma_pages;
++      unsigned int nr_dma_pages, first_dma_page, current_dma_page;
++      drm_savage_age_t last_dma_age;
++
++      /* saved hw state for global/local check on S3D */
++      uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
++      /* and for scissors (global, so don't emit if not changed) */
++      uint32_t hw_scissors_start, hw_scissors_end;
++
++      drm_savage_state_t state;
++
++      /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
++      unsigned int waiting;
++
++      /* config/hardware-dependent function pointers */
++      int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
++      int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
++      /* Err, there is a macro wait_event in include/linux/wait.h.
++       * Avoid unwanted macro expansion. */
++      void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
++                             const struct drm_clip_rect *pbox);
++      void (*dma_flush)(struct drm_savage_private *dev_priv);
++} drm_savage_private_t;
++
++/* ioctls */
++extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
++
++/* BCI functions */
++extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
++                                    unsigned int flags);
++extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf);
++extern void savage_dma_reset(drm_savage_private_t *dev_priv);
++extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
++extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
++                                unsigned int n);
++extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
++extern int savage_driver_firstopen(struct drm_device *dev);
++extern void savage_driver_lastclose(struct drm_device *dev);
++extern int savage_driver_unload(struct drm_device *dev);
++extern void savage_reclaim_buffers(struct drm_device *dev,
++                                 struct drm_file *file_priv);
++
++/* state functions */
++extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
++                                    const struct drm_clip_rect *pbox);
++extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
++                                   const struct drm_clip_rect *pbox);
++
++#define SAVAGE_FB_SIZE_S3     0x01000000      /*  16MB */
++#define SAVAGE_FB_SIZE_S4     0x02000000      /*  32MB */
++#define SAVAGE_MMIO_SIZE        0x00080000    /* 512kB */
++#define SAVAGE_APERTURE_OFFSET  0x02000000    /*  32MB */
++#define SAVAGE_APERTURE_SIZE    0x05000000    /* 5 tiled surfaces, 16MB each */
++
++#define SAVAGE_BCI_OFFSET       0x00010000      /* offset of the BCI region
++                                               * inside the MMIO region */
++#define SAVAGE_BCI_FIFO_SIZE  32              /* number of entries in on-chip
++                                               * BCI FIFO */
++
++/*
++ * MMIO registers
++ */
++#define SAVAGE_STATUS_WORD0           0x48C00
++#define SAVAGE_STATUS_WORD1           0x48C04
++#define SAVAGE_ALT_STATUS_WORD0               0x48C60
++
++#define SAVAGE_FIFO_USED_MASK_S3D     0x0001ffff
++#define SAVAGE_FIFO_USED_MASK_S4      0x001fffff
++
++/* Copied from savage_bci.h in the 2D driver with some renaming. */
++
++/* Bitmap descriptors */
++#define SAVAGE_BD_STRIDE_SHIFT 0
++#define SAVAGE_BD_BPP_SHIFT   16
++#define SAVAGE_BD_TILE_SHIFT  24
++#define SAVAGE_BD_BW_DISABLE  (1<<28)
++/* common: */
++#define       SAVAGE_BD_TILE_LINEAR           0
++/* savage4, MX, IX, 3D */
++#define       SAVAGE_BD_TILE_16BPP            2
++#define       SAVAGE_BD_TILE_32BPP            3
++/* twister, prosavage, DDR, supersavage, 2000 */
++#define       SAVAGE_BD_TILE_DEST             1
++#define       SAVAGE_BD_TILE_TEXTURE          2
++/* GBD - BCI enable */
++/* savage4, MX, IX, 3D */
++#define SAVAGE_GBD_BCI_ENABLE                    8
++/* twister, prosavage, DDR, supersavage, 2000 */
++#define SAVAGE_GBD_BCI_ENABLE_TWISTER            0
++
++#define SAVAGE_GBD_BIG_ENDIAN                    4
++#define SAVAGE_GBD_LITTLE_ENDIAN                 0
++#define SAVAGE_GBD_64                            1
++
++/*  Global Bitmap Descriptor */
++#define SAVAGE_BCI_GLB_BD_LOW             0x8168
++#define SAVAGE_BCI_GLB_BD_HIGH            0x816C
++
++/*
++ * BCI registers
++ */
++/* Savage4/Twister/ProSavage 3D registers */
++#define SAVAGE_DRAWLOCALCTRL_S4               0x1e
++#define SAVAGE_TEXPALADDR_S4          0x1f
++#define SAVAGE_TEXCTRL0_S4            0x20
++#define SAVAGE_TEXCTRL1_S4            0x21
++#define SAVAGE_TEXADDR0_S4            0x22
++#define SAVAGE_TEXADDR1_S4            0x23
++#define SAVAGE_TEXBLEND0_S4           0x24
++#define SAVAGE_TEXBLEND1_S4           0x25
++#define SAVAGE_TEXXPRCLR_S4           0x26 /* never used */
++#define SAVAGE_TEXDESCR_S4            0x27
++#define SAVAGE_FOGTABLE_S4            0x28
++#define SAVAGE_FOGCTRL_S4             0x30
++#define SAVAGE_STENCILCTRL_S4         0x31
++#define SAVAGE_ZBUFCTRL_S4            0x32
++#define SAVAGE_ZBUFOFF_S4             0x33
++#define SAVAGE_DESTCTRL_S4            0x34
++#define SAVAGE_DRAWCTRL0_S4           0x35
++#define SAVAGE_DRAWCTRL1_S4           0x36
++#define SAVAGE_ZWATERMARK_S4          0x37
++#define SAVAGE_DESTTEXRWWATERMARK_S4  0x38
++#define SAVAGE_TEXBLENDCOLOR_S4               0x39
++/* Savage3D/MX/IX 3D registers */
++#define SAVAGE_TEXPALADDR_S3D         0x18
++#define SAVAGE_TEXXPRCLR_S3D          0x19 /* never used */
++#define SAVAGE_TEXADDR_S3D            0x1A
++#define SAVAGE_TEXDESCR_S3D           0x1B
++#define SAVAGE_TEXCTRL_S3D            0x1C
++#define SAVAGE_FOGTABLE_S3D           0x20
++#define SAVAGE_FOGCTRL_S3D            0x30
++#define SAVAGE_DRAWCTRL_S3D           0x31
++#define SAVAGE_ZBUFCTRL_S3D           0x32
++#define SAVAGE_ZBUFOFF_S3D            0x33
++#define SAVAGE_DESTCTRL_S3D           0x34
++#define SAVAGE_SCSTART_S3D            0x35
++#define SAVAGE_SCEND_S3D              0x36
++#define SAVAGE_ZWATERMARK_S3D         0x37
++#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
++/* common stuff */
++#define SAVAGE_VERTBUFADDR            0x3e
++#define SAVAGE_BITPLANEWTMASK         0xd7
++#define SAVAGE_DMABUFADDR             0x51
++
++/* texture enable bits (needed for tex addr checking) */
++#define SAVAGE_TEXCTRL_TEXEN_MASK     0x00010000 /* S3D */
++#define SAVAGE_TEXDESCR_TEX0EN_MASK   0x02000000 /* S4 */
++#define SAVAGE_TEXDESCR_TEX1EN_MASK   0x04000000 /* S4 */
++
++/* Global fields in Savage4/Twister/ProSavage 3D registers:
++ *
++ * All texture registers and DrawLocalCtrl are local. All other
++ * registers are global. */
++
++/* Global fields in Savage3D/MX/IX 3D registers:
++ *
++ * All texture registers are local. DrawCtrl and ZBufCtrl are
++ * partially local. All other registers are global.
++ *
++ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
++ * ZBufCtrl global fields: zCmpFunc, zBufEn
++ */
++#define SAVAGE_DRAWCTRL_S3D_GLOBAL    0x03f3c00c
++#define SAVAGE_ZBUFCTRL_S3D_GLOBAL    0x00000027
++
++/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
++ */
++#define SAVAGE_SCISSOR_MASK_S4                0x00fff7ff
++#define SAVAGE_SCISSOR_MASK_S3D               0x07ff07ff
++
++/*
++ * BCI commands
++ */
++#define BCI_CMD_NOP                  0x40000000
++#define BCI_CMD_RECT                 0x48000000
++#define BCI_CMD_RECT_XP              0x01000000
++#define BCI_CMD_RECT_YP              0x02000000
++#define BCI_CMD_SCANLINE             0x50000000
++#define BCI_CMD_LINE                 0x5C000000
++#define BCI_CMD_LINE_LAST_PIXEL      0x58000000
++#define BCI_CMD_BYTE_TEXT            0x63000000
++#define BCI_CMD_NT_BYTE_TEXT         0x67000000
++#define BCI_CMD_BIT_TEXT             0x6C000000
++#define BCI_CMD_GET_ROP(cmd)         (((cmd) >> 16) & 0xFF)
++#define BCI_CMD_SET_ROP(cmd, rop)    ((cmd) |= ((rop & 0xFF) << 16))
++#define BCI_CMD_SEND_COLOR           0x00008000
++
++#define BCI_CMD_CLIP_NONE            0x00000000
++#define BCI_CMD_CLIP_CURRENT         0x00002000
++#define BCI_CMD_CLIP_LR              0x00004000
++#define BCI_CMD_CLIP_NEW             0x00006000
++
++#define BCI_CMD_DEST_GBD             0x00000000
++#define BCI_CMD_DEST_PBD             0x00000800
++#define BCI_CMD_DEST_PBD_NEW         0x00000C00
++#define BCI_CMD_DEST_SBD             0x00001000
++#define BCI_CMD_DEST_SBD_NEW         0x00001400
++
++#define BCI_CMD_SRC_TRANSPARENT      0x00000200
++#define BCI_CMD_SRC_SOLID            0x00000000
++#define BCI_CMD_SRC_GBD              0x00000020
++#define BCI_CMD_SRC_COLOR            0x00000040
++#define BCI_CMD_SRC_MONO             0x00000060
++#define BCI_CMD_SRC_PBD_COLOR        0x00000080
++#define BCI_CMD_SRC_PBD_MONO         0x000000A0
++#define BCI_CMD_SRC_PBD_COLOR_NEW    0x000000C0
++#define BCI_CMD_SRC_PBD_MONO_NEW     0x000000E0
++#define BCI_CMD_SRC_SBD_COLOR        0x00000100
++#define BCI_CMD_SRC_SBD_MONO         0x00000120
++#define BCI_CMD_SRC_SBD_COLOR_NEW    0x00000140
++#define BCI_CMD_SRC_SBD_MONO_NEW     0x00000160
++
++#define BCI_CMD_PAT_TRANSPARENT      0x00000010
++#define BCI_CMD_PAT_NONE             0x00000000
++#define BCI_CMD_PAT_COLOR            0x00000002
++#define BCI_CMD_PAT_MONO             0x00000003
++#define BCI_CMD_PAT_PBD_COLOR        0x00000004
++#define BCI_CMD_PAT_PBD_MONO         0x00000005
++#define BCI_CMD_PAT_PBD_COLOR_NEW    0x00000006
++#define BCI_CMD_PAT_PBD_MONO_NEW     0x00000007
++#define BCI_CMD_PAT_SBD_COLOR        0x00000008
++#define BCI_CMD_PAT_SBD_MONO         0x00000009
++#define BCI_CMD_PAT_SBD_COLOR_NEW    0x0000000A
++#define BCI_CMD_PAT_SBD_MONO_NEW     0x0000000B
++
++#define BCI_BD_BW_DISABLE            0x10000000
++#define BCI_BD_TILE_MASK             0x03000000
++#define BCI_BD_TILE_NONE             0x00000000
++#define BCI_BD_TILE_16               0x02000000
++#define BCI_BD_TILE_32               0x03000000
++#define BCI_BD_GET_BPP(bd)           (((bd) >> 16) & 0xFF)
++#define BCI_BD_SET_BPP(bd, bpp)      ((bd) |= (((bpp) & 0xFF) << 16))
++#define BCI_BD_GET_STRIDE(bd)        ((bd) & 0xFFFF)
++#define BCI_BD_SET_STRIDE(bd, st)    ((bd) |= ((st) & 0xFFFF))
++
++#define BCI_CMD_SET_REGISTER            0x96000000
++
++#define BCI_CMD_WAIT                    0xC0000000
++#define BCI_CMD_WAIT_3D                 0x00010000
++#define BCI_CMD_WAIT_2D                 0x00020000
++
++#define BCI_CMD_UPDATE_EVENT_TAG        0x98000000
++
++#define BCI_CMD_DRAW_PRIM               0x80000000
++#define BCI_CMD_DRAW_INDEXED_PRIM       0x88000000
++#define BCI_CMD_DRAW_CONT               0x01000000
++#define BCI_CMD_DRAW_TRILIST            0x00000000
++#define BCI_CMD_DRAW_TRISTRIP           0x02000000
++#define BCI_CMD_DRAW_TRIFAN             0x04000000
++#define BCI_CMD_DRAW_SKIPFLAGS          0x000000ff
++#define BCI_CMD_DRAW_NO_Z             0x00000001
++#define BCI_CMD_DRAW_NO_W             0x00000002
++#define BCI_CMD_DRAW_NO_CD            0x00000004
++#define BCI_CMD_DRAW_NO_CS            0x00000008
++#define BCI_CMD_DRAW_NO_U0            0x00000010
++#define BCI_CMD_DRAW_NO_V0            0x00000020
++#define BCI_CMD_DRAW_NO_UV0           0x00000030
++#define BCI_CMD_DRAW_NO_U1            0x00000040
++#define BCI_CMD_DRAW_NO_V1            0x00000080
++#define BCI_CMD_DRAW_NO_UV1           0x000000c0
++
++#define BCI_CMD_DMA                   0xa8000000
++
++#define BCI_W_H(w, h)                ((((h) << 16) | (w)) & 0x0FFF0FFF)
++#define BCI_X_Y(x, y)                ((((y) << 16) | (x)) & 0x0FFF0FFF)
++#define BCI_X_W(x, w)                ((((w) << 16) | (x)) & 0x0FFF0FFF)
++#define BCI_CLIP_LR(l, r)            ((((r) << 16) | (l)) & 0x0FFF0FFF)
++#define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
++#define BCI_CLIP_BR(b, r)            ((((b) << 16) | (r)) & 0x0FFF0FFF)
++
++#define BCI_LINE_X_Y(x, y)           (((y) << 16) | ((x) & 0xFFFF))
++#define BCI_LINE_STEPS(diag, axi)    (((axi) << 16) | ((diag) & 0xFFFF))
++#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
++      (((maj) & 0x1FFF) | \
++      ((ym) ? 1<<13 : 0) | \
++      ((xp) ? 1<<14 : 0) | \
++      ((yp) ? 1<<15 : 0) | \
++      ((err) << 16))
++
++/*
++ * common commands
++ */
++#define BCI_SET_REGISTERS( first, n )                 \
++      BCI_WRITE(BCI_CMD_SET_REGISTER |                \
++                ((uint32_t)(n) & 0xff) << 16 |        \
++                ((uint32_t)(first) & 0xffff))
++#define DMA_SET_REGISTERS( first, n )                 \
++      DMA_WRITE(BCI_CMD_SET_REGISTER |                \
++                ((uint32_t)(n) & 0xff) << 16 |        \
++                ((uint32_t)(first) & 0xffff))
++
++#define BCI_DRAW_PRIMITIVE(n, type, skip)         \
++        BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
++                ((n) << 16))
++#define DMA_DRAW_PRIMITIVE(n, type, skip)         \
++        DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
++                ((n) << 16))
++
++#define BCI_DRAW_INDICES_S3D(n, type, i0)         \
++        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
++                ((n) << 16) | (i0))
++
++#define BCI_DRAW_INDICES_S4(n, type, skip)        \
++        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
++                  (skip) | ((n) << 16))
++
++#define BCI_DMA(n)    \
++      BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
++
++/*
++ * access to MMIO
++ */
++#define SAVAGE_READ(reg)      DRM_READ32(  dev_priv->mmio, (reg) )
++#define SAVAGE_WRITE(reg)     DRM_WRITE32( dev_priv->mmio, (reg) )
++
++/*
++ * access to the burst command interface (BCI)
++ */
++#define SAVAGE_BCI_DEBUG 1
++
++#define BCI_LOCALS    volatile uint32_t *bci_ptr;
++
++#define BEGIN_BCI( n ) do {                   \
++      dev_priv->wait_fifo(dev_priv, (n));     \
++      bci_ptr = dev_priv->bci_ptr;            \
++} while(0)
++
++#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
++
++/*
++ * command DMA support
++ */
++#define SAVAGE_DMA_DEBUG 1
++
++#define DMA_LOCALS   uint32_t *dma_ptr;
++
++#define BEGIN_DMA( n ) do {                                           \
++      unsigned int cur = dev_priv->current_dma_page;                  \
++      unsigned int rest = SAVAGE_DMA_PAGE_SIZE -                      \
++              dev_priv->dma_pages[cur].used;                          \
++      if ((n) > rest) {                                               \
++              dma_ptr = savage_dma_alloc(dev_priv, (n));              \
++      } else { /* fast path for small allocations */                  \
++              dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +       \
++                      cur * SAVAGE_DMA_PAGE_SIZE +                    \
++                      dev_priv->dma_pages[cur].used;                  \
++              if (dev_priv->dma_pages[cur].used == 0)                 \
++                      savage_dma_wait(dev_priv, cur);                 \
++              dev_priv->dma_pages[cur].used += (n);                   \
++      }                                                               \
++} while(0)
++
++#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
++
++#define DMA_COPY(src, n) do {                                 \
++      memcpy(dma_ptr, (src), (n)*4);                          \
++      dma_ptr += n;                                           \
++} while(0)
++
++#if SAVAGE_DMA_DEBUG
++#define DMA_COMMIT() do {                                             \
++      unsigned int cur = dev_priv->current_dma_page;                  \
++      uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle +    \
++                      cur * SAVAGE_DMA_PAGE_SIZE +                    \
++                      dev_priv->dma_pages[cur].used;                  \
++      if (dma_ptr != expected) {                                      \
++              DRM_ERROR("DMA allocation and use don't match: "        \
++                        "%p != %p\n", expected, dma_ptr);             \
++              savage_dma_reset(dev_priv);                             \
++      }                                                               \
++} while(0)
++#else
++#define DMA_COMMIT() do {/* nothing */} while(0)
++#endif
++
++#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
++
++/* Buffer aging via event tag
++ */
++
++#define UPDATE_EVENT_COUNTER( ) do {                  \
++      if (dev_priv->status_ptr) {                     \
++              uint16_t count;                         \
++              /* coordinate with Xserver */           \
++              count = dev_priv->status_ptr[1023];     \
++              if (count < dev_priv->event_counter)    \
++                      dev_priv->event_wrap++;         \
++              dev_priv->event_counter = count;        \
++      }                                               \
++} while(0)
++
++#define SET_AGE( age, e, w ) do {     \
++      (age)->event = e;               \
++      (age)->wrap = w;                \
++} while(0)
++
++#define TEST_AGE( age, e, w )                         \
++      ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
++
++#endif /* __SAVAGE_DRV_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/savage_state.c git-nokia/drivers/gpu/drm-tungsten/savage_state.c
+--- git/drivers/gpu/drm-tungsten/savage_state.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/savage_state.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1165 @@
++/* savage_state.c -- State and drawing support for Savage
++ *
++ * Copyright 2004  Felix Kuehling
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
++ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "savage_drm.h"
++#include "savage_drv.h"
++
++void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
++                             const struct drm_clip_rect *pbox)
++{
++      uint32_t scstart = dev_priv->state.s3d.new_scstart;
++      uint32_t scend = dev_priv->state.s3d.new_scend;
++      scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
++              ((uint32_t)pbox->x1 & 0x000007ff) |
++              (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
++      scend   = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
++              (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
++              ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);
++      if (scstart != dev_priv->state.s3d.scstart ||
++          scend   != dev_priv->state.s3d.scend) {
++              DMA_LOCALS;
++              BEGIN_DMA(4);
++              DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
++              DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
++              DMA_WRITE(scstart);
++              DMA_WRITE(scend);
++              dev_priv->state.s3d.scstart = scstart;
++              dev_priv->state.s3d.scend = scend;
++              dev_priv->waiting = 1;
++              DMA_COMMIT();
++      }
++}
++
++void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
++                            const struct drm_clip_rect *pbox)
++{
++      uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
++      uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
++      drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
++              ((uint32_t)pbox->x1 & 0x000007ff) |
++              (((uint32_t)pbox->y1 << 12) & 0x00fff000);
++      drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
++              (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
++              ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);
++      if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
++          drawctrl1 != dev_priv->state.s4.drawctrl1) {
++              DMA_LOCALS;
++              BEGIN_DMA(4);
++              DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
++              DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
++              DMA_WRITE(drawctrl0);
++              DMA_WRITE(drawctrl1);
++              dev_priv->state.s4.drawctrl0 = drawctrl0;
++              dev_priv->state.s4.drawctrl1 = drawctrl1;
++              dev_priv->waiting = 1;
++              DMA_COMMIT();
++      }
++}
++
++static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
++                               uint32_t addr)
++{
++      if ((addr & 6) != 2) { /* reserved bits */
++              DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
++              return -EINVAL;
++      }
++      if (!(addr & 1)) { /* local */
++              addr &= ~7;
++              if (addr < dev_priv->texture_offset ||
++                  addr >= dev_priv->texture_offset + dev_priv->texture_size) {
++                      DRM_ERROR
++                          ("bad texAddr%d %08x (local addr out of range)\n",
++                           unit, addr);
++                      return -EINVAL;
++              }
++      } else { /* AGP */
++              if (!dev_priv->agp_textures) {
++                      DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
++                                unit, addr);
++                      return -EINVAL;
++              }
++              addr &= ~7;
++              if (addr < dev_priv->agp_textures->offset ||
++                  addr >= (dev_priv->agp_textures->offset +
++                           dev_priv->agp_textures->size)) {
++                      DRM_ERROR
++                          ("bad texAddr%d %08x (AGP addr out of range)\n",
++                           unit, addr);
++                      return -EINVAL;
++              }
++      }
++      return 0;
++}
++
++#define SAVE_STATE(reg,where)                 \
++      if(start <= reg && start + count > reg) \
++              dev_priv->state.where = regs[reg - start]
++#define SAVE_STATE_MASK(reg,where,mask) do {                  \
++      if(start <= reg && start + count > reg) {                       \
++              uint32_t tmp;                                   \
++              tmp = regs[reg - start];                        \
++              dev_priv->state.where = (tmp & (mask)) |        \
++                      (dev_priv->state.where & ~(mask));      \
++      }                                                       \
++} while (0)
++static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
++                                 unsigned int start, unsigned int count,
++                                 const uint32_t *regs)
++{
++      if (start < SAVAGE_TEXPALADDR_S3D ||
++          start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
++              DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
++                        start, start + count - 1);
++              return -EINVAL;
++      }
++
++      SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
++                      ~SAVAGE_SCISSOR_MASK_S3D);
++      SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
++                      ~SAVAGE_SCISSOR_MASK_S3D);
++
++      /* if any texture regs were changed ... */
++      if (start <= SAVAGE_TEXCTRL_S3D &&
++          start + count > SAVAGE_TEXPALADDR_S3D) {
++              /* ... check texture state */
++              SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
++              SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
++              if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
++                      return savage_verify_texaddr(dev_priv, 0,
++                                              dev_priv->state.s3d.texaddr);
++      }
++
++      return 0;
++}
++
++static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
++                                unsigned int start, unsigned int count,
++                                const uint32_t *regs)
++{
++      int ret = 0;
++
++      if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
++          start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
++              DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
++                        start, start + count - 1);
++              return -EINVAL;
++      }
++
++      SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
++                      ~SAVAGE_SCISSOR_MASK_S4);
++      SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
++                      ~SAVAGE_SCISSOR_MASK_S4);
++
++      /* if any texture regs were changed ... */
++      if (start <= SAVAGE_TEXDESCR_S4 &&
++          start + count > SAVAGE_TEXPALADDR_S4) {
++              /* ... check texture state */
++              SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
++              SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
++              SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
++              if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
++                      ret |= savage_verify_texaddr(dev_priv, 0,
++                                              dev_priv->state.s4.texaddr0);
++              if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
++                      ret |= savage_verify_texaddr(dev_priv, 1,
++                                              dev_priv->state.s4.texaddr1);
++      }
++
++      return ret;
++}
++#undef SAVE_STATE
++#undef SAVE_STATE_MASK
++
++static int savage_dispatch_state(drm_savage_private_t *dev_priv,
++                               const drm_savage_cmd_header_t *cmd_header,
++                               const uint32_t *regs)
++{
++      unsigned int count = cmd_header->state.count;
++      unsigned int start = cmd_header->state.start;
++      unsigned int count2 = 0;
++      unsigned int bci_size;
++      int ret;
++      DMA_LOCALS;
++
++      if (!count)
++              return 0;
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              ret = savage_verify_state_s3d(dev_priv, start, count, regs);
++              if (ret != 0)
++                      return ret;
++              /* scissor regs are emitted in savage_dispatch_draw */
++              if (start < SAVAGE_SCSTART_S3D) {
++                      if (start + count > SAVAGE_SCEND_S3D + 1)
++                              count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
++                      if (start + count > SAVAGE_SCSTART_S3D)
++                              count = SAVAGE_SCSTART_S3D - start;
++              } else if (start <= SAVAGE_SCEND_S3D) {
++                      if (start + count > SAVAGE_SCEND_S3D + 1) {
++                              count -= SAVAGE_SCEND_S3D + 1 - start;
++                              start = SAVAGE_SCEND_S3D + 1;
++                      } else
++                              return 0;
++              }
++      } else {
++              ret = savage_verify_state_s4(dev_priv, start, count, regs);
++              if (ret != 0)
++                      return ret;
++              /* scissor regs are emitted in savage_dispatch_draw */
++              if (start < SAVAGE_DRAWCTRL0_S4) {
++                      if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
++                              count2 = count -
++                                       (SAVAGE_DRAWCTRL1_S4 + 1 - start);
++                      if (start + count > SAVAGE_DRAWCTRL0_S4)
++                              count = SAVAGE_DRAWCTRL0_S4 - start;
++              } else if (start <= SAVAGE_DRAWCTRL1_S4) {
++                      if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
++                              count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
++                              start = SAVAGE_DRAWCTRL1_S4 + 1;
++                      } else
++                              return 0;
++              }
++      }
++
++      bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
++
++      if (cmd_header->state.global) {
++              BEGIN_DMA(bci_size + 1);
++              DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
++              dev_priv->waiting = 1;
++      } else {
++              BEGIN_DMA(bci_size);
++      }
++
++      do {
++              while (count > 0) {
++                      unsigned int n = count < 255 ? count : 255;
++                      DMA_SET_REGISTERS(start, n);
++                      DMA_COPY(regs, n);
++                      count -= n;
++                      start += n;
++                      regs += n;
++              }
++              start += 2;
++              regs += 2;
++              count = count2;
++              count2 = 0;
++      } while (count);
++
++      DMA_COMMIT();
++
++      return 0;
++}
++
++static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
++                                  const drm_savage_cmd_header_t *cmd_header,
++                                  const struct drm_buf *dmabuf)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->prim.prim;
++      unsigned int skip = cmd_header->prim.skip;
++      unsigned int n = cmd_header->prim.count;
++      unsigned int start = cmd_header->prim.start;
++      unsigned int i;
++      BCI_LOCALS;
++
++      if (!dmabuf) {
++              DRM_ERROR("called without dma buffers!\n");
++              return -EINVAL;
++      }
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of vertices %u in TRILIST\n",
++                                n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                         ("wrong number of vertices %u in TRIFAN/STRIP\n",
++                          n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip != 0) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++      } else {
++              unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
++                      (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
++                      (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
++              if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++              if (reorder) {
++                      DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
++                      return -EINVAL;
++              }
++      }
++
++      if (start + n > dmabuf->total / 32) {
++              DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
++                        start, start + n - 1, dmabuf->total / 32);
++              return -EINVAL;
++      }
++
++      /* Vertex DMA doesn't work with command DMA at the same time,
++       * so we use BCI_... to submit commands here. Flush buffered
++       * faked DMA first. */
++      DMA_FLUSH();
++
++      if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
++              BEGIN_BCI(2);
++              BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
++              BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
++              dev_priv->state.common.vbaddr = dmabuf->bus_address;
++      }
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
++              /* Workaround for what looks like a hardware bug. If a
++               * WAIT_3D_IDLE was emitted some time before the
++               * indexed drawing command then the engine will lock
++               * up. There are two known workarounds:
++               * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
++              BEGIN_BCI(63);
++              for (i = 0; i < 63; ++i)
++                      BCI_WRITE(BCI_CMD_WAIT);
++              dev_priv->waiting = 0;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 indices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++              if (reorder) {
++                      /* Need to reorder indices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { -1, -1, -1 };
++                      reorder[start % 3] = 2;
++
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, start + 2);
++
++                      for (i = start + 1; i + 1 < start + count; i += 2)
++                              BCI_WRITE((i + reorder[i % 3]) |
++                                        ((i + 1 +
++                                          reorder[(i + 1) % 3]) << 16));
++                      if (i < start + count)
++                              BCI_WRITE(i + reorder[i % 3]);
++              } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, start);
++
++                      for (i = start + 1; i + 1 < start + count; i += 2)
++                              BCI_WRITE(i | ((i + 1) << 16));
++                      if (i < start + count)
++                              BCI_WRITE(i);
++              } else {
++                      BEGIN_BCI((count + 2 + 1) / 2);
++                      BCI_DRAW_INDICES_S4(count, prim, skip);
++
++                      for (i = start; i + 1 < start + count; i += 2)
++                              BCI_WRITE(i | ((i + 1) << 16));
++                      if (i < start + count)
++                              BCI_WRITE(i);
++              }
++
++              start += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
++                                 const drm_savage_cmd_header_t *cmd_header,
++                                 const uint32_t *vtxbuf, unsigned int vb_size,
++                                 unsigned int vb_stride)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->prim.prim;
++      unsigned int skip = cmd_header->prim.skip;
++      unsigned int n = cmd_header->prim.count;
++      unsigned int start = cmd_header->prim.start;
++      unsigned int vtx_size;
++      unsigned int i;
++      DMA_LOCALS;
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of vertices %u in TRILIST\n",
++                                n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                          ("wrong number of vertices %u in TRIFAN/STRIP\n",
++                           n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip > SAVAGE_SKIP_ALL_S3D) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 8; /* full vertex */
++      } else {
++              if (skip > SAVAGE_SKIP_ALL_S4) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 10; /* full vertex */
++      }
++
++      vtx_size -= (skip & 1) + (skip >> 1 & 1) +
++              (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
++              (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
++
++      if (vtx_size > vb_stride) {
++              DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
++                        vtx_size, vb_stride);
++              return -EINVAL;
++      }
++
++      if (start + n > vb_size / (vb_stride * 4)) {
++              DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
++                        start, start + n - 1, vb_size / (vb_stride * 4));
++              return -EINVAL;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 vertices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++              if (reorder) {
++                      /* Need to reorder vertices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { -1, -1, -1 };
++                      reorder[start % 3] = 2;
++
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      for (i = start; i < start + count; ++i) {
++                              unsigned int j = i + reorder[i % 3];
++                              DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
++                      }
++
++                      DMA_COMMIT();
++              } else {
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      if (vb_stride == vtx_size) {
++                              DMA_COPY(&vtxbuf[vb_stride * start],
++                                       vtx_size * count);
++                      } else {
++                              for (i = start; i < start + count; ++i) {
++                                      DMA_COPY(&vtxbuf[vb_stride * i],
++                                               vtx_size);
++                              }
++                      }
++
++                      DMA_COMMIT();
++              }
++
++              start += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
++                                 const drm_savage_cmd_header_t *cmd_header,
++                                 const uint16_t *idx,
++                                 const struct drm_buf *dmabuf)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->idx.prim;
++      unsigned int skip = cmd_header->idx.skip;
++      unsigned int n = cmd_header->idx.count;
++      unsigned int i;
++      BCI_LOCALS;
++
++      if (!dmabuf) {
++              DRM_ERROR("called without dma buffers!\n");
++              return -EINVAL;
++      }
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                          ("wrong number of indices %u in TRIFAN/STRIP\n", n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip != 0) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++      } else {
++              unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
++                      (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
++                      (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
++              if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
++                      DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
++                      return -EINVAL;
++              }
++              if (reorder) {
++                      DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
++                      return -EINVAL;
++              }
++      }
++
++      /* Vertex DMA doesn't work with command DMA at the same time,
++       * so we use BCI_... to submit commands here. Flush buffered
++       * faked DMA first. */
++      DMA_FLUSH();
++
++      if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
++              BEGIN_BCI(2);
++              BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
++              BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
++              dev_priv->state.common.vbaddr = dmabuf->bus_address;
++      }
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
++              /* Workaround for what looks like a hardware bug. If a
++               * WAIT_3D_IDLE was emitted some time before the
++               * indexed drawing command then the engine will lock
++               * up. There are two known workarounds:
++               * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
++              BEGIN_BCI(63);
++              for (i = 0; i < 63; ++i)
++                      BCI_WRITE(BCI_CMD_WAIT);
++              dev_priv->waiting = 0;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 indices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++
++              /* check indices */
++              for (i = 0; i < count; ++i) {
++                      if (idx[i] > dmabuf->total / 32) {
++                              DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
++                                        i, idx[i], dmabuf->total / 32);
++                              return -EINVAL;
++                      }
++              }
++
++              if (reorder) {
++                      /* Need to reorder indices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { 2, -1, -1 };
++
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
++
++                      for (i = 1; i + 1 < count; i += 2)
++                              BCI_WRITE(idx[i + reorder[i % 3]] |
++                                        (idx[i + 1 +
++                                         reorder[(i + 1) % 3]] << 16));
++                      if (i < count)
++                              BCI_WRITE(idx[i + reorder[i % 3]]);
++              } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++                      BEGIN_BCI((count + 1 + 1) / 2);
++                      BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
++
++                      for (i = 1; i + 1 < count; i += 2)
++                              BCI_WRITE(idx[i] | (idx[i + 1] << 16));
++                      if (i < count)
++                              BCI_WRITE(idx[i]);
++              } else {
++                      BEGIN_BCI((count + 2 + 1) / 2);
++                      BCI_DRAW_INDICES_S4(count, prim, skip);
++
++                      for (i = 0; i + 1 < count; i += 2)
++                              BCI_WRITE(idx[i] | (idx[i + 1] << 16));
++                      if (i < count)
++                              BCI_WRITE(idx[i]);
++              }
++
++              idx += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
++                                const drm_savage_cmd_header_t *cmd_header,
++                                const uint16_t *idx,
++                                const uint32_t *vtxbuf,
++                                unsigned int vb_size, unsigned int vb_stride)
++{
++      unsigned char reorder = 0;
++      unsigned int prim = cmd_header->idx.prim;
++      unsigned int skip = cmd_header->idx.skip;
++      unsigned int n = cmd_header->idx.count;
++      unsigned int vtx_size;
++      unsigned int i;
++      DMA_LOCALS;
++
++      if (!n)
++              return 0;
++
++      switch (prim) {
++      case SAVAGE_PRIM_TRILIST_201:
++              reorder = 1;
++              prim = SAVAGE_PRIM_TRILIST;
++      case SAVAGE_PRIM_TRILIST:
++              if (n % 3 != 0) {
++                      DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
++                      return -EINVAL;
++              }
++              break;
++      case SAVAGE_PRIM_TRISTRIP:
++      case SAVAGE_PRIM_TRIFAN:
++              if (n < 3) {
++                      DRM_ERROR
++                          ("wrong number of indices %u in TRIFAN/STRIP\n", n);
++                      return -EINVAL;
++              }
++              break;
++      default:
++              DRM_ERROR("invalid primitive type %u\n", prim);
++              return -EINVAL;
++      }
++
++      if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
++              if (skip > SAVAGE_SKIP_ALL_S3D) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 8; /* full vertex */
++      } else {
++              if (skip > SAVAGE_SKIP_ALL_S4) {
++                      DRM_ERROR("invalid skip flags 0x%04x\n", skip);
++                      return -EINVAL;
++              }
++              vtx_size = 10; /* full vertex */
++      }
++
++      vtx_size -= (skip & 1) + (skip >> 1 & 1) +
++              (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
++              (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
++
++      if (vtx_size > vb_stride) {
++              DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
++                        vtx_size, vb_stride);
++              return -EINVAL;
++      }
++
++      prim <<= 25;
++      while (n != 0) {
++              /* Can emit up to 255 vertices (85 triangles) at once. */
++              unsigned int count = n > 255 ? 255 : n;
++
++              /* Check indices */
++              for (i = 0; i < count; ++i) {
++                      if (idx[i] > vb_size / (vb_stride * 4)) {
++                              DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
++                                        i, idx[i],  vb_size / (vb_stride * 4));
++                              return -EINVAL;
++                      }
++              }
++
++              if (reorder) {
++                      /* Need to reorder vertices for correct flat
++                       * shading while preserving the clock sense
++                       * for correct culling. Only on Savage3D. */
++                      int reorder[3] = { 2, -1, -1 };
++
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      for (i = 0; i < count; ++i) {
++                              unsigned int j = idx[i + reorder[i % 3]];
++                              DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
++                      }
++
++                      DMA_COMMIT();
++              } else {
++                      BEGIN_DMA(count * vtx_size + 1);
++                      DMA_DRAW_PRIMITIVE(count, prim, skip);
++
++                      for (i = 0; i < count; ++i) {
++                              unsigned int j = idx[i];
++                              DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
++                      }
++
++                      DMA_COMMIT();
++              }
++
++              idx += count;
++              n -= count;
++
++              prim |= BCI_CMD_DRAW_CONT;
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
++                               const drm_savage_cmd_header_t *cmd_header,
++                               const drm_savage_cmd_header_t *data,
++                               unsigned int nbox,
++                               const struct drm_clip_rect *boxes)
++{
++      unsigned int flags = cmd_header->clear0.flags;
++      unsigned int clear_cmd;
++      unsigned int i, nbufs;
++      DMA_LOCALS;
++
++      if (nbox == 0)
++              return 0;
++
++      clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
++              BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
++      BCI_CMD_SET_ROP(clear_cmd,0xCC);
++
++      nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
++          ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
++      if (nbufs == 0)
++              return 0;
++
++      if (data->clear1.mask != 0xffffffff) {
++              /* set mask */
++              BEGIN_DMA(2);
++              DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
++              DMA_WRITE(data->clear1.mask);
++              DMA_COMMIT();
++      }
++      for (i = 0; i < nbox; ++i) {
++              unsigned int x, y, w, h;
++              unsigned int buf;
++
++              x = boxes[i].x1, y = boxes[i].y1;
++              w = boxes[i].x2 - boxes[i].x1;
++              h = boxes[i].y2 - boxes[i].y1;
++              BEGIN_DMA(nbufs * 6);
++              for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
++                      if (!(flags & buf))
++                              continue;
++                      DMA_WRITE(clear_cmd);
++                      switch (buf) {
++                      case SAVAGE_FRONT:
++                              DMA_WRITE(dev_priv->front_offset);
++                              DMA_WRITE(dev_priv->front_bd);
++                              break;
++                      case SAVAGE_BACK:
++                              DMA_WRITE(dev_priv->back_offset);
++                              DMA_WRITE(dev_priv->back_bd);
++                              break;
++                      case SAVAGE_DEPTH:
++                              DMA_WRITE(dev_priv->depth_offset);
++                              DMA_WRITE(dev_priv->depth_bd);
++                              break;
++                      }
++                      DMA_WRITE(data->clear1.value);
++                      DMA_WRITE(BCI_X_Y(x, y));
++                      DMA_WRITE(BCI_W_H(w, h));
++              }
++              DMA_COMMIT();
++      }
++      if (data->clear1.mask != 0xffffffff) {
++              /* reset mask */
++              BEGIN_DMA(2);
++              DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
++              DMA_WRITE(0xffffffff);
++              DMA_COMMIT();
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
++                              unsigned int nbox, const struct drm_clip_rect *boxes)
++{
++      unsigned int swap_cmd;
++      unsigned int i;
++      DMA_LOCALS;
++
++      if (nbox == 0)
++              return 0;
++
++      swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
++              BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
++      BCI_CMD_SET_ROP(swap_cmd,0xCC);
++
++      for (i = 0; i < nbox; ++i) {
++              BEGIN_DMA(6);
++              DMA_WRITE(swap_cmd);
++              DMA_WRITE(dev_priv->back_offset);
++              DMA_WRITE(dev_priv->back_bd);
++              DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
++              DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
++              DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
++                                boxes[i].y2 - boxes[i].y1));
++              DMA_COMMIT();
++      }
++
++      return 0;
++}
++
++static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
++                              const drm_savage_cmd_header_t *start,
++                              const drm_savage_cmd_header_t *end,
++                              const struct drm_buf *dmabuf,
++                              const unsigned int *vtxbuf,
++                              unsigned int vb_size, unsigned int vb_stride,
++                              unsigned int nbox,
++                              const struct drm_clip_rect *boxes)
++{
++      unsigned int i, j;
++      int ret;
++
++      for (i = 0; i < nbox; ++i) {
++              const drm_savage_cmd_header_t *cmdbuf;
++              dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
++
++              cmdbuf = start;
++              while (cmdbuf < end) {
++                      drm_savage_cmd_header_t cmd_header;
++                      cmd_header = *cmdbuf;
++                      cmdbuf++;
++                      switch (cmd_header.cmd.cmd) {
++                      case SAVAGE_CMD_DMA_PRIM:
++                              ret = savage_dispatch_dma_prim(
++                                      dev_priv, &cmd_header, dmabuf);
++                              break;
++                      case SAVAGE_CMD_VB_PRIM:
++                              ret = savage_dispatch_vb_prim(
++                                      dev_priv, &cmd_header,
++                                      vtxbuf, vb_size, vb_stride);
++                              break;
++                      case SAVAGE_CMD_DMA_IDX:
++                              j = (cmd_header.idx.count + 3) / 4;
++                              /* j was checked in savage_bci_cmdbuf */
++                              ret = savage_dispatch_dma_idx(dev_priv,
++                                      &cmd_header, (const uint16_t *)cmdbuf,
++                                      dmabuf);
++                              cmdbuf += j;
++                              break;
++                      case SAVAGE_CMD_VB_IDX:
++                              j = (cmd_header.idx.count + 3) / 4;
++                              /* j was checked in savage_bci_cmdbuf */
++                              ret = savage_dispatch_vb_idx(dev_priv,
++                                      &cmd_header, (const uint16_t *)cmdbuf,
++                                      (const uint32_t *)vtxbuf, vb_size,
++                                      vb_stride);
++                              cmdbuf += j;
++                              break;
++                      default:
++                              /* What's the best return code? EFAULT? */
++                              DRM_ERROR("IMPLEMENTATION ERROR: "
++                                        "non-drawing-command %d\n",
++                                        cmd_header.cmd.cmd);
++                              return -EINVAL;
++                      }
++
++                      if (ret != 0)
++                              return ret;
++              }
++      }
++
++      return 0;
++}
++
++int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_savage_private_t *dev_priv = dev->dev_private;
++      struct drm_device_dma *dma = dev->dma;
++      struct drm_buf *dmabuf;
++      drm_savage_cmdbuf_t *cmdbuf = data;
++      drm_savage_cmd_header_t *kcmd_addr = NULL;
++      drm_savage_cmd_header_t *first_draw_cmd;
++      unsigned int *kvb_addr = NULL;
++      struct drm_clip_rect *kbox_addr = NULL;
++      unsigned int i, j;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      if (dma && dma->buflist) {
++              if (cmdbuf->dma_idx > dma->buf_count) {
++                      DRM_ERROR
++                          ("vertex buffer index %u out of range (0-%u)\n",
++                           cmdbuf->dma_idx, dma->buf_count - 1);
++                      return -EINVAL;
++              }
++              dmabuf = dma->buflist[cmdbuf->dma_idx];
++      } else {
++              dmabuf = NULL;
++      }
++
++      /* Copy the user buffers into kernel temporary areas.  This hasn't been
++       * a performance loss compared to VERIFYAREA_READ/
++       * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
++       * for locking on FreeBSD.
++       */
++      if (cmdbuf->size) {
++              kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
++              if (kcmd_addr == NULL)
++                      return -ENOMEM;
++
++              if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
++                                     cmdbuf->size * 8))
++              {
++                      drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
++                      return -EFAULT;
++              }
++              cmdbuf->cmd_addr = kcmd_addr;
++      }
++      if (cmdbuf->vb_size) {
++              kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
++              if (kvb_addr == NULL) {
++                      ret = -ENOMEM;
++                      goto done;
++              }
++
++              if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
++                                     cmdbuf->vb_size)) {
++                      ret = -EFAULT;
++                      goto done;
++              }
++              cmdbuf->vb_addr = kvb_addr;
++      }
++      if (cmdbuf->nbox) {
++              kbox_addr = drm_alloc(cmdbuf->nbox *
++                                    sizeof(struct drm_clip_rect),
++                                    DRM_MEM_DRIVER);
++              if (kbox_addr == NULL) {
++                      ret = -ENOMEM;
++                      goto done;
++              }
++
++              if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
++                                     cmdbuf->nbox *
++                                     sizeof(struct drm_clip_rect))) {
++                      ret = -EFAULT;
++                      goto done;
++              }
++              cmdbuf->box_addr = kbox_addr;
++      }
++
++      /* Make sure writes to DMA buffers are finished before sending
++       * DMA commands to the graphics hardware. */
++      DRM_MEMORYBARRIER();
++
++      /* Coming from user space. Don't know if the Xserver has
++       * emitted wait commands. Assuming the worst. */
++      dev_priv->waiting = 1;
++
++      i = 0;
++      first_draw_cmd = NULL;
++      while (i < cmdbuf->size) {
++              drm_savage_cmd_header_t cmd_header;
++              cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
++              cmdbuf->cmd_addr++;
++              i++;
++
++              /* Group drawing commands with same state to minimize
++               * iterations over clip rects. */
++              j = 0;
++              switch (cmd_header.cmd.cmd) {
++              case SAVAGE_CMD_DMA_IDX:
++              case SAVAGE_CMD_VB_IDX:
++                      j = (cmd_header.idx.count + 3) / 4;
++                      if (i + j > cmdbuf->size) {
++                              DRM_ERROR("indexed drawing command extends "
++                                        "beyond end of command buffer\n");
++                              DMA_FLUSH();
++                              return -EINVAL;
++                      }
++                      /* fall through */
++              case SAVAGE_CMD_DMA_PRIM:
++              case SAVAGE_CMD_VB_PRIM:
++                      if (!first_draw_cmd)
++                              first_draw_cmd = cmdbuf->cmd_addr - 1;
++                      cmdbuf->cmd_addr += j;
++                      i += j;
++                      break;
++              default:
++                      if (first_draw_cmd) {
++                              ret = savage_dispatch_draw(
++                                      dev_priv, first_draw_cmd,
++                                      cmdbuf->cmd_addr - 1,
++                                      dmabuf, cmdbuf->vb_addr,
++                                      cmdbuf->vb_size,
++                                      cmdbuf->vb_stride,
++                                      cmdbuf->nbox, cmdbuf->box_addr);
++                              if (ret != 0)
++                                      return ret;
++                              first_draw_cmd = NULL;
++                      }
++              }
++              if (first_draw_cmd)
++                      continue;
++
++              switch (cmd_header.cmd.cmd) {
++              case SAVAGE_CMD_STATE:
++                      j = (cmd_header.state.count + 1) / 2;
++                      if (i + j > cmdbuf->size) {
++                              DRM_ERROR("command SAVAGE_CMD_STATE extends "
++                                        "beyond end of command buffer\n");
++                              DMA_FLUSH();
++                              ret = -EINVAL;
++                              goto done;
++                      }
++                      ret = savage_dispatch_state(dev_priv, &cmd_header,
++                              (const uint32_t *)cmdbuf->cmd_addr);
++                      cmdbuf->cmd_addr += j;
++                      i += j;
++                      break;
++              case SAVAGE_CMD_CLEAR:
++                      if (i + 1 > cmdbuf->size) {
++                              DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
++                                        "beyond end of command buffer\n");
++                              DMA_FLUSH();
++                              ret = -EINVAL;
++                              goto done;
++                      }
++                      ret = savage_dispatch_clear(dev_priv, &cmd_header,
++                                                  cmdbuf->cmd_addr,
++                                                  cmdbuf->nbox,
++                                                  cmdbuf->box_addr);
++                      cmdbuf->cmd_addr++;
++                      i++;
++                      break;
++              case SAVAGE_CMD_SWAP:
++                      ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
++                                                 cmdbuf->box_addr);
++                      break;
++              default:
++                      DRM_ERROR("invalid command 0x%x\n",
++                                cmd_header.cmd.cmd);
++                      DMA_FLUSH();
++                      ret = -EINVAL;
++                      goto done;
++              }
++
++              if (ret != 0) {
++                      DMA_FLUSH();
++                      goto done;
++              }
++      }
++
++      if (first_draw_cmd) {
++              ret = savage_dispatch_draw(
++                      dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
++                      cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
++                      cmdbuf->nbox, cmdbuf->box_addr);
++              if (ret != 0) {
++                      DMA_FLUSH();
++                      goto done;
++              }
++      }
++
++      DMA_FLUSH();
++
++      if (dmabuf && cmdbuf->discard) {
++              drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
++              uint16_t event;
++              event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
++              SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
++              savage_freelist_put(dev, dmabuf);
++      }
++
++done:
++      /* If we didn't need to allocate them, these'll be NULL */
++      drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
++      drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
++      drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
++               DRM_MEM_DRIVER);
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_drm.h git-nokia/drivers/gpu/drm-tungsten/sis_drm.h
+--- git/drivers/gpu/drm-tungsten/sis_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
++/*
++ * Copyright 2005 Eric Anholt
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#ifndef __SIS_DRM_H__
++#define __SIS_DRM_H__
++
++/* SiS specific ioctls */
++#define NOT_USED_0_3
++#define DRM_SIS_FB_ALLOC      0x04
++#define DRM_SIS_FB_FREE               0x05
++#define NOT_USED_6_12
++#define DRM_SIS_AGP_INIT      0x13
++#define DRM_SIS_AGP_ALLOC     0x14
++#define DRM_SIS_AGP_FREE      0x15
++#define DRM_SIS_FB_INIT               0x16
++
++#define DRM_IOCTL_SIS_FB_ALLOC                DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_FB_FREE         DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_AGP_INIT                DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
++#define DRM_IOCTL_SIS_AGP_ALLOC               DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_AGP_FREE                DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
++#define DRM_IOCTL_SIS_FB_INIT         DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
++/*
++#define DRM_IOCTL_SIS_FLIP            DRM_IOW( 0x48, drm_sis_flip_t)
++#define DRM_IOCTL_SIS_FLIP_INIT               DRM_IO(  0x49)
++#define DRM_IOCTL_SIS_FLIP_FINAL      DRM_IO(  0x50)
++*/
++
++typedef struct {
++      int context;
++      unsigned int offset;
++      unsigned int size;
++      unsigned long free;
++} drm_sis_mem_t;
++
++typedef struct {
++      unsigned int offset, size;
++} drm_sis_agp_t;
++
++typedef struct {
++      unsigned int offset, size;
++} drm_sis_fb_t;
++
++#endif                                /* __SIS_DRM_H__ */
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_drv.c git-nokia/drivers/gpu/drm-tungsten/sis_drv.c
+--- git/drivers/gpu/drm-tungsten/sis_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,127 @@
++/* sis.c -- sis driver -*- linux-c -*-
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "sis_drm.h"
++#include "sis_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      sis_PCI_IDS
++};
++
++
++static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++      drm_sis_private_t *dev_priv;
++      int ret;
++
++      dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      dev->dev_private = (void *)dev_priv;
++      dev_priv->chipset = chipset;
++      ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
++      if (ret) {
++              drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
++      }
++
++      return ret;
++}
++
++static int sis_driver_unload(struct drm_device *dev)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++
++      drm_sman_takedown(&dev_priv->sman);
++      drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++
++      return 0;
++}
++
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
++      .load = sis_driver_load,
++      .unload = sis_driver_unload,
++      .context_dtor = NULL,
++      .dma_quiescent = sis_idle,
++      .reclaim_buffers = NULL,
++      .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
++      .lastclose = sis_lastclose,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = sis_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init sis_init(void)
++{
++      driver.num_ioctls = sis_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit sis_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(sis_init);
++module_exit(sis_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_drv.h git-nokia/drivers/gpu/drm-tungsten/sis_drv.h
+--- git/drivers/gpu/drm-tungsten/sis_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,90 @@
++/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
++/*
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef _SIS_DRV_H_
++#define _SIS_DRV_H_
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "SIS, Tungsten Graphics"
++#define DRIVER_NAME           "sis"
++#define DRIVER_DESC           "SIS 300/630/540 and XGI V3XE/V5/V8"
++#define DRIVER_DATE           "20070626"
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          3
++#define DRIVER_PATCHLEVEL     0
++
++enum sis_family {
++      SIS_OTHER = 0,
++      SIS_CHIP_315 = 1,
++};
++
++#if defined(__linux__)
++#define SIS_HAVE_CORE_MM
++#endif
++
++#ifdef SIS_HAVE_CORE_MM
++#include "drm_sman.h"
++
++#define SIS_BASE (dev_priv->mmio)
++#define SIS_READ(reg)  DRM_READ32(SIS_BASE, reg);
++#define SIS_WRITE(reg, val)   DRM_WRITE32(SIS_BASE, reg, val);
++
++typedef struct drm_sis_private {
++      drm_local_map_t *mmio;
++      unsigned int idle_fault;
++      struct drm_sman sman;
++      unsigned int chipset;
++      int vram_initialized;
++      int agp_initialized;
++      unsigned long vram_offset;
++      unsigned long agp_offset;
++} drm_sis_private_t;
++
++extern int sis_idle(struct drm_device *dev);
++extern void sis_reclaim_buffers_locked(struct drm_device *dev,
++                                     struct drm_file *file_priv);
++extern void sis_lastclose(struct drm_device *dev);
++
++#else
++#include "sis_ds.h"
++
++typedef struct drm_sis_private {
++      memHeap_t *AGPHeap;
++      memHeap_t *FBHeap;
++} drm_sis_private_t;
++
++extern int sis_init_context(struct drm_device * dev, int context);
++extern int sis_final_context(struct drm_device * dev, int context);
++
++#endif
++
++extern struct drm_ioctl_desc sis_ioctls[];
++extern int sis_max_ioctl;
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/sis_mm.c git-nokia/drivers/gpu/drm-tungsten/sis_mm.c
+--- git/drivers/gpu/drm-tungsten/sis_mm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/sis_mm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,332 @@
++/**************************************************************************
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ *
++ **************************************************************************/
++
++/*
++ * Authors:
++ *    Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "sis_drm.h"
++#include "sis_drv.h"
++
++#if defined(__linux__)
++#include <video/sisfb.h>
++#endif
++
++#define VIDEO_TYPE 0
++#define AGP_TYPE 1
++
++#define SIS_MM_ALIGN_SHIFT 4
++#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
++
++#if defined(__linux__) && defined(CONFIG_FB_SIS)
++/* fb management via fb device */
++
++#define SIS_MM_ALIGN_SHIFT 0
++#define SIS_MM_ALIGN_MASK 0
++
++static void *sis_sman_mm_allocate(void *private, unsigned long size,
++                                unsigned alignment)
++{
++      struct sis_memreq req;
++
++      req.size = size;
++      sis_malloc(&req);
++      if (req.size == 0)
++              return NULL;
++      else
++              return (void *)~req.offset;
++}
++
++static void sis_sman_mm_free(void *private, void *ref)
++{
++      sis_free(~((unsigned long)ref));
++}
++
++static void sis_sman_mm_destroy(void *private)
++{
++      ;
++}
++
++static unsigned long sis_sman_mm_offset(void *private, void *ref)
++{
++      return ~((unsigned long)ref);
++}
++
++#endif
++
++static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_fb_t *fb = data;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++#if defined(__linux__) && defined(CONFIG_FB_SIS)
++      {
++              struct drm_sman_mm sman_mm;
++              sman_mm.private = (void *)0xFFFFFFFF;
++              sman_mm.allocate = sis_sman_mm_allocate;
++              sman_mm.free = sis_sman_mm_free;
++              sman_mm.destroy = sis_sman_mm_destroy;
++              sman_mm.offset = sis_sman_mm_offset;
++              ret =
++                  drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
++      }
++#else
++      ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
++                               fb->size >> SIS_MM_ALIGN_SHIFT);
++#endif
++
++      if (ret) {
++              DRM_ERROR("VRAM memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->vram_initialized = 1;
++      dev_priv->vram_offset = fb->offset;
++
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
++
++      return 0;
++}
++
++static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
++                       void *data, int pool)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_mem_t *mem = data;
++      int retval = 0;
++      struct drm_memblock_item *item;
++
++      mutex_lock(&dev->struct_mutex);
++
++      if (0 == ((pool == 0) ? dev_priv->vram_initialized :
++                    dev_priv->agp_initialized)) {
++              DRM_ERROR
++                  ("Attempt to allocate from uninitialized memory manager.\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
++      item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
++                            (unsigned long)file_priv);
++
++      mutex_unlock(&dev->struct_mutex);
++      if (item) {
++              mem->offset = ((pool == 0) ?
++                            dev_priv->vram_offset : dev_priv->agp_offset) +
++                  (item->mm->
++                   offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
++              mem->free = item->user_hash.key;
++              mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
++      } else {
++              mem->offset = 0;
++              mem->size = 0;
++              mem->free = 0;
++              retval = -ENOMEM;
++      }
++
++      DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
++                mem->offset);
++
++      return retval;
++}
++
++static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_mem_t *mem = data;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_free_key(&dev_priv->sman, mem->free);
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("free = 0x%lx\n", mem->free);
++
++      return ret;
++}
++
++static int sis_fb_alloc(struct drm_device *dev, void *data,
++                      struct drm_file *file_priv)
++{
++      return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
++}
++
++static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      drm_sis_agp_t *agp = data;
++      int ret;
++      dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
++                               agp->size >> SIS_MM_ALIGN_SHIFT);
++
++      if (ret) {
++              DRM_ERROR("AGP memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->agp_initialized = 1;
++      dev_priv->agp_offset = agp->offset;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
++      return 0;
++}
++
++static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
++                             struct drm_file *file_priv)
++{
++
++      return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
++}
++
++static drm_local_map_t *sis_reg_init(struct drm_device *dev)
++{
++      struct drm_map_list *entry;
++      drm_local_map_t *map;
++
++      list_for_each_entry(entry, &dev->maplist, head) {
++              map = entry->map;
++              if (!map)
++                      continue;
++              if (map->type == _DRM_REGISTERS) {
++                      return map;
++              }
++      }
++      return NULL;
++}
++
++int sis_idle(struct drm_device *dev)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++      uint32_t idle_reg;
++      unsigned long end;
++      int i;
++
++      if (dev_priv->idle_fault)
++              return 0;
++
++      if (dev_priv->mmio == NULL) {
++              dev_priv->mmio = sis_reg_init(dev);
++              if (dev_priv->mmio == NULL) {
++                      DRM_ERROR("Could not find register map.\n");
++                      return 0;
++              }
++      }
++
++      /*
++       * Implement a device switch here if needed
++       */
++
++      if (dev_priv->chipset != SIS_CHIP_315)
++              return 0;
++
++      /*
++       * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
++       * because its polling frequency is too low.
++       */
++
++      end = jiffies + (DRM_HZ * 3);
++
++      for (i=0; i<4; ++i) {
++              do {
++                      idle_reg = SIS_READ(0x85cc);
++              } while ( !time_after_eq(jiffies, end) &&
++                        ((idle_reg & 0x80000000) != 0x80000000));
++      }
++
++      if (time_after_eq(jiffies, end)) {
++              DRM_ERROR("Graphics engine idle timeout. "
++                        "Disabling idle check\n");
++              dev_priv->idle_fault = 1;
++      }
++
++      /*
++       * The caller never sees an error code. It gets trapped
++       * in libdrm.
++       */
++
++      return 0;
++}
++
++
++void sis_lastclose(struct drm_device *dev)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++
++      if (!dev_priv)
++              return;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_sman_cleanup(&dev_priv->sman);
++      dev_priv->vram_initialized = 0;
++      dev_priv->agp_initialized = 0;
++      dev_priv->mmio = NULL;
++      mutex_unlock(&dev->struct_mutex);
++}
++
++void sis_reclaim_buffers_locked(struct drm_device * dev,
++                              struct drm_file *file_priv)
++{
++      drm_sis_private_t *dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++      if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
++              mutex_unlock(&dev->struct_mutex);
++              return;
++      }
++
++      if (dev->driver->dma_quiescent) {
++              dev->driver->dma_quiescent(dev);
++      }
++
++      drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
++      mutex_unlock(&dev->struct_mutex);
++      return;
++}
++
++struct drm_ioctl_desc sis_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
++};
++
++int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/tdfx_drv.c git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.c
+--- git/drivers/gpu/drm-tungsten/tdfx_drv.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,93 @@
++/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
++ * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
++ *
++ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Rickard E. (Rik) Faith <faith@valinux.com>
++ *    Daryll Strauss <daryll@valinux.com>
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#include "drmP.h"
++#include "tdfx_drv.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      tdfx_PCI_IDS
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features = DRIVER_USE_MTRR,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init tdfx_init(void)
++{
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit tdfx_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(tdfx_init);
++module_exit(tdfx_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/tdfx_drv.h git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.h
+--- git/drivers/gpu/drm-tungsten/tdfx_drv.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/tdfx_drv.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,47 @@
++/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
++ */
++/*
++ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __TDFX_H__
++#define __TDFX_H__
++
++/* General customization:
++ */
++
++#define DRIVER_AUTHOR         "VA Linux Systems Inc."
++
++#define DRIVER_NAME           "tdfx"
++#define DRIVER_DESC           "3dfx Banshee/Voodoo3+"
++#define DRIVER_DATE           "20010216"
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          0
++#define DRIVER_PATCHLEVEL     0
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_3d_reg.h git-nokia/drivers/gpu/drm-tungsten/via_3d_reg.h
+--- git/drivers/gpu/drm-tungsten/via_3d_reg.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_3d_reg.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1650 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef VIA_3D_REG_H
++#define VIA_3D_REG_H
++#define HC_REG_BASE             0x0400
++
++#define HC_REG_TRANS_SPACE      0x0040
++
++#define HC_ParaN_MASK           0xffffffff
++#define HC_Para_MASK            0x00ffffff
++#define HC_SubA_MASK            0xff000000
++#define HC_SubA_SHIFT           24
++/* Transmission Setting
++ */
++#define HC_REG_TRANS_SET        0x003c
++#define HC_ParaSubType_MASK     0xff000000
++#define HC_ParaType_MASK        0x00ff0000
++#define HC_ParaOS_MASK          0x0000ff00
++#define HC_ParaAdr_MASK         0x000000ff
++#define HC_ParaSubType_SHIFT    24
++#define HC_ParaType_SHIFT       16
++#define HC_ParaOS_SHIFT         8
++#define HC_ParaAdr_SHIFT        0
++
++#define HC_ParaType_CmdVdata    0x0000
++#define HC_ParaType_NotTex      0x0001
++#define HC_ParaType_Tex         0x0002
++#define HC_ParaType_Palette     0x0003
++#define HC_ParaType_PreCR       0x0010
++#define HC_ParaType_Auto        0x00fe
++
++/* Transmission Space
++ */
++#define HC_REG_Hpara0           0x0040
++#define HC_REG_HpataAF          0x02fc
++
++/* Read
++ */
++#define HC_REG_HREngSt          0x0000
++#define HC_REG_HRFIFOempty      0x0004
++#define HC_REG_HRFIFOfull       0x0008
++#define HC_REG_HRErr            0x000c
++#define HC_REG_FIFOstatus       0x0010
++/* HC_REG_HREngSt          0x0000
++ */
++#define HC_HDASZC_MASK          0x00010000
++#define HC_HSGEMI_MASK          0x0000f000
++#define HC_HLGEMISt_MASK        0x00000f00
++#define HC_HCRSt_MASK           0x00000080
++#define HC_HSE0St_MASK          0x00000040
++#define HC_HSE1St_MASK          0x00000020
++#define HC_HPESt_MASK           0x00000010
++#define HC_HXESt_MASK           0x00000008
++#define HC_HBESt_MASK           0x00000004
++#define HC_HE2St_MASK           0x00000002
++#define HC_HE3St_MASK           0x00000001
++/* HC_REG_HRFIFOempty      0x0004
++ */
++#define HC_HRZDempty_MASK       0x00000010
++#define HC_HRTXAempty_MASK      0x00000008
++#define HC_HRTXDempty_MASK      0x00000004
++#define HC_HWZDempty_MASK       0x00000002
++#define HC_HWCDempty_MASK       0x00000001
++/* HC_REG_HRFIFOfull       0x0008
++ */
++#define HC_HRZDfull_MASK        0x00000010
++#define HC_HRTXAfull_MASK       0x00000008
++#define HC_HRTXDfull_MASK       0x00000004
++#define HC_HWZDfull_MASK        0x00000002
++#define HC_HWCDfull_MASK        0x00000001
++/* HC_REG_HRErr            0x000c
++ */
++#define HC_HAGPCMErr_MASK       0x80000000
++#define HC_HAGPCMErrC_MASK      0x70000000
++/* HC_REG_FIFOstatus       0x0010
++ */
++#define HC_HRFIFOATall_MASK     0x80000000
++#define HC_HRFIFOATbusy_MASK    0x40000000
++#define HC_HRATFGMDo_MASK       0x00000100
++#define HC_HRATFGMDi_MASK       0x00000080
++#define HC_HRATFRZD_MASK        0x00000040
++#define HC_HRATFRTXA_MASK       0x00000020
++#define HC_HRATFRTXD_MASK       0x00000010
++#define HC_HRATFWZD_MASK        0x00000008
++#define HC_HRATFWCD_MASK        0x00000004
++#define HC_HRATTXTAG_MASK       0x00000002
++#define HC_HRATTXCH_MASK        0x00000001
++
++/* AGP Command Setting
++ */
++#define HC_SubA_HAGPBstL        0x0060
++#define HC_SubA_HAGPBendL       0x0061
++#define HC_SubA_HAGPCMNT        0x0062
++#define HC_SubA_HAGPBpL         0x0063
++#define HC_SubA_HAGPBpH         0x0064
++/* HC_SubA_HAGPCMNT        0x0062
++ */
++#define HC_HAGPCMNT_MASK        0x00800000
++#define HC_HCmdErrClr_MASK      0x00400000
++#define HC_HAGPBendH_MASK       0x0000ff00
++#define HC_HAGPBstH_MASK        0x000000ff
++#define HC_HAGPBendH_SHIFT      8
++#define HC_HAGPBstH_SHIFT       0
++/* HC_SubA_HAGPBpL         0x0063
++ */
++#define HC_HAGPBpL_MASK         0x00fffffc
++#define HC_HAGPBpID_MASK        0x00000003
++#define HC_HAGPBpID_PAUSE       0x00000000
++#define HC_HAGPBpID_JUMP        0x00000001
++#define HC_HAGPBpID_STOP        0x00000002
++/* HC_SubA_HAGPBpH         0x0064
++ */
++#define HC_HAGPBpH_MASK         0x00ffffff
++
++/* Miscellaneous Settings
++ */
++#define HC_SubA_HClipTB         0x0070
++#define HC_SubA_HClipLR         0x0071
++#define HC_SubA_HFPClipTL       0x0072
++#define HC_SubA_HFPClipBL       0x0073
++#define HC_SubA_HFPClipLL       0x0074
++#define HC_SubA_HFPClipRL       0x0075
++#define HC_SubA_HFPClipTBH      0x0076
++#define HC_SubA_HFPClipLRH      0x0077
++#define HC_SubA_HLP             0x0078
++#define HC_SubA_HLPRF           0x0079
++#define HC_SubA_HSolidCL        0x007a
++#define HC_SubA_HPixGC          0x007b
++#define HC_SubA_HSPXYOS         0x007c
++#define HC_SubA_HVertexCNT      0x007d
++
++#define HC_HClipT_MASK          0x00fff000
++#define HC_HClipT_SHIFT         12
++#define HC_HClipB_MASK          0x00000fff
++#define HC_HClipB_SHIFT         0
++#define HC_HClipL_MASK          0x00fff000
++#define HC_HClipL_SHIFT         12
++#define HC_HClipR_MASK          0x00000fff
++#define HC_HClipR_SHIFT         0
++#define HC_HFPClipBH_MASK       0x0000ff00
++#define HC_HFPClipBH_SHIFT      8
++#define HC_HFPClipTH_MASK       0x000000ff
++#define HC_HFPClipTH_SHIFT      0
++#define HC_HFPClipRH_MASK       0x0000ff00
++#define HC_HFPClipRH_SHIFT      8
++#define HC_HFPClipLH_MASK       0x000000ff
++#define HC_HFPClipLH_SHIFT      0
++#define HC_HSolidCH_MASK        0x000000ff
++#define HC_HPixGC_MASK          0x00800000
++#define HC_HSPXOS_MASK          0x00fff000
++#define HC_HSPXOS_SHIFT         12
++#define HC_HSPYOS_MASK          0x00000fff
++
++/* Command
++ * Command A
++ */
++#define HC_HCmdHeader_MASK      0xfe000000    /*0xffe00000 */
++#define HC_HE3Fire_MASK         0x00100000
++#define HC_HPMType_MASK         0x000f0000
++#define HC_HEFlag_MASK          0x0000e000
++#define HC_HShading_MASK        0x00001c00
++#define HC_HPMValidN_MASK       0x00000200
++#define HC_HPLEND_MASK          0x00000100
++#define HC_HVCycle_MASK         0x000000ff
++#define HC_HVCycle_Style_MASK   0x000000c0
++#define HC_HVCycle_ChgA_MASK    0x00000030
++#define HC_HVCycle_ChgB_MASK    0x0000000c
++#define HC_HVCycle_ChgC_MASK    0x00000003
++#define HC_HPMType_Point        0x00000000
++#define HC_HPMType_Line         0x00010000
++#define HC_HPMType_Tri          0x00020000
++#define HC_HPMType_TriWF        0x00040000
++#define HC_HEFlag_NoAA          0x00000000
++#define HC_HEFlag_ab            0x00008000
++#define HC_HEFlag_bc            0x00004000
++#define HC_HEFlag_ca            0x00002000
++#define HC_HShading_Solid       0x00000000
++#define HC_HShading_FlatA       0x00000400
++#define HC_HShading_FlatB       0x00000800
++#define HC_HShading_FlatC       0x00000c00
++#define HC_HShading_Gouraud     0x00001000
++#define HC_HVCycle_Full         0x00000000
++#define HC_HVCycle_AFP          0x00000040
++#define HC_HVCycle_One          0x000000c0
++#define HC_HVCycle_NewA         0x00000000
++#define HC_HVCycle_AA           0x00000010
++#define HC_HVCycle_AB           0x00000020
++#define HC_HVCycle_AC           0x00000030
++#define HC_HVCycle_NewB         0x00000000
++#define HC_HVCycle_BA           0x00000004
++#define HC_HVCycle_BB           0x00000008
++#define HC_HVCycle_BC           0x0000000c
++#define HC_HVCycle_NewC         0x00000000
++#define HC_HVCycle_CA           0x00000001
++#define HC_HVCycle_CB           0x00000002
++#define HC_HVCycle_CC           0x00000003
++
++/* Command B
++ */
++#define HC_HLPrst_MASK          0x00010000
++#define HC_HLLastP_MASK         0x00008000
++#define HC_HVPMSK_MASK          0x00007f80
++#define HC_HBFace_MASK          0x00000040
++#define HC_H2nd1VT_MASK         0x0000003f
++#define HC_HVPMSK_X             0x00004000
++#define HC_HVPMSK_Y             0x00002000
++#define HC_HVPMSK_Z             0x00001000
++#define HC_HVPMSK_W             0x00000800
++#define HC_HVPMSK_Cd            0x00000400
++#define HC_HVPMSK_Cs            0x00000200
++#define HC_HVPMSK_S             0x00000100
++#define HC_HVPMSK_T             0x00000080
++
++/* Enable Setting
++ */
++#define HC_SubA_HEnable         0x0000
++#define HC_HenTXEnvMap_MASK     0x00200000
++#define HC_HenVertexCNT_MASK    0x00100000
++#define HC_HenCPUDAZ_MASK       0x00080000
++#define HC_HenDASZWC_MASK       0x00040000
++#define HC_HenFBCull_MASK       0x00020000
++#define HC_HenCW_MASK           0x00010000
++#define HC_HenAA_MASK           0x00008000
++#define HC_HenST_MASK           0x00004000
++#define HC_HenZT_MASK           0x00002000
++#define HC_HenZW_MASK           0x00001000
++#define HC_HenAT_MASK           0x00000800
++#define HC_HenAW_MASK           0x00000400
++#define HC_HenSP_MASK           0x00000200
++#define HC_HenLP_MASK           0x00000100
++#define HC_HenTXCH_MASK         0x00000080
++#define HC_HenTXMP_MASK         0x00000040
++#define HC_HenTXPP_MASK         0x00000020
++#define HC_HenTXTR_MASK         0x00000010
++#define HC_HenCS_MASK           0x00000008
++#define HC_HenFOG_MASK          0x00000004
++#define HC_HenABL_MASK          0x00000002
++#define HC_HenDT_MASK           0x00000001
++
++/* Z Setting
++ */
++#define HC_SubA_HZWBBasL        0x0010
++#define HC_SubA_HZWBBasH        0x0011
++#define HC_SubA_HZWBType        0x0012
++#define HC_SubA_HZBiasL         0x0013
++#define HC_SubA_HZWBend         0x0014
++#define HC_SubA_HZWTMD          0x0015
++#define HC_SubA_HZWCDL          0x0016
++#define HC_SubA_HZWCTAGnum      0x0017
++#define HC_SubA_HZCYNum         0x0018
++#define HC_SubA_HZWCFire        0x0019
++/* HC_SubA_HZWBType
++ */
++#define HC_HZWBType_MASK        0x00800000
++#define HC_HZBiasedWB_MASK      0x00400000
++#define HC_HZONEasFF_MASK       0x00200000
++#define HC_HZOONEasFF_MASK      0x00100000
++#define HC_HZWBFM_MASK          0x00030000
++#define HC_HZWBLoc_MASK         0x0000c000
++#define HC_HZWBPit_MASK         0x00003fff
++#define HC_HZWBFM_16            0x00000000
++#define HC_HZWBFM_32            0x00020000
++#define HC_HZWBFM_24            0x00030000
++#define HC_HZWBLoc_Local        0x00000000
++#define HC_HZWBLoc_SyS          0x00004000
++/* HC_SubA_HZWBend
++ */
++#define HC_HZWBend_MASK         0x00ffe000
++#define HC_HZBiasH_MASK         0x000000ff
++#define HC_HZWBend_SHIFT        10
++/* HC_SubA_HZWTMD
++ */
++#define HC_HZWTMD_MASK          0x00070000
++#define HC_HEBEBias_MASK        0x00007f00
++#define HC_HZNF_MASK            0x000000ff
++#define HC_HZWTMD_NeverPass     0x00000000
++#define HC_HZWTMD_LT            0x00010000
++#define HC_HZWTMD_EQ            0x00020000
++#define HC_HZWTMD_LE            0x00030000
++#define HC_HZWTMD_GT            0x00040000
++#define HC_HZWTMD_NE            0x00050000
++#define HC_HZWTMD_GE            0x00060000
++#define HC_HZWTMD_AllPass       0x00070000
++#define HC_HEBEBias_SHIFT       8
++/* HC_SubA_HZWCDL          0x0016
++ */
++#define HC_HZWCDL_MASK          0x00ffffff
++/* HC_SubA_HZWCTAGnum      0x0017
++ */
++#define HC_HZWCTAGnum_MASK      0x00ff0000
++#define HC_HZWCTAGnum_SHIFT     16
++#define HC_HZWCDH_MASK          0x000000ff
++#define HC_HZWCDH_SHIFT         0
++/* HC_SubA_HZCYNum         0x0018
++ */
++#define HC_HZCYNum_MASK         0x00030000
++#define HC_HZCYNum_SHIFT        16
++#define HC_HZWCQWnum_MASK       0x00003fff
++#define HC_HZWCQWnum_SHIFT      0
++/* HC_SubA_HZWCFire        0x0019
++ */
++#define HC_ZWCFire_MASK         0x00010000
++#define HC_HZWCQWnumLast_MASK   0x00003fff
++#define HC_HZWCQWnumLast_SHIFT  0
++
++/* Stencil Setting
++ */
++#define HC_SubA_HSTREF          0x0023
++#define HC_SubA_HSTMD           0x0024
++/* HC_SubA_HSBFM
++ */
++#define HC_HSBFM_MASK           0x00030000
++#define HC_HSBLoc_MASK          0x0000c000
++#define HC_HSBPit_MASK          0x00003fff
++/* HC_SubA_HSTREF
++ */
++#define HC_HSTREF_MASK          0x00ff0000
++#define HC_HSTOPMSK_MASK        0x0000ff00
++#define HC_HSTBMSK_MASK         0x000000ff
++#define HC_HSTREF_SHIFT         16
++#define HC_HSTOPMSK_SHIFT       8
++/* HC_SubA_HSTMD
++ */
++#define HC_HSTMD_MASK           0x00070000
++#define HC_HSTOPSF_MASK         0x000001c0
++#define HC_HSTOPSPZF_MASK       0x00000038
++#define HC_HSTOPSPZP_MASK       0x00000007
++#define HC_HSTMD_NeverPass      0x00000000
++#define HC_HSTMD_LT             0x00010000
++#define HC_HSTMD_EQ             0x00020000
++#define HC_HSTMD_LE             0x00030000
++#define HC_HSTMD_GT             0x00040000
++#define HC_HSTMD_NE             0x00050000
++#define HC_HSTMD_GE             0x00060000
++#define HC_HSTMD_AllPass        0x00070000
++#define HC_HSTOPSF_KEEP         0x00000000
++#define HC_HSTOPSF_ZERO         0x00000040
++#define HC_HSTOPSF_REPLACE      0x00000080
++#define HC_HSTOPSF_INCRSAT      0x000000c0
++#define HC_HSTOPSF_DECRSAT      0x00000100
++#define HC_HSTOPSF_INVERT       0x00000140
++#define HC_HSTOPSF_INCR         0x00000180
++#define HC_HSTOPSF_DECR         0x000001c0
++#define HC_HSTOPSPZF_KEEP       0x00000000
++#define HC_HSTOPSPZF_ZERO       0x00000008
++#define HC_HSTOPSPZF_REPLACE    0x00000010
++#define HC_HSTOPSPZF_INCRSAT    0x00000018
++#define HC_HSTOPSPZF_DECRSAT    0x00000020
++#define HC_HSTOPSPZF_INVERT     0x00000028
++#define HC_HSTOPSPZF_INCR       0x00000030
++#define HC_HSTOPSPZF_DECR       0x00000038
++#define HC_HSTOPSPZP_KEEP       0x00000000
++#define HC_HSTOPSPZP_ZERO       0x00000001
++#define HC_HSTOPSPZP_REPLACE    0x00000002
++#define HC_HSTOPSPZP_INCRSAT    0x00000003
++#define HC_HSTOPSPZP_DECRSAT    0x00000004
++#define HC_HSTOPSPZP_INVERT     0x00000005
++#define HC_HSTOPSPZP_INCR       0x00000006
++#define HC_HSTOPSPZP_DECR       0x00000007
++
++/* Alpha Setting
++ */
++#define HC_SubA_HABBasL         0x0030
++#define HC_SubA_HABBasH         0x0031
++#define HC_SubA_HABFM           0x0032
++#define HC_SubA_HATMD           0x0033
++#define HC_SubA_HABLCsat        0x0034
++#define HC_SubA_HABLCop         0x0035
++#define HC_SubA_HABLAsat        0x0036
++#define HC_SubA_HABLAop         0x0037
++#define HC_SubA_HABLRCa         0x0038
++#define HC_SubA_HABLRFCa        0x0039
++#define HC_SubA_HABLRCbias      0x003a
++#define HC_SubA_HABLRCb         0x003b
++#define HC_SubA_HABLRFCb        0x003c
++#define HC_SubA_HABLRAa         0x003d
++#define HC_SubA_HABLRAb         0x003e
++/* HC_SubA_HABFM
++ */
++#define HC_HABFM_MASK           0x00030000
++#define HC_HABLoc_MASK          0x0000c000
++#define HC_HABPit_MASK          0x000007ff
++/* HC_SubA_HATMD
++ */
++#define HC_HATMD_MASK           0x00000700
++#define HC_HATREF_MASK          0x000000ff
++#define HC_HATMD_NeverPass      0x00000000
++#define HC_HATMD_LT             0x00000100
++#define HC_HATMD_EQ             0x00000200
++#define HC_HATMD_LE             0x00000300
++#define HC_HATMD_GT             0x00000400
++#define HC_HATMD_NE             0x00000500
++#define HC_HATMD_GE             0x00000600
++#define HC_HATMD_AllPass        0x00000700
++/* HC_SubA_HABLCsat
++ */
++#define HC_HABLCsat_MASK        0x00010000
++#define HC_HABLCa_MASK          0x0000fc00
++#define HC_HABLCa_C_MASK        0x0000c000
++#define HC_HABLCa_OPC_MASK      0x00003c00
++#define HC_HABLFCa_MASK         0x000003f0
++#define HC_HABLFCa_C_MASK       0x00000300
++#define HC_HABLFCa_OPC_MASK     0x000000f0
++#define HC_HABLCbias_MASK       0x0000000f
++#define HC_HABLCbias_C_MASK     0x00000008
++#define HC_HABLCbias_OPC_MASK   0x00000007
++/*-- Define the input color.
++ */
++#define HC_XC_Csrc              0x00000000
++#define HC_XC_Cdst              0x00000001
++#define HC_XC_Asrc              0x00000002
++#define HC_XC_Adst              0x00000003
++#define HC_XC_Fog               0x00000004
++#define HC_XC_HABLRC            0x00000005
++#define HC_XC_minSrcDst         0x00000006
++#define HC_XC_maxSrcDst         0x00000007
++#define HC_XC_mimAsrcInvAdst    0x00000008
++#define HC_XC_OPC               0x00000000
++#define HC_XC_InvOPC            0x00000010
++#define HC_XC_OPCp5             0x00000020
++/*-- Define the input Alpha
++ */
++#define HC_XA_OPA               0x00000000
++#define HC_XA_InvOPA            0x00000010
++#define HC_XA_OPAp5             0x00000020
++#define HC_XA_0                 0x00000000
++#define HC_XA_Asrc              0x00000001
++#define HC_XA_Adst              0x00000002
++#define HC_XA_Fog               0x00000003
++#define HC_XA_minAsrcFog        0x00000004
++#define HC_XA_minAsrcAdst       0x00000005
++#define HC_XA_maxAsrcFog        0x00000006
++#define HC_XA_maxAsrcAdst       0x00000007
++#define HC_XA_HABLRA            0x00000008
++#define HC_XA_minAsrcInvAdst    0x00000008
++#define HC_XA_HABLFRA           0x00000009
++/*--
++ */
++#define HC_HABLCa_OPC           (HC_XC_OPC << 10)
++#define HC_HABLCa_InvOPC        (HC_XC_InvOPC << 10)
++#define HC_HABLCa_OPCp5         (HC_XC_OPCp5 << 10)
++#define HC_HABLCa_Csrc          (HC_XC_Csrc << 10)
++#define HC_HABLCa_Cdst          (HC_XC_Cdst << 10)
++#define HC_HABLCa_Asrc          (HC_XC_Asrc << 10)
++#define HC_HABLCa_Adst          (HC_XC_Adst << 10)
++#define HC_HABLCa_Fog           (HC_XC_Fog << 10)
++#define HC_HABLCa_HABLRCa       (HC_XC_HABLRC << 10)
++#define HC_HABLCa_minSrcDst     (HC_XC_minSrcDst << 10)
++#define HC_HABLCa_maxSrcDst     (HC_XC_maxSrcDst << 10)
++#define HC_HABLFCa_OPC              (HC_XC_OPC << 4)
++#define HC_HABLFCa_InvOPC           (HC_XC_InvOPC << 4)
++#define HC_HABLFCa_OPCp5            (HC_XC_OPCp5 << 4)
++#define HC_HABLFCa_Csrc             (HC_XC_Csrc << 4)
++#define HC_HABLFCa_Cdst             (HC_XC_Cdst << 4)
++#define HC_HABLFCa_Asrc             (HC_XC_Asrc << 4)
++#define HC_HABLFCa_Adst             (HC_XC_Adst << 4)
++#define HC_HABLFCa_Fog              (HC_XC_Fog << 4)
++#define HC_HABLFCa_HABLRCa          (HC_XC_HABLRC << 4)
++#define HC_HABLFCa_minSrcDst        (HC_XC_minSrcDst << 4)
++#define HC_HABLFCa_maxSrcDst        (HC_XC_maxSrcDst << 4)
++#define HC_HABLFCa_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 4)
++#define HC_HABLCbias_HABLRCbias 0x00000000
++#define HC_HABLCbias_Asrc       0x00000001
++#define HC_HABLCbias_Adst       0x00000002
++#define HC_HABLCbias_Fog        0x00000003
++#define HC_HABLCbias_Cin        0x00000004
++/* HC_SubA_HABLCop         0x0035
++ */
++#define HC_HABLdot_MASK         0x00010000
++#define HC_HABLCop_MASK         0x00004000
++#define HC_HABLCb_MASK          0x00003f00
++#define HC_HABLCb_C_MASK        0x00003000
++#define HC_HABLCb_OPC_MASK      0x00000f00
++#define HC_HABLFCb_MASK         0x000000fc
++#define HC_HABLFCb_C_MASK       0x000000c0
++#define HC_HABLFCb_OPC_MASK     0x0000003c
++#define HC_HABLCshift_MASK      0x00000003
++#define HC_HABLCb_OPC           (HC_XC_OPC << 8)
++#define HC_HABLCb_InvOPC        (HC_XC_InvOPC << 8)
++#define HC_HABLCb_OPCp5         (HC_XC_OPCp5 << 8)
++#define HC_HABLCb_Csrc          (HC_XC_Csrc << 8)
++#define HC_HABLCb_Cdst          (HC_XC_Cdst << 8)
++#define HC_HABLCb_Asrc          (HC_XC_Asrc << 8)
++#define HC_HABLCb_Adst          (HC_XC_Adst << 8)
++#define HC_HABLCb_Fog           (HC_XC_Fog << 8)
++#define HC_HABLCb_HABLRCa       (HC_XC_HABLRC << 8)
++#define HC_HABLCb_minSrcDst     (HC_XC_minSrcDst << 8)
++#define HC_HABLCb_maxSrcDst     (HC_XC_maxSrcDst << 8)
++#define HC_HABLFCb_OPC              (HC_XC_OPC << 2)
++#define HC_HABLFCb_InvOPC           (HC_XC_InvOPC << 2)
++#define HC_HABLFCb_OPCp5            (HC_XC_OPCp5 << 2)
++#define HC_HABLFCb_Csrc             (HC_XC_Csrc << 2)
++#define HC_HABLFCb_Cdst             (HC_XC_Cdst << 2)
++#define HC_HABLFCb_Asrc             (HC_XC_Asrc << 2)
++#define HC_HABLFCb_Adst             (HC_XC_Adst << 2)
++#define HC_HABLFCb_Fog              (HC_XC_Fog << 2)
++#define HC_HABLFCb_HABLRCb          (HC_XC_HABLRC << 2)
++#define HC_HABLFCb_minSrcDst        (HC_XC_minSrcDst << 2)
++#define HC_HABLFCb_maxSrcDst        (HC_XC_maxSrcDst << 2)
++#define HC_HABLFCb_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 2)
++/* HC_SubA_HABLAsat        0x0036
++ */
++#define HC_HABLAsat_MASK        0x00010000
++#define HC_HABLAa_MASK          0x0000fc00
++#define HC_HABLAa_A_MASK        0x0000c000
++#define HC_HABLAa_OPA_MASK      0x00003c00
++#define HC_HABLFAa_MASK         0x000003f0
++#define HC_HABLFAa_A_MASK       0x00000300
++#define HC_HABLFAa_OPA_MASK     0x000000f0
++#define HC_HABLAbias_MASK       0x0000000f
++#define HC_HABLAbias_A_MASK     0x00000008
++#define HC_HABLAbias_OPA_MASK   0x00000007
++#define HC_HABLAa_OPA           (HC_XA_OPA << 10)
++#define HC_HABLAa_InvOPA        (HC_XA_InvOPA << 10)
++#define HC_HABLAa_OPAp5         (HC_XA_OPAp5 << 10)
++#define HC_HABLAa_0             (HC_XA_0 << 10)
++#define HC_HABLAa_Asrc          (HC_XA_Asrc << 10)
++#define HC_HABLAa_Adst          (HC_XA_Adst << 10)
++#define HC_HABLAa_Fog           (HC_XA_Fog << 10)
++#define HC_HABLAa_minAsrcFog    (HC_XA_minAsrcFog << 10)
++#define HC_HABLAa_minAsrcAdst   (HC_XA_minAsrcAdst << 10)
++#define HC_HABLAa_maxAsrcFog    (HC_XA_maxAsrcFog << 10)
++#define HC_HABLAa_maxAsrcAdst   (HC_XA_maxAsrcAdst << 10)
++#define HC_HABLAa_HABLRA        (HC_XA_HABLRA << 10)
++#define HC_HABLFAa_OPA          (HC_XA_OPA << 4)
++#define HC_HABLFAa_InvOPA       (HC_XA_InvOPA << 4)
++#define HC_HABLFAa_OPAp5        (HC_XA_OPAp5 << 4)
++#define HC_HABLFAa_0            (HC_XA_0 << 4)
++#define HC_HABLFAa_Asrc         (HC_XA_Asrc << 4)
++#define HC_HABLFAa_Adst         (HC_XA_Adst << 4)
++#define HC_HABLFAa_Fog          (HC_XA_Fog << 4)
++#define HC_HABLFAa_minAsrcFog   (HC_XA_minAsrcFog << 4)
++#define HC_HABLFAa_minAsrcAdst  (HC_XA_minAsrcAdst << 4)
++#define HC_HABLFAa_maxAsrcFog   (HC_XA_maxAsrcFog << 4)
++#define HC_HABLFAa_maxAsrcAdst  (HC_XA_maxAsrcAdst << 4)
++#define HC_HABLFAa_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 4)
++#define HC_HABLFAa_HABLFRA          (HC_XA_HABLFRA << 4)
++#define HC_HABLAbias_HABLRAbias 0x00000000
++#define HC_HABLAbias_Asrc       0x00000001
++#define HC_HABLAbias_Adst       0x00000002
++#define HC_HABLAbias_Fog        0x00000003
++#define HC_HABLAbias_Aaa        0x00000004
++/* HC_SubA_HABLAop         0x0037
++ */
++#define HC_HABLAop_MASK         0x00004000
++#define HC_HABLAb_MASK          0x00003f00
++#define HC_HABLAb_OPA_MASK      0x00000f00
++#define HC_HABLFAb_MASK         0x000000fc
++#define HC_HABLFAb_OPA_MASK     0x0000003c
++#define HC_HABLAshift_MASK      0x00000003
++#define HC_HABLAb_OPA           (HC_XA_OPA << 8)
++#define HC_HABLAb_InvOPA        (HC_XA_InvOPA << 8)
++#define HC_HABLAb_OPAp5         (HC_XA_OPAp5 << 8)
++#define HC_HABLAb_0             (HC_XA_0 << 8)
++#define HC_HABLAb_Asrc          (HC_XA_Asrc << 8)
++#define HC_HABLAb_Adst          (HC_XA_Adst << 8)
++#define HC_HABLAb_Fog           (HC_XA_Fog << 8)
++#define HC_HABLAb_minAsrcFog    (HC_XA_minAsrcFog << 8)
++#define HC_HABLAb_minAsrcAdst   (HC_XA_minAsrcAdst << 8)
++#define HC_HABLAb_maxAsrcFog    (HC_XA_maxAsrcFog << 8)
++#define HC_HABLAb_maxAsrcAdst   (HC_XA_maxAsrcAdst << 8)
++#define HC_HABLAb_HABLRA        (HC_XA_HABLRA << 8)
++#define HC_HABLFAb_OPA          (HC_XA_OPA << 2)
++#define HC_HABLFAb_InvOPA       (HC_XA_InvOPA << 2)
++#define HC_HABLFAb_OPAp5        (HC_XA_OPAp5 << 2)
++#define HC_HABLFAb_0            (HC_XA_0 << 2)
++#define HC_HABLFAb_Asrc         (HC_XA_Asrc << 2)
++#define HC_HABLFAb_Adst         (HC_XA_Adst << 2)
++#define HC_HABLFAb_Fog          (HC_XA_Fog << 2)
++#define HC_HABLFAb_minAsrcFog   (HC_XA_minAsrcFog << 2)
++#define HC_HABLFAb_minAsrcAdst  (HC_XA_minAsrcAdst << 2)
++#define HC_HABLFAb_maxAsrcFog   (HC_XA_maxAsrcFog << 2)
++#define HC_HABLFAb_maxAsrcAdst  (HC_XA_maxAsrcAdst << 2)
++#define HC_HABLFAb_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 2)
++#define HC_HABLFAb_HABLFRA          (HC_XA_HABLFRA << 2)
++/* HC_SubA_HABLRAa         0x003d
++ */
++#define HC_HABLRAa_MASK         0x00ff0000
++#define HC_HABLRFAa_MASK        0x0000ff00
++#define HC_HABLRAbias_MASK      0x000000ff
++#define HC_HABLRAa_SHIFT        16
++#define HC_HABLRFAa_SHIFT       8
++/* HC_SubA_HABLRAb         0x003e
++ */
++#define HC_HABLRAb_MASK         0x0000ff00
++#define HC_HABLRFAb_MASK        0x000000ff
++#define HC_HABLRAb_SHIFT        8
++
++/* Destination Setting
++ */
++#define HC_SubA_HDBBasL         0x0040
++#define HC_SubA_HDBBasH         0x0041
++#define HC_SubA_HDBFM           0x0042
++#define HC_SubA_HFBBMSKL        0x0043
++#define HC_SubA_HROP            0x0044
++/* HC_SubA_HDBFM           0x0042
++ */
++#define HC_HDBFM_MASK           0x001f0000
++#define HC_HDBLoc_MASK          0x0000c000
++#define HC_HDBPit_MASK          0x00003fff
++#define HC_HDBFM_RGB555         0x00000000
++#define HC_HDBFM_RGB565         0x00010000
++#define HC_HDBFM_ARGB4444       0x00020000
++#define HC_HDBFM_ARGB1555       0x00030000
++#define HC_HDBFM_BGR555         0x00040000
++#define HC_HDBFM_BGR565         0x00050000
++#define HC_HDBFM_ABGR4444       0x00060000
++#define HC_HDBFM_ABGR1555       0x00070000
++#define HC_HDBFM_ARGB0888       0x00080000
++#define HC_HDBFM_ARGB8888       0x00090000
++#define HC_HDBFM_ABGR0888       0x000a0000
++#define HC_HDBFM_ABGR8888       0x000b0000
++#define HC_HDBLoc_Local         0x00000000
++#define HC_HDBLoc_Sys           0x00004000
++/* HC_SubA_HROP            0x0044
++ */
++#define HC_HROP_MASK            0x00000f00
++#define HC_HFBBMSKH_MASK        0x000000ff
++#define HC_HROP_BLACK           0x00000000
++#define HC_HROP_DPon            0x00000100
++#define HC_HROP_DPna            0x00000200
++#define HC_HROP_Pn              0x00000300
++#define HC_HROP_PDna            0x00000400
++#define HC_HROP_Dn              0x00000500
++#define HC_HROP_DPx             0x00000600
++#define HC_HROP_DPan            0x00000700
++#define HC_HROP_DPa             0x00000800
++#define HC_HROP_DPxn            0x00000900
++#define HC_HROP_D               0x00000a00
++#define HC_HROP_DPno            0x00000b00
++#define HC_HROP_P               0x00000c00
++#define HC_HROP_PDno            0x00000d00
++#define HC_HROP_DPo             0x00000e00
++#define HC_HROP_WHITE           0x00000f00
++
++/* Fog Setting
++ */
++#define HC_SubA_HFogLF          0x0050
++#define HC_SubA_HFogCL          0x0051
++#define HC_SubA_HFogCH          0x0052
++#define HC_SubA_HFogStL         0x0053
++#define HC_SubA_HFogStH         0x0054
++#define HC_SubA_HFogOOdMF       0x0055
++#define HC_SubA_HFogOOdEF       0x0056
++#define HC_SubA_HFogEndL        0x0057
++#define HC_SubA_HFogDenst       0x0058
++/* HC_SubA_FogLF           0x0050
++ */
++#define HC_FogLF_MASK           0x00000010
++#define HC_FogEq_MASK           0x00000008
++#define HC_FogMD_MASK           0x00000007
++#define HC_FogMD_LocalFog        0x00000000
++#define HC_FogMD_LinearFog       0x00000002
++#define HC_FogMD_ExponentialFog  0x00000004
++#define HC_FogMD_Exponential2Fog 0x00000005
++/* #define HC_FogMD_FogTable       0x00000003 */
++
++/* HC_SubA_HFogDenst        0x0058
++ */
++#define HC_FogDenst_MASK        0x001fff00
++#define HC_FogEndL_MASK         0x000000ff
++
++/* Texture subtype definitions
++ */
++#define HC_SubType_Tex0         0x00000000
++#define HC_SubType_Tex1         0x00000001
++#define HC_SubType_TexGeneral   0x000000fe
++
++/* Attribute of texture n
++ */
++#define HC_SubA_HTXnL0BasL      0x0000
++#define HC_SubA_HTXnL1BasL      0x0001
++#define HC_SubA_HTXnL2BasL      0x0002
++#define HC_SubA_HTXnL3BasL      0x0003
++#define HC_SubA_HTXnL4BasL      0x0004
++#define HC_SubA_HTXnL5BasL      0x0005
++#define HC_SubA_HTXnL6BasL      0x0006
++#define HC_SubA_HTXnL7BasL      0x0007
++#define HC_SubA_HTXnL8BasL      0x0008
++#define HC_SubA_HTXnL9BasL      0x0009
++#define HC_SubA_HTXnLaBasL      0x000a
++#define HC_SubA_HTXnLbBasL      0x000b
++#define HC_SubA_HTXnLcBasL      0x000c
++#define HC_SubA_HTXnLdBasL      0x000d
++#define HC_SubA_HTXnLeBasL      0x000e
++#define HC_SubA_HTXnLfBasL      0x000f
++#define HC_SubA_HTXnL10BasL     0x0010
++#define HC_SubA_HTXnL11BasL     0x0011
++#define HC_SubA_HTXnL012BasH    0x0020
++#define HC_SubA_HTXnL345BasH    0x0021
++#define HC_SubA_HTXnL678BasH    0x0022
++#define HC_SubA_HTXnL9abBasH    0x0023
++#define HC_SubA_HTXnLcdeBasH    0x0024
++#define HC_SubA_HTXnLf1011BasH  0x0025
++#define HC_SubA_HTXnL0Pit       0x002b
++#define HC_SubA_HTXnL1Pit       0x002c
++#define HC_SubA_HTXnL2Pit       0x002d
++#define HC_SubA_HTXnL3Pit       0x002e
++#define HC_SubA_HTXnL4Pit       0x002f
++#define HC_SubA_HTXnL5Pit       0x0030
++#define HC_SubA_HTXnL6Pit       0x0031
++#define HC_SubA_HTXnL7Pit       0x0032
++#define HC_SubA_HTXnL8Pit       0x0033
++#define HC_SubA_HTXnL9Pit       0x0034
++#define HC_SubA_HTXnLaPit       0x0035
++#define HC_SubA_HTXnLbPit       0x0036
++#define HC_SubA_HTXnLcPit       0x0037
++#define HC_SubA_HTXnLdPit       0x0038
++#define HC_SubA_HTXnLePit       0x0039
++#define HC_SubA_HTXnLfPit       0x003a
++#define HC_SubA_HTXnL10Pit      0x003b
++#define HC_SubA_HTXnL11Pit      0x003c
++#define HC_SubA_HTXnL0_5WE      0x004b
++#define HC_SubA_HTXnL6_bWE      0x004c
++#define HC_SubA_HTXnLc_11WE     0x004d
++#define HC_SubA_HTXnL0_5HE      0x0051
++#define HC_SubA_HTXnL6_bHE      0x0052
++#define HC_SubA_HTXnLc_11HE     0x0053
++#define HC_SubA_HTXnL0OS        0x0077
++#define HC_SubA_HTXnTB          0x0078
++#define HC_SubA_HTXnMPMD        0x0079
++#define HC_SubA_HTXnCLODu       0x007a
++#define HC_SubA_HTXnFM          0x007b
++#define HC_SubA_HTXnTRCH        0x007c
++#define HC_SubA_HTXnTRCL        0x007d
++#define HC_SubA_HTXnTBC         0x007e
++#define HC_SubA_HTXnTRAH        0x007f
++#define HC_SubA_HTXnTBLCsat     0x0080
++#define HC_SubA_HTXnTBLCop      0x0081
++#define HC_SubA_HTXnTBLMPfog    0x0082
++#define HC_SubA_HTXnTBLAsat     0x0083
++#define HC_SubA_HTXnTBLRCa      0x0085
++#define HC_SubA_HTXnTBLRCb      0x0086
++#define HC_SubA_HTXnTBLRCc      0x0087
++#define HC_SubA_HTXnTBLRCbias   0x0088
++#define HC_SubA_HTXnTBLRAa      0x0089
++#define HC_SubA_HTXnTBLRFog     0x008a
++#define HC_SubA_HTXnBumpM00     0x0090
++#define HC_SubA_HTXnBumpM01     0x0091
++#define HC_SubA_HTXnBumpM10     0x0092
++#define HC_SubA_HTXnBumpM11     0x0093
++#define HC_SubA_HTXnLScale      0x0094
++#define HC_SubA_HTXSMD          0x0000
++/* HC_SubA_HTXnL012BasH    0x0020
++ */
++#define HC_HTXnL0BasH_MASK      0x000000ff
++#define HC_HTXnL1BasH_MASK      0x0000ff00
++#define HC_HTXnL2BasH_MASK      0x00ff0000
++#define HC_HTXnL1BasH_SHIFT     8
++#define HC_HTXnL2BasH_SHIFT     16
++/* HC_SubA_HTXnL345BasH    0x0021
++ */
++#define HC_HTXnL3BasH_MASK      0x000000ff
++#define HC_HTXnL4BasH_MASK      0x0000ff00
++#define HC_HTXnL5BasH_MASK      0x00ff0000
++#define HC_HTXnL4BasH_SHIFT     8
++#define HC_HTXnL5BasH_SHIFT     16
++/* HC_SubA_HTXnL678BasH    0x0022
++ */
++#define HC_HTXnL6BasH_MASK      0x000000ff
++#define HC_HTXnL7BasH_MASK      0x0000ff00
++#define HC_HTXnL8BasH_MASK      0x00ff0000
++#define HC_HTXnL7BasH_SHIFT     8
++#define HC_HTXnL8BasH_SHIFT     16
++/* HC_SubA_HTXnL9abBasH    0x0023
++ */
++#define HC_HTXnL9BasH_MASK      0x000000ff
++#define HC_HTXnLaBasH_MASK      0x0000ff00
++#define HC_HTXnLbBasH_MASK      0x00ff0000
++#define HC_HTXnLaBasH_SHIFT     8
++#define HC_HTXnLbBasH_SHIFT     16
++/* HC_SubA_HTXnLcdeBasH    0x0024
++ */
++#define HC_HTXnLcBasH_MASK      0x000000ff
++#define HC_HTXnLdBasH_MASK      0x0000ff00
++#define HC_HTXnLeBasH_MASK      0x00ff0000
++#define HC_HTXnLdBasH_SHIFT     8
++#define HC_HTXnLeBasH_SHIFT     16
++/* HC_SubA_HTXnLf1011BasH 0x0025
++ */
++#define HC_HTXnLfBasH_MASK      0x000000ff
++#define HC_HTXnL10BasH_MASK     0x0000ff00
++#define HC_HTXnL11BasH_MASK     0x00ff0000
++#define HC_HTXnL10BasH_SHIFT    8
++#define HC_HTXnL11BasH_SHIFT    16
++/* HC_SubA_HTXnL0Pit       0x002b
++ */
++#define HC_HTXnLnPit_MASK       0x00003fff
++#define HC_HTXnEnPit_MASK       0x00080000
++#define HC_HTXnLnPitE_MASK      0x00f00000
++#define HC_HTXnLnPitE_SHIFT     20
++/* HC_SubA_HTXnL0_5WE      0x004b
++ */
++#define HC_HTXnL0WE_MASK        0x0000000f
++#define HC_HTXnL1WE_MASK        0x000000f0
++#define HC_HTXnL2WE_MASK        0x00000f00
++#define HC_HTXnL3WE_MASK        0x0000f000
++#define HC_HTXnL4WE_MASK        0x000f0000
++#define HC_HTXnL5WE_MASK        0x00f00000
++#define HC_HTXnL1WE_SHIFT       4
++#define HC_HTXnL2WE_SHIFT       8
++#define HC_HTXnL3WE_SHIFT       12
++#define HC_HTXnL4WE_SHIFT       16
++#define HC_HTXnL5WE_SHIFT       20
++/* HC_SubA_HTXnL6_bWE      0x004c
++ */
++#define HC_HTXnL6WE_MASK        0x0000000f
++#define HC_HTXnL7WE_MASK        0x000000f0
++#define HC_HTXnL8WE_MASK        0x00000f00
++#define HC_HTXnL9WE_MASK        0x0000f000
++#define HC_HTXnLaWE_MASK        0x000f0000
++#define HC_HTXnLbWE_MASK        0x00f00000
++#define HC_HTXnL7WE_SHIFT       4
++#define HC_HTXnL8WE_SHIFT       8
++#define HC_HTXnL9WE_SHIFT       12
++#define HC_HTXnLaWE_SHIFT       16
++#define HC_HTXnLbWE_SHIFT       20
++/* HC_SubA_HTXnLc_11WE      0x004d
++ */
++#define HC_HTXnLcWE_MASK        0x0000000f
++#define HC_HTXnLdWE_MASK        0x000000f0
++#define HC_HTXnLeWE_MASK        0x00000f00
++#define HC_HTXnLfWE_MASK        0x0000f000
++#define HC_HTXnL10WE_MASK       0x000f0000
++#define HC_HTXnL11WE_MASK       0x00f00000
++#define HC_HTXnLdWE_SHIFT       4
++#define HC_HTXnLeWE_SHIFT       8
++#define HC_HTXnLfWE_SHIFT       12
++#define HC_HTXnL10WE_SHIFT      16
++#define HC_HTXnL11WE_SHIFT      20
++/* HC_SubA_HTXnL0_5HE      0x0051
++ */
++#define HC_HTXnL0HE_MASK        0x0000000f
++#define HC_HTXnL1HE_MASK        0x000000f0
++#define HC_HTXnL2HE_MASK        0x00000f00
++#define HC_HTXnL3HE_MASK        0x0000f000
++#define HC_HTXnL4HE_MASK        0x000f0000
++#define HC_HTXnL5HE_MASK        0x00f00000
++#define HC_HTXnL1HE_SHIFT       4
++#define HC_HTXnL2HE_SHIFT       8
++#define HC_HTXnL3HE_SHIFT       12
++#define HC_HTXnL4HE_SHIFT       16
++#define HC_HTXnL5HE_SHIFT       20
++/* HC_SubA_HTXnL6_bHE      0x0052
++ */
++#define HC_HTXnL6HE_MASK        0x0000000f
++#define HC_HTXnL7HE_MASK        0x000000f0
++#define HC_HTXnL8HE_MASK        0x00000f00
++#define HC_HTXnL9HE_MASK        0x0000f000
++#define HC_HTXnLaHE_MASK        0x000f0000
++#define HC_HTXnLbHE_MASK        0x00f00000
++#define HC_HTXnL7HE_SHIFT       4
++#define HC_HTXnL8HE_SHIFT       8
++#define HC_HTXnL9HE_SHIFT       12
++#define HC_HTXnLaHE_SHIFT       16
++#define HC_HTXnLbHE_SHIFT       20
++/* HC_SubA_HTXnLc_11HE      0x0053
++ */
++#define HC_HTXnLcHE_MASK        0x0000000f
++#define HC_HTXnLdHE_MASK        0x000000f0
++#define HC_HTXnLeHE_MASK        0x00000f00
++#define HC_HTXnLfHE_MASK        0x0000f000
++#define HC_HTXnL10HE_MASK       0x000f0000
++#define HC_HTXnL11HE_MASK       0x00f00000
++#define HC_HTXnLdHE_SHIFT       4
++#define HC_HTXnLeHE_SHIFT       8
++#define HC_HTXnLfHE_SHIFT       12
++#define HC_HTXnL10HE_SHIFT      16
++#define HC_HTXnL11HE_SHIFT      20
++/* HC_SubA_HTXnL0OS        0x0077
++ */
++#define HC_HTXnL0OS_MASK        0x003ff000
++#define HC_HTXnLVmax_MASK       0x00000fc0
++#define HC_HTXnLVmin_MASK       0x0000003f
++#define HC_HTXnL0OS_SHIFT       12
++#define HC_HTXnLVmax_SHIFT      6
++/* HC_SubA_HTXnTB          0x0078
++ */
++#define HC_HTXnTB_MASK          0x00f00000
++#define HC_HTXnFLSe_MASK        0x0000e000
++#define HC_HTXnFLSs_MASK        0x00001c00
++#define HC_HTXnFLTe_MASK        0x00000380
++#define HC_HTXnFLTs_MASK        0x00000070
++#define HC_HTXnFLDs_MASK        0x0000000f
++#define HC_HTXnTB_NoTB          0x00000000
++#define HC_HTXnTB_TBC_S         0x00100000
++#define HC_HTXnTB_TBC_T         0x00200000
++#define HC_HTXnTB_TB_S          0x00400000
++#define HC_HTXnTB_TB_T          0x00800000
++#define HC_HTXnFLSe_Nearest     0x00000000
++#define HC_HTXnFLSe_Linear      0x00002000
++#define HC_HTXnFLSe_NonLinear   0x00004000
++#define HC_HTXnFLSe_Sharp       0x00008000
++#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
++#define HC_HTXnFLSs_Nearest     0x00000000
++#define HC_HTXnFLSs_Linear      0x00000400
++#define HC_HTXnFLSs_NonLinear   0x00000800
++#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
++#define HC_HTXnFLTe_Nearest     0x00000000
++#define HC_HTXnFLTe_Linear      0x00000080
++#define HC_HTXnFLTe_NonLinear   0x00000100
++#define HC_HTXnFLTe_Sharp       0x00000180
++#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
++#define HC_HTXnFLTs_Nearest     0x00000000
++#define HC_HTXnFLTs_Linear      0x00000010
++#define HC_HTXnFLTs_NonLinear   0x00000020
++#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
++#define HC_HTXnFLDs_Tex0        0x00000000
++#define HC_HTXnFLDs_Nearest     0x00000001
++#define HC_HTXnFLDs_Linear      0x00000002
++#define HC_HTXnFLDs_NonLinear   0x00000003
++#define HC_HTXnFLDs_Dither      0x00000004
++#define HC_HTXnFLDs_ConstLOD    0x00000005
++#define HC_HTXnFLDs_Ani         0x00000006
++#define HC_HTXnFLDs_AniDither   0x00000007
++/* HC_SubA_HTXnMPMD        0x0079
++ */
++#define HC_HTXnMPMD_SMASK       0x00070000
++#define HC_HTXnMPMD_TMASK       0x00380000
++#define HC_HTXnLODDTf_MASK      0x00000007
++#define HC_HTXnXY2ST_MASK       0x00000008
++#define HC_HTXnMPMD_Tsingle     0x00000000
++#define HC_HTXnMPMD_Tclamp      0x00080000
++#define HC_HTXnMPMD_Trepeat     0x00100000
++#define HC_HTXnMPMD_Tmirror     0x00180000
++#define HC_HTXnMPMD_Twrap       0x00200000
++#define HC_HTXnMPMD_Ssingle     0x00000000
++#define HC_HTXnMPMD_Sclamp      0x00010000
++#define HC_HTXnMPMD_Srepeat     0x00020000
++#define HC_HTXnMPMD_Smirror     0x00030000
++#define HC_HTXnMPMD_Swrap       0x00040000
++/* HC_SubA_HTXnCLODu       0x007a
++ */
++#define HC_HTXnCLODu_MASK       0x000ffc00
++#define HC_HTXnCLODd_MASK       0x000003ff
++#define HC_HTXnCLODu_SHIFT      10
++/* HC_SubA_HTXnFM          0x007b
++ */
++#define HC_HTXnFM_MASK          0x00ff0000
++#define HC_HTXnLoc_MASK         0x00000003
++#define HC_HTXnFM_INDEX         0x00000000
++#define HC_HTXnFM_Intensity     0x00080000
++#define HC_HTXnFM_Lum           0x00100000
++#define HC_HTXnFM_Alpha         0x00180000
++#define HC_HTXnFM_DX            0x00280000
++#define HC_HTXnFM_ARGB16        0x00880000
++#define HC_HTXnFM_ARGB32        0x00980000
++#define HC_HTXnFM_ABGR16        0x00a80000
++#define HC_HTXnFM_ABGR32        0x00b80000
++#define HC_HTXnFM_RGBA16        0x00c80000
++#define HC_HTXnFM_RGBA32        0x00d80000
++#define HC_HTXnFM_BGRA16        0x00e80000
++#define HC_HTXnFM_BGRA32        0x00f80000
++#define HC_HTXnFM_BUMPMAP       0x00380000
++#define HC_HTXnFM_Index1        (HC_HTXnFM_INDEX     | 0x00000000)
++#define HC_HTXnFM_Index2        (HC_HTXnFM_INDEX     | 0x00010000)
++#define HC_HTXnFM_Index4        (HC_HTXnFM_INDEX     | 0x00020000)
++#define HC_HTXnFM_Index8        (HC_HTXnFM_INDEX     | 0x00030000)
++#define HC_HTXnFM_T1            (HC_HTXnFM_Intensity | 0x00000000)
++#define HC_HTXnFM_T2            (HC_HTXnFM_Intensity | 0x00010000)
++#define HC_HTXnFM_T4            (HC_HTXnFM_Intensity | 0x00020000)
++#define HC_HTXnFM_T8            (HC_HTXnFM_Intensity | 0x00030000)
++#define HC_HTXnFM_L1            (HC_HTXnFM_Lum       | 0x00000000)
++#define HC_HTXnFM_L2            (HC_HTXnFM_Lum       | 0x00010000)
++#define HC_HTXnFM_L4            (HC_HTXnFM_Lum       | 0x00020000)
++#define HC_HTXnFM_L8            (HC_HTXnFM_Lum       | 0x00030000)
++#define HC_HTXnFM_AL44          (HC_HTXnFM_Lum       | 0x00040000)
++#define HC_HTXnFM_AL88          (HC_HTXnFM_Lum       | 0x00050000)
++#define HC_HTXnFM_A1            (HC_HTXnFM_Alpha     | 0x00000000)
++#define HC_HTXnFM_A2            (HC_HTXnFM_Alpha     | 0x00010000)
++#define HC_HTXnFM_A4            (HC_HTXnFM_Alpha     | 0x00020000)
++#define HC_HTXnFM_A8            (HC_HTXnFM_Alpha     | 0x00030000)
++#define HC_HTXnFM_DX1           (HC_HTXnFM_DX        | 0x00010000)
++#define HC_HTXnFM_DX23          (HC_HTXnFM_DX        | 0x00020000)
++#define HC_HTXnFM_DX45          (HC_HTXnFM_DX        | 0x00030000)
++#define HC_HTXnFM_RGB555        (HC_HTXnFM_ARGB16    | 0x00000000)
++#define HC_HTXnFM_RGB565        (HC_HTXnFM_ARGB16    | 0x00010000)
++#define HC_HTXnFM_ARGB1555      (HC_HTXnFM_ARGB16    | 0x00020000)
++#define HC_HTXnFM_ARGB4444      (HC_HTXnFM_ARGB16    | 0x00030000)
++#define HC_HTXnFM_ARGB0888      (HC_HTXnFM_ARGB32    | 0x00000000)
++#define HC_HTXnFM_ARGB8888      (HC_HTXnFM_ARGB32    | 0x00010000)
++#define HC_HTXnFM_BGR555        (HC_HTXnFM_ABGR16    | 0x00000000)
++#define HC_HTXnFM_BGR565        (HC_HTXnFM_ABGR16    | 0x00010000)
++#define HC_HTXnFM_ABGR1555      (HC_HTXnFM_ABGR16    | 0x00020000)
++#define HC_HTXnFM_ABGR4444      (HC_HTXnFM_ABGR16    | 0x00030000)
++#define HC_HTXnFM_ABGR0888      (HC_HTXnFM_ABGR32    | 0x00000000)
++#define HC_HTXnFM_ABGR8888      (HC_HTXnFM_ABGR32    | 0x00010000)
++#define HC_HTXnFM_RGBA5550      (HC_HTXnFM_RGBA16    | 0x00000000)
++#define HC_HTXnFM_RGBA5551      (HC_HTXnFM_RGBA16    | 0x00020000)
++#define HC_HTXnFM_RGBA4444      (HC_HTXnFM_RGBA16    | 0x00030000)
++#define HC_HTXnFM_RGBA8880      (HC_HTXnFM_RGBA32    | 0x00000000)
++#define HC_HTXnFM_RGBA8888      (HC_HTXnFM_RGBA32    | 0x00010000)
++#define HC_HTXnFM_BGRA5550      (HC_HTXnFM_BGRA16    | 0x00000000)
++#define HC_HTXnFM_BGRA5551      (HC_HTXnFM_BGRA16    | 0x00020000)
++#define HC_HTXnFM_BGRA4444      (HC_HTXnFM_BGRA16    | 0x00030000)
++#define HC_HTXnFM_BGRA8880      (HC_HTXnFM_BGRA32    | 0x00000000)
++#define HC_HTXnFM_BGRA8888      (HC_HTXnFM_BGRA32    | 0x00010000)
++#define HC_HTXnFM_VU88          (HC_HTXnFM_BUMPMAP   | 0x00000000)
++#define HC_HTXnFM_LVU655        (HC_HTXnFM_BUMPMAP   | 0x00010000)
++#define HC_HTXnFM_LVU888        (HC_HTXnFM_BUMPMAP   | 0x00020000)
++#define HC_HTXnLoc_Local        0x00000000
++#define HC_HTXnLoc_Sys          0x00000002
++#define HC_HTXnLoc_AGP          0x00000003
++/* HC_SubA_HTXnTRAH        0x007f
++ */
++#define HC_HTXnTRAH_MASK        0x00ff0000
++#define HC_HTXnTRAL_MASK        0x0000ff00
++#define HC_HTXnTBA_MASK         0x000000ff
++#define HC_HTXnTRAH_SHIFT       16
++#define HC_HTXnTRAL_SHIFT       8
++/* HC_SubA_HTXnTBLCsat     0x0080
++ *-- Define the input texture.
++ */
++#define HC_XTC_TOPC             0x00000000
++#define HC_XTC_InvTOPC          0x00000010
++#define HC_XTC_TOPCp5           0x00000020
++#define HC_XTC_Cbias            0x00000000
++#define HC_XTC_InvCbias         0x00000010
++#define HC_XTC_0                0x00000000
++#define HC_XTC_Dif              0x00000001
++#define HC_XTC_Spec             0x00000002
++#define HC_XTC_Tex              0x00000003
++#define HC_XTC_Cur              0x00000004
++#define HC_XTC_Adif             0x00000005
++#define HC_XTC_Fog              0x00000006
++#define HC_XTC_Atex             0x00000007
++#define HC_XTC_Acur             0x00000008
++#define HC_XTC_HTXnTBLRC        0x00000009
++#define HC_XTC_Ctexnext         0x0000000a
++/*--
++ */
++#define HC_HTXnTBLCsat_MASK     0x00800000
++#define HC_HTXnTBLCa_MASK       0x000fc000
++#define HC_HTXnTBLCb_MASK       0x00001f80
++#define HC_HTXnTBLCc_MASK       0x0000003f
++#define HC_HTXnTBLCa_TOPC       (HC_XTC_TOPC << 14)
++#define HC_HTXnTBLCa_InvTOPC    (HC_XTC_InvTOPC << 14)
++#define HC_HTXnTBLCa_TOPCp5     (HC_XTC_TOPCp5 << 14)
++#define HC_HTXnTBLCa_0          (HC_XTC_0 << 14)
++#define HC_HTXnTBLCa_Dif        (HC_XTC_Dif << 14)
++#define HC_HTXnTBLCa_Spec       (HC_XTC_Spec << 14)
++#define HC_HTXnTBLCa_Tex        (HC_XTC_Tex << 14)
++#define HC_HTXnTBLCa_Cur        (HC_XTC_Cur << 14)
++#define HC_HTXnTBLCa_Adif       (HC_XTC_Adif << 14)
++#define HC_HTXnTBLCa_Fog        (HC_XTC_Fog << 14)
++#define HC_HTXnTBLCa_Atex       (HC_XTC_Atex << 14)
++#define HC_HTXnTBLCa_Acur       (HC_XTC_Acur << 14)
++#define HC_HTXnTBLCa_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 14)
++#define HC_HTXnTBLCa_Ctexnext   (HC_XTC_Ctexnext << 14)
++#define HC_HTXnTBLCb_TOPC       (HC_XTC_TOPC << 7)
++#define HC_HTXnTBLCb_InvTOPC    (HC_XTC_InvTOPC << 7)
++#define HC_HTXnTBLCb_TOPCp5     (HC_XTC_TOPCp5 << 7)
++#define HC_HTXnTBLCb_0          (HC_XTC_0 << 7)
++#define HC_HTXnTBLCb_Dif        (HC_XTC_Dif << 7)
++#define HC_HTXnTBLCb_Spec       (HC_XTC_Spec << 7)
++#define HC_HTXnTBLCb_Tex        (HC_XTC_Tex << 7)
++#define HC_HTXnTBLCb_Cur        (HC_XTC_Cur << 7)
++#define HC_HTXnTBLCb_Adif       (HC_XTC_Adif << 7)
++#define HC_HTXnTBLCb_Fog        (HC_XTC_Fog << 7)
++#define HC_HTXnTBLCb_Atex       (HC_XTC_Atex << 7)
++#define HC_HTXnTBLCb_Acur       (HC_XTC_Acur << 7)
++#define HC_HTXnTBLCb_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 7)
++#define HC_HTXnTBLCb_Ctexnext   (HC_XTC_Ctexnext << 7)
++#define HC_HTXnTBLCc_TOPC       (HC_XTC_TOPC << 0)
++#define HC_HTXnTBLCc_InvTOPC    (HC_XTC_InvTOPC << 0)
++#define HC_HTXnTBLCc_TOPCp5     (HC_XTC_TOPCp5 << 0)
++#define HC_HTXnTBLCc_0          (HC_XTC_0 << 0)
++#define HC_HTXnTBLCc_Dif        (HC_XTC_Dif << 0)
++#define HC_HTXnTBLCc_Spec       (HC_XTC_Spec << 0)
++#define HC_HTXnTBLCc_Tex        (HC_XTC_Tex << 0)
++#define HC_HTXnTBLCc_Cur        (HC_XTC_Cur << 0)
++#define HC_HTXnTBLCc_Adif       (HC_XTC_Adif << 0)
++#define HC_HTXnTBLCc_Fog        (HC_XTC_Fog << 0)
++#define HC_HTXnTBLCc_Atex       (HC_XTC_Atex << 0)
++#define HC_HTXnTBLCc_Acur       (HC_XTC_Acur << 0)
++#define HC_HTXnTBLCc_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 0)
++#define HC_HTXnTBLCc_Ctexnext   (HC_XTC_Ctexnext << 0)
++/* HC_SubA_HTXnTBLCop      0x0081
++ */
++#define HC_HTXnTBLdot_MASK      0x00c00000
++#define HC_HTXnTBLCop_MASK      0x00380000
++#define HC_HTXnTBLCbias_MASK    0x0007c000
++#define HC_HTXnTBLCshift_MASK   0x00001800
++#define HC_HTXnTBLAop_MASK      0x00000380
++#define HC_HTXnTBLAbias_MASK    0x00000078
++#define HC_HTXnTBLAshift_MASK   0x00000003
++#define HC_HTXnTBLCop_Add       0x00000000
++#define HC_HTXnTBLCop_Sub       0x00080000
++#define HC_HTXnTBLCop_Min       0x00100000
++#define HC_HTXnTBLCop_Max       0x00180000
++#define HC_HTXnTBLCop_Mask      0x00200000
++#define HC_HTXnTBLCbias_Cbias           (HC_XTC_Cbias << 14)
++#define HC_HTXnTBLCbias_InvCbias        (HC_XTC_InvCbias << 14)
++#define HC_HTXnTBLCbias_0               (HC_XTC_0 << 14)
++#define HC_HTXnTBLCbias_Dif             (HC_XTC_Dif << 14)
++#define HC_HTXnTBLCbias_Spec            (HC_XTC_Spec << 14)
++#define HC_HTXnTBLCbias_Tex             (HC_XTC_Tex << 14)
++#define HC_HTXnTBLCbias_Cur             (HC_XTC_Cur << 14)
++#define HC_HTXnTBLCbias_Adif            (HC_XTC_Adif << 14)
++#define HC_HTXnTBLCbias_Fog             (HC_XTC_Fog << 14)
++#define HC_HTXnTBLCbias_Atex            (HC_XTC_Atex << 14)
++#define HC_HTXnTBLCbias_Acur            (HC_XTC_Acur << 14)
++#define HC_HTXnTBLCbias_HTXnTBLRC       (HC_XTC_HTXnTBLRC << 14)
++#define HC_HTXnTBLCshift_1      0x00000000
++#define HC_HTXnTBLCshift_2      0x00000800
++#define HC_HTXnTBLCshift_No     0x00001000
++#define HC_HTXnTBLCshift_DotP   0x00001800
++/*=* John Sheng [2003.7.18] texture combine *=*/
++#define HC_HTXnTBLDOT3   0x00080000
++#define HC_HTXnTBLDOT4   0x000C0000
++
++#define HC_HTXnTBLAop_Add       0x00000000
++#define HC_HTXnTBLAop_Sub       0x00000080
++#define HC_HTXnTBLAop_Min       0x00000100
++#define HC_HTXnTBLAop_Max       0x00000180
++#define HC_HTXnTBLAop_Mask      0x00000200
++#define HC_HTXnTBLAbias_Inv             0x00000040
++#define HC_HTXnTBLAbias_Adif            0x00000000
++#define HC_HTXnTBLAbias_Fog             0x00000008
++#define HC_HTXnTBLAbias_Acur            0x00000010
++#define HC_HTXnTBLAbias_HTXnTBLRAbias   0x00000018
++#define HC_HTXnTBLAbias_Atex            0x00000020
++#define HC_HTXnTBLAshift_1      0x00000000
++#define HC_HTXnTBLAshift_2      0x00000001
++#define HC_HTXnTBLAshift_No     0x00000002
++/* #define HC_HTXnTBLAshift_DotP   0x00000003 */
++/* HC_SubA_HTXnTBLMPfog    0x0082
++ */
++#define HC_HTXnTBLMPfog_MASK    0x00e00000
++#define HC_HTXnTBLMPfog_0       0x00000000
++#define HC_HTXnTBLMPfog_Adif    0x00200000
++#define HC_HTXnTBLMPfog_Fog     0x00400000
++#define HC_HTXnTBLMPfog_Atex    0x00600000
++#define HC_HTXnTBLMPfog_Acur    0x00800000
++#define HC_HTXnTBLMPfog_GHTXnTBLRFog    0x00a00000
++/* HC_SubA_HTXnTBLAsat     0x0083
++ *-- Define the texture alpha input.
++ */
++#define HC_XTA_TOPA             0x00000000
++#define HC_XTA_InvTOPA          0x00000008
++#define HC_XTA_TOPAp5           0x00000010
++#define HC_XTA_Adif             0x00000000
++#define HC_XTA_Fog              0x00000001
++#define HC_XTA_Acur             0x00000002
++#define HC_XTA_HTXnTBLRA        0x00000003
++#define HC_XTA_Atex             0x00000004
++#define HC_XTA_Atexnext         0x00000005
++/*--
++ */
++#define HC_HTXnTBLAsat_MASK     0x00800000
++#define HC_HTXnTBLAMB_MASK      0x00700000
++#define HC_HTXnTBLAa_MASK       0x0007c000
++#define HC_HTXnTBLAb_MASK       0x00000f80
++#define HC_HTXnTBLAc_MASK       0x0000001f
++#define HC_HTXnTBLAMB_SHIFT     20
++#define HC_HTXnTBLAa_TOPA       (HC_XTA_TOPA << 14)
++#define HC_HTXnTBLAa_InvTOPA    (HC_XTA_InvTOPA << 14)
++#define HC_HTXnTBLAa_TOPAp5     (HC_XTA_TOPAp5 << 14)
++#define HC_HTXnTBLAa_Adif       (HC_XTA_Adif << 14)
++#define HC_HTXnTBLAa_Fog        (HC_XTA_Fog << 14)
++#define HC_HTXnTBLAa_Acur       (HC_XTA_Acur << 14)
++#define HC_HTXnTBLAa_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 14)
++#define HC_HTXnTBLAa_Atex       (HC_XTA_Atex << 14)
++#define HC_HTXnTBLAa_Atexnext   (HC_XTA_Atexnext << 14)
++#define HC_HTXnTBLAb_TOPA       (HC_XTA_TOPA << 7)
++#define HC_HTXnTBLAb_InvTOPA    (HC_XTA_InvTOPA << 7)
++#define HC_HTXnTBLAb_TOPAp5     (HC_XTA_TOPAp5 << 7)
++#define HC_HTXnTBLAb_Adif       (HC_XTA_Adif << 7)
++#define HC_HTXnTBLAb_Fog        (HC_XTA_Fog << 7)
++#define HC_HTXnTBLAb_Acur       (HC_XTA_Acur << 7)
++#define HC_HTXnTBLAb_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 7)
++#define HC_HTXnTBLAb_Atex       (HC_XTA_Atex << 7)
++#define HC_HTXnTBLAb_Atexnext   (HC_XTA_Atexnext << 7)
++#define HC_HTXnTBLAc_TOPA       (HC_XTA_TOPA << 0)
++#define HC_HTXnTBLAc_InvTOPA    (HC_XTA_InvTOPA << 0)
++#define HC_HTXnTBLAc_TOPAp5     (HC_XTA_TOPAp5 << 0)
++#define HC_HTXnTBLAc_Adif       (HC_XTA_Adif << 0)
++#define HC_HTXnTBLAc_Fog        (HC_XTA_Fog << 0)
++#define HC_HTXnTBLAc_Acur       (HC_XTA_Acur << 0)
++#define HC_HTXnTBLAc_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 0)
++#define HC_HTXnTBLAc_Atex       (HC_XTA_Atex << 0)
++#define HC_HTXnTBLAc_Atexnext   (HC_XTA_Atexnext << 0)
++/* HC_SubA_HTXnTBLRAa      0x0089
++ */
++#define HC_HTXnTBLRAa_MASK      0x00ff0000
++#define HC_HTXnTBLRAb_MASK      0x0000ff00
++#define HC_HTXnTBLRAc_MASK      0x000000ff
++#define HC_HTXnTBLRAa_SHIFT     16
++#define HC_HTXnTBLRAb_SHIFT     8
++#define HC_HTXnTBLRAc_SHIFT     0
++/* HC_SubA_HTXnTBLRFog     0x008a
++ */
++#define HC_HTXnTBLRFog_MASK     0x0000ff00
++#define HC_HTXnTBLRAbias_MASK   0x000000ff
++#define HC_HTXnTBLRFog_SHIFT    8
++#define HC_HTXnTBLRAbias_SHIFT  0
++/* HC_SubA_HTXnLScale      0x0094
++ */
++#define HC_HTXnLScale_MASK      0x0007fc00
++#define HC_HTXnLOff_MASK        0x000001ff
++#define HC_HTXnLScale_SHIFT     10
++/* HC_SubA_HTXSMD          0x0000
++ */
++#define HC_HTXSMD_MASK          0x00000080
++#define HC_HTXTMD_MASK          0x00000040
++#define HC_HTXNum_MASK          0x00000038
++#define HC_HTXTRMD_MASK         0x00000006
++#define HC_HTXCHCLR_MASK        0x00000001
++#define HC_HTXNum_SHIFT         3
++
++/* Texture Palette n
++ */
++#define HC_SubType_TexPalette0  0x00000000
++#define HC_SubType_TexPalette1  0x00000001
++#define HC_SubType_FogTable     0x00000010
++#define HC_SubType_Stipple      0x00000014
++/* HC_SubA_TexPalette0     0x0000
++ */
++#define HC_HTPnA_MASK           0xff000000
++#define HC_HTPnR_MASK           0x00ff0000
++#define HC_HTPnG_MASK           0x0000ff00
++#define HC_HTPnB_MASK           0x000000ff
++/* HC_SubA_FogTable        0x0010
++ */
++#define HC_HFPn3_MASK           0xff000000
++#define HC_HFPn2_MASK           0x00ff0000
++#define HC_HFPn1_MASK           0x0000ff00
++#define HC_HFPn_MASK            0x000000ff
++#define HC_HFPn3_SHIFT          24
++#define HC_HFPn2_SHIFT          16
++#define HC_HFPn1_SHIFT          8
++
++/* Auto Testing & Security
++ */
++#define HC_SubA_HenFIFOAT       0x0000
++#define HC_SubA_HFBDrawFirst    0x0004
++#define HC_SubA_HFBBasL         0x0005
++#define HC_SubA_HFBDst          0x0006
++/* HC_SubA_HenFIFOAT       0x0000
++ */
++#define HC_HenFIFOAT_MASK       0x00000020
++#define HC_HenGEMILock_MASK     0x00000010
++#define HC_HenFBASwap_MASK      0x00000008
++#define HC_HenOT_MASK           0x00000004
++#define HC_HenCMDQ_MASK         0x00000002
++#define HC_HenTXCTSU_MASK       0x00000001
++/* HC_SubA_HFBDrawFirst    0x0004
++ */
++#define HC_HFBDrawFirst_MASK    0x00000800
++#define HC_HFBQueue_MASK        0x00000400
++#define HC_HFBLock_MASK         0x00000200
++#define HC_HEOF_MASK            0x00000100
++#define HC_HFBBasH_MASK         0x000000ff
++
++/* GEMI Setting
++ */
++#define HC_SubA_HTArbRCM        0x0008
++#define HC_SubA_HTArbRZ         0x000a
++#define HC_SubA_HTArbWZ         0x000b
++#define HC_SubA_HTArbRTX        0x000c
++#define HC_SubA_HTArbRCW        0x000d
++#define HC_SubA_HTArbE2         0x000e
++#define HC_SubA_HArbRQCM        0x0010
++#define HC_SubA_HArbWQCM        0x0011
++#define HC_SubA_HGEMITout       0x0020
++#define HC_SubA_HFthRTXD        0x0040
++#define HC_SubA_HFthRTXA        0x0044
++#define HC_SubA_HCMDQstL        0x0050
++#define HC_SubA_HCMDQendL       0x0051
++#define HC_SubA_HCMDQLen        0x0052
++/* HC_SubA_HTArbRCM        0x0008
++ */
++#define HC_HTArbRCM_MASK        0x0000ffff
++/* HC_SubA_HTArbRZ         0x000a
++ */
++#define HC_HTArbRZ_MASK         0x0000ffff
++/* HC_SubA_HTArbWZ         0x000b
++ */
++#define HC_HTArbWZ_MASK         0x0000ffff
++/* HC_SubA_HTArbRTX        0x000c
++ */
++#define HC_HTArbRTX_MASK        0x0000ffff
++/* HC_SubA_HTArbRCW        0x000d
++ */
++#define HC_HTArbRCW_MASK        0x0000ffff
++/* HC_SubA_HTArbE2         0x000e
++ */
++#define HC_HTArbE2_MASK         0x0000ffff
++/* HC_SubA_HArbRQCM        0x0010
++ */
++#define HC_HTArbRQCM_MASK       0x0000ffff
++/* HC_SubA_HArbWQCM        0x0011
++ */
++#define HC_HArbWQCM_MASK        0x0000ffff
++/* HC_SubA_HGEMITout       0x0020
++ */
++#define HC_HGEMITout_MASK       0x000f0000
++#define HC_HNPArbZC_MASK        0x0000ffff
++#define HC_HGEMITout_SHIFT      16
++/* HC_SubA_HFthRTXD        0x0040
++ */
++#define HC_HFthRTXD_MASK        0x00ff0000
++#define HC_HFthRZD_MASK         0x0000ff00
++#define HC_HFthWZD_MASK         0x000000ff
++#define HC_HFthRTXD_SHIFT       16
++#define HC_HFthRZD_SHIFT        8
++/* HC_SubA_HFthRTXA        0x0044
++ */
++#define HC_HFthRTXA_MASK        0x000000ff
++
++/******************************************************************************
++** Define the Halcyon Internal register access constants. For simulator only.
++******************************************************************************/
++#define HC_SIMA_HAGPBstL        0x0000
++#define HC_SIMA_HAGPBendL       0x0001
++#define HC_SIMA_HAGPCMNT        0x0002
++#define HC_SIMA_HAGPBpL         0x0003
++#define HC_SIMA_HAGPBpH         0x0004
++#define HC_SIMA_HClipTB         0x0005
++#define HC_SIMA_HClipLR         0x0006
++#define HC_SIMA_HFPClipTL       0x0007
++#define HC_SIMA_HFPClipBL       0x0008
++#define HC_SIMA_HFPClipLL       0x0009
++#define HC_SIMA_HFPClipRL       0x000a
++#define HC_SIMA_HFPClipTBH      0x000b
++#define HC_SIMA_HFPClipLRH      0x000c
++#define HC_SIMA_HLP             0x000d
++#define HC_SIMA_HLPRF           0x000e
++#define HC_SIMA_HSolidCL        0x000f
++#define HC_SIMA_HPixGC          0x0010
++#define HC_SIMA_HSPXYOS         0x0011
++#define HC_SIMA_HCmdA           0x0012
++#define HC_SIMA_HCmdB           0x0013
++#define HC_SIMA_HEnable         0x0014
++#define HC_SIMA_HZWBBasL        0x0015
++#define HC_SIMA_HZWBBasH        0x0016
++#define HC_SIMA_HZWBType        0x0017
++#define HC_SIMA_HZBiasL         0x0018
++#define HC_SIMA_HZWBend         0x0019
++#define HC_SIMA_HZWTMD          0x001a
++#define HC_SIMA_HZWCDL          0x001b
++#define HC_SIMA_HZWCTAGnum      0x001c
++#define HC_SIMA_HZCYNum         0x001d
++#define HC_SIMA_HZWCFire        0x001e
++/* #define HC_SIMA_HSBBasL         0x001d */
++/* #define HC_SIMA_HSBBasH         0x001e */
++/* #define HC_SIMA_HSBFM           0x001f */
++#define HC_SIMA_HSTREF          0x0020
++#define HC_SIMA_HSTMD           0x0021
++#define HC_SIMA_HABBasL         0x0022
++#define HC_SIMA_HABBasH         0x0023
++#define HC_SIMA_HABFM           0x0024
++#define HC_SIMA_HATMD           0x0025
++#define HC_SIMA_HABLCsat        0x0026
++#define HC_SIMA_HABLCop         0x0027
++#define HC_SIMA_HABLAsat        0x0028
++#define HC_SIMA_HABLAop         0x0029
++#define HC_SIMA_HABLRCa         0x002a
++#define HC_SIMA_HABLRFCa        0x002b
++#define HC_SIMA_HABLRCbias      0x002c
++#define HC_SIMA_HABLRCb         0x002d
++#define HC_SIMA_HABLRFCb        0x002e
++#define HC_SIMA_HABLRAa         0x002f
++#define HC_SIMA_HABLRAb         0x0030
++#define HC_SIMA_HDBBasL         0x0031
++#define HC_SIMA_HDBBasH         0x0032
++#define HC_SIMA_HDBFM           0x0033
++#define HC_SIMA_HFBBMSKL        0x0034
++#define HC_SIMA_HROP            0x0035
++#define HC_SIMA_HFogLF          0x0036
++#define HC_SIMA_HFogCL          0x0037
++#define HC_SIMA_HFogCH          0x0038
++#define HC_SIMA_HFogStL         0x0039
++#define HC_SIMA_HFogStH         0x003a
++#define HC_SIMA_HFogOOdMF       0x003b
++#define HC_SIMA_HFogOOdEF       0x003c
++#define HC_SIMA_HFogEndL        0x003d
++#define HC_SIMA_HFogDenst       0x003e
++/*---- start of texture 0 setting ----
++ */
++#define HC_SIMA_HTX0L0BasL      0x0040
++#define HC_SIMA_HTX0L1BasL      0x0041
++#define HC_SIMA_HTX0L2BasL      0x0042
++#define HC_SIMA_HTX0L3BasL      0x0043
++#define HC_SIMA_HTX0L4BasL      0x0044
++#define HC_SIMA_HTX0L5BasL      0x0045
++#define HC_SIMA_HTX0L6BasL      0x0046
++#define HC_SIMA_HTX0L7BasL      0x0047
++#define HC_SIMA_HTX0L8BasL      0x0048
++#define HC_SIMA_HTX0L9BasL      0x0049
++#define HC_SIMA_HTX0LaBasL      0x004a
++#define HC_SIMA_HTX0LbBasL      0x004b
++#define HC_SIMA_HTX0LcBasL      0x004c
++#define HC_SIMA_HTX0LdBasL      0x004d
++#define HC_SIMA_HTX0LeBasL      0x004e
++#define HC_SIMA_HTX0LfBasL      0x004f
++#define HC_SIMA_HTX0L10BasL     0x0050
++#define HC_SIMA_HTX0L11BasL     0x0051
++#define HC_SIMA_HTX0L012BasH    0x0052
++#define HC_SIMA_HTX0L345BasH    0x0053
++#define HC_SIMA_HTX0L678BasH    0x0054
++#define HC_SIMA_HTX0L9abBasH    0x0055
++#define HC_SIMA_HTX0LcdeBasH    0x0056
++#define HC_SIMA_HTX0Lf1011BasH  0x0057
++#define HC_SIMA_HTX0L0Pit       0x0058
++#define HC_SIMA_HTX0L1Pit       0x0059
++#define HC_SIMA_HTX0L2Pit       0x005a
++#define HC_SIMA_HTX0L3Pit       0x005b
++#define HC_SIMA_HTX0L4Pit       0x005c
++#define HC_SIMA_HTX0L5Pit       0x005d
++#define HC_SIMA_HTX0L6Pit       0x005e
++#define HC_SIMA_HTX0L7Pit       0x005f
++#define HC_SIMA_HTX0L8Pit       0x0060
++#define HC_SIMA_HTX0L9Pit       0x0061
++#define HC_SIMA_HTX0LaPit       0x0062
++#define HC_SIMA_HTX0LbPit       0x0063
++#define HC_SIMA_HTX0LcPit       0x0064
++#define HC_SIMA_HTX0LdPit       0x0065
++#define HC_SIMA_HTX0LePit       0x0066
++#define HC_SIMA_HTX0LfPit       0x0067
++#define HC_SIMA_HTX0L10Pit      0x0068
++#define HC_SIMA_HTX0L11Pit      0x0069
++#define HC_SIMA_HTX0L0_5WE      0x006a
++#define HC_SIMA_HTX0L6_bWE      0x006b
++#define HC_SIMA_HTX0Lc_11WE     0x006c
++#define HC_SIMA_HTX0L0_5HE      0x006d
++#define HC_SIMA_HTX0L6_bHE      0x006e
++#define HC_SIMA_HTX0Lc_11HE     0x006f
++#define HC_SIMA_HTX0L0OS        0x0070
++#define HC_SIMA_HTX0TB          0x0071
++#define HC_SIMA_HTX0MPMD        0x0072
++#define HC_SIMA_HTX0CLODu       0x0073
++#define HC_SIMA_HTX0FM          0x0074
++#define HC_SIMA_HTX0TRCH        0x0075
++#define HC_SIMA_HTX0TRCL        0x0076
++#define HC_SIMA_HTX0TBC         0x0077
++#define HC_SIMA_HTX0TRAH        0x0078
++#define HC_SIMA_HTX0TBLCsat     0x0079
++#define HC_SIMA_HTX0TBLCop      0x007a
++#define HC_SIMA_HTX0TBLMPfog    0x007b
++#define HC_SIMA_HTX0TBLAsat     0x007c
++#define HC_SIMA_HTX0TBLRCa      0x007d
++#define HC_SIMA_HTX0TBLRCb      0x007e
++#define HC_SIMA_HTX0TBLRCc      0x007f
++#define HC_SIMA_HTX0TBLRCbias   0x0080
++#define HC_SIMA_HTX0TBLRAa      0x0081
++#define HC_SIMA_HTX0TBLRFog     0x0082
++#define HC_SIMA_HTX0BumpM00     0x0083
++#define HC_SIMA_HTX0BumpM01     0x0084
++#define HC_SIMA_HTX0BumpM10     0x0085
++#define HC_SIMA_HTX0BumpM11     0x0086
++#define HC_SIMA_HTX0LScale      0x0087
++/*---- end of texture 0 setting ----      0x008f
++ */
++#define HC_SIMA_TX0TX1_OFF      0x0050
++/*---- start of texture 1 setting ----
++ */
++#define HC_SIMA_HTX1L0BasL      (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L1BasL      (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L2BasL      (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L3BasL      (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L4BasL      (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L5BasL      (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6BasL      (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L7BasL      (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L8BasL      (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L9BasL      (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LaBasL      (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LbBasL      (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LcBasL      (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LdBasL      (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LeBasL      (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LfBasL      (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L10BasL     (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L11BasL     (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L012BasH    (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L345BasH    (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L678BasH    (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L9abBasH    (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LcdeBasH    (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1Lf1011BasH  (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0Pit       (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L1Pit       (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L2Pit       (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L3Pit       (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L4Pit       (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L5Pit       (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6Pit       (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L7Pit       (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L8Pit       (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L9Pit       (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LaPit       (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LbPit       (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LcPit       (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LdPit       (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LePit       (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LfPit       (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L10Pit      (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L11Pit      (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0_5WE      (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6_bWE      (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1Lc_11WE     (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0_5HE      (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L6_bHE      (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1Lc_11HE      (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1L0OS        (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TB          (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1MPMD        (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1CLODu       (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1FM          (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TRCH        (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TRCL        (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBC         (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TRAH        (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LTC         (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LTA         (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLCsat     (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLCop      (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLMPfog    (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLAsat     (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCa      (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCb      (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCc      (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRCbias   (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRAa      (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1TBLRFog     (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM00     (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM01     (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM10     (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1BumpM11     (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
++#define HC_SIMA_HTX1LScale      (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
++/*---- end of texture 1 setting ---- 0xaf
++ */
++#define HC_SIMA_HTXSMD          0x00b0
++#define HC_SIMA_HenFIFOAT       0x00b1
++#define HC_SIMA_HFBDrawFirst    0x00b2
++#define HC_SIMA_HFBBasL         0x00b3
++#define HC_SIMA_HTArbRCM        0x00b4
++#define HC_SIMA_HTArbRZ         0x00b5
++#define HC_SIMA_HTArbWZ         0x00b6
++#define HC_SIMA_HTArbRTX        0x00b7
++#define HC_SIMA_HTArbRCW        0x00b8
++#define HC_SIMA_HTArbE2         0x00b9
++#define HC_SIMA_HGEMITout       0x00ba
++#define HC_SIMA_HFthRTXD        0x00bb
++#define HC_SIMA_HFthRTXA        0x00bc
++/* Define the texture palette 0
++ */
++#define HC_SIMA_HTP0            0x0100
++#define HC_SIMA_HTP1            0x0200
++#define HC_SIMA_FOGTABLE        0x0300
++#define HC_SIMA_STIPPLE         0x0400
++#define HC_SIMA_HE3Fire         0x0440
++#define HC_SIMA_TRANS_SET       0x0441
++#define HC_SIMA_HREngSt         0x0442
++#define HC_SIMA_HRFIFOempty     0x0443
++#define HC_SIMA_HRFIFOfull      0x0444
++#define HC_SIMA_HRErr           0x0445
++#define HC_SIMA_FIFOstatus      0x0446
++
++/******************************************************************************
++** Define the AGP command header.
++******************************************************************************/
++#define HC_ACMD_MASK            0xfe000000
++#define HC_ACMD_SUB_MASK        0x0c000000
++#define HC_ACMD_HCmdA           0xee000000
++#define HC_ACMD_HCmdB           0xec000000
++#define HC_ACMD_HCmdC           0xea000000
++#define HC_ACMD_H1              0xf0000000
++#define HC_ACMD_H2              0xf2000000
++#define HC_ACMD_H3              0xf4000000
++#define HC_ACMD_H4              0xf6000000
++
++#define HC_ACMD_H1IO_MASK       0x000001ff
++#define HC_ACMD_H2IO1_MASK      0x001ff000
++#define HC_ACMD_H2IO2_MASK      0x000001ff
++#define HC_ACMD_H2IO1_SHIFT     12
++#define HC_ACMD_H2IO2_SHIFT     0
++#define HC_ACMD_H3IO_MASK       0x000001ff
++#define HC_ACMD_H3COUNT_MASK    0x01fff000
++#define HC_ACMD_H3COUNT_SHIFT   12
++#define HC_ACMD_H4ID_MASK       0x000001ff
++#define HC_ACMD_H4COUNT_MASK    0x01fffe00
++#define HC_ACMD_H4COUNT_SHIFT   9
++
++/********************************************************************************
++** Define Header
++********************************************************************************/
++#define HC_HEADER2            0xF210F110
++
++/********************************************************************************
++** Define Dummy Value
++********************************************************************************/
++#define HC_DUMMY              0xCCCCCCCC
++/********************************************************************************
++** Define for DMA use
++********************************************************************************/
++#define HALCYON_HEADER2     0XF210F110
++#define HALCYON_FIRECMD     0XEE100000
++#define HALCYON_FIREMASK    0XFFF00000
++#define HALCYON_CMDB        0XEC000000
++#define HALCYON_CMDBMASK    0XFFFE0000
++#define HALCYON_SUB_ADDR0   0X00000000
++#define HALCYON_HEADER1MASK 0XFFFFFC00
++#define HALCYON_HEADER1     0XF0000000
++#define HC_SubA_HAGPBstL        0x0060
++#define HC_SubA_HAGPBendL       0x0061
++#define HC_SubA_HAGPCMNT        0x0062
++#define HC_SubA_HAGPBpL         0x0063
++#define HC_SubA_HAGPBpH         0x0064
++#define HC_HAGPCMNT_MASK        0x00800000
++#define HC_HCmdErrClr_MASK      0x00400000
++#define HC_HAGPBendH_MASK       0x0000ff00
++#define HC_HAGPBstH_MASK        0x000000ff
++#define HC_HAGPBendH_SHIFT      8
++#define HC_HAGPBstH_SHIFT       0
++#define HC_HAGPBpL_MASK         0x00fffffc
++#define HC_HAGPBpID_MASK        0x00000003
++#define HC_HAGPBpID_PAUSE       0x00000000
++#define HC_HAGPBpID_JUMP        0x00000001
++#define HC_HAGPBpID_STOP        0x00000002
++#define HC_HAGPBpH_MASK         0x00ffffff
++
++#define VIA_VIDEO_HEADER5       0xFE040000
++#define VIA_VIDEO_HEADER6       0xFE050000
++#define VIA_VIDEO_HEADER7       0xFE060000
++#define VIA_VIDEOMASK           0xFFFF0000
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_buffer.c git-nokia/drivers/gpu/drm-tungsten/via_buffer.c
+--- git/drivers/gpu/drm-tungsten/via_buffer.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_buffer.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,163 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
++{
++      return drm_agp_init_ttm(dev);
++}
++
++int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
++                  uint32_t * type)
++{
++      *type = 3;
++      return 0;
++}
++
++int via_invalidate_caches(struct drm_device * dev, uint64_t flags)
++{
++      /*
++       * FIXME: Invalidate texture caches here.
++       */
++
++      return 0;
++}
++
++
++static int via_vram_info(struct drm_device *dev,
++                       unsigned long *offset,
++                       unsigned long *size)
++{
++      struct pci_dev *pdev = dev->pdev;
++      unsigned long flags;
++
++      int ret = -EINVAL;
++      int i;
++      for (i=0; i<6; ++i) {
++              flags = pci_resource_flags(pdev, i);
++              if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
++                  (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
++                      ret = 0;
++                      break;
++              }
++      }
++
++      if (ret) {
++              DRM_ERROR("Could not find VRAM PCI resource\n");
++              return ret;
++      }
++
++      *offset = pci_resource_start(pdev, i);
++      *size = pci_resource_end(pdev, i) - *offset + 1;
++      return 0;
++}
++
++int via_init_mem_type(struct drm_device * dev, uint32_t type,
++                     struct drm_mem_type_manager * man)
++{
++      switch (type) {
++      case DRM_BO_MEM_LOCAL:
++              /* System memory */
++
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
++                      _DRM_FLAG_MEMTYPE_CACHED;
++              man->drm_bus_maptype = 0;
++              break;
++
++      case DRM_BO_MEM_TT:
++              /* Dynamic agpgart memory */
++
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
++
++              /* Only to get pte protection right. */
++
++              man->drm_bus_maptype = _DRM_AGP;
++              break;
++
++      case DRM_BO_MEM_VRAM:
++              /* "On-card" video ram */
++
++              man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_FRAME_BUFFER;
++              man->io_addr = NULL;
++              return via_vram_info(dev, &man->io_offset, &man->io_size);
++              break;
++
++      case DRM_BO_MEM_PRIV0:
++              /* Pre-bound agpgart memory */
++
++              if (!(drm_core_has_AGP(dev) && dev->agp)) {
++                      DRM_ERROR("AGP is not enabled for memory type %u\n",
++                                (unsigned)type);
++                      return -EINVAL;
++              }
++              man->io_offset = dev->agp->agp_info.aper_base;
++              man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
++              man->io_addr = NULL;
++              man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
++                  _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
++              man->drm_bus_maptype = _DRM_AGP;
++              break;
++
++      default:
++              DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++              return -EINVAL;
++      }
++      return 0;
++}
++
++uint64_t via_evict_flags(struct drm_buffer_object *bo)
++{
++      switch (bo->mem.mem_type) {
++      case DRM_BO_MEM_LOCAL:
++      case DRM_BO_MEM_TT:
++              return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */
++      case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */
++              return DRM_BO_MEM_TT;
++      case DRM_BO_MEM_VRAM:
++              if (bo->mem.num_pages > 128)
++                      return DRM_BO_MEM_TT;
++              else
++                      return DRM_BO_MEM_LOCAL;
++      default:
++              return DRM_BO_MEM_LOCAL;
++      }
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_dmablit.c git-nokia/drivers/gpu/drm-tungsten/via_dmablit.c
+--- git/drivers/gpu/drm-tungsten/via_dmablit.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_dmablit.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,829 @@
++/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
++ *
++ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Thomas Hellstrom.
++ *    Partially based on code obtained from Digeo Inc.
++ */
++
++
++/*
++ * Unmaps the DMA mappings.
++ * FIXME: Is this a NoOp on x86? Also
++ * FIXME: What happens if this one is called and a pending blit has previously done
++ * the same DMA mappings?
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++#include "via_dmablit.h"
++
++#include <linux/pagemap.h>
++
++#define VIA_PGDN(x)             (((unsigned long)(x)) & PAGE_MASK)
++#define VIA_PGOFF(x)            (((unsigned long)(x)) & ~PAGE_MASK)
++#define VIA_PFN(x)              ((unsigned long)(x) >> PAGE_SHIFT)
++
++typedef struct _drm_via_descriptor {
++      uint32_t mem_addr;
++      uint32_t dev_addr;
++      uint32_t size;
++      uint32_t next;
++} drm_via_descriptor_t;
++
++
++/*
++ * Unmap a DMA mapping.
++ */
++
++
++
++static void
++via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
++{
++      int num_desc = vsg->num_desc;
++      unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
++      unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
++      drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
++              descriptor_this_page;
++      dma_addr_t next = vsg->chain_start;
++
++      while(num_desc--) {
++              if (descriptor_this_page-- == 0) {
++                      cur_descriptor_page--;
++                      descriptor_this_page = vsg->descriptors_per_page - 1;
++                      desc_ptr = vsg->desc_pages[cur_descriptor_page] +
++                              descriptor_this_page;
++              }
++              dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
++              dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
++              next = (dma_addr_t) desc_ptr->next;
++              desc_ptr--;
++      }
++}
++
++/*
++ * If mode = 0, count how many descriptors are needed.
++ * If mode = 1, Map the DMA pages for the device, put together and map also the descriptors.
++ * Descriptors are run in reverse order by the hardware because we are not allowed to update the
++ * 'next' field without syncing calls when the descriptor is already mapped.
++ */
++
++static void
++via_map_blit_for_device(struct pci_dev *pdev,
++                 const drm_via_dmablit_t *xfer,
++                 drm_via_sg_info_t *vsg,
++                 int mode)
++{
++      unsigned cur_descriptor_page = 0;
++      unsigned num_descriptors_this_page = 0;
++      unsigned char *mem_addr = xfer->mem_addr;
++      unsigned char *cur_mem;
++      unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
++      uint32_t fb_addr = xfer->fb_addr;
++      uint32_t cur_fb;
++      unsigned long line_len;
++      unsigned remaining_len;
++      int num_desc = 0;
++      int cur_line;
++      dma_addr_t next = 0 | VIA_DMA_DPR_EC;
++      drm_via_descriptor_t *desc_ptr = NULL;
++
++      if (mode == 1)
++              desc_ptr = vsg->desc_pages[cur_descriptor_page];
++
++      for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
++
++              line_len = xfer->line_length;
++              cur_fb = fb_addr;
++              cur_mem = mem_addr;
++
++              while (line_len > 0) {
++
++                      remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
++                      line_len -= remaining_len;
++
++                      if (mode == 1) {
++                              desc_ptr->mem_addr = dma_map_page(&pdev->dev,
++                                      vsg->pages[VIA_PFN(cur_mem) -
++                                      VIA_PFN(first_addr)],
++                                      VIA_PGOFF(cur_mem), remaining_len,
++                                      vsg->direction);
++                              desc_ptr->dev_addr = cur_fb;
++
++                              desc_ptr->size = remaining_len;
++                              desc_ptr->next = (uint32_t) next;
++                              next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
++                                                    DMA_TO_DEVICE);
++                              desc_ptr++;
++                              if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
++                                      num_descriptors_this_page = 0;
++                                      desc_ptr = vsg->desc_pages[++cur_descriptor_page];
++                              }
++                      }
++
++                      num_desc++;
++                      cur_mem += remaining_len;
++                      cur_fb += remaining_len;
++              }
++
++              mem_addr += xfer->mem_stride;
++              fb_addr += xfer->fb_stride;
++      }
++
++      if (mode == 1) {
++              vsg->chain_start = next;
++              vsg->state = dr_via_device_mapped;
++      }
++      vsg->num_desc = num_desc;
++}
++
++/*
++ * Function that frees up all resources for a blit. It is usable even if the
++ * blit info has only been partially built as long as the status enum is consistent
++ * with the actual status of the used resources.
++ */
++
++
++static void
++via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
++{
++      struct page *page;
++      int i;
++
++      switch(vsg->state) {
++      case dr_via_device_mapped:
++              via_unmap_blit_from_device(pdev, vsg);
++      case dr_via_desc_pages_alloc:
++              for (i=0; i<vsg->num_desc_pages; ++i) {
++                      if (vsg->desc_pages[i] != NULL)
++                        free_page((unsigned long)vsg->desc_pages[i]);
++              }
++              kfree(vsg->desc_pages);
++      case dr_via_pages_locked:
++              for (i=0; i<vsg->num_pages; ++i) {
++                      if ( NULL != (page = vsg->pages[i])) {
++                              if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
++                                      SetPageDirty(page);
++                              page_cache_release(page);
++                      }
++              }
++      case dr_via_pages_alloc:
++              vfree(vsg->pages);
++      default:
++              vsg->state = dr_via_sg_init;
++      }
++      if (vsg->bounce_buffer) {
++              vfree(vsg->bounce_buffer);
++              vsg->bounce_buffer = NULL;
++      }
++      vsg->free_on_sequence = 0;
++}
++
++/*
++ * Fire a blit engine.
++ */
++
++static void
++via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++
++      VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
++      VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
++                VIA_DMA_CSR_DE);
++      VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
++      VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
++      VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
++      DRM_WRITEMEMORYBARRIER();
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
++      VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
++}
++
++/*
++ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
++ * occur here if the calling user does not have access to the submitted address.
++ */
++
++static int
++via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
++{
++      int ret;
++      unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
++      vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
++              first_pfn + 1;
++
++      if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
++              return -ENOMEM;
++      memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
++      down_read(&current->mm->mmap_sem);
++      ret = get_user_pages(current, current->mm,
++                           (unsigned long)xfer->mem_addr,
++                           vsg->num_pages,
++                           (vsg->direction == DMA_FROM_DEVICE),
++                           0, vsg->pages, NULL);
++
++      up_read(&current->mm->mmap_sem);
++      if (ret != vsg->num_pages) {
++              if (ret < 0)
++                      return ret;
++              vsg->state = dr_via_pages_locked;
++              return -EINVAL;
++      }
++      vsg->state = dr_via_pages_locked;
++      DRM_DEBUG("DMA pages locked\n");
++      return 0;
++}
++
++/*
++ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
++ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
++ * quite large for some blits, and pages don't need to be contingous.
++ */
++
++static int
++via_alloc_desc_pages(drm_via_sg_info_t *vsg)
++{
++      int i;
++
++      vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
++      vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
++              vsg->descriptors_per_page;
++
++      if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
++              return -ENOMEM;
++
++      memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
++      vsg->state = dr_via_desc_pages_alloc;
++      for (i=0; i<vsg->num_desc_pages; ++i) {
++              if (NULL == (vsg->desc_pages[i] =
++                           (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
++                      return -ENOMEM;
++      }
++      DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
++                vsg->num_desc);
++      return 0;
++}
++
++static void
++via_abort_dmablit(struct drm_device *dev, int engine)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
++}
++
++static void
++via_dmablit_engine_off(struct drm_device *dev, int engine)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++
++      VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
++}
++
++
++
++/*
++ * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
++ * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
++ * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
++ * the workqueue task takes care of processing associated with the old blit.
++ */
++
++void
++via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
++      int cur;
++      int done_transfer;
++      unsigned long irqsave=0;
++      uint32_t status = 0;
++
++      DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
++                engine, from_irq, (unsigned long) blitq);
++
++      if (from_irq) {
++              spin_lock(&blitq->blit_lock);
++      } else {
++              spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      }
++
++      done_transfer = blitq->is_active &&
++        (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
++      done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
++
++      cur = blitq->cur;
++      if (done_transfer) {
++
++              blitq->blits[cur]->aborted = blitq->aborting;
++              blitq->done_blit_handle++;
++              DRM_WAKEUP(blitq->blit_queue + cur);
++
++              cur++;
++              if (cur >= VIA_NUM_BLIT_SLOTS)
++                      cur = 0;
++              blitq->cur = cur;
++
++              /*
++               * Clear transfer done flag.
++               */
++
++              VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);
++
++              blitq->is_active = 0;
++              blitq->aborting = 0;
++              schedule_work(&blitq->wq);
++
++      } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
++
++              /*
++               * Abort transfer after one second.
++               */
++
++              via_abort_dmablit(dev, engine);
++              blitq->aborting = 1;
++              blitq->end = jiffies + DRM_HZ;
++      }
++
++      if (!blitq->is_active) {
++              if (blitq->num_outstanding) {
++                      via_fire_dmablit(dev, blitq->blits[cur], engine);
++                      blitq->is_active = 1;
++                      blitq->cur = cur;
++                      blitq->num_outstanding--;
++                      blitq->end = jiffies + DRM_HZ;
++                      if (!timer_pending(&blitq->poll_timer)) {
++                              blitq->poll_timer.expires = jiffies+1;
++                              add_timer(&blitq->poll_timer);
++                      }
++              } else {
++                      if (timer_pending(&blitq->poll_timer)) {
++                              del_timer(&blitq->poll_timer);
++                      }
++                      via_dmablit_engine_off(dev, engine);
++              }
++      }
++
++      if (from_irq) {
++              spin_unlock(&blitq->blit_lock);
++      } else {
++              spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++      }
++}
++
++
++
++/*
++ * Check whether this blit is still active, performing necessary locking.
++ */
++
++static int
++via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
++{
++      unsigned long irqsave;
++      uint32_t slot;
++      int active;
++
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++
++      /*
++       * Allow for handle wraparounds.
++       */
++
++      active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
++              ((blitq->cur_blit_handle - handle) <= (1 << 23));
++
++      if (queue && active) {
++              slot = handle - blitq->done_blit_handle + blitq->cur -1;
++              if (slot >= VIA_NUM_BLIT_SLOTS) {
++                      slot -= VIA_NUM_BLIT_SLOTS;
++              }
++              *queue = blitq->blit_queue + slot;
++      }
++
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++      return active;
++}
++
++/*
++ * Sync. Wait for at least three seconds for the blit to be performed.
++ */
++
++static int
++via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
++{
++
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
++      wait_queue_head_t *queue;
++      int ret = 0;
++
++      if (via_dmablit_active(blitq, engine, handle, &queue)) {
++              DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
++                          !via_dmablit_active(blitq, engine, handle, NULL));
++      }
++      DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
++                handle, engine, ret);
++
++      return ret;
++}
++
++
++/*
++ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
++ * a) Broken hardware (typically those that don't have any video capture facility).
++ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
++ * The timer and hardware IRQ's can and do work in parallel. If the hardware has
++ * irqs, it will shorten the latency somewhat.
++ */
++
++
++
++static void
++via_dmablit_timer(unsigned long data)
++{
++      drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
++      struct drm_device *dev = blitq->dev;
++      int engine = (int)
++              (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
++
++      DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
++                (unsigned long) jiffies);
++
++      via_dmablit_handler(dev, engine, 0);
++
++      if (!timer_pending(&blitq->poll_timer)) {
++              blitq->poll_timer.expires = jiffies+1;
++              add_timer(&blitq->poll_timer);
++
++              /*
++               * Rerun handler to delete timer if engines are off, and
++               * to shorten abort latency. This is a little nasty.
++               */
++
++              via_dmablit_handler(dev, engine, 0);
++      }
++}
++
++
++
++
++/*
++ * Workqueue task that frees data and mappings associated with a blit.
++ * Also wakes up waiting processes. Each of these tasks handles one
++ * blit engine only and may not be called on each interrupt.
++ */
++
++
++static void
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++via_dmablit_workqueue(void *data)
++#else
++via_dmablit_workqueue(struct work_struct *work)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++      drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
++#else
++      drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
++#endif
++      struct drm_device *dev = blitq->dev;
++      unsigned long irqsave;
++      drm_via_sg_info_t *cur_sg;
++      int cur_released;
++
++
++      DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
++                (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
++
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++
++      while(blitq->serviced != blitq->cur) {
++
++              cur_released = blitq->serviced++;
++
++              DRM_DEBUG("Releasing blit slot %d\n", cur_released);
++
++              if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
++                      blitq->serviced = 0;
++
++              cur_sg = blitq->blits[cur_released];
++              blitq->num_free++;
++
++              spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++              DRM_WAKEUP(&blitq->busy_queue);
++
++              via_free_sg_info(dev->pdev, cur_sg);
++              kfree(cur_sg);
++
++              spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      }
++
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++}
++
++
++/*
++ * Init all blit engines. Currently we use two, but some hardware have 4.
++ */
++
++
++void
++via_init_dmablit(struct drm_device *dev)
++{
++      int i,j;
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_blitq_t *blitq;
++
++      pci_set_master(dev->pdev);
++
++      for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
++              blitq = dev_priv->blit_queues + i;
++              blitq->dev = dev;
++              blitq->cur_blit_handle = 0;
++              blitq->done_blit_handle = 0;
++              blitq->head = 0;
++              blitq->cur = 0;
++              blitq->serviced = 0;
++              blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
++              blitq->num_outstanding = 0;
++              blitq->is_active = 0;
++              blitq->aborting = 0;
++              spin_lock_init(&blitq->blit_lock);
++              for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
++                      DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
++              }
++              DRM_INIT_WAITQUEUE(&blitq->busy_queue);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++              INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
++#else
++              INIT_WORK(&blitq->wq, via_dmablit_workqueue);
++#endif
++              init_timer(&blitq->poll_timer);
++              blitq->poll_timer.function = &via_dmablit_timer;
++              blitq->poll_timer.data = (unsigned long) blitq;
++      }
++}
++
++/*
++ * Build all info and do all mappings required for a blit.
++ */
++
++
++static int
++via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
++{
++      int draw = xfer->to_fb;
++      int ret = 0;
++
++      vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
++      vsg->bounce_buffer = NULL;
++
++      vsg->state = dr_via_sg_init;
++
++      if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
++              DRM_ERROR("Zero size bitblt.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * Below check is a driver limitation, not a hardware one. We
++       * don't want to lock unused pages, and don't want to incoporate the
++       * extra logic of avoiding them. Make sure there are no.
++       * (Not a big limitation anyway.)
++       */
++
++      if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
++              DRM_ERROR("Too large system memory stride. Stride: %d, "
++                        "Length: %d\n", xfer->mem_stride, xfer->line_length);
++              return -EINVAL;
++      }
++
++      if ((xfer->mem_stride == xfer->line_length) &&
++          (xfer->fb_stride == xfer->line_length)) {
++              xfer->mem_stride *= xfer->num_lines;
++              xfer->line_length = xfer->mem_stride;
++              xfer->fb_stride = xfer->mem_stride;
++              xfer->num_lines = 1;
++      }
++
++      /*
++       * Don't lock an arbitrary large number of pages, since that causes a
++       * DOS security hole.
++       */
++
++      if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
++              DRM_ERROR("Too large PCI DMA bitblt.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * we allow a negative fb stride to allow flipping of images in
++       * transfer.
++       */
++
++      if (xfer->mem_stride < xfer->line_length ||
++          abs(xfer->fb_stride) < xfer->line_length) {
++              DRM_ERROR("Invalid frame-buffer / memory stride.\n");
++              return -EINVAL;
++      }
++
++      /*
++       * A hardware bug seems to be worked around if system memory addresses start on
++       * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
++       * about this. Meanwhile, impose the following restrictions:
++       */
++
++#ifdef VIA_BUGFREE
++      if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
++          ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
++              DRM_ERROR("Invalid DRM bitblt alignment.\n");
++              return -EINVAL;
++      }
++#else
++      if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
++          ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
++              DRM_ERROR("Invalid DRM bitblt alignment.\n");
++              return -EINVAL;
++      }
++#endif
++
++      if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
++              DRM_ERROR("Could not lock DMA pages.\n");
++              via_free_sg_info(dev->pdev, vsg);
++              return ret;
++      }
++
++      via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
++      if (0 != (ret = via_alloc_desc_pages(vsg))) {
++              DRM_ERROR("Could not allocate DMA descriptor pages.\n");
++              via_free_sg_info(dev->pdev, vsg);
++              return ret;
++      }
++      via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
++
++      return 0;
++}
++
++
++/*
++ * Reserve one free slot in the blit queue. Will wait for one second for one
++ * to become available. Otherwise -EBUSY is returned.
++ */
++
++static int
++via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
++{
++      int ret=0;
++      unsigned long irqsave;
++
++      DRM_DEBUG("Num free is %d\n", blitq->num_free);
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      while(blitq->num_free == 0) {
++              spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++              DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
++              if (ret) {
++                      return (-EINTR == ret) ? -EAGAIN : ret;
++              }
++
++              spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      }
++
++      blitq->num_free--;
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++
++      return 0;
++}
++
++/*
++ * Hand back a free slot if we changed our mind.
++ */
++
++static void
++via_dmablit_release_slot(drm_via_blitq_t *blitq)
++{
++      unsigned long irqsave;
++
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++      blitq->num_free++;
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++      DRM_WAKEUP( &blitq->busy_queue );
++}
++
++/*
++ * Grab a free slot. Build blit info and queue a blit.
++ */
++
++
++static int
++via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
++      drm_via_sg_info_t *vsg;
++      drm_via_blitq_t *blitq;
++      int ret;
++      int engine;
++      unsigned long irqsave;
++
++      if (dev_priv == NULL) {
++              DRM_ERROR("Called without initialization.\n");
++              return -EINVAL;
++      }
++
++      engine = (xfer->to_fb) ? 0 : 1;
++      blitq = dev_priv->blit_queues + engine;
++      if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
++              return ret;
++      }
++      if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
++              via_dmablit_release_slot(blitq);
++              return -ENOMEM;
++      }
++      if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
++              via_dmablit_release_slot(blitq);
++              kfree(vsg);
++              return ret;
++      }
++      spin_lock_irqsave(&blitq->blit_lock, irqsave);
++
++      blitq->blits[blitq->head++] = vsg;
++      if (blitq->head >= VIA_NUM_BLIT_SLOTS)
++              blitq->head = 0;
++      blitq->num_outstanding++;
++      xfer->sync.sync_handle = ++blitq->cur_blit_handle;
++
++      spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
++      xfer->sync.engine = engine;
++
++      via_dmablit_handler(dev, engine, 0);
++
++      return 0;
++}
++
++/*
++ * Sync on a previously submitted blit. Note that the X server use signals extensively, and
++ * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
++ * case it returns with -EAGAIN for the signal to be delivered.
++ * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
++ */
++
++int
++via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
++{
++      drm_via_blitsync_t *sync = data;
++      int err;
++
++      if (sync->engine >= VIA_NUM_BLIT_ENGINES)
++              return -EINVAL;
++
++      err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
++
++      if (-EINTR == err)
++              err = -EAGAIN;
++
++      return err;
++}
++
++
++/*
++ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
++ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
++ * be reissued. See the above IOCTL code.
++ */
++
++int
++via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
++{
++      drm_via_dmablit_t *xfer = data;
++      int err;
++
++      err = via_dmablit(dev, xfer);
++
++      return err;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_dmablit.h git-nokia/drivers/gpu/drm-tungsten/via_dmablit.h
+--- git/drivers/gpu/drm-tungsten/via_dmablit.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_dmablit.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,140 @@
++/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
++ *
++ * Copyright 2005 Thomas Hellstrom.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Thomas Hellstrom.
++ *    Register info from Digeo Inc.
++ */
++
++#ifndef _VIA_DMABLIT_H
++#define _VIA_DMABLIT_H
++
++#include <linux/dma-mapping.h>
++
++#define VIA_NUM_BLIT_ENGINES 2
++#define VIA_NUM_BLIT_SLOTS 8
++
++struct _drm_via_descriptor;
++
++typedef struct _drm_via_sg_info {
++      struct page **pages;
++      unsigned long num_pages;
++      struct _drm_via_descriptor **desc_pages;
++      int num_desc_pages;
++      int num_desc;
++      enum dma_data_direction direction;
++      unsigned char *bounce_buffer;
++      dma_addr_t chain_start;
++      uint32_t free_on_sequence;
++      unsigned int descriptors_per_page;
++      int aborted;
++      enum {
++              dr_via_device_mapped,
++              dr_via_desc_pages_alloc,
++              dr_via_pages_locked,
++              dr_via_pages_alloc,
++              dr_via_sg_init
++      } state;
++} drm_via_sg_info_t;
++
++typedef struct _drm_via_blitq {
++      struct drm_device *dev;
++      uint32_t cur_blit_handle;
++      uint32_t done_blit_handle;
++      unsigned serviced;
++      unsigned head;
++      unsigned cur;
++      unsigned num_free;
++      unsigned num_outstanding;
++      unsigned long end;
++      int aborting;
++      int is_active;
++      drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
++      spinlock_t blit_lock;
++      wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
++      wait_queue_head_t busy_queue;
++      struct work_struct wq;
++      struct timer_list poll_timer;
++} drm_via_blitq_t;
++
++
++/*
++ *  PCI DMA Registers
++ *  Channels 2 & 3 don't seem to be implemented in hardware.
++ */
++
++#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */
++#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */
++#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */
++#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */
++
++#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */
++#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */
++#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */
++#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */
++
++#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */
++#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */
++#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */
++#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */
++
++#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */
++#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */
++#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */
++#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */
++
++#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */
++#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */
++#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */
++#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */
++
++#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */
++#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */
++#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */
++#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */
++
++#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */
++
++/* Define for DMA engine */
++/* DPR */
++#define VIA_DMA_DPR_EC                (1<<1)  /* end of chain */
++#define VIA_DMA_DPR_DDIE      (1<<2)  /* descriptor done interrupt enable */
++#define VIA_DMA_DPR_DT                (1<<3)  /* direction of transfer (RO) */
++
++/* MR */
++#define VIA_DMA_MR_CM         (1<<0)  /* chaining mode */
++#define VIA_DMA_MR_TDIE               (1<<1)  /* transfer done interrupt enable */
++#define VIA_DMA_MR_HENDMACMD          (1<<7) /* ? */
++
++/* CSR */
++#define VIA_DMA_CSR_DE                (1<<0)  /* DMA enable */
++#define VIA_DMA_CSR_TS                (1<<1)  /* transfer start */
++#define VIA_DMA_CSR_TA                (1<<2)  /* transfer abort */
++#define VIA_DMA_CSR_TD                (1<<3)  /* transfer done */
++#define VIA_DMA_CSR_DD                (1<<4)  /* descriptor done */
++#define VIA_DMA_DPR_EC          (1<<1)  /* end of chain */
++
++
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_dma.c git-nokia/drivers/gpu/drm-tungsten/via_dma.c
+--- git/drivers/gpu/drm-tungsten/via_dma.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_dma.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,763 @@
++/* via_dma.c -- DMA support for the VIA Unichrome/Pro
++ *
++ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
++ * All Rights Reserved.
++ *
++ * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
++ * All Rights Reserved.
++ *
++ * Copyright 2004 The Unichrome project.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Tungsten Graphics,
++ *    Erdi Chen,
++ *    Thomas Hellstrom.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "via_drm.h"
++#include "via_drv.h"
++#include "via_3d_reg.h"
++
++#define SetReg2DAGP(nReg, nData) {                            \
++      *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;  \
++      *((uint32_t *)(vb) + 1) = (nData);                      \
++      vb = ((uint32_t *)vb) + 2;                              \
++      dev_priv->dma_low +=8;                                  \
++}
++
++#define via_flush_write_combine() DRM_MEMORYBARRIER()
++
++#define VIA_OUT_RING_QW(w1,w2)                        \
++      *vb++ = (w1);                           \
++      *vb++ = (w2);                           \
++      dev_priv->dma_low += 8;
++
++static void via_cmdbuf_start(drm_via_private_t *dev_priv);
++static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
++static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
++static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
++static int via_wait_idle(drm_via_private_t *dev_priv);
++static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
++
++
++/*
++ * Free space in command buffer.
++ */
++
++static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
++{
++      uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
++
++      return ((hw_addr <= dev_priv->dma_low) ?
++              (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
++              (hw_addr - dev_priv->dma_low));
++}
++
++/*
++ * How much does the command regulator lag behind?
++ */
++
++static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
++{
++      uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
++
++      return ((hw_addr <= dev_priv->dma_low) ?
++              (dev_priv->dma_low - hw_addr) :
++              (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
++}
++
++/*
++ * Check that the given size fits in the buffer, otherwise wait.
++ */
++
++static inline int
++via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
++{
++      uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      uint32_t cur_addr, hw_addr, next_addr;
++      volatile uint32_t *hw_addr_ptr;
++      uint32_t count;
++      hw_addr_ptr = dev_priv->hw_addr_ptr;
++      cur_addr = dev_priv->dma_low;
++      next_addr = cur_addr + size + 512 * 1024;
++      count = 1000000;
++      do {
++              hw_addr = *hw_addr_ptr - agp_base;
++              if (count-- == 0) {
++                      DRM_ERROR
++                          ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
++                           hw_addr, cur_addr, next_addr);
++                      return -1;
++              }
++              if  ((cur_addr < hw_addr) && (next_addr >= hw_addr))
++                      msleep(1);
++      } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
++      return 0;
++}
++
++
++/*
++ * Checks whether buffer head has reach the end. Rewind the ring buffer
++ * when necessary.
++ *
++ * Returns virtual pointer to ring buffer.
++ */
++
++static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
++                                    unsigned int size)
++{
++      if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
++          dev_priv->dma_high) {
++              via_cmdbuf_rewind(dev_priv);
++      }
++      if (via_cmdbuf_wait(dev_priv, size) != 0) {
++              return NULL;
++      }
++
++      return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
++}
++
++int via_dma_cleanup(struct drm_device * dev)
++{
++      if (dev->dev_private) {
++              drm_via_private_t *dev_priv =
++                      (drm_via_private_t *) dev->dev_private;
++
++              if (dev_priv->ring.virtual_start) {
++                      via_cmdbuf_reset(dev_priv);
++
++                      drm_core_ioremapfree(&dev_priv->ring.map, dev);
++                      dev_priv->ring.virtual_start = NULL;
++              }
++
++      }
++
++      return 0;
++}
++
++static int via_initialize(struct drm_device * dev,
++                        drm_via_private_t * dev_priv,
++                        drm_via_dma_init_t * init)
++{
++      if (!dev_priv || !dev_priv->mmio) {
++              DRM_ERROR("via_dma_init called before via_map_init\n");
++              return -EFAULT;
++      }
++
++      if (dev_priv->ring.virtual_start != NULL) {
++              DRM_ERROR("called again without calling cleanup\n");
++              return -EFAULT;
++      }
++
++      if (!dev->agp || !dev->agp->base) {
++              DRM_ERROR("called with no agp memory available\n");
++              return -EFAULT;
++      }
++
++      if (dev_priv->chipset == VIA_DX9_0) {
++              DRM_ERROR("AGP DMA is not supported on this chip\n");
++              return -EINVAL;
++      }
++
++      dev_priv->ring.map.offset = dev->agp->base + init->offset;
++      dev_priv->ring.map.size = init->size;
++      dev_priv->ring.map.type = 0;
++      dev_priv->ring.map.flags = 0;
++      dev_priv->ring.map.mtrr = 0;
++
++      drm_core_ioremap(&dev_priv->ring.map, dev);
++
++      if (dev_priv->ring.map.handle == NULL) {
++              via_dma_cleanup(dev);
++              DRM_ERROR("can not ioremap virtual address for"
++                        " ring buffer\n");
++              return -ENOMEM;
++      }
++
++      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
++
++      dev_priv->dma_ptr = dev_priv->ring.virtual_start;
++      dev_priv->dma_low = 0;
++      dev_priv->dma_high = init->size;
++      dev_priv->dma_wrap = init->size;
++      dev_priv->dma_offset = init->offset;
++      dev_priv->last_pause_ptr = NULL;
++      dev_priv->hw_addr_ptr =
++              (volatile uint32_t *)((char *)dev_priv->mmio->handle +
++              init->reg_pause_addr);
++
++      via_cmdbuf_start(dev_priv);
++
++      return 0;
++}
++
++static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_dma_init_t *init = data;
++      int retcode = 0;
++
++      switch (init->func) {
++      case VIA_INIT_DMA:
++              if (!DRM_SUSER(DRM_CURPROC))
++                      retcode = -EPERM;
++              else
++                      retcode = via_initialize(dev, dev_priv, init);
++              break;
++      case VIA_CLEANUP_DMA:
++              if (!DRM_SUSER(DRM_CURPROC))
++                      retcode = -EPERM;
++              else
++                      retcode = via_dma_cleanup(dev);
++              break;
++      case VIA_DMA_INITIALIZED:
++              retcode = (dev_priv->ring.virtual_start != NULL) ?
++                      0 : -EFAULT;
++              break;
++      default:
++              retcode = -EINVAL;
++              break;
++      }
++
++      return retcode;
++}
++
++
++
++static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
++{
++      drm_via_private_t *dev_priv;
++      uint32_t *vb;
++      int ret;
++
++      dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      if (dev_priv->ring.virtual_start == NULL) {
++              DRM_ERROR("called without initializing AGP ring buffer.\n");
++              return -EFAULT;
++      }
++
++      if (cmd->size > VIA_PCI_BUF_SIZE) {
++              return -ENOMEM;
++      }
++
++      if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
++              return -EFAULT;
++
++      /*
++       * Running this function on AGP memory is dead slow. Therefore
++       * we run it on a temporary cacheable system memory buffer and
++       * copy it to AGP memory when ready.
++       */
++
++      if ((ret =
++           via_verify_command_stream((uint32_t *)dev_priv->pci_buf,
++                                     cmd->size, dev, 1))) {
++              return ret;
++      }
++
++      vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
++      if (vb == NULL) {
++              return -EAGAIN;
++      }
++
++      memcpy(vb, dev_priv->pci_buf, cmd->size);
++
++      dev_priv->dma_low += cmd->size;
++
++      /*
++       * Small submissions somehow stalls the CPU. (AGP cache effects?)
++       * pad to greater size.
++       */
++
++      if (cmd->size < 0x100)
++              via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
++      via_cmdbuf_pause(dev_priv);
++
++      return 0;
++}
++
++int via_driver_dma_quiescent(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++      if (!via_wait_idle(dev_priv)) {
++              return -EBUSY;
++      }
++      return 0;
++}
++
++static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      return via_driver_dma_quiescent(dev);
++}
++
++static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_cmdbuffer_t *cmdbuf = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
++
++      ret = via_dispatch_cmdbuffer(dev, cmdbuf);
++      if (ret) {
++              return ret;
++      }
++
++      return 0;
++}
++
++static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
++                                    drm_via_cmdbuffer_t * cmd)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      int ret;
++
++      if (cmd->size > VIA_PCI_BUF_SIZE) {
++              return -ENOMEM;
++      }
++      if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
++              return -EFAULT;
++
++      if ((ret =
++           via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
++                                     cmd->size, dev, 0))) {
++              return ret;
++      }
++
++      ret =
++          via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
++                                   cmd->size);
++      return ret;
++}
++
++static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_cmdbuffer_t *cmdbuf = data;
++      int ret;
++
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
++
++      ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
++      if (ret) {
++              return ret;
++      }
++
++      return 0;
++}
++
++static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
++                                       uint32_t * vb, int qw_count)
++{
++      for (; qw_count > 0; --qw_count) {
++              VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
++      }
++      return vb;
++}
++
++/*
++ * This function is used internally by ring buffer mangement code.
++ *
++ * Returns virtual pointer to ring buffer.
++ */
++static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
++{
++      return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
++}
++
++/*
++ * Hooks a segment of data into the tail of the ring-buffer by
++ * modifying the pause address stored in the buffer itself. If
++ * the regulator has already paused, restart it.
++ */
++static int via_hook_segment(drm_via_private_t * dev_priv,
++                          uint32_t pause_addr_hi, uint32_t pause_addr_lo,
++                          int no_pci_fire)
++{
++      int paused, count;
++      volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
++      uint32_t reader,ptr;
++      uint32_t diff;
++
++      paused = 0;
++      via_flush_write_combine();
++      (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
++
++      *paused_at = pause_addr_lo;
++      via_flush_write_combine();
++      (void) *paused_at;
++
++      reader = *(dev_priv->hw_addr_ptr);
++      ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
++              dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
++
++      dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
++
++      /*
++       * If there is a possibility that the command reader will 
++       * miss the new pause address and pause on the old one,
++       * In that case we need to program the new start address
++       * using PCI.
++       */
++
++      diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
++      count = 10000000;
++      while(diff == 0 && count--) {
++              paused = (VIA_READ(0x41c) & 0x80000000);
++              if (paused) 
++                      break;
++              reader = *(dev_priv->hw_addr_ptr);
++              diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
++      }
++
++      paused = VIA_READ(0x41c) & 0x80000000;
++
++      if (paused && !no_pci_fire) {
++              reader = *(dev_priv->hw_addr_ptr);
++              diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
++              diff &= (dev_priv->dma_high - 1);
++              if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
++                      DRM_ERROR("Paused at incorrect address. "
++                                "0x%08x, 0x%08x 0x%08x\n",
++                                ptr, reader, dev_priv->dma_diff);
++              } else if (diff == 0) {
++                      /*
++                       * There is a concern that these writes may stall the PCI bus
++                       * if the GPU is not idle. However, idling the GPU first
++                       * doesn't make a difference.
++                       */
++
++                      VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
++                      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
++                      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
++                      VIA_READ(VIA_REG_TRANSPACE);
++              }
++      }
++
++      return paused;
++}
++
++
++
++static int via_wait_idle(drm_via_private_t *dev_priv)
++{
++      int count = 10000000;
++
++      while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
++
++      while (count-- && (VIA_READ(VIA_REG_STATUS) &
++                         (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
++                          VIA_3D_ENG_BUSY))) ;
++      return count;
++}
++
++static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
++                             uint32_t addr, uint32_t *cmd_addr_hi,
++                             uint32_t *cmd_addr_lo, int skip_wait)
++{
++      uint32_t agp_base;
++      uint32_t cmd_addr, addr_lo, addr_hi;
++      uint32_t *vb;
++      uint32_t qw_pad_count;
++
++      if (!skip_wait)
++              via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
++
++      vb = via_get_dma(dev_priv);
++      VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
++                      (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
++
++      agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
++              ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
++
++      cmd_addr = (addr) ? addr :
++              agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
++      addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
++                 (cmd_addr & HC_HAGPBpL_MASK));
++      addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
++
++      vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
++      VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
++      return vb;
++}
++
++static void via_cmdbuf_start(drm_via_private_t * dev_priv)
++{
++      uint32_t pause_addr_lo, pause_addr_hi;
++      uint32_t start_addr, start_addr_lo;
++      uint32_t end_addr, end_addr_lo;
++      uint32_t command;
++      uint32_t agp_base;
++      uint32_t ptr;
++      uint32_t reader;
++      int count;
++
++      dev_priv->dma_low = 0;
++
++      agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      start_addr = agp_base;
++      end_addr = agp_base + dev_priv->dma_high;
++
++      start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
++      end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
++      command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
++                 ((end_addr & 0xff000000) >> 16));
++
++      dev_priv->last_pause_ptr =
++              via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
++                            &pause_addr_hi, & pause_addr_lo, 1) - 1;
++
++      via_flush_write_combine();
++      (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
++
++      VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
++      VIA_WRITE(VIA_REG_TRANSPACE, command);
++      VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
++      VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
++
++      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
++      VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
++      DRM_WRITEMEMORYBARRIER();
++      VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
++      VIA_READ(VIA_REG_TRANSPACE);
++
++      dev_priv->dma_diff = 0;
++
++      count = 10000000;
++      while (!(VIA_READ(0x41c) & 0x80000000) && count--);
++
++      reader = *(dev_priv->hw_addr_ptr);
++      ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
++          dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
++
++      /*
++       * This is the difference between where we tell the
++       * command reader to pause and where it actually pauses.
++       * This differs between hw implementations so we need to
++       * detect it.
++       */
++
++      dev_priv->dma_diff = ptr - reader;
++}
++
++static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
++{
++      uint32_t *vb;
++
++      via_cmdbuf_wait(dev_priv, qwords + 2);
++      vb = via_get_dma(dev_priv);
++      VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
++      via_align_buffer(dev_priv, vb, qwords);
++}
++
++static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
++{
++      uint32_t *vb = via_get_dma(dev_priv);
++      SetReg2DAGP(0x0C, (0 | (0 << 16)));
++      SetReg2DAGP(0x10, 0 | (0 << 16));
++      SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
++}
++
++static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
++{
++      uint32_t agp_base;
++      uint32_t pause_addr_lo, pause_addr_hi;
++      uint32_t jump_addr_lo, jump_addr_hi;
++      volatile uint32_t *last_pause_ptr;
++      uint32_t dma_low_save1, dma_low_save2;
++      
++      agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
++      via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
++                    &jump_addr_lo, 0);
++
++      dev_priv->dma_wrap = dev_priv->dma_low;
++
++      /*
++       * Wrap command buffer to the beginning.
++       */
++
++      dev_priv->dma_low = 0;
++      if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
++              DRM_ERROR("via_cmdbuf_jump failed\n");
++      }
++
++      via_dummy_bitblt(dev_priv);
++      via_dummy_bitblt(dev_priv);
++
++      last_pause_ptr =
++              via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                        &pause_addr_lo, 0) - 1;
++      via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                    &pause_addr_lo, 0);
++
++      *last_pause_ptr = pause_addr_lo;
++      dma_low_save1 = dev_priv->dma_low;
++      
++      /*
++       * Now, set a trap that will pause the regulator if it tries to rerun the old
++       * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
++       * and reissues the jump command over PCI, while the regulator has already taken the jump
++       * and actually paused at the current buffer end).
++       * There appears to be no other way to detect this condition, since the hw_addr_pointer
++       * does not seem to get updated immediately when a jump occurs.
++       */
++
++      last_pause_ptr =
++              via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                            &pause_addr_lo, 0) - 1;
++      via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
++                    &pause_addr_lo, 0);
++      *last_pause_ptr = pause_addr_lo;
++      
++      dma_low_save2 = dev_priv->dma_low;
++      dev_priv->dma_low = dma_low_save1;
++      via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
++      dev_priv->dma_low = dma_low_save2;
++      via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
++}
++
++
++static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
++{
++      via_cmdbuf_jump(dev_priv);
++}
++
++static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
++{
++      uint32_t pause_addr_lo, pause_addr_hi;
++
++      via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
++      via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
++}
++
++
++static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
++{
++      via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
++}
++
++static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
++{
++      via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
++      via_wait_idle(dev_priv);
++}
++
++/*
++ * User interface to the space and lag functions.
++ */
++
++static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_cmdbuf_size_t *d_siz = data;
++      int ret = 0;
++      uint32_t tmp_size, count;
++      drm_via_private_t *dev_priv;
++
++      DRM_DEBUG("\n");
++      LOCK_TEST_WITH_RETURN(dev, file_priv);
++
++      dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      if (dev_priv->ring.virtual_start == NULL) {
++              DRM_ERROR("called without initializing AGP ring buffer.\n");
++              return -EFAULT;
++      }
++
++      count = 1000000;
++      tmp_size = d_siz->size;
++      switch (d_siz->func) {
++      case VIA_CMDBUF_SPACE:
++              while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
++                     && count--) {
++                      if (!d_siz->wait) {
++                              break;
++                      }
++              }
++              if (!count) {
++                      DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
++                      ret = -EAGAIN;
++              }
++              break;
++      case VIA_CMDBUF_LAG:
++              while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
++                     && count--) {
++                      if (!d_siz->wait) {
++                              break;
++                      }
++              }
++              if (!count) {
++                      DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
++                      ret = -EAGAIN;
++              }
++              break;
++      default:
++              ret = -EFAULT;
++      }
++      d_siz->size = tmp_size;
++
++      return ret;
++}
++
++#ifndef VIA_HAVE_DMABLIT
++int
++via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
++      DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
++      return -EINVAL;
++}
++int
++via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
++      DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
++      return -EINVAL;
++}
++#endif
++
++struct drm_ioctl_desc via_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
++};
++
++int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
+diff -Nurd git/drivers/gpu/drm-tungsten/via_drm.h git-nokia/drivers/gpu/drm-tungsten/via_drm.h
+--- git/drivers/gpu/drm-tungsten/via_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,282 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _VIA_DRM_H_
++#define _VIA_DRM_H_
++
++/* WARNING: These defines must be the same as what the Xserver uses.
++ * if you change them, you must change the defines in the Xserver.
++ */
++
++#ifndef _VIA_DEFINES_
++#define _VIA_DEFINES_
++
++
++#if !defined(__KERNEL__) && !defined(_KERNEL)
++#include "via_drmclient.h"
++#endif
++
++/*
++ * With the arrival of libdrm there is a need to version this file.
++ * As usual, bump MINOR for new features, MAJOR for changes that create
++ * backwards incompatibilities, (which should be avoided whenever possible).
++ */
++
++#define VIA_DRM_DRIVER_DATE           "20070202"
++
++#define VIA_DRM_DRIVER_MAJOR          2
++#define VIA_DRM_DRIVER_MINOR          11
++#define VIA_DRM_DRIVER_PATCHLEVEL     1
++#define VIA_DRM_DRIVER_VERSION          (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR))
++
++#define VIA_NR_SAREA_CLIPRECTS                8
++#define VIA_NR_XVMC_PORTS            10
++#define VIA_NR_XVMC_LOCKS            5
++#define VIA_MAX_CACHELINE_SIZE          64
++#define XVMCLOCKPTR(saPriv,lockNo)                                    \
++      ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
++                                    (VIA_MAX_CACHELINE_SIZE - 1)) &   \
++                                   ~(VIA_MAX_CACHELINE_SIZE - 1)) +   \
++                                  VIA_MAX_CACHELINE_SIZE*(lockNo)))
++#define VIA_NR_TEX_REGIONS 64
++
++#endif
++
++#define DRM_VIA_FENCE_TYPE_ACCEL 0x00000002
++
++/* VIA specific ioctls */
++#define DRM_VIA_ALLOCMEM      0x00
++#define DRM_VIA_FREEMEM               0x01
++#define DRM_VIA_AGP_INIT      0x02
++#define DRM_VIA_FB_INIT               0x03
++#define DRM_VIA_MAP_INIT      0x04
++#define DRM_VIA_DEC_FUTEX       0x05
++#define NOT_USED
++#define DRM_VIA_DMA_INIT      0x07
++#define DRM_VIA_CMDBUFFER     0x08
++#define DRM_VIA_FLUSH         0x09
++#define DRM_VIA_PCICMD                0x0a
++#define DRM_VIA_CMDBUF_SIZE   0x0b
++#define NOT_USED
++#define DRM_VIA_WAIT_IRQ      0x0d
++#define DRM_VIA_DMA_BLIT      0x0e
++#define DRM_VIA_BLIT_SYNC       0x0f
++
++#define DRM_IOCTL_VIA_ALLOCMEM          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
++#define DRM_IOCTL_VIA_FREEMEM   DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
++#define DRM_IOCTL_VIA_AGP_INIT          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
++#define DRM_IOCTL_VIA_FB_INIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
++#define DRM_IOCTL_VIA_MAP_INIT          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
++#define DRM_IOCTL_VIA_DEC_FUTEX   DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
++#define DRM_IOCTL_VIA_DMA_INIT          DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
++#define DRM_IOCTL_VIA_CMDBUFFER         DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
++#define DRM_IOCTL_VIA_FLUSH     DRM_IO(  DRM_COMMAND_BASE + DRM_VIA_FLUSH)
++#define DRM_IOCTL_VIA_PCICMD    DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
++#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
++                                          drm_via_cmdbuf_size_t)
++#define DRM_IOCTL_VIA_WAIT_IRQ    DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
++#define DRM_IOCTL_VIA_DMA_BLIT    DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
++#define DRM_IOCTL_VIA_BLIT_SYNC   DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
++
++/* Indices into buf.Setup where various bits of state are mirrored per
++ * context and per buffer.  These can be fired at the card as a unit,
++ * or in a piecewise fashion as required.
++ */
++
++#define VIA_TEX_SETUP_SIZE 8
++
++/* Flags for clear ioctl
++ */
++#define VIA_FRONT   0x1
++#define VIA_BACK    0x2
++#define VIA_DEPTH   0x4
++#define VIA_STENCIL 0x8
++
++#define VIA_MEM_VIDEO   0     /* matches drm constant */
++#define VIA_MEM_AGP     1     /* matches drm constant */
++#define VIA_MEM_SYSTEM  2
++#define VIA_MEM_MIXED   3
++#define VIA_MEM_UNKNOWN 4
++
++typedef struct {
++      uint32_t offset;
++      uint32_t size;
++} drm_via_agp_t;
++
++typedef struct {
++      uint32_t offset;
++      uint32_t size;
++} drm_via_fb_t;
++
++typedef struct {
++      uint32_t context;
++      uint32_t type;
++      uint32_t size;
++      unsigned long index;
++      unsigned long offset;
++} drm_via_mem_t;
++
++typedef struct _drm_via_init {
++      enum {
++              VIA_INIT_MAP = 0x01,
++              VIA_CLEANUP_MAP = 0x02
++      } func;
++
++      unsigned long sarea_priv_offset;
++      unsigned long fb_offset;
++      unsigned long mmio_offset;
++      unsigned long agpAddr;
++} drm_via_init_t;
++
++typedef struct _drm_via_futex {
++      enum {
++              VIA_FUTEX_WAIT = 0x00,
++              VIA_FUTEX_WAKE = 0X01
++      } func;
++      uint32_t ms;
++      uint32_t lock;
++      uint32_t val;
++} drm_via_futex_t;
++
++typedef struct _drm_via_dma_init {
++      enum {
++              VIA_INIT_DMA = 0x01,
++              VIA_CLEANUP_DMA = 0x02,
++              VIA_DMA_INITIALIZED = 0x03
++      } func;
++
++      unsigned long offset;
++      unsigned long size;
++      unsigned long reg_pause_addr;
++} drm_via_dma_init_t;
++
++typedef struct _drm_via_cmdbuffer {
++      char __user *buf;
++      unsigned long size;
++} drm_via_cmdbuffer_t;
++
++/* Warning: If you change the SAREA structure you must change the Xserver
++ * structure as well */
++
++typedef struct _drm_via_tex_region {
++      unsigned char next, prev;       /* indices to form a circular LRU  */
++      unsigned char inUse;    /* owned by a client, or free? */
++      int age;                /* tracked by clients to update local LRU's */
++} drm_via_tex_region_t;
++
++typedef struct _drm_via_sarea {
++      unsigned int dirty;
++      unsigned int nbox;
++      struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
++      drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
++      int texAge;             /* last time texture was uploaded */
++      int ctxOwner;           /* last context to upload state */
++      int vertexPrim;
++
++      /*
++       * Below is for XvMC.
++       * We want the lock integers alone on, and aligned to, a cache line.
++       * Therefore this somewhat strange construct.
++       */
++
++      char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
++
++      unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
++      unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
++      unsigned int XvMCCtxNoGrabbed;  /* Last context to hold decoder */
++
++      /* Used by the 3d driver only at this point, for pageflipping:
++       */
++      unsigned int pfCurrentOffset;
++} drm_via_sarea_t;
++
++typedef struct _drm_via_cmdbuf_size {
++      enum {
++              VIA_CMDBUF_SPACE = 0x01,
++              VIA_CMDBUF_LAG = 0x02
++      } func;
++      int wait;
++      uint32_t size;
++} drm_via_cmdbuf_size_t;
++
++typedef enum {
++      VIA_IRQ_ABSOLUTE = 0x0,
++      VIA_IRQ_RELATIVE = 0x1,
++      VIA_IRQ_SIGNAL = 0x10000000,
++      VIA_IRQ_FORCE_SEQUENCE = 0x20000000
++} via_irq_seq_type_t;
++
++#define VIA_IRQ_FLAGS_MASK 0xF0000000
++
++enum drm_via_irqs {
++      drm_via_irq_hqv0 = 0,
++      drm_via_irq_hqv1,
++      drm_via_irq_dma0_dd,
++      drm_via_irq_dma0_td,
++      drm_via_irq_dma1_dd,
++      drm_via_irq_dma1_td,
++      drm_via_irq_num
++};
++
++struct drm_via_wait_irq_request {
++      unsigned irq;
++      via_irq_seq_type_t type;
++      uint32_t sequence;
++      uint32_t signal;
++};
++
++typedef union drm_via_irqwait {
++      struct drm_via_wait_irq_request request;
++      struct drm_wait_vblank_reply reply;
++} drm_via_irqwait_t;
++
++typedef struct drm_via_blitsync {
++      uint32_t sync_handle;
++      unsigned engine;
++} drm_via_blitsync_t;
++
++/*
++ * Below, "flags" is currently unused but will be used for possible future
++ * extensions like kernel space bounce buffers for bad alignments and
++ * blit engine busy-wait polling for better latency in the absence of
++ * interrupts.
++ */
++
++typedef struct drm_via_dmablit {
++      uint32_t num_lines;
++      uint32_t line_length;
++
++      uint32_t fb_addr;
++      uint32_t fb_stride;
++
++      unsigned char *mem_addr;
++      uint32_t mem_stride;
++
++      uint32_t flags;
++      int to_fb;
++
++      drm_via_blitsync_t sync;
++} drm_via_dmablit_t;
++
++
++#endif                                /* _VIA_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/via_drv.c git-nokia/drivers/gpu/drm-tungsten/via_drv.c
+--- git/drivers/gpu/drm-tungsten/via_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,157 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++#include "drm_pciids.h"
++
++
++static int dri_library_name(struct drm_device * dev, char * buf)
++{
++      return snprintf(buf, PAGE_SIZE, "unichrome\n");
++}
++
++static struct pci_device_id pciidlist[] = {
++      viadrv_PCI_IDS
++};
++
++
++#ifdef VIA_HAVE_FENCE
++extern struct drm_fence_driver via_fence_driver;
++#endif
++
++#ifdef VIA_HAVE_BUFFER
++
++/**
++ * If there's no thrashing. This is the preferred memory type order.
++ */
++static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
++
++/**
++ * If we have thrashing, most memory will be evicted to TT anyway, so we might as well
++ * just move the new buffer into TT from the start.
++ */
++static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
++
++
++static struct drm_bo_driver via_bo_driver = {
++      .mem_type_prio = via_mem_prios,
++      .mem_busy_prio = via_busy_prios,
++      .num_mem_type_prio = ARRAY_SIZE(via_mem_prios),
++      .num_mem_busy_prio = ARRAY_SIZE(via_busy_prios),
++      .create_ttm_backend_entry = via_create_ttm_backend_entry,
++      .fence_type = via_fence_types,
++      .invalidate_caches = via_invalidate_caches,
++      .init_mem_type = via_init_mem_type,
++      .evict_flags = via_evict_flags,
++      .move = NULL,
++      .ttm_cache_flush = NULL,
++      .command_stream_barrier = NULL
++};
++#endif
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static struct drm_driver driver = {
++      .driver_features =
++          DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
++          DRIVER_IRQ_SHARED,
++      .load = via_driver_load,
++      .unload = via_driver_unload,
++#ifndef VIA_HAVE_CORE_MM
++      .context_ctor = via_init_context,
++#endif
++      .context_dtor = via_final_context,
++      .get_vblank_counter = via_get_vblank_counter,
++      .enable_vblank = via_enable_vblank,
++      .disable_vblank = via_disable_vblank,
++      .irq_preinstall = via_driver_irq_preinstall,
++      .irq_postinstall = via_driver_irq_postinstall,
++      .irq_uninstall = via_driver_irq_uninstall,
++      .irq_handler = via_driver_irq_handler,
++      .dma_quiescent = via_driver_dma_quiescent,
++      .dri_library_name = dri_library_name,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .reclaim_buffers_locked = NULL,
++#ifdef VIA_HAVE_CORE_MM
++      .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
++      .lastclose = via_lastclose,
++#endif
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = via_ioctls,
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++              },
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++#ifdef VIA_HAVE_FENCE
++      .fence_driver = &via_fence_driver,
++#endif
++#ifdef VIA_HAVE_BUFFER
++      .bo_driver = &via_bo_driver,
++#endif
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = VIA_DRM_DRIVER_DATE,
++      .major = VIA_DRM_DRIVER_MAJOR,
++      .minor = VIA_DRM_DRIVER_MINOR,
++      .patchlevel = VIA_DRM_DRIVER_PATCHLEVEL
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init via_init(void)
++{
++      driver.num_ioctls = via_max_ioctl;
++
++      via_init_command_verifier();
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit via_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(via_init);
++module_exit(via_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nurd git/drivers/gpu/drm-tungsten/via_drv.h git-nokia/drivers/gpu/drm-tungsten/via_drv.h
+--- git/drivers/gpu/drm-tungsten/via_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,211 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++#ifndef _VIA_DRV_H_
++#define _VIA_DRV_H_
++
++#include "drm_sman.h"
++#define DRIVER_AUTHOR "Various"
++
++#define DRIVER_NAME           "via"
++#define DRIVER_DESC           "VIA Unichrome / Pro"
++
++#include "via_verifier.h"
++
++/*
++ * Registers go here.
++ */
++
++
++#define CMDBUF_ALIGNMENT_SIZE   (0x100)
++#define CMDBUF_ALIGNMENT_MASK   (0x0ff)
++
++/* defines for VIA 3D registers */
++#define VIA_REG_STATUS                0x400
++#define VIA_REG_TRANSET               0x43C
++#define VIA_REG_TRANSPACE       0x440
++
++/* VIA_REG_STATUS(0x400): Engine Status */
++#define VIA_CMD_RGTR_BUSY       0x00000080    /* Command Regulator is busy */
++#define VIA_2D_ENG_BUSY               0x00000001      /* 2D Engine is busy */
++#define VIA_3D_ENG_BUSY               0x00000002      /* 3D Engine is busy */
++#define VIA_VR_QUEUE_BUSY       0x00020000    /* Virtual Queue is busy */
++
++
++
++#if defined(__linux__)
++#include "via_dmablit.h"
++
++/*
++ * This define and all its references can be removed when
++ * the DMA blit code has been implemented for FreeBSD.
++ */
++#define VIA_HAVE_DMABLIT 1
++#define VIA_HAVE_CORE_MM 1
++#define VIA_HAVE_FENCE   1
++#define VIA_HAVE_BUFFER  1
++#endif
++
++#define VIA_PCI_BUF_SIZE 60000
++#define VIA_FIRE_BUF_SIZE  1024
++#define VIA_NUM_IRQS 4
++
++typedef struct drm_via_ring_buffer {
++      drm_local_map_t map;
++      char *virtual_start;
++} drm_via_ring_buffer_t;
++
++typedef uint32_t maskarray_t[5];
++
++typedef struct drm_via_irq {
++      atomic_t irq_received;
++      uint32_t pending_mask;
++      uint32_t enable_mask;
++      wait_queue_head_t irq_queue;
++} drm_via_irq_t;
++
++typedef struct drm_via_private {
++      drm_via_sarea_t *sarea_priv;
++      drm_local_map_t *sarea;
++      drm_local_map_t *fb;
++      drm_local_map_t *mmio;
++      unsigned long agpAddr;
++      wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
++      char *dma_ptr;
++      unsigned int dma_low;
++      unsigned int dma_high;
++      unsigned int dma_offset;
++      uint32_t dma_wrap;
++      volatile uint32_t *last_pause_ptr;
++      volatile uint32_t *hw_addr_ptr;
++      drm_via_ring_buffer_t ring;
++      struct timeval last_vblank;
++      int last_vblank_valid;
++      unsigned usec_per_vblank;
++      atomic_t vbl_received;
++      drm_via_state_t hc_state;
++      char pci_buf[VIA_PCI_BUF_SIZE];
++      const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
++      uint32_t num_fire_offsets;
++      int chipset;
++      drm_via_irq_t via_irqs[VIA_NUM_IRQS];
++      unsigned num_irqs;
++      maskarray_t *irq_masks;
++      uint32_t irq_enable_mask;
++      uint32_t irq_pending_mask;
++      int *irq_map;
++      /* Memory manager stuff */
++#ifdef VIA_HAVE_CORE_MM
++      unsigned int idle_fault;
++      struct drm_sman sman;
++      int vram_initialized;
++      int agp_initialized;
++      unsigned long vram_offset;
++      unsigned long agp_offset;
++#endif
++#ifdef VIA_HAVE_DMABLIT
++      drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
++#endif
++        uint32_t dma_diff;
++#ifdef VIA_HAVE_FENCE
++      spinlock_t fence_lock;
++      uint32_t emit_0_sequence;
++      int have_idlelock;
++      struct timer_list fence_timer;
++#endif
++} drm_via_private_t;
++
++enum via_family {
++  VIA_OTHER = 0,     /* Baseline */
++  VIA_PRO_GROUP_A,   /* Another video engine and DMA commands */
++  VIA_DX9_0          /* Same video as pro_group_a, but 3D is unsupported */
++};
++
++/* VIA MMIO register access */
++#define VIA_BASE ((dev_priv->mmio))
++
++#define VIA_READ(reg)         DRM_READ32(VIA_BASE, reg)
++#define VIA_WRITE(reg,val)    DRM_WRITE32(VIA_BASE, reg, val)
++#define VIA_READ8(reg)                DRM_READ8(VIA_BASE, reg)
++#define VIA_WRITE8(reg,val)   DRM_WRITE8(VIA_BASE, reg, val)
++
++extern struct drm_ioctl_desc via_ioctls[];
++extern int via_max_ioctl;
++
++extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
++extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
++
++extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
++extern int via_driver_unload(struct drm_device *dev);
++extern int via_final_context(struct drm_device * dev, int context);
++
++extern int via_do_cleanup_map(struct drm_device * dev);
++extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
++extern int via_enable_vblank(struct drm_device *dev, int crtc);
++extern void via_disable_vblank(struct drm_device *dev, int crtc);
++
++extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
++extern void via_driver_irq_preinstall(struct drm_device * dev);
++extern int via_driver_irq_postinstall(struct drm_device * dev);
++extern void via_driver_irq_uninstall(struct drm_device * dev);
++
++extern int via_dma_cleanup(struct drm_device * dev);
++extern void via_init_command_verifier(void);
++extern int via_driver_dma_quiescent(struct drm_device * dev);
++extern void via_init_futex(drm_via_private_t *dev_priv);
++extern void via_cleanup_futex(drm_via_private_t *dev_priv);
++extern void via_release_futex(drm_via_private_t *dev_priv, int context);
++
++#ifdef VIA_HAVE_CORE_MM
++extern void via_reclaim_buffers_locked(struct drm_device *dev,
++                                     struct drm_file *file_priv);
++extern void via_lastclose(struct drm_device *dev);
++#else
++extern int via_init_context(struct drm_device * dev, int context);
++#endif
++
++#ifdef VIA_HAVE_DMABLIT
++extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
++extern void via_init_dmablit(struct drm_device *dev);
++#endif
++
++#ifdef VIA_HAVE_BUFFER
++extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
++extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
++                         uint32_t *type);
++extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
++extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
++                             struct drm_mem_type_manager *man);
++extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
++extern int via_move(struct drm_buffer_object *bo, int evict,
++              int no_wait, struct drm_bo_mem_reg *new_mem);
++#endif
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_fence.c git-nokia/drivers/gpu/drm-tungsten/via_fence.c
+--- git/drivers/gpu/drm-tungsten/via_fence.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_fence.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,169 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++/*
++ * DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted.
++ * DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
++ */
++
++static void via_fence_poll(struct drm_device *dev, uint32_t class,
++                         uint32_t waiting_types)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      uint32_t signaled_flush_types = 0;
++      uint32_t status;
++
++      if (class != 0)
++              return;
++
++      if (unlikely(!dev_priv))
++              return;
++
++      spin_lock(&dev_priv->fence_lock);
++      if (waiting_types) {
++
++              /*
++               * Take the idlelock. This guarantees that the next time a client tries
++               * to grab the lock, it will stall until the idlelock is released. This
++               * guarantees that eventually, the GPU engines will be idle, but nothing
++               * else. It cannot be used to protect the hardware.
++               */
++
++
++              if (!dev_priv->have_idlelock) {
++                      drm_idlelock_take(&dev->lock);
++                      dev_priv->have_idlelock = 1;
++              }
++
++              /*
++               * Check if AGP command reader is idle.
++               */
++
++              if (waiting_types & DRM_FENCE_TYPE_EXE)
++                      if (VIA_READ(0x41C) & 0x80000000)
++                              signaled_flush_types |= DRM_FENCE_TYPE_EXE;
++
++              /*
++               * Check VRAM command queue empty and 2D + 3D engines idle.
++               */
++
++              if (waiting_types & DRM_VIA_FENCE_TYPE_ACCEL) {
++                      status = VIA_READ(VIA_REG_STATUS);
++                      if ((status & VIA_VR_QUEUE_BUSY) &&
++                          !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
++                              signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL;
++              }
++
++              if (signaled_flush_types) {
++                      waiting_types &= ~signaled_flush_types;
++                      if (!waiting_types && dev_priv->have_idlelock) {
++                              drm_idlelock_release(&dev->lock);
++                              dev_priv->have_idlelock = 0;
++                      }
++                      drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
++                                        signaled_flush_types, 0);
++              }
++      }
++
++      spin_unlock(&dev_priv->fence_lock);
++
++      return;
++}
++
++
++/**
++ * Emit a fence sequence.
++ */
++
++static int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
++                                 uint32_t * sequence, uint32_t * native_type)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      int ret = 0;
++
++      if (!dev_priv)
++              return -EINVAL;
++
++      switch(class) {
++      case 0: /* AGP command stream */
++
++              /*
++               * The sequence number isn't really used by the hardware yet.
++               */
++
++              spin_lock(&dev_priv->fence_lock);
++              *sequence = ++dev_priv->emit_0_sequence;
++              spin_unlock(&dev_priv->fence_lock);
++
++              /*
++               * When drm_fence_handler() is called with flush type 0x01, and a
++               * sequence number, That means that the EXE flag is expired.
++               * Nothing else. No implicit flushing or other engines idle.
++               */
++
++              *native_type = DRM_FENCE_TYPE_EXE;
++              break;
++      default:
++              ret = -EINVAL;
++              break;
++      }
++      return ret;
++}
++
++/**
++ * No irq fence expirations implemented yet.
++ * Although both the HQV engines and PCI dmablit engines signal
++ * idle with an IRQ, we haven't implemented this yet.
++ * This means that the drm fence manager will always poll for engine idle,
++ * unless the caller wanting to wait for a fence object has indicated a lazy wait.
++ */
++
++static int via_fence_has_irq(struct drm_device * dev, uint32_t class,
++                           uint32_t flags)
++{
++      return 0;
++}
++
++struct drm_fence_driver via_fence_driver = {
++      .num_classes = 1,
++      .wrap_diff = (1 << 30),
++      .flush_diff = (1 << 20),
++      .sequence_mask = 0xffffffffU,
++      .has_irq = via_fence_has_irq,
++      .emit = via_fence_emit_sequence,
++      .poll = via_fence_poll,
++      .needed_flush = NULL,
++      .wait = NULL
++};
+diff -Nurd git/drivers/gpu/drm-tungsten/via_irq.c git-nokia/drivers/gpu/drm-tungsten/via_irq.c
+--- git/drivers/gpu/drm-tungsten/via_irq.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_irq.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,403 @@
++/* via_irq.c
++ *
++ * Copyright 2004 BEAM Ltd.
++ * Copyright 2002 Tungsten Graphics, Inc.
++ * Copyright 2005 Thomas Hellstrom.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * BEAM LTD, TUNGSTEN GRAPHICS  AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Terry Barnaby <terry1@beam.ltd.uk>
++ *    Keith Whitwell <keith@tungstengraphics.com>
++ *    Thomas Hellstrom <unichrome@shipmail.org>
++ *
++ * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
++ * interrupt, as well as an infrastructure to handle other interrupts of the chip.
++ * The refresh rate is also calculated for video playback sync purposes.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++#define VIA_REG_INTERRUPT       0x200
++
++/* VIA_REG_INTERRUPT */
++#define VIA_IRQ_GLOBAL          (1 << 31)
++#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
++#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
++#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
++#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
++#define VIA_IRQ_HQV0_PENDING    (1 << 9)
++#define VIA_IRQ_HQV1_PENDING    (1 << 10)
++#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
++#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
++#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
++#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
++#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
++#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
++#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
++#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
++
++
++/*
++ * Device-specific IRQs go here. This type might need to be extended with
++ * the register if there are multiple IRQ control registers.
++ * Currently we activate the HQV interrupts of  Unichrome Pro group A.
++ */
++
++static maskarray_t via_pro_group_a_irqs[] = {
++      {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
++       0x00000000 },
++      {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
++       0x00000000 },
++      {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
++      {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
++};
++static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
++static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
++
++static maskarray_t via_unichrome_irqs[] = {
++      {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
++      {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
++       VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
++};
++static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
++static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
++
++
++static unsigned time_diff(struct timeval *now,struct timeval *then)
++{
++      return (now->tv_usec >= then->tv_usec) ?
++              now->tv_usec - then->tv_usec :
++              1000000 - (then->tv_usec - now->tv_usec);
++}
++
++u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      if (crtc != 0)
++              return 0;
++
++      return atomic_read(&dev_priv->vbl_received);
++}
++
++irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++      int handled = 0;
++      struct timeval cur_vblank;
++      drm_via_irq_t *cur_irq = dev_priv->via_irqs;
++      int i;
++
++      status = VIA_READ(VIA_REG_INTERRUPT);
++      if (status & VIA_IRQ_VBLANK_PENDING) {
++              atomic_inc(&dev_priv->vbl_received);
++              if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
++#ifdef __linux__
++                      do_gettimeofday(&cur_vblank);
++#else
++                      microtime(&cur_vblank);
++#endif
++                      if (dev_priv->last_vblank_valid) {
++                              dev_priv->usec_per_vblank =
++                                      time_diff(&cur_vblank,
++                                                &dev_priv->last_vblank) >> 4;
++                      }
++                      dev_priv->last_vblank = cur_vblank;
++                      dev_priv->last_vblank_valid = 1;
++              }
++              if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
++                      DRM_DEBUG("US per vblank is: %u\n",
++                                dev_priv->usec_per_vblank);
++              }
++              drm_handle_vblank(dev, 0);
++              handled = 1;
++      }
++
++      for (i = 0; i < dev_priv->num_irqs; ++i) {
++              if (status & cur_irq->pending_mask) {
++                      atomic_inc(&cur_irq->irq_received);
++                      DRM_WAKEUP(&cur_irq->irq_queue);
++                      handled = 1;
++#ifdef VIA_HAVE_DMABLIT
++                      if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
++                              via_dmablit_handler(dev, 0, 1);
++                      } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
++                              via_dmablit_handler(dev, 1, 1);
++                      }
++#endif
++              }
++              cur_irq++;
++      }
++
++      /* Acknowlege interrupts */
++      VIA_WRITE(VIA_REG_INTERRUPT, status);
++
++
++      if (handled)
++              return IRQ_HANDLED;
++      else
++              return IRQ_NONE;
++}
++
++static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
++{
++      u32 status;
++
++      if (dev_priv) {
++              /* Acknowlege interrupts */
++              status = VIA_READ(VIA_REG_INTERRUPT);
++              VIA_WRITE(VIA_REG_INTERRUPT, status |
++                        dev_priv->irq_pending_mask);
++      }
++}
++
++int via_enable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      u32 status;
++
++      if (crtc != 0) {
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++              return -EINVAL;
++      }
++
++      status = VIA_READ(VIA_REG_INTERRUPT);
++      VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
++
++      VIA_WRITE8(0x83d4, 0x11);
++      VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
++
++      return 0;
++}
++
++void via_disable_vblank(struct drm_device *dev, int crtc)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++      VIA_WRITE8(0x83d4, 0x11);
++      VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
++
++      if (crtc != 0)
++              DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
++}
++
++static int
++via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
++                  unsigned int *sequence)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      unsigned int cur_irq_sequence;
++      drm_via_irq_t *cur_irq;
++      int ret = 0;
++      maskarray_t *masks;
++      int real_irq;
++
++      DRM_DEBUG("\n");
++
++      if (!dev_priv) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      if (irq >= drm_via_irq_num) {
++              DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
++              return -EINVAL;
++      }
++
++      real_irq = dev_priv->irq_map[irq];
++
++      if (real_irq < 0) {
++              DRM_ERROR("Video IRQ %d not available on this hardware.\n",
++                        irq);
++              return -EINVAL;
++      }
++
++      masks = dev_priv->irq_masks;
++      cur_irq = dev_priv->via_irqs + real_irq;
++
++      if (masks[real_irq][2] && !force_sequence) {
++              DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
++                          ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
++                           masks[irq][4]));
++              cur_irq_sequence = atomic_read(&cur_irq->irq_received);
++      } else {
++              DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
++                          (((cur_irq_sequence =
++                             atomic_read(&cur_irq->irq_received)) -
++                            *sequence) <= (1 << 23)));
++      }
++      *sequence = cur_irq_sequence;
++      return ret;
++}
++
++
++/*
++ * drm_dma.h hooks
++ */
++
++void via_driver_irq_preinstall(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++      drm_via_irq_t *cur_irq;
++      int i;
++
++      DRM_DEBUG("dev_priv: %p\n", dev_priv);
++      if (dev_priv) {
++              cur_irq = dev_priv->via_irqs;
++
++              dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
++              dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
++
++              if (dev_priv->chipset == VIA_PRO_GROUP_A ||
++                  dev_priv->chipset == VIA_DX9_0) {
++                      dev_priv->irq_masks = via_pro_group_a_irqs;
++                      dev_priv->num_irqs = via_num_pro_group_a;
++                      dev_priv->irq_map = via_irqmap_pro_group_a;
++              } else {
++                      dev_priv->irq_masks = via_unichrome_irqs;
++                      dev_priv->num_irqs = via_num_unichrome;
++                      dev_priv->irq_map = via_irqmap_unichrome;
++              }
++
++              for (i = 0; i < dev_priv->num_irqs; ++i) {
++                      atomic_set(&cur_irq->irq_received, 0);
++                      cur_irq->enable_mask = dev_priv->irq_masks[i][0];
++                      cur_irq->pending_mask = dev_priv->irq_masks[i][1];
++                      DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
++                      dev_priv->irq_enable_mask |= cur_irq->enable_mask;
++                      dev_priv->irq_pending_mask |= cur_irq->pending_mask;
++                      cur_irq++;
++
++                      DRM_DEBUG("Initializing IRQ %d\n", i);
++              }
++
++              dev_priv->last_vblank_valid = 0;
++
++              /* Clear VSync interrupt regs */
++              status = VIA_READ(VIA_REG_INTERRUPT);
++              VIA_WRITE(VIA_REG_INTERRUPT, status &
++                        ~(dev_priv->irq_enable_mask));
++
++              /* Clear bits if they're already high */
++              viadrv_acknowledge_irqs(dev_priv);
++      }
++}
++
++int via_driver_irq_postinstall(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++
++      DRM_DEBUG("via_driver_irq_postinstall\n");
++      if (!dev_priv)
++              return -EINVAL;
++
++      drm_vblank_init(dev, 1);
++      status = VIA_READ(VIA_REG_INTERRUPT);
++      VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
++                | dev_priv->irq_enable_mask);
++
++      /* Some magic, oh for some data sheets ! */
++      VIA_WRITE8(0x83d4, 0x11);
++      VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
++
++      return 0;
++}
++
++void via_driver_irq_uninstall(struct drm_device * dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      u32 status;
++
++      DRM_DEBUG("\n");
++      if (dev_priv) {
++
++              /* Some more magic, oh for some data sheets ! */
++
++              VIA_WRITE8(0x83d4, 0x11);
++              VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
++
++              status = VIA_READ(VIA_REG_INTERRUPT);
++              VIA_WRITE(VIA_REG_INTERRUPT, status &
++                        ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
++      }
++}
++
++int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_irqwait_t *irqwait = data;
++      struct timeval now;
++      int ret = 0;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_irq_t *cur_irq = dev_priv->via_irqs;
++      int force_sequence;
++
++      if (!dev->irq)
++              return -EINVAL;
++
++      if (irqwait->request.irq >= dev_priv->num_irqs) {
++              DRM_ERROR("Trying to wait on unknown irq %d\n",
++                        irqwait->request.irq);
++              return -EINVAL;
++      }
++
++      cur_irq += irqwait->request.irq;
++
++      switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
++      case VIA_IRQ_RELATIVE:
++              irqwait->request.sequence +=
++                      atomic_read(&cur_irq->irq_received);
++              irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
++      case VIA_IRQ_ABSOLUTE:
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      if (irqwait->request.type & VIA_IRQ_SIGNAL) {
++              DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
++              return -EINVAL;
++      }
++
++      force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
++
++      ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
++                                &irqwait->request.sequence);
++#ifdef __linux__
++      do_gettimeofday(&now);
++#else
++      microtime(&now);
++#endif
++      irqwait->reply.tval_sec = now.tv_sec;
++      irqwait->reply.tval_usec = now.tv_usec;
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_map.c git-nokia/drivers/gpu/drm-tungsten/via_map.c
+--- git/drivers/gpu/drm-tungsten/via_map.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_map.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,139 @@
++/*
++ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
++ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      dev_priv->sarea = drm_getsarea(dev);
++      if (!dev_priv->sarea) {
++              DRM_ERROR("could not find sarea!\n");
++              dev->dev_private = (void *)dev_priv;
++              via_do_cleanup_map(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
++      if (!dev_priv->fb) {
++              DRM_ERROR("could not find framebuffer!\n");
++              dev->dev_private = (void *)dev_priv;
++              via_do_cleanup_map(dev);
++              return -EINVAL;
++      }
++      dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
++      if (!dev_priv->mmio) {
++              DRM_ERROR("could not find mmio region!\n");
++              dev->dev_private = (void *)dev_priv;
++              via_do_cleanup_map(dev);
++              return -EINVAL;
++      }
++
++      dev_priv->sarea_priv =
++          (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
++                               init->sarea_priv_offset);
++
++      dev_priv->agpAddr = init->agpAddr;
++
++      via_init_futex( dev_priv );
++#ifdef VIA_HAVE_DMABLIT
++      via_init_dmablit( dev );
++#endif
++#ifdef VIA_HAVE_FENCE
++      dev_priv->emit_0_sequence = 0;
++      dev_priv->have_idlelock = 0;
++      spin_lock_init(&dev_priv->fence_lock);
++#endif /* VIA_HAVE_FENCE */
++      dev->dev_private = (void *)dev_priv;
++#ifdef VIA_HAVE_BUFFER
++      ret = drm_bo_driver_init(dev);
++      if (ret)
++              DRM_ERROR("Could not initialize buffer object driver.\n");
++#endif
++      return ret;
++
++}
++
++int via_do_cleanup_map(struct drm_device * dev)
++{
++      via_dma_cleanup(dev);
++
++      return 0;
++}
++
++
++int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_init_t *init = data;
++
++      DRM_DEBUG("\n");
++
++      switch (init->func) {
++      case VIA_INIT_MAP:
++              return via_do_init_map(dev, init);
++      case VIA_CLEANUP_MAP:
++              return via_do_cleanup_map(dev);
++      }
++
++      return -EINVAL;
++}
++
++int via_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++      drm_via_private_t *dev_priv;
++      int ret = 0;
++
++      dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
++      if (dev_priv == NULL)
++              return -ENOMEM;
++
++      dev->dev_private = (void *)dev_priv;
++
++      dev_priv->chipset = chipset;
++
++#ifdef VIA_HAVE_CORE_MM
++      ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
++      if (ret) {
++              drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
++      }
++#endif
++      return ret;
++}
++
++int via_driver_unload(struct drm_device *dev)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++#ifdef VIA_HAVE_CORE_MM
++      drm_sman_takedown(&dev_priv->sman);
++#endif
++      drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_mm.c git-nokia/drivers/gpu/drm-tungsten/via_mm.c
+--- git/drivers/gpu/drm-tungsten/via_mm.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_mm.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,196 @@
++/*
++ * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
++ * All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++/*
++ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++#include "drm_sman.h"
++
++#define VIA_MM_ALIGN_SHIFT 4
++#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
++
++int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_agp_t *agp = data;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
++                               agp->size >> VIA_MM_ALIGN_SHIFT);
++
++      if (ret) {
++              DRM_ERROR("AGP memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->agp_initialized = 1;
++      dev_priv->agp_offset = agp->offset;
++      mutex_unlock(&dev->struct_mutex);
++
++      DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
++      return 0;
++}
++
++int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_fb_t *fb = data;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
++                               fb->size >> VIA_MM_ALIGN_SHIFT);
++
++      if (ret) {
++              DRM_ERROR("VRAM memory manager initialisation error\n");
++              mutex_unlock(&dev->struct_mutex);
++              return ret;
++      }
++
++      dev_priv->vram_initialized = 1;
++      dev_priv->vram_offset = fb->offset;
++
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
++
++      return 0;
++
++}
++
++int via_final_context(struct drm_device *dev, int context)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      via_release_futex(dev_priv, context);
++
++#if defined(__linux__)
++      /* Linux specific until context tracking code gets ported to BSD */
++      /* Last context, perform cleanup */
++      if (dev->ctx_count == 1 && dev->dev_private) {
++              DRM_DEBUG("Last Context\n");
++              if (dev->irq)
++                      drm_irq_uninstall(dev);
++              via_cleanup_futex(dev_priv);
++              via_do_cleanup_map(dev);
++      }
++#endif
++      return 1;
++}
++
++void via_lastclose(struct drm_device *dev)
++{
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++
++      if (!dev_priv)
++              return;
++
++      mutex_lock(&dev->struct_mutex);
++      drm_sman_cleanup(&dev_priv->sman);
++      dev_priv->vram_initialized = 0;
++      dev_priv->agp_initialized = 0;
++      mutex_unlock(&dev->struct_mutex);
++}
++
++int via_mem_alloc(struct drm_device *dev, void *data,
++                struct drm_file *file_priv)
++{
++      drm_via_mem_t *mem = data;
++      int retval = 0;
++      struct drm_memblock_item *item;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      unsigned long tmpSize;
++
++      if (mem->type > VIA_MEM_AGP) {
++              DRM_ERROR("Unknown memory type allocation\n");
++              return -EINVAL;
++      }
++      mutex_lock(&dev->struct_mutex);
++      if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
++                    dev_priv->agp_initialized)) {
++              DRM_ERROR
++                  ("Attempt to allocate from uninitialized memory manager.\n");
++              mutex_unlock(&dev->struct_mutex);
++              return -EINVAL;
++      }
++
++      tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
++      item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
++                            (unsigned long)file_priv);
++      mutex_unlock(&dev->struct_mutex);
++      if (item) {
++              mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
++                            dev_priv->vram_offset : dev_priv->agp_offset) +
++                  (item->mm->
++                   offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
++              mem->index = item->user_hash.key;
++      } else {
++              mem->offset = 0;
++              mem->size = 0;
++              mem->index = 0;
++              DRM_DEBUG("Video memory allocation failed\n");
++              retval = -ENOMEM;
++      }
++
++      return retval;
++}
++
++int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++      drm_via_mem_t *mem = data;
++      int ret;
++
++      mutex_lock(&dev->struct_mutex);
++      ret = drm_sman_free_key(&dev_priv->sman, mem->index);
++      mutex_unlock(&dev->struct_mutex);
++      DRM_DEBUG("free = 0x%lx\n", mem->index);
++
++      return ret;
++}
++
++
++void via_reclaim_buffers_locked(struct drm_device * dev,
++                              struct drm_file *file_priv)
++{
++      drm_via_private_t *dev_priv = dev->dev_private;
++
++      mutex_lock(&dev->struct_mutex);
++      if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
++              mutex_unlock(&dev->struct_mutex);
++              return;
++      }
++
++      if (dev->driver->dma_quiescent) {
++              dev->driver->dma_quiescent(dev);
++      }
++
++      drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
++      mutex_unlock(&dev->struct_mutex);
++      return;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_verifier.c git-nokia/drivers/gpu/drm-tungsten/via_verifier.c
+--- git/drivers/gpu/drm-tungsten/via_verifier.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_verifier.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1121 @@
++/*
++ * Copyright 2004 The Unichrome Project. All Rights Reserved.
++ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Thomas Hellstrom 2004, 2005.
++ * This code was written using docs obtained under NDA from VIA Inc.
++ *
++ * Don't run this code directly on an AGP buffer. Due to cache problems it will
++ * be very slow.
++ */
++
++#include "via_3d_reg.h"
++#include "drmP.h"
++#include "drm.h"
++#include "via_drm.h"
++#include "via_verifier.h"
++#include "via_drv.h"
++
++typedef enum {
++      state_command,
++      state_header2,
++      state_header1,
++      state_vheader5,
++      state_vheader6,
++      state_error
++} verifier_state_t;
++
++typedef enum {
++      no_check = 0,
++      check_for_header2,
++      check_for_header1,
++      check_for_header2_err,
++      check_for_header1_err,
++      check_for_fire,
++      check_z_buffer_addr0,
++      check_z_buffer_addr1,
++      check_z_buffer_addr_mode,
++      check_destination_addr0,
++      check_destination_addr1,
++      check_destination_addr_mode,
++      check_for_dummy,
++      check_for_dd,
++      check_texture_addr0,
++      check_texture_addr1,
++      check_texture_addr2,
++      check_texture_addr3,
++      check_texture_addr4,
++      check_texture_addr5,
++      check_texture_addr6,
++      check_texture_addr7,
++      check_texture_addr8,
++      check_texture_addr_mode,
++      check_for_vertex_count,
++      check_number_texunits,
++      forbidden_command
++} hazard_t;
++
++/*
++ * Associates each hazard above with a possible multi-command
++ * sequence. For example an address that is split over multiple
++ * commands and that needs to be checked at the first command
++ * that does not include any part of the address.
++ */
++
++static drm_via_sequence_t seqs[] = {
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      no_sequence,
++      z_address,
++      z_address,
++      z_address,
++      dest_address,
++      dest_address,
++      dest_address,
++      no_sequence,
++      no_sequence,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      tex_address,
++      no_sequence
++};
++
++typedef struct {
++      unsigned int code;
++      hazard_t hz;
++} hz_init_t;
++
++static hz_init_t init_table1[] = {
++      {0xf2, check_for_header2_err},
++      {0xf0, check_for_header1_err},
++      {0xee, check_for_fire},
++      {0xcc, check_for_dummy},
++      {0xdd, check_for_dd},
++      {0x00, no_check},
++      {0x10, check_z_buffer_addr0},
++      {0x11, check_z_buffer_addr1},
++      {0x12, check_z_buffer_addr_mode},
++      {0x13, no_check},
++      {0x14, no_check},
++      {0x15, no_check},
++      {0x23, no_check},
++      {0x24, no_check},
++      {0x33, no_check},
++      {0x34, no_check},
++      {0x35, no_check},
++      {0x36, no_check},
++      {0x37, no_check},
++      {0x38, no_check},
++      {0x39, no_check},
++      {0x3A, no_check},
++      {0x3B, no_check},
++      {0x3C, no_check},
++      {0x3D, no_check},
++      {0x3E, no_check},
++      {0x40, check_destination_addr0},
++      {0x41, check_destination_addr1},
++      {0x42, check_destination_addr_mode},
++      {0x43, no_check},
++      {0x44, no_check},
++      {0x50, no_check},
++      {0x51, no_check},
++      {0x52, no_check},
++      {0x53, no_check},
++      {0x54, no_check},
++      {0x55, no_check},
++      {0x56, no_check},
++      {0x57, no_check},
++      {0x58, no_check},
++      {0x70, no_check},
++      {0x71, no_check},
++      {0x78, no_check},
++      {0x79, no_check},
++      {0x7A, no_check},
++      {0x7B, no_check},
++      {0x7C, no_check},
++      {0x7D, check_for_vertex_count}
++};
++
++static hz_init_t init_table2[] = {
++      {0xf2, check_for_header2_err},
++      {0xf0, check_for_header1_err},
++      {0xee, check_for_fire},
++      {0xcc, check_for_dummy},
++      {0x00, check_texture_addr0},
++      {0x01, check_texture_addr0},
++      {0x02, check_texture_addr0},
++      {0x03, check_texture_addr0},
++      {0x04, check_texture_addr0},
++      {0x05, check_texture_addr0},
++      {0x06, check_texture_addr0},
++      {0x07, check_texture_addr0},
++      {0x08, check_texture_addr0},
++      {0x09, check_texture_addr0},
++      {0x20, check_texture_addr1},
++      {0x21, check_texture_addr1},
++      {0x22, check_texture_addr1},
++      {0x23, check_texture_addr4},
++      {0x2B, check_texture_addr3},
++      {0x2C, check_texture_addr3},
++      {0x2D, check_texture_addr3},
++      {0x2E, check_texture_addr3},
++      {0x2F, check_texture_addr3},
++      {0x30, check_texture_addr3},
++      {0x31, check_texture_addr3},
++      {0x32, check_texture_addr3},
++      {0x33, check_texture_addr3},
++      {0x34, check_texture_addr3},
++      {0x4B, check_texture_addr5},
++      {0x4C, check_texture_addr6},
++      {0x51, check_texture_addr7},
++      {0x52, check_texture_addr8},
++      {0x77, check_texture_addr2},
++      {0x78, no_check},
++      {0x79, no_check},
++      {0x7A, no_check},
++      {0x7B, check_texture_addr_mode},
++      {0x7C, no_check},
++      {0x7D, no_check},
++      {0x7E, no_check},
++      {0x7F, no_check},
++      {0x80, no_check},
++      {0x81, no_check},
++      {0x82, no_check},
++      {0x83, no_check},
++      {0x85, no_check},
++      {0x86, no_check},
++      {0x87, no_check},
++      {0x88, no_check},
++      {0x89, no_check},
++      {0x8A, no_check},
++      {0x90, no_check},
++      {0x91, no_check},
++      {0x92, no_check},
++      {0x93, no_check}
++};
++
++static hz_init_t init_table3[] = {
++      {0xf2, check_for_header2_err},
++      {0xf0, check_for_header1_err},
++      {0xcc, check_for_dummy},
++      {0x00, check_number_texunits}
++};
++
++static hazard_t table1[256];
++static hazard_t table2[256];
++static hazard_t table3[256];
++
++static __inline__ int
++eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
++{
++      if ((buf_end - *buf) >= num_words) {
++              *buf += num_words;
++              return 0;
++      }
++      DRM_ERROR("Illegal termination of DMA command buffer\n");
++      return 1;
++}
++
++/*
++ * Partially stolen from drm_memory.h
++ */
++
++static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
++                                                        unsigned long offset,
++                                                        unsigned long size,
++                                                        struct drm_device *dev)
++{
++#ifdef __linux__
++      struct drm_map_list *r_list;
++#endif
++      drm_local_map_t *map = seq->map_cache;
++
++      if (map && map->offset <= offset
++          && (offset + size) <= (map->offset + map->size)) {
++              return map;
++      }
++#ifdef __linux__
++      list_for_each_entry(r_list, &dev->maplist, head) {
++              map = r_list->map;
++              if (!map)
++                      continue;
++#else
++      TAILQ_FOREACH(map, &dev->maplist, link) {
++#endif
++              if (map->offset <= offset
++                  && (offset + size) <= (map->offset + map->size)
++                  && !(map->flags & _DRM_RESTRICTED)
++                  && (map->type == _DRM_AGP)) {
++                      seq->map_cache = map;
++                      return map;
++              }
++      }
++      return NULL;
++}
++
++/*
++ * Require that all AGP texture levels reside in the same AGP map which should
++ * be mappable by the client. This is not a big restriction.
++ * FIXME: To actually enforce this security policy strictly, drm_rmmap
++ * would have to wait for dma quiescent before removing an AGP map.
++ * The via_drm_lookup_agp_map call in reality seems to take
++ * very little CPU time.
++ */
++
++static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
++{
++      switch (cur_seq->unfinished) {
++      case z_address:
++              DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
++              break;
++      case dest_address:
++              DRM_DEBUG("Destination start address is 0x%x\n",
++                        cur_seq->d_addr);
++              break;
++      case tex_address:
++              if (cur_seq->agp_texture) {
++                      unsigned start =
++                          cur_seq->tex_level_lo[cur_seq->texture];
++                      unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
++                      unsigned long lo = ~0, hi = 0, tmp;
++                      uint32_t *addr, *pitch, *height, tex;
++                      unsigned i;
++                      int npot;
++
++                      if (end > 9)
++                              end = 9;
++                      if (start > 9)
++                              start = 9;
++
++                      addr =
++                          &(cur_seq->t_addr[tex = cur_seq->texture][start]);
++                      pitch = &(cur_seq->pitch[tex][start]);
++                      height = &(cur_seq->height[tex][start]);
++                      npot = cur_seq->tex_npot[tex];
++                      for (i = start; i <= end; ++i) {
++                              tmp = *addr++;
++                              if (tmp < lo)
++                                      lo = tmp;
++                              if (i == 0 && npot)
++                                      tmp += (*height++ * *pitch++);
++                              else
++                                      tmp += (*height++ << *pitch++);
++                              if (tmp > hi)
++                                      hi = tmp;
++                      }
++
++                      if (!via_drm_lookup_agp_map
++                          (cur_seq, lo, hi - lo, cur_seq->dev)) {
++                              DRM_ERROR
++                                  ("AGP texture is not in allowed map\n");
++                              return 2;
++                      }
++              }
++              break;
++      default:
++              break;
++      }
++      cur_seq->unfinished = no_sequence;
++      return 0;
++}
++
++static __inline__ int
++investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
++{
++      register uint32_t tmp, *tmp_addr;
++
++      if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
++              int ret;
++              if ((ret = finish_current_sequence(cur_seq)))
++                      return ret;
++      }
++
++      switch (hz) {
++      case check_for_header2:
++              if (cmd == HALCYON_HEADER2)
++                      return 1;
++              return 0;
++      case check_for_header1:
++              if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                      return 1;
++              return 0;
++      case check_for_header2_err:
++              if (cmd == HALCYON_HEADER2)
++                      return 1;
++              DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
++              break;
++      case check_for_header1_err:
++              if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                      return 1;
++              DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
++              break;
++      case check_for_fire:
++              if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
++                      return 1;
++              DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
++              break;
++      case check_for_dummy:
++              if (HC_DUMMY == cmd)
++                      return 0;
++              DRM_ERROR("Illegal DMA HC_DUMMY command\n");
++              break;
++      case check_for_dd:
++              if (0xdddddddd == cmd)
++                      return 0;
++              DRM_ERROR("Illegal DMA 0xdddddddd command\n");
++              break;
++      case check_z_buffer_addr0:
++              cur_seq->unfinished = z_address;
++              cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
++                  (cmd & 0x00FFFFFF);
++              return 0;
++      case check_z_buffer_addr1:
++              cur_seq->unfinished = z_address;
++              cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
++                  ((cmd & 0xFF) << 24);
++              return 0;
++      case check_z_buffer_addr_mode:
++              cur_seq->unfinished = z_address;
++              if ((cmd & 0x0000C000) == 0)
++                      return 0;
++              DRM_ERROR("Attempt to place Z buffer in system memory\n");
++              return 2;
++      case check_destination_addr0:
++              cur_seq->unfinished = dest_address;
++              cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
++                  (cmd & 0x00FFFFFF);
++              return 0;
++      case check_destination_addr1:
++              cur_seq->unfinished = dest_address;
++              cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
++                  ((cmd & 0xFF) << 24);
++              return 0;
++      case check_destination_addr_mode:
++              cur_seq->unfinished = dest_address;
++              if ((cmd & 0x0000C000) == 0)
++                      return 0;
++              DRM_ERROR
++                  ("Attempt to place 3D drawing buffer in system memory\n");
++              return 2;
++      case check_texture_addr0:
++              cur_seq->unfinished = tex_address;
++              tmp = (cmd >> 24);
++              tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
++              *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
++              return 0;
++      case check_texture_addr1:
++              cur_seq->unfinished = tex_address;
++              tmp = ((cmd >> 24) - 0x20);
++              tmp += tmp << 1;
++              tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
++              tmp_addr++;
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
++              tmp_addr++;
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
++              return 0;
++      case check_texture_addr2:
++              cur_seq->unfinished = tex_address;
++              cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
++              cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
++              return 0;
++      case check_texture_addr3:
++              cur_seq->unfinished = tex_address;
++              tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
++              if (tmp == 0 &&
++                  (cmd & HC_HTXnEnPit_MASK)) {
++                      cur_seq->pitch[cur_seq->texture][tmp] =
++                              (cmd & HC_HTXnLnPit_MASK);
++                      cur_seq->tex_npot[cur_seq->texture] = 1;
++              } else {
++                      cur_seq->pitch[cur_seq->texture][tmp] =
++                              (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
++                      cur_seq->tex_npot[cur_seq->texture] = 0;
++                      if (cmd & 0x000FFFFF) {
++                              DRM_ERROR
++                                      ("Unimplemented texture level 0 pitch mode.\n");
++                              return 2;
++                      }
++              }
++              return 0;
++      case check_texture_addr4:
++              cur_seq->unfinished = tex_address;
++              tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
++              *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
++              return 0;
++      case check_texture_addr5:
++      case check_texture_addr6:
++              cur_seq->unfinished = tex_address;
++              /*
++               * Texture width. We don't care since we have the pitch.
++               */
++              return 0;
++      case check_texture_addr7:
++              cur_seq->unfinished = tex_address;
++              tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
++              tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
++              tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
++              tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
++              tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
++              tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
++              tmp_addr[0] = 1 << (cmd & 0x0000000F);
++              return 0;
++      case check_texture_addr8:
++              cur_seq->unfinished = tex_address;
++              tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
++              tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
++              tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
++              tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
++              tmp_addr[6] = 1 << (cmd & 0x0000000F);
++              return 0;
++      case check_texture_addr_mode:
++              cur_seq->unfinished = tex_address;
++              if (2 == (tmp = cmd & 0x00000003)) {
++                      DRM_ERROR
++                          ("Attempt to fetch texture from system memory.\n");
++                      return 2;
++              }
++              cur_seq->agp_texture = (tmp == 3);
++              cur_seq->tex_palette_size[cur_seq->texture] =
++                  (cmd >> 16) & 0x000000007;
++              return 0;
++      case check_for_vertex_count:
++              cur_seq->vertex_count = cmd & 0x0000FFFF;
++              return 0;
++      case check_number_texunits:
++              cur_seq->multitex = (cmd >> 3) & 1;
++              return 0;
++      default:
++              DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
++              return 2;
++      }
++      return 2;
++}
++
++static __inline__ int
++via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
++                  drm_via_state_t * cur_seq)
++{
++      drm_via_private_t *dev_priv =
++          (drm_via_private_t *) cur_seq->dev->dev_private;
++      uint32_t a_fire, bcmd, dw_count;
++      int ret = 0;
++      int have_fire;
++      const uint32_t *buf = *buffer;
++
++      while (buf < buf_end) {
++              have_fire = 0;
++              if ((buf_end - buf) < 2) {
++                      DRM_ERROR
++                          ("Unexpected termination of primitive list.\n");
++                      ret = 1;
++                      break;
++              }
++              if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
++                      break;
++              bcmd = *buf++;
++              if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
++                      DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
++                                *buf);
++                      ret = 1;
++                      break;
++              }
++              a_fire =
++                  *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
++                  HC_HE3Fire_MASK;
++
++              /*
++               * How many dwords per vertex ?
++               */
++
++              if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
++                      DRM_ERROR("Illegal B command vertex data for AGP.\n");
++                      ret = 1;
++                      break;
++              }
++
++              dw_count = 0;
++              if (bcmd & (1 << 7))
++                      dw_count += (cur_seq->multitex) ? 2 : 1;
++              if (bcmd & (1 << 8))
++                      dw_count += (cur_seq->multitex) ? 2 : 1;
++              if (bcmd & (1 << 9))
++                      dw_count++;
++              if (bcmd & (1 << 10))
++                      dw_count++;
++              if (bcmd & (1 << 11))
++                      dw_count++;
++              if (bcmd & (1 << 12))
++                      dw_count++;
++              if (bcmd & (1 << 13))
++                      dw_count++;
++              if (bcmd & (1 << 14))
++                      dw_count++;
++
++              while (buf < buf_end) {
++                      if (*buf == a_fire) {
++                              if (dev_priv->num_fire_offsets >=
++                                  VIA_FIRE_BUF_SIZE) {
++                                      DRM_ERROR("Fire offset buffer full.\n");
++                                      ret = 1;
++                                      break;
++                              }
++                              dev_priv->fire_offsets[dev_priv->
++                                                     num_fire_offsets++] =
++                                  buf;
++                              have_fire = 1;
++                              buf++;
++                              if (buf < buf_end && *buf == a_fire)
++                                      buf++;
++                              break;
++                      }
++                      if ((*buf == HALCYON_HEADER2) ||
++                          ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
++                              DRM_ERROR("Missing Vertex Fire command, "
++                                        "Stray Vertex Fire command  or verifier "
++                                        "lost sync.\n");
++                              ret = 1;
++                              break;
++                      }
++                      if ((ret = eat_words(&buf, buf_end, dw_count)))
++                              break;
++              }
++              if (buf >= buf_end && !have_fire) {
++                      DRM_ERROR("Missing Vertex Fire command or verifier "
++                                "lost sync.\n");
++                      ret = 1;
++                      break;
++              }
++              if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
++                      DRM_ERROR("AGP Primitive list end misaligned.\n");
++                      ret = 1;
++                      break;
++              }
++      }
++      *buffer = buf;
++      return ret;
++}
++
++static __inline__ verifier_state_t
++via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
++                drm_via_state_t * hc_state)
++{
++      uint32_t cmd;
++      int hz_mode;
++      hazard_t hz;
++      const uint32_t *buf = *buffer;
++      const hazard_t *hz_table;
++
++      if ((buf_end - buf) < 2) {
++              DRM_ERROR
++                  ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
++              return state_error;
++      }
++      buf++;
++      cmd = (*buf++ & 0xFFFF0000) >> 16;
++
++      switch (cmd) {
++      case HC_ParaType_CmdVdata:
++              if (via_check_prim_list(&buf, buf_end, hc_state))
++                      return state_error;
++              *buffer = buf;
++              return state_command;
++      case HC_ParaType_NotTex:
++              hz_table = table1;
++              break;
++      case HC_ParaType_Tex:
++              hc_state->texture = 0;
++              hz_table = table2;
++              break;
++      case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
++              hc_state->texture = 1;
++              hz_table = table2;
++              break;
++      case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
++              hz_table = table3;
++              break;
++      case HC_ParaType_Auto:
++              if (eat_words(&buf, buf_end, 2))
++                      return state_error;
++              *buffer = buf;
++              return state_command;
++      case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
++              if (eat_words(&buf, buf_end, 32))
++                      return state_error;
++              *buffer = buf;
++              return state_command;
++      case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
++      case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
++              DRM_ERROR("Texture palettes are rejected because of "
++                        "lack of info how to determine their size.\n");
++              return state_error;
++      case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
++              DRM_ERROR("Fog factor palettes are rejected because of "
++                        "lack of info how to determine their size.\n");
++              return state_error;
++      default:
++
++              /*
++               * There are some unimplemented HC_ParaTypes here, that
++               * need to be implemented if the Mesa driver is extended.
++               */
++
++              DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
++                        "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
++                        cmd, *(buf - 2));
++              *buffer = buf;
++              return state_error;
++      }
++
++      while (buf < buf_end) {
++              cmd = *buf++;
++              if ((hz = hz_table[cmd >> 24])) {
++                      if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
++                              if (hz_mode == 1) {
++                                      buf--;
++                                      break;
++                              }
++                              return state_error;
++                      }
++              } else if (hc_state->unfinished &&
++                         finish_current_sequence(hc_state)) {
++                      return state_error;
++              }
++      }
++      if (hc_state->unfinished && finish_current_sequence(hc_state)) {
++              return state_error;
++      }
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                const uint32_t * buf_end, int *fire_count)
++{
++      uint32_t cmd;
++      const uint32_t *buf = *buffer;
++      const uint32_t *next_fire;
++      int burst = 0;
++
++      next_fire = dev_priv->fire_offsets[*fire_count];
++      buf++;
++      cmd = (*buf & 0xFFFF0000) >> 16;
++      VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
++      switch (cmd) {
++      case HC_ParaType_CmdVdata:
++              while ((buf < buf_end) &&
++                     (*fire_count < dev_priv->num_fire_offsets) &&
++                     (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
++                      while (buf <= next_fire) {
++                              VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
++                                        (burst & 63), *buf++);
++                              burst += 4;
++                      }
++                      if ((buf < buf_end)
++                          && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
++                              buf++;
++
++                      if (++(*fire_count) < dev_priv->num_fire_offsets)
++                              next_fire = dev_priv->fire_offsets[*fire_count];
++              }
++              break;
++      default:
++              while (buf < buf_end) {
++
++                      if (*buf == HC_HEADER2 ||
++                          (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
++                          (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
++                          (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
++                              break;
++
++                      VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
++                                (burst & 63), *buf++);
++                      burst += 4;
++              }
++      }
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ int verify_mmio_address(uint32_t address)
++{
++      if ((address > 0x3FF) && (address < 0xC00)) {
++              DRM_ERROR("Invalid VIDEO DMA command. "
++                        "Attempt to access 3D- or command burst area.\n");
++              return 1;
++      } else if ((address > 0xCFF) && (address < 0x1300)) {
++              DRM_ERROR("Invalid VIDEO DMA command. "
++                        "Attempt to access PCI DMA area.\n");
++              return 1;
++      } else if (address > 0x13FF) {
++              DRM_ERROR("Invalid VIDEO DMA command. "
++                        "Attempt to access VGA registers.\n");
++              return 1;
++      }
++      return 0;
++}
++
++static __inline__ int
++verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
++                uint32_t dwords)
++{
++      const uint32_t *buf = *buffer;
++
++      if (buf_end - buf < dwords) {
++              DRM_ERROR("Illegal termination of video command.\n");
++              return 1;
++      }
++      while (dwords--) {
++              if (*buf++) {
++                      DRM_ERROR("Illegal video command tail.\n");
++                      return 1;
++              }
++      }
++      *buffer = buf;
++      return 0;
++}
++
++static __inline__ verifier_state_t
++via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
++{
++      uint32_t cmd;
++      const uint32_t *buf = *buffer;
++      verifier_state_t ret = state_command;
++
++      while (buf < buf_end) {
++              cmd = *buf;
++              if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
++                  (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
++                      if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
++                              break;
++                      DRM_ERROR("Invalid HALCYON_HEADER1 command. "
++                                "Attempt to access 3D- or command burst area.\n");
++                      ret = state_error;
++                      break;
++              } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
++                      if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
++                              break;
++                      DRM_ERROR("Invalid HALCYON_HEADER1 command. "
++                                "Attempt to access VGA registers.\n");
++                      ret = state_error;
++                      break;
++              } else {
++                      buf += 2;
++              }
++      }
++      *buffer = buf;
++      return ret;
++}
++
++static __inline__ verifier_state_t
++via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                const uint32_t * buf_end)
++{
++      register uint32_t cmd;
++      const uint32_t *buf = *buffer;
++
++      while (buf < buf_end) {
++              cmd = *buf;
++              if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
++                      break;
++              VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
++              buf++;
++      }
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
++{
++      uint32_t data;
++      const uint32_t *buf = *buffer;
++
++      if (buf_end - buf < 4) {
++              DRM_ERROR("Illegal termination of video header5 command\n");
++              return state_error;
++      }
++
++      data = *buf++ & ~VIA_VIDEOMASK;
++      if (verify_mmio_address(data))
++              return state_error;
++
++      data = *buf++;
++      if (*buf++ != 0x00F50000) {
++              DRM_ERROR("Illegal header5 header data\n");
++              return state_error;
++      }
++      if (*buf++ != 0x00000000) {
++              DRM_ERROR("Illegal header5 header data\n");
++              return state_error;
++      }
++      if (eat_words(&buf, buf_end, data))
++              return state_error;
++      if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
++              return state_error;
++      *buffer = buf;
++      return state_command;
++
++}
++
++static __inline__ verifier_state_t
++via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                 const uint32_t * buf_end)
++{
++      uint32_t addr, count, i;
++      const uint32_t *buf = *buffer;
++
++      addr = *buf++ & ~VIA_VIDEOMASK;
++      i = count = *buf;
++      buf += 3;
++      while (i--) {
++              VIA_WRITE(addr, *buf++);
++      }
++      if (count & 3)
++              buf += 4 - (count & 3);
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
++{
++      uint32_t data;
++      const uint32_t *buf = *buffer;
++      uint32_t i;
++
++      if (buf_end - buf < 4) {
++              DRM_ERROR("Illegal termination of video header6 command\n");
++              return state_error;
++      }
++      buf++;
++      data = *buf++;
++      if (*buf++ != 0x00F60000) {
++              DRM_ERROR("Illegal header6 header data\n");
++              return state_error;
++      }
++      if (*buf++ != 0x00000000) {
++              DRM_ERROR("Illegal header6 header data\n");
++              return state_error;
++      }
++      if ((buf_end - buf) < (data << 1)) {
++              DRM_ERROR("Illegal termination of video header6 command\n");
++              return state_error;
++      }
++      for (i = 0; i < data; ++i) {
++              if (verify_mmio_address(*buf++))
++                      return state_error;
++              buf++;
++      }
++      data <<= 1;
++      if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
++              return state_error;
++      *buffer = buf;
++      return state_command;
++}
++
++static __inline__ verifier_state_t
++via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
++                 const uint32_t * buf_end)
++{
++
++      uint32_t addr, count, i;
++      const uint32_t *buf = *buffer;
++
++      i = count = *++buf;
++      buf += 3;
++      while (i--) {
++              addr = *buf++;
++              VIA_WRITE(addr, *buf++);
++      }
++      count <<= 1;
++      if (count & 3)
++              buf += 4 - (count & 3);
++      *buffer = buf;
++      return state_command;
++}
++
++int
++via_verify_command_stream(const uint32_t * buf, unsigned int size,
++                        struct drm_device * dev, int agp)
++{
++
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_state_t *hc_state = &dev_priv->hc_state;
++      drm_via_state_t saved_state = *hc_state;
++      uint32_t cmd;
++      const uint32_t *buf_end = buf + (size >> 2);
++      verifier_state_t state = state_command;
++      int cme_video;
++      int supported_3d;
++
++      cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
++                   dev_priv->chipset == VIA_DX9_0);
++
++      supported_3d = dev_priv->chipset != VIA_DX9_0;
++
++      hc_state->dev = dev;
++      hc_state->unfinished = no_sequence;
++      hc_state->map_cache = NULL;
++      hc_state->agp = agp;
++      hc_state->buf_start = buf;
++      dev_priv->num_fire_offsets = 0;
++
++      while (buf < buf_end) {
++
++              switch (state) {
++              case state_header2:
++                      state = via_check_header2(&buf, buf_end, hc_state);
++                      break;
++              case state_header1:
++                      state = via_check_header1(&buf, buf_end);
++                      break;
++              case state_vheader5:
++                      state = via_check_vheader5(&buf, buf_end);
++                      break;
++              case state_vheader6:
++                      state = via_check_vheader6(&buf, buf_end);
++                      break;
++              case state_command:
++                      if ((HALCYON_HEADER2 == (cmd = *buf)) &&
++                          supported_3d)
++                              state = state_header2;
++                      else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                              state = state_header1;
++                      else if (cme_video
++                               && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
++                              state = state_vheader5;
++                      else if (cme_video
++                               && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
++                              state = state_vheader6;
++                      else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
++                              DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
++                              state = state_error;
++                      } else {
++                              DRM_ERROR
++                                  ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
++                                   cmd);
++                              state = state_error;
++                      }
++                      break;
++              case state_error:
++              default:
++                      *hc_state = saved_state;
++                      return -EINVAL;
++              }
++      }
++      if (state == state_error) {
++              *hc_state = saved_state;
++              return -EINVAL;
++      }
++      return 0;
++}
++
++int
++via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
++                       unsigned int size)
++{
++
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      uint32_t cmd;
++      const uint32_t *buf_end = buf + (size >> 2);
++      verifier_state_t state = state_command;
++      int fire_count = 0;
++
++      while (buf < buf_end) {
++
++              switch (state) {
++              case state_header2:
++                      state =
++                          via_parse_header2(dev_priv, &buf, buf_end,
++                                            &fire_count);
++                      break;
++              case state_header1:
++                      state = via_parse_header1(dev_priv, &buf, buf_end);
++                      break;
++              case state_vheader5:
++                      state = via_parse_vheader5(dev_priv, &buf, buf_end);
++                      break;
++              case state_vheader6:
++                      state = via_parse_vheader6(dev_priv, &buf, buf_end);
++                      break;
++              case state_command:
++                      if (HALCYON_HEADER2 == (cmd = *buf))
++                              state = state_header2;
++                      else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
++                              state = state_header1;
++                      else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
++                              state = state_vheader5;
++                      else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
++                              state = state_vheader6;
++                      else {
++                              DRM_ERROR
++                                  ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
++                                   cmd);
++                              state = state_error;
++                      }
++                      break;
++              case state_error:
++              default:
++                      return -EINVAL;
++              }
++      }
++      if (state == state_error) {
++              return -EINVAL;
++      }
++      return 0;
++}
++
++static void
++setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
++{
++      int i;
++
++      for (i = 0; i < 256; ++i) {
++              table[i] = forbidden_command;
++      }
++
++      for (i = 0; i < size; ++i) {
++              table[init_table[i].code] = init_table[i].hz;
++      }
++}
++
++void via_init_command_verifier(void)
++{
++      setup_hazard_table(init_table1, table1,
++                         sizeof(init_table1) / sizeof(hz_init_t));
++      setup_hazard_table(init_table2, table2,
++                         sizeof(init_table2) / sizeof(hz_init_t));
++      setup_hazard_table(init_table3, table3,
++                         sizeof(init_table3) / sizeof(hz_init_t));
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/via_verifier.h git-nokia/drivers/gpu/drm-tungsten/via_verifier.h
+--- git/drivers/gpu/drm-tungsten/via_verifier.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_verifier.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,62 @@
++/*
++ * Copyright 2004 The Unichrome Project. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Thomas Hellström 2004.
++ */
++
++#ifndef _VIA_VERIFIER_H_
++#define _VIA_VERIFIER_H_
++
++typedef enum {
++      no_sequence = 0,
++      z_address,
++      dest_address,
++      tex_address
++} drm_via_sequence_t;
++
++typedef struct {
++      unsigned texture;
++      uint32_t z_addr;
++      uint32_t d_addr;
++      uint32_t t_addr[2][10];
++      uint32_t pitch[2][10];
++      uint32_t height[2][10];
++      uint32_t tex_level_lo[2];
++      uint32_t tex_level_hi[2];
++      uint32_t tex_palette_size[2];
++      uint32_t tex_npot[2];
++      drm_via_sequence_t unfinished;
++      int agp_texture;
++      int multitex;
++      struct drm_device *dev;
++      drm_local_map_t *map_cache;
++      uint32_t vertex_count;
++      int agp;
++      const uint32_t *buf_start;
++} drm_via_state_t;
++
++extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
++                                  struct drm_device *dev, int agp);
++extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
++                                   unsigned int size);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/via_video.c git-nokia/drivers/gpu/drm-tungsten/via_video.c
+--- git/drivers/gpu/drm-tungsten/via_video.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/via_video.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,93 @@
++/*
++ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Thomas Hellstrom 2005.
++ *
++ * Video and XvMC related functions.
++ */
++
++#include "drmP.h"
++#include "via_drm.h"
++#include "via_drv.h"
++
++void via_init_futex(drm_via_private_t * dev_priv)
++{
++      unsigned int i;
++
++      DRM_DEBUG("\n");
++
++      for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
++              DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
++              XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
++      }
++}
++
++void via_cleanup_futex(drm_via_private_t * dev_priv)
++{
++}
++
++void via_release_futex(drm_via_private_t * dev_priv, int context)
++{
++      unsigned int i;
++      volatile int *lock;
++
++      if (!dev_priv->sarea_priv)
++              return;
++
++      for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
++              lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
++              if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
++                      if (_DRM_LOCK_IS_HELD(*lock)
++                          && (*lock & _DRM_LOCK_CONT)) {
++                              DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
++                      }
++                      *lock = 0;
++              }
++      }
++}
++
++int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
++{
++      drm_via_futex_t *fx = data;
++      volatile int *lock;
++      drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++      drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
++      int ret = 0;
++
++      DRM_DEBUG("\n");
++
++      if (fx->lock > VIA_NR_XVMC_LOCKS)
++              return -EFAULT;
++
++      lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
++
++      switch (fx->func) {
++      case VIA_FUTEX_WAIT:
++              DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
++                          (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
++              return ret;
++      case VIA_FUTEX_WAKE:
++              DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
++              return 0;
++      }
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_cmdlist.c git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.c
+--- git/drivers/gpu/drm-tungsten/xgi_cmdlist.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,328 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++#include "xgi_cmdlist.h"
++
++static void xgi_emit_flush(struct xgi_info * info, bool stop);
++static void xgi_emit_nop(struct xgi_info * info);
++static unsigned int get_batch_command(enum xgi_batch_type type);
++static void triggerHWCommandList(struct xgi_info * info);
++static void xgi_cmdlist_reset(struct xgi_info * info);
++
++
++/**
++ * Graphic engine register (2d/3d) acessing interface
++ */
++static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
++{
++#ifdef XGI_MMIO_DEBUG
++      DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
++               map->handle, addr, data);
++#endif
++      DRM_WRITE32(map, addr, data);
++}
++
++
++int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
++                         struct drm_file * filp)
++{
++      struct xgi_mem_alloc mem_alloc = {
++              .location = XGI_MEMLOC_NON_LOCAL,
++              .size = size,
++      };
++      int err;
++
++      err = xgi_alloc(info, &mem_alloc, filp);
++      if (err) {
++              return err;
++      }
++
++      info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
++      info->cmdring.size = mem_alloc.size;
++      info->cmdring.ring_hw_base = mem_alloc.hw_addr;
++      info->cmdring.last_ptr = NULL;
++      info->cmdring.ring_offset = 0;
++
++      return 0;
++}
++
++
++/**
++ * get_batch_command - Get the command ID for the current begin type.
++ * @type: Type of the current batch
++ *
++ * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
++ *
++ * This function assumes that @type is on the range [0,3].
++ */
++unsigned int get_batch_command(enum xgi_batch_type type)
++{
++      static const unsigned int ports[4] = {
++              0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
++      };
++
++      return ports[type];
++}
++
++
++int xgi_submit_cmdlist(struct drm_device * dev, void * data,
++                     struct drm_file * filp)
++{
++      struct xgi_info *const info = dev->dev_private;
++      const struct xgi_cmd_info *const pCmdInfo =
++              (struct xgi_cmd_info *) data;
++      const unsigned int cmd = get_batch_command(pCmdInfo->type);
++      u32 begin[4];
++
++
++      begin[0] = (cmd << 24) | BEGIN_VALID_MASK
++              | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
++      begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
++      begin[2] = pCmdInfo->hw_addr >> 4;
++      begin[3] = 0;
++
++      if (info->cmdring.last_ptr == NULL) {
++              const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
++
++
++              /* Enable PCI Trigger Mode
++               */
++              dwWriteReg(info->mmio_map,
++                         BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
++                         (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
++                         M2REG_CLEAR_COUNTERS_MASK | 0x08 |
++                         M2REG_PCI_TRIGGER_MODE_MASK);
++
++              dwWriteReg(info->mmio_map,
++                         BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
++                         (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
++                         M2REG_PCI_TRIGGER_MODE_MASK);
++
++
++              /* Send PCI begin command
++               */
++              dwWriteReg(info->mmio_map, portOffset,      begin[0]);
++              dwWriteReg(info->mmio_map, portOffset +  4, begin[1]);
++              dwWriteReg(info->mmio_map, portOffset +  8, begin[2]);
++              dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
++      } else {
++              DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
++
++              if (pCmdInfo->type == BTYPE_3D) {
++                      xgi_emit_flush(info, false);
++              }
++
++              info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
++              info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]);
++              info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]);
++              DRM_WRITEMEMORYBARRIER();
++              info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]);
++
++              triggerHWCommandList(info);
++      }
++
++      info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
++#ifdef XGI_HAVE_FENCE
++      drm_fence_flush_old(info->dev, 0, info->next_sequence);
++#endif /* XGI_HAVE_FENCE */
++      return 0;
++}
++
++
++/*
++    state:      0 - console
++                1 - graphic
++                2 - fb
++                3 - logout
++*/
++int xgi_state_change(struct xgi_info * info, unsigned int to,
++                   unsigned int from)
++{
++#define STATE_CONSOLE   0
++#define STATE_GRAPHIC   1
++#define STATE_FBTERM    2
++#define STATE_LOGOUT    3
++#define STATE_REBOOT    4
++#define STATE_SHUTDOWN  5
++
++      if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
++              DRM_INFO("Leaving graphical mode (probably VT switch)\n");
++      } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
++              DRM_INFO("Entering graphical mode (probably VT switch)\n");
++              xgi_cmdlist_reset(info);
++      } else if ((from == STATE_GRAPHIC)
++                 && ((to == STATE_LOGOUT)
++                     || (to == STATE_REBOOT)
++                     || (to == STATE_SHUTDOWN))) {
++              DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
++      } else {
++              DRM_ERROR("Invalid state change.\n");
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++
++int xgi_state_change_ioctl(struct drm_device * dev, void * data,
++                         struct drm_file * filp)
++{
++      struct xgi_state_info *const state =
++              (struct xgi_state_info *) data;
++      struct xgi_info *info = dev->dev_private;
++
++
++      return xgi_state_change(info, state->_toState, state->_fromState);
++}
++
++
++void xgi_cmdlist_reset(struct xgi_info * info)
++{
++      info->cmdring.last_ptr = NULL;
++      info->cmdring.ring_offset = 0;
++}
++
++
++void xgi_cmdlist_cleanup(struct xgi_info * info)
++{
++      if (info->cmdring.ring_hw_base != 0) {
++              /* If command lists have been issued, terminate the command
++               * list chain with a flush command.
++               */
++              if (info->cmdring.last_ptr != NULL) {
++                      xgi_emit_flush(info, false);
++                      xgi_emit_nop(info);
++              }
++
++              xgi_waitfor_pci_idle(info);
++
++              (void) memset(&info->cmdring, 0, sizeof(info->cmdring));
++      }
++}
++
++static void triggerHWCommandList(struct xgi_info * info)
++{
++      static unsigned int s_triggerID = 1;
++
++      dwWriteReg(info->mmio_map,
++                 BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
++                 0x05000000 + (0x0ffff & s_triggerID++));
++}
++
++
++/**
++ * Emit a flush to the CRTL command stream.
++ * @info XGI info structure
++ *
++ * This function assumes info->cmdring.ptr is non-NULL.
++ */
++void xgi_emit_flush(struct xgi_info * info, bool stop)
++{
++      const u32 flush_command[8] = {
++              ((0x10 << 24)
++               | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
++              BEGIN_LINK_ENABLE_MASK | (0x00004),
++              0x00000000, 0x00000000,
++
++              /* Flush the 2D engine with the default 32 clock delay.
++               */
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++              M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
++      };
++      const unsigned int flush_size = sizeof(flush_command);
++      u32 *batch_addr;
++      u32 hw_addr;
++      unsigned int i;
++
++
++      /* check buf is large enough to contain a new flush batch */
++      if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
++              info->cmdring.ring_offset = 0;
++      }
++
++      hw_addr = info->cmdring.ring_hw_base
++              + info->cmdring.ring_offset;
++      batch_addr = info->cmdring.ptr
++              + (info->cmdring.ring_offset / 4);
++
++      for (i = 0; i < (flush_size / 4); i++) {
++              batch_addr[i] = cpu_to_le32(flush_command[i]);
++      }
++
++      if (stop) {
++              *batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK);
++      }
++
++      info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4));
++      info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4);
++      info->cmdring.last_ptr[3] = 0;
++      DRM_WRITEMEMORYBARRIER();
++      info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
++              | (BEGIN_VALID_MASK));
++
++      triggerHWCommandList(info);
++
++      info->cmdring.ring_offset += flush_size;
++      info->cmdring.last_ptr = batch_addr;
++}
++
++
++/**
++ * Emit an empty command to the CRTL command stream.
++ * @info XGI info structure
++ *
++ * This function assumes info->cmdring.ptr is non-NULL.  In addition, since
++ * this function emits a command that does not have linkage information,
++ * it sets info->cmdring.ptr to NULL.
++ */
++void xgi_emit_nop(struct xgi_info * info)
++{
++      info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK
++              | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence));
++      info->cmdring.last_ptr[2] = 0;
++      info->cmdring.last_ptr[3] = 0;
++      DRM_WRITEMEMORYBARRIER();
++      info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
++              | (BEGIN_VALID_MASK));
++
++      triggerHWCommandList(info);
++
++      info->cmdring.last_ptr = NULL;
++}
++
++
++void xgi_emit_irq(struct xgi_info * info)
++{
++      if (info->cmdring.last_ptr == NULL)
++              return;
++
++      xgi_emit_flush(info, true);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_cmdlist.h git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.h
+--- git/drivers/gpu/drm-tungsten/xgi_cmdlist.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_cmdlist.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,66 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_CMDLIST_H_
++#define _XGI_CMDLIST_H_
++
++struct xgi_cmdring_info {
++      /**
++       * Kernel space pointer to the base of the command ring.
++       */
++      u32 * ptr;
++
++      /**
++       * Size, in bytes, of the command ring.
++       */
++      unsigned int size;
++
++      /**
++       * Base address of the command ring from the hardware's PoV.
++       */
++      unsigned int ring_hw_base;
++
++      u32 * last_ptr;
++
++      /**
++       * Offset, in bytes, from the start of the ring to the next available
++       * location to store a command.
++       */
++      unsigned int ring_offset;
++};
++
++struct xgi_info;
++extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
++      struct drm_file * filp);
++
++extern int xgi_state_change(struct xgi_info * info, unsigned int to,
++      unsigned int from);
++
++extern void xgi_cmdlist_cleanup(struct xgi_info * info);
++
++extern void xgi_emit_irq(struct xgi_info * info);
++
++#endif                                /* _XGI_CMDLIST_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_drm.h git-nokia/drivers/gpu/drm-tungsten/xgi_drm.h
+--- git/drivers/gpu/drm-tungsten/xgi_drm.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_drm.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,137 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR
++ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_DRM_H_
++#define _XGI_DRM_H_
++
++#include <linux/types.h>
++#include <asm/ioctl.h>
++
++struct drm_xgi_sarea {
++      __u16 device_id;
++      __u16 vendor_id;
++
++      char device_name[32];
++
++      unsigned int scrn_start;
++      unsigned int scrn_xres;
++      unsigned int scrn_yres;
++      unsigned int scrn_bpp;
++      unsigned int scrn_pitch;
++};
++
++
++struct xgi_bootstrap {
++      /**
++       * Size of PCI-e GART range in megabytes.
++       */
++      struct drm_map gart;
++};
++
++
++enum xgi_mem_location {
++      XGI_MEMLOC_NON_LOCAL = 0,
++      XGI_MEMLOC_LOCAL = 1,
++      XGI_MEMLOC_INVALID = 0x7fffffff
++};
++
++struct xgi_mem_alloc {
++      /**
++       * Memory region to be used for allocation.
++       *
++       * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL.
++       */
++      unsigned int location;
++
++      /**
++       * Number of bytes request.
++       *
++       * On successful allocation, set to the actual number of bytes
++       * allocated.
++       */
++      unsigned int size;
++
++      /**
++       * Address of the memory from the graphics hardware's point of view.
++       */
++      __u32 hw_addr;
++
++      /**
++       * Offset of the allocation in the mapping.
++       */
++      __u32 offset;
++
++      /**
++       * Magic handle used to release memory.
++       *
++       * See also DRM_XGI_FREE ioctl.
++       */
++      __u32 index;
++};
++
++enum xgi_batch_type {
++      BTYPE_2D = 0,
++      BTYPE_3D = 1,
++      BTYPE_FLIP = 2,
++      BTYPE_CTRL = 3,
++      BTYPE_NONE = 0x7fffffff
++};
++
++struct xgi_cmd_info {
++      __u32 type;
++      __u32 hw_addr;
++      __u32 size;
++      __u32 id;
++};
++
++struct xgi_state_info {
++      unsigned int _fromState;
++      unsigned int _toState;
++};
++
++
++/*
++ * Ioctl definitions
++ */
++
++#define DRM_XGI_BOOTSTRAP           0
++#define DRM_XGI_ALLOC               1
++#define DRM_XGI_FREE                2
++#define DRM_XGI_SUBMIT_CMDLIST      3
++#define DRM_XGI_STATE_CHANGE        4
++#define DRM_XGI_SET_FENCE           5
++#define DRM_XGI_WAIT_FENCE          6
++
++#define XGI_IOCTL_BOOTSTRAP         DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
++#define XGI_IOCTL_ALLOC             DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
++#define XGI_IOCTL_FREE              DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
++#define XGI_IOCTL_SUBMIT_CMDLIST    DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
++#define XGI_IOCTL_STATE_CHANGE      DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
++#define XGI_IOCTL_SET_FENCE         DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_SET_FENCE, u32)
++#define XGI_IOCTL_WAIT_FENCE        DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_WAIT_FENCE, u32)
++
++#endif /* _XGI_DRM_H_ */
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_drv.c git-nokia/drivers/gpu/drm-tungsten/xgi_drv.c
+--- git/drivers/gpu/drm-tungsten/xgi_drv.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_drv.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,441 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "drmP.h"
++#include "drm.h"
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++#include "xgi_cmdlist.h"
++
++#include "drm_pciids.h"
++
++static struct pci_device_id pciidlist[] = {
++      xgi_PCI_IDS
++};
++
++#ifdef XGI_HAVE_FENCE
++extern struct drm_fence_driver xgi_fence_driver;
++#endif /* XGI_HAVE_FENCE */
++
++int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
++
++static struct drm_ioctl_desc xgi_ioctls[] = {
++      DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++      DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
++      DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH),
++      DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH),
++};
++
++static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
++static int xgi_driver_unload(struct drm_device *dev);
++static void xgi_driver_lastclose(struct drm_device * dev);
++static void xgi_reclaim_buffers_locked(struct drm_device * dev,
++      struct drm_file * filp);
++static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
++static int xgi_kern_isr_postinstall(struct drm_device * dev);
++
++
++static struct drm_driver driver = {
++      .driver_features =
++              DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
++              DRIVER_IRQ_SHARED | DRIVER_SG,
++      .dev_priv_size = sizeof(struct xgi_info),
++      .load = xgi_driver_load,
++      .unload = xgi_driver_unload,
++      .lastclose = xgi_driver_lastclose,
++      .dma_quiescent = NULL,
++      .irq_preinstall = NULL,
++      .irq_postinstall = xgi_kern_isr_postinstall,
++      .irq_uninstall = NULL,
++      .irq_handler = xgi_kern_isr,
++      .reclaim_buffers = drm_core_reclaim_buffers,
++      .reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
++      .get_map_ofs = drm_core_get_map_ofs,
++      .get_reg_ofs = drm_core_get_reg_ofs,
++      .ioctls = xgi_ioctls,
++      .dma_ioctl = NULL,
++
++      .fops = {
++              .owner = THIS_MODULE,
++              .open = drm_open,
++              .release = drm_release,
++              .ioctl = drm_ioctl,
++              .mmap = drm_mmap,
++              .poll = drm_poll,
++              .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
++              .compat_ioctl = xgi_compat_ioctl,
++#endif
++      },
++
++      .pci_driver = {
++              .name = DRIVER_NAME,
++              .id_table = pciidlist,
++              .probe = probe,
++              .remove = __devexit_p(drm_cleanup_pci),
++      },
++
++#ifdef XGI_HAVE_FENCE
++      .fence_driver = &xgi_fence_driver,
++#endif /* XGI_HAVE_FENCE */
++
++      .name = DRIVER_NAME,
++      .desc = DRIVER_DESC,
++      .date = DRIVER_DATE,
++      .major = DRIVER_MAJOR,
++      .minor = DRIVER_MINOR,
++      .patchlevel = DRIVER_PATCHLEVEL,
++
++};
++
++static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++      return drm_get_dev(pdev, ent, &driver);
++}
++
++
++static int __init xgi_init(void)
++{
++      driver.num_ioctls = xgi_max_ioctl;
++      return drm_init(&driver, pciidlist);
++}
++
++static void __exit xgi_exit(void)
++{
++      drm_exit(&driver);
++}
++
++module_init(xgi_init);
++module_exit(xgi_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
++
++
++void xgi_engine_init(struct xgi_info * info)
++{
++      u8 temp;
++
++
++      OUT3C5B(info->mmio_map, 0x11, 0x92);
++
++      /* -------> copy from OT2D
++       * PCI Retry Control Register.
++       * disable PCI read retry & enable write retry in mem. (10xx xxxx)b
++       */
++      temp = IN3X5B(info->mmio_map, 0x55);
++      OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);
++
++      xgi_enable_ge(info);
++
++      /* Enable linear addressing of the card. */
++      temp = IN3X5B(info->mmio_map, 0x21);
++      OUT3X5B(info->mmio_map, 0x21, temp | 0x20);
++
++      /* Enable 32-bit internal data path */
++      temp = IN3X5B(info->mmio_map, 0x2A);
++      OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);
++
++      /* Enable PCI burst write ,disable burst read and enable MMIO. */
++      /*
++       * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO.
++       * 7 ---- Pixel Data Format 1:  big endian 0:  little endian
++       * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]#  with Big Endian Format
++       * 2 ---- PCI Burst Write Enable
++       * 1 ---- PCI Burst Read Enable
++       * 0 ---- MMIO Control
++       */
++      temp = IN3X5B(info->mmio_map, 0x39);
++      OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);
++
++      /* enable GEIO decode */
++      /* temp = IN3X5B(info->mmio_map, 0x29);
++       * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
++       */
++
++      /* Enable graphic engine I/O PCI retry function*/
++      /* temp = IN3X5B(info->mmio_map, 0x62);
++       * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
++       */
++
++      /* protect all register except which protected by 3c5.0e.7 */
++        /* OUT3C5B(info->mmio_map, 0x11, 0x87); */
++}
++
++
++int xgi_bootstrap(struct drm_device * dev, void * data,
++                struct drm_file * filp)
++{
++      struct xgi_info *info = dev->dev_private;
++      struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data;
++      struct drm_map_list *maplist;
++      int err;
++
++
++      DRM_SPININIT(&info->fence_lock, "fence lock");
++      info->next_sequence = 0;
++      info->complete_sequence = 0;
++
++      if (info->mmio_map == NULL) {
++              err = drm_addmap(dev, info->mmio.base, info->mmio.size,
++                               _DRM_REGISTERS, _DRM_KERNEL,
++                               &info->mmio_map);
++              if (err) {
++                      DRM_ERROR("Unable to map MMIO region: %d\n", err);
++                      return err;
++              }
++
++              xgi_enable_mmio(info);
++              xgi_engine_init(info);
++      }
++
++
++      info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;
++
++      DRM_INFO("fb   base: 0x%lx, size: 0x%x (probed)\n",
++               (unsigned long) info->fb.base, info->fb.size);
++
++
++      if ((info->fb.base == 0) || (info->fb.size == 0)) {
++              DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
++                        (unsigned long) info->fb.base, info->fb.size);
++              return -EINVAL;
++      }
++
++
++      /* Init the resource manager */
++      if (!info->fb_heap_initialized) {
++              err = xgi_fb_heap_init(info);
++              if (err) {
++                      DRM_ERROR("Unable to initialize FB heap.\n");
++                      return err;
++              }
++      }
++
++
++      info->pcie.size = bs->gart.size;
++
++      /* Init the resource manager */
++      if (!info->pcie_heap_initialized) {
++              err = xgi_pcie_heap_init(info);
++              if (err) {
++                      DRM_ERROR("Unable to initialize GART heap.\n");
++                      return err;
++              }
++
++              /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
++              err = xgi_cmdlist_initialize(info, 0x100000, filp);
++              if (err) {
++                      DRM_ERROR("xgi_cmdlist_initialize() failed\n");
++                      return err;
++              }
++      }
++
++
++      if (info->pcie_map == NULL) {
++              err = drm_addmap(info->dev, 0, info->pcie.size,
++                               _DRM_SCATTER_GATHER, _DRM_LOCKED,
++                               & info->pcie_map);
++              if (err) {
++                      DRM_ERROR("Could not add map for GART backing "
++                                "store.\n");
++                      return err;
++              }
++      }
++
++
++      maplist = drm_find_matching_map(dev, info->pcie_map);
++      if (maplist == NULL) {
++              DRM_ERROR("Could not find GART backing store map.\n");
++              return -EINVAL;
++      }
++
++      bs->gart = *info->pcie_map;
++      bs->gart.handle = (void *)(unsigned long) maplist->user_token;
++      return 0;
++}
++
++
++void xgi_driver_lastclose(struct drm_device * dev)
++{
++      struct xgi_info * info = dev->dev_private;
++
++      if (info != NULL) {
++              if (info->mmio_map != NULL) {
++                      xgi_cmdlist_cleanup(info);
++                      xgi_disable_ge(info);
++                      xgi_disable_mmio(info);
++              }
++
++              /* The core DRM lastclose routine will destroy all of our
++               * mappings for us.  NULL out the pointers here so that
++               * xgi_bootstrap can do the right thing.
++               */
++              info->pcie_map = NULL;
++              info->mmio_map = NULL;
++              info->fb_map = NULL;
++
++              if (info->pcie_heap_initialized) {
++                      drm_ati_pcigart_cleanup(dev, &info->gart_info);
++              }
++
++              if (info->fb_heap_initialized
++                  || info->pcie_heap_initialized) {
++                      drm_sman_cleanup(&info->sman);
++
++                      info->fb_heap_initialized = false;
++                      info->pcie_heap_initialized = false;
++              }
++      }
++}
++
++
++void xgi_reclaim_buffers_locked(struct drm_device * dev,
++                              struct drm_file * filp)
++{
++      struct xgi_info * info = dev->dev_private;
++
++      mutex_lock(&info->dev->struct_mutex);
++      if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) {
++              mutex_unlock(&info->dev->struct_mutex);
++              return;
++      }
++
++      if (dev->driver->dma_quiescent) {
++              dev->driver->dma_quiescent(dev);
++      }
++
++      drm_sman_owner_cleanup(&info->sman, (unsigned long) filp);
++      mutex_unlock(&info->dev->struct_mutex);
++      return;
++}
++
++
++/*
++ * driver receives an interrupt if someone waiting, then hand it off.
++ */
++irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
++{
++      struct drm_device *dev = (struct drm_device *) arg;
++      struct xgi_info *info = dev->dev_private;
++      const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
++                                      (0x2800
++                                       + M2REG_AUTO_LINK_STATUS_ADDRESS)))
++              & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
++                 | M2REG_ACTIVE_INTERRUPT_0_MASK
++                 | M2REG_ACTIVE_INTERRUPT_2_MASK
++                 | M2REG_ACTIVE_INTERRUPT_3_MASK);
++
++
++      if (irq_bits != 0) {
++              DRM_WRITE32(info->mmio_map,
++                          0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
++                          cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
++#ifdef XGI_HAVE_FENCE
++              xgi_fence_handler(dev);
++#endif /* XGI_HAVE_FENCE */
++              DRM_WAKEUP(&info->fence_queue);
++              return IRQ_HANDLED;
++      } else {
++              return IRQ_NONE;
++      }
++}
++
++
++int xgi_kern_isr_postinstall(struct drm_device * dev)
++{
++      struct xgi_info *info = dev->dev_private;
++
++      DRM_INIT_WAITQUEUE(&info->fence_queue);
++      return 0;
++}
++
++
++int xgi_driver_load(struct drm_device *dev, unsigned long flags)
++{
++      struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
++      int err;
++
++      if (!info)
++              return -ENOMEM;
++
++      (void) memset(info, 0, sizeof(*info));
++      dev->dev_private = info;
++      info->dev = dev;
++
++      info->mmio.base = drm_get_resource_start(dev, 1);
++      info->mmio.size = drm_get_resource_len(dev, 1);
++
++      DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
++               (unsigned long) info->mmio.base, info->mmio.size);
++
++
++      if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
++              DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
++                        (unsigned long) info->mmio.base, info->mmio.size);
++              err = -EINVAL;
++              goto fail;
++      }
++
++
++      info->fb.base = drm_get_resource_start(dev, 0);
++      info->fb.size = drm_get_resource_len(dev, 0);
++
++      DRM_INFO("fb   base: 0x%lx, size: 0x%x\n",
++               (unsigned long) info->fb.base, info->fb.size);
++
++
++      err = drm_sman_init(&info->sman, 2, 12, 8);
++      if (err) {
++              goto fail;
++      }
++
++
++      return 0;
++
++fail:
++      drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
++      return err;
++}
++
++int xgi_driver_unload(struct drm_device *dev)
++{
++      struct xgi_info * info = dev->dev_private;
++
++      drm_sman_takedown(&info->sman);
++      drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
++      dev->dev_private = NULL;
++
++      return 0;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_drv.h git-nokia/drivers/gpu/drm-tungsten/xgi_drv.h
+--- git/drivers/gpu/drm-tungsten/xgi_drv.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_drv.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_DRV_H_
++#define _XGI_DRV_H_
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sman.h"
++
++#define DRIVER_AUTHOR         "Andrea Zhang <andrea_zhang@macrosynergy.com>"
++
++#define DRIVER_NAME           "xgi"
++#define DRIVER_DESC           "XGI XP5 / XP10 / XG47"
++#define DRIVER_DATE           "20080612"
++
++#define DRIVER_MAJOR          1
++#define DRIVER_MINOR          2
++#define DRIVER_PATCHLEVEL     0
++
++#include "xgi_cmdlist.h"
++#include "xgi_drm.h"
++
++struct xgi_aperture {
++      dma_addr_t base;
++      unsigned int size;
++};
++
++struct xgi_info {
++      struct drm_device *dev;
++
++      bool bootstrap_done;
++
++      /* physical characteristics */
++      struct xgi_aperture mmio;
++      struct xgi_aperture fb;
++      struct xgi_aperture pcie;
++
++      struct drm_map *mmio_map;
++      struct drm_map *pcie_map;
++      struct drm_map *fb_map;
++
++      /* look up table parameters */
++      struct drm_ati_pcigart_info gart_info;
++      unsigned int lutPageSize;
++
++      struct drm_sman sman;
++      bool fb_heap_initialized;
++      bool pcie_heap_initialized;
++
++      struct xgi_cmdring_info cmdring;
++
++      DRM_SPINTYPE fence_lock;
++      wait_queue_head_t fence_queue;
++      unsigned complete_sequence;
++      unsigned next_sequence;
++};
++
++extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
++      unsigned long arg);
++
++extern int xgi_fb_heap_init(struct xgi_info * info);
++
++extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
++      struct drm_file * filp);
++
++extern int xgi_free(struct xgi_info * info, unsigned int index,
++      struct drm_file * filp);
++
++extern int xgi_pcie_heap_init(struct xgi_info * info);
++
++extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
++
++extern void xgi_enable_mmio(struct xgi_info * info);
++extern void xgi_disable_mmio(struct xgi_info * info);
++extern void xgi_enable_ge(struct xgi_info * info);
++extern void xgi_disable_ge(struct xgi_info * info);
++
++/* TTM-style fences.
++ */
++#ifdef XGI_HAVE_FENCE
++extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
++extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
++      uint32_t flags, uint32_t * sequence, uint32_t * native_type);
++extern void xgi_fence_handler(struct drm_device * dev);
++extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
++      uint32_t flags);
++#endif /* XGI_HAVE_FENCE */
++
++
++/* Non-TTM-style fences.
++ */
++extern int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++
++extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_free_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_submit_cmdlist(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++extern int xgi_state_change_ioctl(struct drm_device * dev, void * data,
++      struct drm_file * filp);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_fb.c git-nokia/drivers/gpu/drm-tungsten/xgi_fb.c
+--- git/drivers/gpu/drm-tungsten/xgi_fb.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_fb.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++
++#define XGI_FB_HEAP_START 0x1000000
++
++int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
++            struct drm_file * filp)
++{
++      struct drm_memblock_item *block;
++      const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
++              ? "on-card" : "GART";
++
++
++      if ((alloc->location != XGI_MEMLOC_LOCAL)
++          && (alloc->location != XGI_MEMLOC_NON_LOCAL)) {
++              DRM_ERROR("Invalid memory pool (0x%08x) specified.\n",
++                        alloc->location);
++              return -EINVAL;
++      }
++
++      if ((alloc->location == XGI_MEMLOC_LOCAL)
++          ? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
++              DRM_ERROR("Attempt to allocate from uninitialized memory "
++                        "pool (0x%08x).\n", alloc->location);
++              return -EINVAL;
++      }
++
++      mutex_lock(&info->dev->struct_mutex);
++      block = drm_sman_alloc(&info->sman, alloc->location, alloc->size,
++                             0, (unsigned long) filp);
++      mutex_unlock(&info->dev->struct_mutex);
++
++      if (block == NULL) {
++              alloc->size = 0;
++              DRM_ERROR("%s memory allocation failed\n", mem_name);
++              return -ENOMEM;
++      } else {
++              alloc->offset = (*block->mm->offset)(block->mm,
++                                                   block->mm_info);
++              alloc->hw_addr = alloc->offset;
++              alloc->index = block->user_hash.key;
++
++              if (block->user_hash.key != (unsigned long) alloc->index) {
++                      DRM_ERROR("%s truncated handle %lx for pool %d "
++                                "offset %x\n",
++                                __func__, block->user_hash.key,
++                                alloc->location, alloc->offset);
++              }
++
++              if (alloc->location == XGI_MEMLOC_NON_LOCAL) {
++                      alloc->hw_addr += info->pcie.base;
++              }
++
++              DRM_DEBUG("%s memory allocation succeeded: 0x%x\n",
++                        mem_name, alloc->offset);
++      }
++
++      return 0;
++}
++
++
++int xgi_alloc_ioctl(struct drm_device * dev, void * data,
++                  struct drm_file * filp)
++{
++      struct xgi_info *info = dev->dev_private;
++
++      return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp);
++}
++
++
++int xgi_free(struct xgi_info * info, unsigned int index,
++           struct drm_file * filp)
++{
++      int err;
++
++      mutex_lock(&info->dev->struct_mutex);
++      err = drm_sman_free_key(&info->sman, index);
++      mutex_unlock(&info->dev->struct_mutex);
++
++      return err;
++}
++
++
++int xgi_free_ioctl(struct drm_device * dev, void * data,
++                 struct drm_file * filp)
++{
++      struct xgi_info *info = dev->dev_private;
++
++      return xgi_free(info, *(unsigned int *) data, filp);
++}
++
++
++int xgi_fb_heap_init(struct xgi_info * info)
++{
++      int err;
++
++      mutex_lock(&info->dev->struct_mutex);
++      err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
++                               XGI_FB_HEAP_START,
++                               info->fb.size - XGI_FB_HEAP_START);
++      mutex_unlock(&info->dev->struct_mutex);
++
++      info->fb_heap_initialized = (err == 0);
++      return err;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_fence.c git-nokia/drivers/gpu/drm-tungsten/xgi_fence.c
+--- git/drivers/gpu/drm-tungsten/xgi_fence.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_fence.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,186 @@
++/*
++ * (C) Copyright IBM Corporation 2007
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Ian Romanick <idr@us.ibm.com>
++ */
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++#include "xgi_cmdlist.h"
++
++static int xgi_low_level_fence_emit(struct drm_device *dev, u32 *sequence)
++{
++      struct xgi_info *const info = dev->dev_private;
++
++      if (info == NULL) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      DRM_SPINLOCK(&info->fence_lock);
++      info->next_sequence++;
++      if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
++              info->next_sequence = 1;
++      }
++
++      *sequence = (u32) info->next_sequence;
++      DRM_SPINUNLOCK(&info->fence_lock);
++
++
++      xgi_emit_irq(info);
++      return 0;
++}
++
++#define GET_BEGIN_ID(i) (le32_to_cpu(DRM_READ32((i)->mmio_map, 0x2820)) \
++                               & BEGIN_BEGIN_IDENTIFICATION_MASK)
++
++static int xgi_low_level_fence_wait(struct drm_device *dev, unsigned *sequence)
++{
++      struct xgi_info *const info = dev->dev_private;
++      unsigned int cur_fence;
++      int ret = 0;
++
++      if (info == NULL) {
++              DRM_ERROR("called with no initialization\n");
++              return -EINVAL;
++      }
++
++      /* Assume that the user has missed the current sequence number
++       * by about a day rather than she wants to wait for years
++       * using fences.
++       */
++      DRM_WAIT_ON(ret, info->fence_queue, 3 * DRM_HZ,
++                  ((((cur_fence = GET_BEGIN_ID(info))
++                    - *sequence) & BEGIN_BEGIN_IDENTIFICATION_MASK)
++                   <= (1 << 18)));
++
++      info->complete_sequence = cur_fence;
++      *sequence = cur_fence;
++
++      return ret;
++}
++
++
++int xgi_set_fence_ioctl(struct drm_device * dev, void * data,
++                      struct drm_file * filp)
++{
++      (void) filp;
++      return xgi_low_level_fence_emit(dev, (u32 *) data);
++}
++
++
++int xgi_wait_fence_ioctl(struct drm_device * dev, void * data,
++                       struct drm_file * filp)
++{
++      (void) filp;
++      return xgi_low_level_fence_wait(dev, (u32 *) data);
++}
++
++
++#ifdef XGI_HAVE_FENCE
++static void xgi_fence_poll(struct drm_device * dev, uint32_t class, 
++                         uint32_t waiting_types)
++{
++      struct xgi_info * info = dev->dev_private;
++      uint32_t signaled_types = 0;
++
++
++      if ((info == NULL) || (class != 0))
++              return;
++
++      DRM_SPINLOCK(&info->fence_lock);
++
++      if (waiting_types) {
++              if (waiting_types & DRM_FENCE_TYPE_EXE) {
++                      const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
++                                                      0x2820))
++                              & BEGIN_BEGIN_IDENTIFICATION_MASK;
++
++                      if (begin_id != info->complete_sequence) {
++                              info->complete_sequence = begin_id;
++                              signaled_types |= DRM_FENCE_TYPE_EXE;
++                      }
++              }
++
++              if (signaled_types) {
++                      drm_fence_handler(dev, 0, info->complete_sequence,
++                                        signaled_types, 0);
++              }
++      }
++
++      DRM_SPINUNLOCK(&info->fence_lock);
++}
++
++
++int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
++                          uint32_t flags, uint32_t * sequence,
++                          uint32_t * native_type)
++{
++      int err;
++
++      (void) flags;
++
++      if (class != 0)
++              return -EINVAL;
++
++      err = xgi_low_level_fence_emit(dev, sequence);
++      if (err)
++              return err;
++
++      *native_type = DRM_FENCE_TYPE_EXE;
++      return 0;
++}
++
++
++void xgi_fence_handler(struct drm_device * dev)
++{
++      struct drm_fence_manager * fm = &dev->fm;
++      struct drm_fence_class_manager *fc = &fm->fence_class[0];
++
++      write_lock(&fm->lock);
++      xgi_fence_poll(dev, 0, fc->waiting_types);
++      write_unlock(&fm->lock);
++}
++
++
++int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
++{
++      return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0;
++}
++
++struct drm_fence_driver xgi_fence_driver = {
++      .num_classes = 1,
++      .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
++      .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
++      .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
++      .has_irq = xgi_fence_has_irq,
++      .emit = xgi_fence_emit_sequence,
++      .flush = NULL,
++      .poll = xgi_fence_poll,
++      .needed_flush = NULL,
++      .wait = NULL
++};
++
++#endif /* XGI_HAVE_FENCE */
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_ioc32.c git-nokia/drivers/gpu/drm-tungsten/xgi_ioc32.c
+--- git/drivers/gpu/drm-tungsten/xgi_ioc32.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_ioc32.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,140 @@
++/*
++ * (C) Copyright IBM Corporation 2007
++ * Copyright (C) Paul Mackerras 2005.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * on the rights to use, copy, modify, merge, publish, distribute, sub
++ * license, and/or sell copies of the Software, and to permit persons to whom
++ * the Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Ian Romanick <idr@us.ibm.com>
++ */
++
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++
++#include "xgi_drm.h"
++
++/* This is copied from drm_ioc32.c.
++ */
++struct drm_map32 {
++      u32 offset;             /**< Requested physical address (0 for SAREA)*/
++      u32 size;               /**< Requested physical size (bytes) */
++      enum drm_map_type type; /**< Type of memory to map */
++      enum drm_map_flags flags;       /**< Flags */
++      u32 handle;             /**< User-space: "Handle" to pass to mmap() */
++      int mtrr;               /**< MTRR slot used */
++};
++
++struct drm32_xgi_bootstrap {
++      struct drm_map32 gart;
++};
++
++
++extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
++
++static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd,
++                              unsigned long arg)
++{
++      struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg;
++      struct drm32_xgi_bootstrap bs32;
++      struct xgi_bootstrap __user *bs;
++      int err;
++      void *handle;
++
++
++      if (copy_from_user(&bs32, argp, sizeof(bs32))) {
++              return -EFAULT;
++      }
++
++      bs = compat_alloc_user_space(sizeof(*bs));
++      if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) {
++              return -EFAULT;
++      }
++
++      if (__put_user(bs32.gart.offset, &bs->gart.offset)
++          || __put_user(bs32.gart.size, &bs->gart.size)
++          || __put_user(bs32.gart.type, &bs->gart.type)
++          || __put_user(bs32.gart.flags, &bs->gart.flags)) {
++              return -EFAULT;
++      }
++
++      err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP,
++                      (unsigned long)bs);
++      if (err) {
++              return err;
++      }
++
++      if (__get_user(bs32.gart.offset, &bs->gart.offset)
++          || __get_user(bs32.gart.mtrr, &bs->gart.mtrr)
++          || __get_user(handle, &bs->gart.handle)) {
++              return -EFAULT;
++      }
++
++      bs32.gart.handle = (unsigned long)handle;
++      if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) {
++              printk(KERN_ERR "%s truncated handle %p for type %d "
++                     "offset %x\n",
++                     __func__, handle, bs32.gart.type, bs32.gart.offset);
++      }
++
++      if (copy_to_user(argp, &bs32, sizeof(bs32))) {
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++
++drm_ioctl_compat_t *xgi_compat_ioctls[] = {
++      [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap,
++};
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
++                    unsigned long arg)
++{
++      const unsigned int nr = DRM_IOCTL_NR(cmd);
++      drm_ioctl_compat_t *fn = NULL;
++      int ret;
++
++      if (nr < DRM_COMMAND_BASE)
++              return drm_compat_ioctl(filp, cmd, arg);
++
++      if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls))
++              fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE];
++
++      lock_kernel();
++      ret = (fn != NULL)
++              ? (*fn)(filp, cmd, arg)
++              : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++      unlock_kernel();
++
++      return ret;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_misc.c git-nokia/drivers/gpu/drm-tungsten/xgi_misc.c
+--- git/drivers/gpu/drm-tungsten/xgi_misc.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_misc.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,477 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++
++#include <linux/delay.h>
++
++/*
++ * irq functions
++ */
++#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
++
++static unsigned int s_invalid_begin = 0;
++
++static bool xgi_validate_signal(struct drm_map * map)
++{
++      if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) {
++              u16 check;
++
++              /* Check Read back status */
++              DRM_WRITE8(map, 0x235c, 0x80);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++
++              if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
++                      return false;
++              }
++
++              /* Check RO channel */
++              DRM_WRITE8(map, 0x235c, 0x83);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
++                      return false;
++              }
++
++              /* Check RW channel */
++              DRM_WRITE8(map, 0x235c, 0x88);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
++                      return false;
++              }
++
++              /* Check RO channel outstanding */
++              DRM_WRITE8(map, 0x235c, 0x8f);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if (0 != (check & 0x3ff)) {
++                      return false;
++              }
++
++              /* Check RW channel outstanding */
++              DRM_WRITE8(map, 0x235c, 0x90);
++              check = le16_to_cpu(DRM_READ16(map, 0x2360));
++              if (0 != (check & 0x3ff)) {
++                      return false;
++              }
++
++              /* No pending PCIE request. GE stall. */
++      }
++
++      return true;
++}
++
++
++static void xgi_ge_hang_reset(struct drm_map * map)
++{
++      int time_out = 0xffff;
++
++      DRM_WRITE8(map, 0xb057, 8);
++      while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
++              while (0 != ((--time_out) & 0xfff))
++                      /* empty */ ;
++
++              if (0 == time_out) {
++                      u8 old_3ce;
++                      u8 old_3cf;
++                      u8 old_index;
++                      u8 old_36;
++
++                      DRM_INFO("Can not reset back 0x%x!\n",
++                               le32_to_cpu(DRM_READ32(map, 0x2800)));
++
++                      DRM_WRITE8(map, 0xb057, 0);
++
++                      /* Have to use 3x5.36 to reset. */
++                      /* Save and close dynamic gating */
++
++                      old_3ce = DRM_READ8(map, 0x3ce);
++                      DRM_WRITE8(map, 0x3ce, 0x2a);
++                      old_3cf = DRM_READ8(map, 0x3cf);
++                      DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe);
++
++                      /* Reset GE */
++                      old_index = DRM_READ8(map, 0x3d4);
++                      DRM_WRITE8(map, 0x3d4, 0x36);
++                      old_36 = DRM_READ8(map, 0x3d5);
++                      DRM_WRITE8(map, 0x3d5, old_36 | 0x10);
++
++                      while (0 != ((--time_out) & 0xfff))
++                              /* empty */ ;
++
++                      DRM_WRITE8(map, 0x3d5, old_36);
++                      DRM_WRITE8(map, 0x3d4, old_index);
++
++                      /* Restore dynamic gating */
++                      DRM_WRITE8(map, 0x3cf, old_3cf);
++                      DRM_WRITE8(map, 0x3ce, old_3ce);
++                      break;
++              }
++      }
++
++      DRM_WRITE8(map, 0xb057, 0);
++}
++
++
++bool xgi_ge_irq_handler(struct xgi_info * info)
++{
++      const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
++      bool is_support_auto_reset = false;
++
++      /* Check GE on/off */
++      if (0 == (0xffffc0f0 & int_status)) {
++              if (0 != (0x1000 & int_status)) {
++                      /* We got GE stall interrupt.
++                       */
++                      DRM_WRITE32(info->mmio_map, 0x2810,
++                                  cpu_to_le32(int_status | 0x04000000));
++
++                      if (is_support_auto_reset) {
++                              static cycles_t last_tick;
++                              static unsigned continue_int_count = 0;
++
++                              /* OE II is busy. */
++
++                              if (!xgi_validate_signal(info->mmio_map)) {
++                                      /* Nothing but skip. */
++                              } else if (0 == continue_int_count++) {
++                                      last_tick = get_cycles();
++                              } else {
++                                      const cycles_t new_tick = get_cycles();
++                                      if ((new_tick - last_tick) >
++                                          STALL_INTERRUPT_RESET_THRESHOLD) {
++                                              continue_int_count = 0;
++                                      } else if (continue_int_count >= 3) {
++                                              continue_int_count = 0;
++
++                                              /* GE Hung up, need reset. */
++                                              DRM_INFO("Reset GE!\n");
++
++                                              xgi_ge_hang_reset(info->mmio_map);
++                                      }
++                              }
++                      }
++              } else if (0 != (0x1 & int_status)) {
++                      s_invalid_begin++;
++                      DRM_WRITE32(info->mmio_map, 0x2810,
++                                  cpu_to_le32((int_status & ~0x01) | 0x04000000));
++              }
++
++              return true;
++      }
++
++      return false;
++}
++
++bool xgi_crt_irq_handler(struct xgi_info * info)
++{
++      bool ret = false;
++      u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
++
++      /* CRT1 interrupt just happened
++       */
++      if (IN3CFB(info->mmio_map, 0x37) & 0x01) {
++              u8 op3cf_3d;
++              u8 op3cf_37;
++
++              /* What happened?
++               */
++              op3cf_37 = IN3CFB(info->mmio_map, 0x37);
++
++              /* Clear CRT interrupt
++               */
++              op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
++              OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
++              OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
++              ret = true;
++      }
++      DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
++
++      return (ret);
++}
++
++bool xgi_dvi_irq_handler(struct xgi_info * info)
++{
++      bool ret = false;
++      const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
++
++      /* DVI interrupt just happened
++       */
++      if (IN3CFB(info->mmio_map, 0x38) & 0x20) {
++              const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
++              u8 op3cf_39;
++              u8 op3cf_37;
++              u8 op3x5_5a;
++
++              /* What happened?
++               */
++              op3cf_37 = IN3CFB(info->mmio_map, 0x37);
++
++              /* Notify BIOS that DVI plug/unplug happened
++               */
++              op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
++              OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);
++
++              DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);
++
++              /* Clear DVI interrupt
++               */
++              op3cf_39 = IN3CFB(info->mmio_map, 0x39);
++              OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
++              OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));
++
++              ret = true;
++      }
++      DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
++
++      return (ret);
++}
++
++
++static void dump_reg_header(unsigned regbase)
++{
++      printk("\n=====xgi_dump_register========0x%x===============\n",
++             regbase);
++      printk("    0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");
++}
++
++
++static void dump_indexed_reg(struct xgi_info * info, unsigned regbase)
++{
++      unsigned i, j;
++      u8 temp;
++
++
++      dump_reg_header(regbase);
++      for (i = 0; i < 0x10; i++) {
++              printk("%1x ", i);
++
++              for (j = 0; j < 0x10; j++) {
++                      DRM_WRITE8(info->mmio_map, regbase - 1,
++                                 (i * 0x10) + j);
++                      temp = DRM_READ8(info->mmio_map, regbase);
++                      printk("%3x", temp);
++              }
++              printk("\n");
++      }
++}
++
++
++static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
++{
++      unsigned i, j;
++
++
++      dump_reg_header(regbase);
++      for (i = 0; i < range; i++) {
++              printk("%1x ", i);
++
++              for (j = 0; j < 0x10; j++) {
++                      u8 temp = DRM_READ8(info->mmio_map,
++                                          regbase + (i * 0x10) + j);
++                      printk("%3x", temp);
++              }
++              printk("\n");
++      }
++}
++
++
++void xgi_dump_register(struct xgi_info * info)
++{
++      dump_indexed_reg(info, 0x3c5);
++      dump_indexed_reg(info, 0x3d5);
++      dump_indexed_reg(info, 0x3cf);
++
++      dump_reg(info, 0xB000, 0x05);
++      dump_reg(info, 0x2200, 0x0B);
++      dump_reg(info, 0x2300, 0x07);
++      dump_reg(info, 0x2400, 0x10);
++      dump_reg(info, 0x2800, 0x10);
++}
++
++
++#define WHOLD_GE_STATUS             0x2800
++
++/* Test everything except the "whole GE busy" bit, the "master engine busy"
++ * bit, and the reserved bits [26:21].
++ */
++#define IDLE_MASK                   ~((1U<<31) | (1U<<28) | (0x3f<<21))
++
++void xgi_waitfor_pci_idle(struct xgi_info * info)
++{
++      unsigned int idleCount = 0;
++      u32 old_status = 0;
++      unsigned int same_count = 0;
++
++      while (idleCount < 5) {
++              const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
++                      & IDLE_MASK;
++
++              if (status == old_status) {
++                      same_count++;
++
++                      if ((same_count % 100) == 0) {
++                              DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n",
++                                        old_status, same_count);
++                      }
++              } else {
++                      old_status = status;
++                      same_count = 0;
++              }
++
++              if (status != 0) {
++                      msleep(1);
++                      idleCount = 0;
++              } else {
++                      idleCount++;
++              }
++      }
++}
++
++
++void xgi_enable_mmio(struct xgi_info * info)
++{
++      u8 protect = 0;
++      u8 temp;
++
++      /* Unprotect registers */
++      DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
++      protect = DRM_READ8(info->mmio_map, 0x3C5);
++      DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
++
++      DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
++      temp = DRM_READ8(info->mmio_map, 0x3D5);
++      DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);
++
++      /* Enable MMIO */
++      DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
++      temp = DRM_READ8(info->mmio_map, 0x3D5);
++      DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);
++
++      /* Protect registers */
++      OUT3C5B(info->mmio_map, 0x11, protect);
++}
++
++
++void xgi_disable_mmio(struct xgi_info * info)
++{
++      u8 protect = 0;
++      u8 temp;
++
++      /* Unprotect registers */
++      DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
++      protect = DRM_READ8(info->mmio_map, 0x3C5);
++      DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
++
++      /* Disable MMIO access */
++      DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
++      temp = DRM_READ8(info->mmio_map, 0x3D5);
++      DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);
++
++      /* Protect registers */
++      OUT3C5B(info->mmio_map, 0x11, protect);
++}
++
++
++void xgi_enable_ge(struct xgi_info * info)
++{
++      u8 bOld3cf2a;
++      int wait = 0;
++
++      OUT3C5B(info->mmio_map, 0x11, 0x92);
++
++      /* Save and close dynamic gating
++       */
++      bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL);
++      OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM);
++
++      /* Enable 2D and 3D GE
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Reset both 3D and 2D engine
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL,
++              (GE_ENABLE | GE_RESET | GE_ENABLE_3D));
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Enable 2D engine only
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE);
++
++      /* Enable 2D+3D engine
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++
++      /* Restore dynamic gating
++       */
++      OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a);
++}
++
++
++void xgi_disable_ge(struct xgi_info * info)
++{
++      int wait = 0;
++
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Reset both 3D and 2D engine
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL,
++              (GE_ENABLE | GE_RESET | GE_ENABLE_3D));
++
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
++
++      wait = 10;
++      while (wait--) {
++              DRM_READ8(info->mmio_map, 0x36);
++      }
++
++      /* Disable 2D engine and 3D engine.
++       */
++      OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0);
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_misc.h git-nokia/drivers/gpu/drm-tungsten/xgi_misc.h
+--- git/drivers/gpu/drm-tungsten/xgi_misc.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_misc.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,37 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_MISC_H_
++#define _XGI_MISC_H_
++
++extern void xgi_dump_register(struct xgi_info * info);
++
++extern bool xgi_ge_irq_handler(struct xgi_info * info);
++extern bool xgi_crt_irq_handler(struct xgi_info * info);
++extern bool xgi_dvi_irq_handler(struct xgi_info * info);
++extern void xgi_waitfor_pci_idle(struct xgi_info * info);
++
++#endif
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_pcie.c git-nokia/drivers/gpu/drm-tungsten/xgi_pcie.c
+--- git/drivers/gpu/drm-tungsten/xgi_pcie.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_pcie.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,127 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#include "xgi_drv.h"
++#include "xgi_regs.h"
++#include "xgi_misc.h"
++
++void xgi_gart_flush(struct drm_device *dev)
++{
++      struct xgi_info *const info = dev->dev_private;
++      u8 temp;
++
++      DRM_MEMORYBARRIER();
++
++      /* Set GART in SFB */
++      temp = DRM_READ8(info->mmio_map, 0xB00C);
++      DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
++
++      /* Set GART base address to HW */
++      DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr);
++
++      /* Flush GART table. */
++      DRM_WRITE8(info->mmio_map, 0xB03F, 0x40);
++      DRM_WRITE8(info->mmio_map, 0xB03F, 0x00);
++}
++
++
++int xgi_pcie_heap_init(struct xgi_info * info)
++{
++      u8 temp = 0;
++      int err;
++      struct drm_scatter_gather request;
++
++      /* Get current FB aperture size */
++      temp = IN3X5B(info->mmio_map, 0x27);
++      DRM_INFO("In3x5(0x27): 0x%x \n", temp);
++
++      if (temp & 0x01) {      /* 256MB; Jong 06/05/2006; 0x10000000 */
++              info->pcie.base = 256 * 1024 * 1024;
++      } else {                /* 128MB; Jong 06/05/2006; 0x08000000 */
++              info->pcie.base = 128 * 1024 * 1024;
++      }
++
++
++      DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);
++
++      /* Get current lookup table page size */
++      temp = DRM_READ8(info->mmio_map, 0xB00C);
++      if (temp & 0x04) {      /* 8KB */
++              info->lutPageSize = 8 * 1024;
++      } else {                /* 4KB */
++              info->lutPageSize = 4 * 1024;
++      }
++
++      DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);
++
++
++      request.size = info->pcie.size;
++      err = drm_sg_alloc(info->dev, & request);
++      if (err) {
++              DRM_ERROR("cannot allocate PCIE GART backing store!  "
++                        "size = %d\n", info->pcie.size);
++              return err;
++      }
++
++      info->gart_info.table_mask = DMA_BIT_MASK(32);
++      info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
++      info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
++      info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);
++
++      if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) {
++              DRM_ERROR("failed to init PCI GART!\n");
++              return -ENOMEM;
++      }
++
++
++      xgi_gart_flush(info->dev);
++
++      mutex_lock(&info->dev->struct_mutex);
++      err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL,
++                               0, info->pcie.size);
++      mutex_unlock(&info->dev->struct_mutex);
++      if (err) {
++              drm_ati_pcigart_cleanup(info->dev, &info->gart_info);
++      }
++
++      info->pcie_heap_initialized = (err == 0);
++      return err;
++}
++
++
++/**
++ * xgi_find_pcie_virt
++ * @address: GE HW address
++ *
++ * Returns CPU virtual address.  Assumes the CPU VAddr is continuous in not
++ * the same block
++ */
++void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
++{
++      const unsigned long offset = address - info->pcie.base;
++
++      return ((u8 *) info->dev->sg->virtual) + offset;
++}
+diff -Nurd git/drivers/gpu/drm-tungsten/xgi_regs.h git-nokia/drivers/gpu/drm-tungsten/xgi_regs.h
+--- git/drivers/gpu/drm-tungsten/xgi_regs.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/drm-tungsten/xgi_regs.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,169 @@
++/****************************************************************************
++ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation on the rights to use, copy, modify, merge,
++ * publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so,
++ * subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
++ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ ***************************************************************************/
++
++#ifndef _XGI_REGS_H_
++#define _XGI_REGS_H_
++
++#include "drmP.h"
++#include "drm.h"
++
++#define MAKE_MASK(bits)  ((1U << (bits)) - 1)
++
++#define ONE_BIT_MASK        MAKE_MASK(1)
++#define TWENTY_BIT_MASK     MAKE_MASK(20)
++#define TWENTYONE_BIT_MASK  MAKE_MASK(21)
++#define TWENTYTWO_BIT_MASK  MAKE_MASK(22)
++
++
++/* Port 0x3d4/0x3d5, index 0x2a */
++#define XGI_INTERFACE_SEL 0x2a
++#define DUAL_64BIT        (1U<<7)
++#define INTERNAL_32BIT    (1U<<6)
++#define EN_SEP_WR         (1U<<5)
++#define POWER_DOWN_SEL    (1U<<4)
++/*#define RESERVED_3      (1U<<3) */
++#define SUBS_MCLK_PCICLK  (1U<<2)
++#define MEM_SIZE_MASK     (3<<0)
++#define MEM_SIZE_32MB     (0<<0)
++#define MEM_SIZE_64MB     (1<<0)
++#define MEM_SIZE_128MB    (2<<0)
++#define MEM_SIZE_256MB    (3<<0)
++
++/* Port 0x3d4/0x3d5, index 0x36 */
++#define XGI_GE_CNTL 0x36
++#define GE_ENABLE        (1U<<7)
++/*#define RESERVED_6     (1U<<6) */
++/*#define RESERVED_5     (1U<<5) */
++#define GE_RESET         (1U<<4)
++/*#define RESERVED_3     (1U<<3) */
++#define GE_ENABLE_3D     (1U<<2)
++/*#define RESERVED_1     (1U<<1) */
++/*#define RESERVED_0     (1U<<0) */
++
++/* Port 0x3ce/0x3cf, index 0x2a */
++#define XGI_MISC_CTRL 0x2a
++#define MOTION_VID_SUSPEND   (1U<<7)
++#define DVI_CRTC_TIMING_SEL  (1U<<6)
++#define LCD_SEL_CTL_NEW      (1U<<5)
++#define LCD_SEL_EXT_DELYCTRL (1U<<4)
++#define REG_LCDDPARST        (1U<<3)
++#define LCD2DPAOFF           (1U<<2)
++/*#define RESERVED_1         (1U<<1) */
++#define EN_GEPWM             (1U<<0)  /* Enable GE power management */
++
++
++#define BASE_3D_ENG 0x2800
++
++#define M2REG_FLUSH_ENGINE_ADDRESS 0x000
++#define M2REG_FLUSH_ENGINE_COMMAND 0x00
++#define M2REG_FLUSH_FLIP_ENGINE_MASK              (ONE_BIT_MASK<<21)
++#define M2REG_FLUSH_2D_ENGINE_MASK                (ONE_BIT_MASK<<20)
++#define M2REG_FLUSH_3D_ENGINE_MASK                TWENTY_BIT_MASK
++
++#define M2REG_RESET_ADDRESS 0x004
++#define M2REG_RESET_COMMAND 0x01
++#define M2REG_RESET_STATUS2_MASK                  (ONE_BIT_MASK<<10)
++#define M2REG_RESET_STATUS1_MASK                  (ONE_BIT_MASK<<9)
++#define M2REG_RESET_STATUS0_MASK                  (ONE_BIT_MASK<<8)
++#define M2REG_RESET_3DENG_MASK                    (ONE_BIT_MASK<<4)
++#define M2REG_RESET_2DENG_MASK                    (ONE_BIT_MASK<<2)
++
++/* Write register */
++#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010
++#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04
++#define M2REG_CLEAR_TIMER_INTERRUPT_MASK          (ONE_BIT_MASK<<11)
++#define M2REG_CLEAR_INTERRUPT_3_MASK              (ONE_BIT_MASK<<10)
++#define M2REG_CLEAR_INTERRUPT_2_MASK              (ONE_BIT_MASK<<9)
++#define M2REG_CLEAR_INTERRUPT_0_MASK              (ONE_BIT_MASK<<8)
++#define M2REG_CLEAR_COUNTERS_MASK                 (ONE_BIT_MASK<<4)
++#define M2REG_PCI_TRIGGER_MODE_MASK               (ONE_BIT_MASK<<1)
++#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK    (ONE_BIT_MASK<<0)
++
++/* Read register */
++#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010
++#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04
++#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK          (ONE_BIT_MASK<<11)
++#define M2REG_ACTIVE_INTERRUPT_3_MASK              (ONE_BIT_MASK<<10)
++#define M2REG_ACTIVE_INTERRUPT_2_MASK              (ONE_BIT_MASK<<9)
++#define M2REG_ACTIVE_INTERRUPT_0_MASK              (ONE_BIT_MASK<<8)
++#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK    (ONE_BIT_MASK<<0)
++
++#define     M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014
++#define     M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05
++
++
++/**
++ * Begin instruction, double-word 0
++ */
++#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK   (ONE_BIT_MASK<<22)
++#define BEGIN_VALID_MASK                        (ONE_BIT_MASK<<20)
++#define BEGIN_BEGIN_IDENTIFICATION_MASK         TWENTY_BIT_MASK
++
++/**
++ * Begin instruction, double-word 1
++ */
++#define BEGIN_LINK_ENABLE_MASK                  (ONE_BIT_MASK<<31)
++#define BEGIN_COMMAND_LIST_LENGTH_MASK          TWENTYTWO_BIT_MASK
++
++
++/* Hardware access functions */
++static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
++{
++      DRM_WRITE8(map, 0x3C4, index);
++      DRM_WRITE8(map, 0x3C5, data);
++}
++
++static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
++{
++      DRM_WRITE8(map, 0x3D4, index);
++      DRM_WRITE8(map, 0x3D5, data);
++}
++
++static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
++{
++      DRM_WRITE8(map, 0x3CE, index);
++      DRM_WRITE8(map, 0x3CF, data);
++}
++
++static inline u8 IN3C5B(struct drm_map * map, u8 index)
++{
++      DRM_WRITE8(map, 0x3C4, index);
++      return DRM_READ8(map, 0x3C5);
++}
++
++static inline u8 IN3X5B(struct drm_map * map, u8 index)
++{
++      DRM_WRITE8(map, 0x3D4, index);
++      return DRM_READ8(map, 0x3D5);
++}
++
++static inline u8 IN3CFB(struct drm_map * map, u8 index)
++{
++      DRM_WRITE8(map, 0x3CE, index);
++      return DRM_READ8(map, 0x3CF);
++}
++
++#endif
+diff -Nurd git/drivers/gpu/Kconfig git-nokia/drivers/gpu/Kconfig
+--- git/drivers/gpu/Kconfig    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/Kconfig      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,20 @@
++source drivers/gpu/pvr/Kconfig
++
++choice DRM_VERSION
++      prompt "Direct Rendering Manager"
++      optional
++
++menuconfig DRM_VER_ORIG
++      bool "Original version"
++      select DRM
++
++menuconfig DRM_VER_TUNGSTEN
++      bool "Tungsten version"
++      select DRM_TUNGSTEN
++
++endchoice
++
++source drivers/gpu/drm/Kconfig
++
++source drivers/gpu/drm-tungsten/Kconfig
++
+diff -Nurd git/drivers/gpu/pvr/include4/dbgdrvif.h git-nokia/drivers/gpu/pvr/include4/dbgdrvif.h
+--- git/drivers/gpu/pvr/include4/dbgdrvif.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/dbgdrvif.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,259 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _DBGDRVIF_
++#define _DBGDRVIF_
++
++
++#include "ioctldef.h"
++
++#define DEBUG_CAPMODE_FRAMED                  0x00000001
++#define DEBUG_CAPMODE_CONTINUOUS              0x00000002
++#define DEBUG_CAPMODE_HOTKEY                  0x00000004
++
++#define DEBUG_OUTMODE_STANDARDDBG             0x00000001
++#define DEBUG_OUTMODE_MONO                            0x00000002
++#define DEBUG_OUTMODE_STREAMENABLE            0x00000004
++#define DEBUG_OUTMODE_ASYNC                           0x00000008
++#define DEBUG_OUTMODE_SGXVGA            0x00000010
++
++#define DEBUG_FLAGS_USE_NONPAGED_MEM  0x00000001
++#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002
++#define DEBUG_FLAGS_ENABLESAMPLE              0x00000004
++
++#define DEBUG_FLAGS_TEXTSTREAM                        0x80000000
++
++#define DEBUG_LEVEL_0                                 0x00000001
++#define DEBUG_LEVEL_1                                 0x00000003
++#define DEBUG_LEVEL_2                                 0x00000007
++#define DEBUG_LEVEL_3                                 0x0000000F
++#define DEBUG_LEVEL_4                                 0x0000001F
++#define DEBUG_LEVEL_5                                 0x0000003F
++#define DEBUG_LEVEL_6                                 0x0000007F
++#define DEBUG_LEVEL_7                                 0x000000FF
++#define DEBUG_LEVEL_8                                 0x000001FF
++#define DEBUG_LEVEL_9                                 0x000003FF
++#define DEBUG_LEVEL_10                                        0x000007FF
++#define DEBUG_LEVEL_11                                        0x00000FFF
++
++#define DEBUG_LEVEL_SEL0                              0x00000001
++#define DEBUG_LEVEL_SEL1                              0x00000002
++#define DEBUG_LEVEL_SEL2                              0x00000004
++#define DEBUG_LEVEL_SEL3                              0x00000008
++#define DEBUG_LEVEL_SEL4                              0x00000010
++#define DEBUG_LEVEL_SEL5                              0x00000020
++#define DEBUG_LEVEL_SEL6                              0x00000040
++#define DEBUG_LEVEL_SEL7                              0x00000080
++#define DEBUG_LEVEL_SEL8                              0x00000100
++#define DEBUG_LEVEL_SEL9                              0x00000200
++#define DEBUG_LEVEL_SEL10                             0x00000400
++#define DEBUG_LEVEL_SEL11                             0x00000800
++
++#define DEBUG_SERVICE_IOCTL_BASE              0x800
++#define DEBUG_SERVICE_CREATESTREAM            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DESTROYSTREAM           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSTREAM                       CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRING             CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READSTRING              CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE                           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READ                            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGMODE            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGOUTMODE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x09, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETDEBUGLEVEL           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0A, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETFRAME                        CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0B, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETFRAME                        CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0C, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_OVERRIDEMODE            CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0D, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_DEFAULTMODE             CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0E, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x0F, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITE2                  CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x10, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITESTRINGCM           CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x11, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITECM                 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x12, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_SETMARKER                       CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x13, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_GETMARKER                       CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x14, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_ISCAPTUREFRAME  CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x15, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_WRITELF                 CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x16, METHOD_BUFFERED, FILE_ANY_ACCESS)
++#define DEBUG_SERVICE_READLF                  CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x17, METHOD_BUFFERED, FILE_ANY_ACCESS)
++
++
++typedef struct _DBG_IN_CREATESTREAM_
++{
++      IMG_UINT32 ui32Pages;
++      IMG_UINT32 ui32CapMode;
++      IMG_UINT32 ui32OutMode;
++      IMG_CHAR *pszName;
++}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
++
++typedef struct _DBG_IN_FINDSTREAM_
++{
++      IMG_BOOL bResetStream;
++      IMG_CHAR *pszName;
++}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
++
++typedef struct _DBG_IN_WRITESTRING_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++      IMG_CHAR *pszString;
++}DBG_IN_WRITESTRING, *PDBG_IN_WRITESTRING;
++
++typedef struct _DBG_IN_READSTRING_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32StringLen;
++      IMG_CHAR *pszString;
++} DBG_IN_READSTRING, *PDBG_IN_READSTRING;
++
++typedef struct _DBG_IN_SETDEBUGMODE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Mode;
++      IMG_UINT32 ui32Start;
++      IMG_UINT32 ui32End;
++      IMG_UINT32 ui32SampleRate;
++} DBG_IN_SETDEBUGMODE, *PDBG_IN_SETDEBUGMODE;
++
++typedef struct _DBG_IN_SETDEBUGOUTMODE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Mode;
++} DBG_IN_SETDEBUGOUTMODE, *PDBG_IN_SETDEBUGOUTMODE;
++
++typedef struct _DBG_IN_SETDEBUGLEVEL_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++} DBG_IN_SETDEBUGLEVEL, *PDBG_IN_SETDEBUGLEVEL;
++
++typedef struct _DBG_IN_SETFRAME_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Frame;
++} DBG_IN_SETFRAME, *PDBG_IN_SETFRAME;
++
++typedef struct _DBG_IN_WRITE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++      IMG_UINT32 ui32TransferSize;
++      IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE, *PDBG_IN_WRITE;
++
++typedef struct _DBG_IN_READ_
++{
++      IMG_VOID *pvStream;
++      IMG_BOOL bReadInitBuffer;
++      IMG_UINT32 ui32OutBufferSize;
++      IMG_UINT8 *pui8OutBuffer;
++} DBG_IN_READ, *PDBG_IN_READ;
++
++typedef struct _DBG_IN_OVERRIDEMODE_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Mode;
++} DBG_IN_OVERRIDEMODE, *PDBG_IN_OVERRIDEMODE;
++
++typedef struct _DBG_IN_ISCAPTUREFRAME_
++{
++      IMG_VOID *pvStream;
++      IMG_BOOL bCheckPreviousFrame;
++} DBG_IN_ISCAPTUREFRAME, *PDBG_IN_ISCAPTUREFRAME;
++
++typedef struct _DBG_IN_SETMARKER_
++{
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Marker;
++} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
++
++typedef struct _DBG_IN_WRITE_LF_
++{
++      IMG_UINT32 ui32Flags;
++      IMG_VOID *pvStream;
++      IMG_UINT32 ui32Level;
++      IMG_UINT32 ui32BufferSize;
++      IMG_UINT8 *pui8InBuffer;
++} DBG_IN_WRITE_LF, *PDBG_IN_WRITE_LF;
++
++#define WRITELF_FLAGS_RESETBUF                0x00000001
++
++typedef struct _DBG_STREAM_
++{
++      struct _DBG_STREAM_ *psNext;
++      struct _DBG_STREAM_ *psInitStream;
++      IMG_BOOL   bInitPhaseComplete;
++      IMG_UINT32 ui32Flags;
++      IMG_UINT32 ui32Base;
++      IMG_UINT32 ui32Size;
++      IMG_UINT32 ui32RPtr;
++      IMG_UINT32 ui32WPtr;
++      IMG_UINT32 ui32DataWritten;
++      IMG_UINT32 ui32CapMode;
++      IMG_UINT32 ui32OutMode;
++      IMG_UINT32 ui32DebugLevel;
++      IMG_UINT32 ui32DefaultMode;
++      IMG_UINT32 ui32Start;
++      IMG_UINT32 ui32End;
++      IMG_UINT32 ui32Current;
++      IMG_UINT32 ui32Access;
++      IMG_UINT32 ui32SampleRate;
++      IMG_UINT32 ui32Reserved;
++      IMG_UINT32 ui32Timeout;
++      IMG_UINT32 ui32Marker;
++      IMG_CHAR szName[30];            
++} DBG_STREAM,*PDBG_STREAM;
++
++typedef struct _DBGKM_SERVICE_TABLE_
++{
++      IMG_UINT32 ui32Size;
++      IMG_VOID *      (IMG_CALLCONV *pfnCreateStream)                 (IMG_CHAR * pszName,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32OutMode,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages);
++      IMG_VOID        (IMG_CALLCONV *pfnDestroyStream)                (PDBG_STREAM psStream);
++      IMG_VOID *      (IMG_CALLCONV *pfnFindStream)                   (IMG_CHAR * pszName, IMG_BOOL bResetInitBuffer);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteString)                  (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnReadString)                   (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Limit);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteBIN)                             (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnReadBIN)                              (PDBG_STREAM psStream,IMG_BOOL bReadInitBuffer, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
++      IMG_VOID        (IMG_CALLCONV *pfnSetCaptureMode)               (PDBG_STREAM psStream,IMG_UINT32 ui32CapMode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
++      IMG_VOID        (IMG_CALLCONV *pfnSetOutputMode)                (PDBG_STREAM psStream,IMG_UINT32 ui32OutMode);
++      IMG_VOID        (IMG_CALLCONV *pfnSetDebugLevel)                (PDBG_STREAM psStream,IMG_UINT32 ui32DebugLevel);
++      IMG_VOID        (IMG_CALLCONV *pfnSetFrame)                             (PDBG_STREAM psStream,IMG_UINT32 ui32Frame);
++      IMG_UINT32      (IMG_CALLCONV *pfnGetFrame)                             (PDBG_STREAM psStream);
++      IMG_VOID        (IMG_CALLCONV *pfnOverrideMode)                 (PDBG_STREAM psStream,IMG_UINT32 ui32Mode);
++      IMG_VOID        (IMG_CALLCONV *pfnDefaultMode)                  (PDBG_STREAM psStream);
++      IMG_UINT32      (IMG_CALLCONV *pfnDBGDrivWrite2)                (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteStringCM)                (PDBG_STREAM psStream,IMG_CHAR * pszString,IMG_UINT32 ui32Level);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteBINCM)                   (PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize,IMG_UINT32 ui32Level);
++      IMG_VOID        (IMG_CALLCONV *pfnSetMarker)                    (PDBG_STREAM psStream,IMG_UINT32 ui32Marker);
++      IMG_UINT32      (IMG_CALLCONV *pfnGetMarker)                    (PDBG_STREAM psStream);
++      IMG_VOID        (IMG_CALLCONV *pfnEndInitPhase)                 (PDBG_STREAM psStream);
++      IMG_UINT32      (IMG_CALLCONV *pfnIsCaptureFrame)               (PDBG_STREAM psStream, IMG_BOOL bCheckPreviousFrame);
++      IMG_UINT32      (IMG_CALLCONV *pfnWriteLF)                              (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf, IMG_UINT32 ui32InBuffSize, IMG_UINT32 ui32Level, IMG_UINT32 ui32Flags);
++      IMG_UINT32      (IMG_CALLCONV *pfnReadLF)                               (PDBG_STREAM psStream, IMG_UINT32 ui32OutBuffSize, IMG_UINT8 *pui8OutBuf);
++      IMG_UINT32      (IMG_CALLCONV *pfnGetStreamOffset)              (PDBG_STREAM psStream);
++      IMG_VOID        (IMG_CALLCONV *pfnSetStreamOffset)              (PDBG_STREAM psStream, IMG_UINT32 ui32StreamOffset);
++      IMG_UINT32      (IMG_CALLCONV *pfnIsLastCaptureFrame)   (PDBG_STREAM psStream);
++} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
++
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/include4/img_defs.h git-nokia/drivers/gpu/pvr/include4/img_defs.h
+--- git/drivers/gpu/pvr/include4/img_defs.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/img_defs.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,100 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__IMG_DEFS_H__)
++#define __IMG_DEFS_H__
++
++#include "img_types.h"
++
++typedef               enum    img_tag_TriStateSwitch
++{
++      IMG_ON          =       0x00,
++      IMG_OFF,
++      IMG_IGNORE
++
++} img_TriStateSwitch, * img_pTriStateSwitch;
++
++#define               IMG_SUCCESS                             0
++
++
++#define               IMG_NULL                                0
++#define               IMG_NO_REG                              1
++
++#if defined (NO_INLINE_FUNCS)
++      #define INLINE
++      #define FORCE_INLINE
++#else
++#if defined (__cplusplus)
++      #define INLINE                                  inline
++      #define FORCE_INLINE                    inline
++#else
++      #define INLINE                                  __inline
++      #define FORCE_INLINE                    static __inline
++#endif
++#endif
++
++
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define       PVR_UNREFERENCED_PARAMETER(param) (param) = (param)
++#endif
++
++#ifdef __GNUC__
++#define unref__ __attribute__ ((unused))
++#else
++#define unref__
++#endif
++
++#if defined(UNICODE)
++typedef unsigned short                TCHAR, *PTCHAR, *PTSTR;
++#else 
++typedef char                          TCHAR, *PTCHAR, *PTSTR;
++#endif        
++
++                      #if defined(__linux__)
++
++                              #define IMG_CALLCONV
++                              #define IMG_INTERNAL    __attribute__ ((visibility ("hidden")))
++                              #define IMG_EXPORT
++                              #define IMG_IMPORT
++                              #define IMG_RESTRICT    __restrict__
++
++                      #else
++                                      #error("define an OS")
++                      #endif
++
++#ifndef IMG_ABORT
++      #define IMG_ABORT()     abort()
++#endif
++
++#ifndef IMG_MALLOC
++      #define IMG_MALLOC(A)           malloc  (A)
++#endif
++
++#ifndef IMG_FREE
++      #define IMG_FREE(A)                     free    (A)
++#endif
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/include4/img_types.h git-nokia/drivers/gpu/pvr/include4/img_types.h
+--- git/drivers/gpu/pvr/include4/img_types.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/img_types.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,111 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_TYPES_H__
++#define __IMG_TYPES_H__
++
++#include "img_defs.h"
++
++typedef unsigned int  IMG_UINT,       *IMG_PUINT;
++typedef signed int            IMG_INT,        *IMG_PINT;
++
++typedef unsigned char IMG_UINT8,      *IMG_PUINT8;
++typedef unsigned char IMG_BYTE,       *IMG_PBYTE;
++typedef signed char           IMG_INT8,       *IMG_PINT8;
++typedef char                  IMG_CHAR,       *IMG_PCHAR;
++
++typedef unsigned short        IMG_UINT16,     *IMG_PUINT16;
++typedef signed short  IMG_INT16,      *IMG_PINT16;
++typedef unsigned long IMG_UINT32,     *IMG_PUINT32;
++typedef signed long           IMG_INT32,      *IMG_PINT32;
++
++      #if defined(LINUX)
++
++      #else
++
++              #error("define an OS")
++
++      #endif
++
++#if !(defined(LINUX) && defined (__KERNEL__))
++typedef float                 IMG_FLOAT,      *IMG_PFLOAT;
++typedef double                        IMG_DOUBLE, *IMG_PDOUBLE;
++#endif
++
++typedef       enum tag_img_bool
++{
++      IMG_FALSE               = 0,
++      IMG_TRUE                = 1,
++      IMG_FORCE_ALIGN = 0x7FFFFFFF
++} IMG_BOOL, *IMG_PBOOL;
++
++typedef void                  IMG_VOID,       *IMG_PVOID;
++
++typedef IMG_INT32             IMG_RESULT;
++
++typedef IMG_UINT32      IMG_UINTPTR_T;
++
++typedef IMG_PVOID       IMG_HANDLE;
++
++typedef void**                        IMG_HVOID,      * IMG_PHVOID;
++
++typedef IMG_UINT32      IMG_SIZE_T;
++
++#define IMG_NULL              0
++
++
++typedef IMG_PVOID IMG_CPU_VIRTADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_CPU_PHYADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_DEV_VIRTADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_DEV_PHYADDR;
++
++typedef struct {IMG_UINT32 uiAddr;} IMG_SYS_PHYADDR;
++
++typedef struct _SYSTEM_ADDR_
++{
++      
++      IMG_UINT32      ui32PageCount;
++      union
++      {
++              
++
++
++              IMG_SYS_PHYADDR sContig;                
++
++              
++
++
++
++
++
++              IMG_SYS_PHYADDR asNonContig[1];
++      } u;
++} SYSTEM_ADDR;
++
++#endif        
+diff -Nurd git/drivers/gpu/pvr/include4/ioctldef.h git-nokia/drivers/gpu/pvr/include4/ioctldef.h
+--- git/drivers/gpu/pvr/include4/ioctldef.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/ioctldef.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __IOCTLDEF_H__
++#define __IOCTLDEF_H__
++
++#define MAKEIOCTLINDEX(i)     (((i) >> 2) & 0xFFF)
++
++#ifndef CTL_CODE
++
++#define DEVICE_TYPE ULONG
++
++#define FILE_DEVICE_BEEP                0x00000001
++#define FILE_DEVICE_CD_ROM              0x00000002
++#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
++#define FILE_DEVICE_CONTROLLER          0x00000004
++#define FILE_DEVICE_DATALINK            0x00000005
++#define FILE_DEVICE_DFS                 0x00000006
++#define FILE_DEVICE_DISK                0x00000007
++#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
++#define FILE_DEVICE_FILE_SYSTEM         0x00000009
++#define FILE_DEVICE_INPORT_PORT         0x0000000a
++#define FILE_DEVICE_KEYBOARD            0x0000000b
++#define FILE_DEVICE_MAILSLOT            0x0000000c
++#define FILE_DEVICE_MIDI_IN             0x0000000d
++#define FILE_DEVICE_MIDI_OUT            0x0000000e
++#define FILE_DEVICE_MOUSE               0x0000000f
++#define FILE_DEVICE_MULTI_UNC_PROVIDER  0x00000010
++#define FILE_DEVICE_NAMED_PIPE          0x00000011
++#define FILE_DEVICE_NETWORK             0x00000012
++#define FILE_DEVICE_NETWORK_BROWSER     0x00000013
++#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
++#define FILE_DEVICE_NULL                0x00000015
++#define FILE_DEVICE_PARALLEL_PORT       0x00000016
++#define FILE_DEVICE_PHYSICAL_NETCARD    0x00000017
++#define FILE_DEVICE_PRINTER             0x00000018
++#define FILE_DEVICE_SCANNER             0x00000019
++#define FILE_DEVICE_SERIAL_MOUSE_PORT   0x0000001a
++#define FILE_DEVICE_SERIAL_PORT         0x0000001b
++#define FILE_DEVICE_SCREEN              0x0000001c
++#define FILE_DEVICE_SOUND               0x0000001d
++#define FILE_DEVICE_STREAMS             0x0000001e
++#define FILE_DEVICE_TAPE                0x0000001f
++#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
++#define FILE_DEVICE_TRANSPORT           0x00000021
++#define FILE_DEVICE_UNKNOWN             0x00000022
++#define FILE_DEVICE_VIDEO               0x00000023
++#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
++#define FILE_DEVICE_WAVE_IN             0x00000025
++#define FILE_DEVICE_WAVE_OUT            0x00000026
++#define FILE_DEVICE_8042_PORT           0x00000027
++#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
++#define FILE_DEVICE_BATTERY             0x00000029
++#define FILE_DEVICE_BUS_EXTENDER        0x0000002a
++#define FILE_DEVICE_MODEM               0x0000002b
++#define FILE_DEVICE_VDM                 0x0000002c
++#define FILE_DEVICE_MASS_STORAGE        0x0000002d
++
++#define CTL_CODE( DeviceType, Function, Method, Access ) (                 \
++    ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method) \
++)
++
++#define METHOD_BUFFERED                 0
++#define METHOD_IN_DIRECT                1
++#define METHOD_OUT_DIRECT               2
++#define METHOD_NEITHER                  3
++
++#define FILE_ANY_ACCESS                 0
++#define FILE_READ_ACCESS          ( 0x0001 )    
++#define FILE_WRITE_ACCESS         ( 0x0002 )    
++
++#endif 
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/pdumpdefs.h git-nokia/drivers/gpu/pvr/include4/pdumpdefs.h
+--- git/drivers/gpu/pvr/include4/pdumpdefs.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/pdumpdefs.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,92 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__PDUMPDEFS_H__)
++#define __PDUMPDEFS_H__
++
++typedef enum _PDUMP_PIXEL_FORMAT_
++{
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
++      PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
++      PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
++      PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
++      PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
++      PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
++      PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
++      PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
++      PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
++      PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
++      PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
++      PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
++      PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
++      PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
++      PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
++      PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
++      PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
++      PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
++      PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 =30,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 =31,
++      PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 =32,
++      PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 =33,
++      
++      PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
++
++} PDUMP_PIXEL_FORMAT;
++
++typedef enum _PDUMP_MEM_FORMAT_
++{
++      PVRSRV_PDUMP_MEM_FORMAT_STRIDE = 0,
++      PVRSRV_PDUMP_MEM_FORMAT_RESERVED = 1,
++      PVRSRV_PDUMP_MEM_FORMAT_TILED = 8,
++      PVRSRV_PDUMP_MEM_FORMAT_TWIDDLED = 9,
++      
++      PVRSRV_PDUMP_MEM_FORMAT_FORCE_I32 = 0x7fffffff
++} PDUMP_MEM_FORMAT;
++
++typedef enum _PDUMP_POLL_OPERATOR
++{
++      PDUMP_POLL_OPERATOR_EQUAL = 0,
++      PDUMP_POLL_OPERATOR_LESS = 1,
++      PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++      PDUMP_POLL_OPERATOR_GREATER = 3,
++      PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++      PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/pvr_debug.h git-nokia/drivers/gpu/pvr/include4/pvr_debug.h
+--- git/drivers/gpu/pvr/include4/pvr_debug.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/pvr_debug.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,107 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_DEBUG_H__
++#define __PVR_DEBUG_H__
++
++
++#include "img_types.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#define PVR_MAX_DEBUG_MESSAGE_LEN     (512)
++
++#define DBGPRIV_FATAL         0x01
++#define DBGPRIV_ERROR         0x02
++#define DBGPRIV_WARNING               0x04
++#define DBGPRIV_MESSAGE               0x08
++#define DBGPRIV_VERBOSE               0x10
++#define DBGPRIV_CALLTRACE     0x20
++#define DBGPRIV_ALLOC         0x40
++#define DBGPRIV_ALLLEVELS     (DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING | DBGPRIV_MESSAGE | DBGPRIV_VERBOSE)
++
++
++
++#define PVR_DBG_FATAL         DBGPRIV_FATAL,__FILE__, __LINE__
++#define PVR_DBG_ERROR         DBGPRIV_ERROR,__FILE__, __LINE__
++#define PVR_DBG_WARNING               DBGPRIV_WARNING,__FILE__, __LINE__
++#define PVR_DBG_MESSAGE               DBGPRIV_MESSAGE,__FILE__, __LINE__
++#define PVR_DBG_VERBOSE               DBGPRIV_VERBOSE,__FILE__, __LINE__
++#define PVR_DBG_CALLTRACE     DBGPRIV_CALLTRACE,__FILE__, __LINE__
++#define PVR_DBG_ALLOC         DBGPRIV_ALLOC,__FILE__, __LINE__
++
++#if defined(DEBUG)
++
++      #define PVR_ASSERT(EXPR) if (!(EXPR)) PVRSRVDebugAssertFail(__FILE__, __LINE__);
++
++      #define PVR_DPF(X)              PVRSRVDebugPrintf X
++      #define PVR_TRACE(X)    PVRSRVTrace X
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
++                                                                      IMG_UINT32 ui32Line);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
++                                                                      const IMG_CHAR *pszFileName,
++                                                                      IMG_UINT32 ui32Line,
++                                                                      const IMG_CHAR *pszFormat,
++                                                                      ...);
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++IMG_VOID PVRSRVDebugSetLevel (IMG_UINT32 uDebugLevel);
++
++              #define PVR_DBG_BREAK
++
++#else
++
++#if defined(TIMING)
++
++      #define PVR_ASSERT(EXPR)
++      #define PVR_DPF(X)
++      #define PVR_TRACE(X)    PVRSRVTrace X
++      #define PVR_DBG_BREAK
++
++IMG_EXPORT IMG_VOID IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... );
++
++#else
++
++      #define PVR_ASSERT(EXPR)
++      #define PVR_DPF(X)
++      #define PVR_TRACE(X)
++      #define PVR_DBG_BREAK
++
++#endif 
++#endif 
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/include4/pvrversion.h git-nokia/drivers/gpu/pvr/include4/pvrversion.h
+--- git/drivers/gpu/pvr/include4/pvrversion.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/pvrversion.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,37 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _PVRVERSION_H_
++#define _PVRVERSION_H_
++
++#define PVRVERSION_MAJ 1
++#define PVRVERSION_MIN 1
++#define PVRVERSION_BRANCH 11
++#define PVRVERSION_BUILD 970
++#define PVRVERSION_STRING "1.1.11.970"
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/regpaths.h git-nokia/drivers/gpu/pvr/include4/regpaths.h
+--- git/drivers/gpu/pvr/include4/regpaths.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/regpaths.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,43 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __REGPATHS_H__
++#define __REGPATHS_H__
++
++#define POWERVR_REG_ROOT                              "Drivers\\Display\\PowerVR"
++#define POWERVR_CHIP_KEY                              "\\SGX1\\"
++
++#define POWERVR_EURASIA_KEY                           "PowerVREurasia\\"
++
++#define POWERVR_SERVICES_KEY                  "\\Registry\\Machine\\System\\CurrentControlSet\\Services\\PowerVR\\"
++
++#define PVRSRV_REGISTRY_ROOT                  POWERVR_EURASIA_KEY "HWSettings\\PVRSRVKM"
++
++
++#define MAX_REG_STRING_SIZE 128
++
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/include4/servicesext.h git-nokia/drivers/gpu/pvr/include4/servicesext.h
+--- git/drivers/gpu/pvr/include4/servicesext.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/servicesext.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,415 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESEXT_H__)
++#define __SERVICESEXT_H__
++
++#define PVRSRV_LOCKFLG_READONLY       (1)             
++
++typedef enum _PVRSRV_ERROR_
++{
++      PVRSRV_OK                                                               =  0,
++      PVRSRV_ERROR_GENERIC                                    =  1,
++      PVRSRV_ERROR_OUT_OF_MEMORY                              =  2,
++      PVRSRV_ERROR_TOO_MANY_BUFFERS                   =  3,
++      PVRSRV_ERROR_SYMBOL_NOT_FOUND                   =  4,
++      PVRSRV_ERROR_OUT_OF_HSPACE                              =  5,
++      PVRSRV_ERROR_INVALID_PARAMS                             =  6,
++      PVRSRV_ERROR_TILE_MAP_FAILED                    =  7,
++      PVRSRV_ERROR_INIT_FAILURE                               =  8,
++      PVRSRV_ERROR_CANT_REGISTER_CALLBACK     =  9,
++      PVRSRV_ERROR_INVALID_DEVICE                             = 10,
++      PVRSRV_ERROR_NOT_OWNER                                  = 11,
++      PVRSRV_ERROR_BAD_MAPPING                                = 12,
++      PVRSRV_ERROR_TIMEOUT                                    = 13,
++      PVRSRV_ERROR_NO_PRIMARY                                 = 14,
++      PVRSRV_ERROR_FLIP_CHAIN_EXISTS                  = 15,
++      PVRSRV_ERROR_CANNOT_ACQUIRE_SYSDATA     = 16,
++      PVRSRV_ERROR_SCENE_INVALID                              = 17,
++      PVRSRV_ERROR_STREAM_ERROR                               = 18,
++      PVRSRV_ERROR_INVALID_INTERRUPT          = 19,
++      PVRSRV_ERROR_FAILED_DEPENDENCIES                = 20,
++      PVRSRV_ERROR_CMD_NOT_PROCESSED                  = 21,
++      PVRSRV_ERROR_CMD_TOO_BIG                                = 22,
++      PVRSRV_ERROR_DEVICE_REGISTER_FAILED     = 23,
++      PVRSRV_ERROR_FIFO_SPACE                                 = 24,
++      PVRSRV_ERROR_TA_RECOVERY                                = 25,
++      PVRSRV_ERROR_INDOSORLOWPOWER                    = 26,
++      PVRSRV_ERROR_TOOMANYBUFFERS                             = 27,
++      PVRSRV_ERROR_NOT_SUPPORTED                              = 28,
++      PVRSRV_ERROR_PROCESSING_BLOCKED                 = 29,
++
++
++      PVRSRV_ERROR_CANNOT_FLUSH_QUEUE                 = 31,
++      PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE             = 32,
++      PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS   = 33,
++      PVRSRV_ERROR_RETRY                                              = 34,
++
++      PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
++
++} PVRSRV_ERROR;
++
++
++typedef enum _PVRSRV_DEVICE_CLASS_
++{
++      PVRSRV_DEVICE_CLASS_3D                          = 0 ,
++      PVRSRV_DEVICE_CLASS_DISPLAY                     = 1 ,
++      PVRSRV_DEVICE_CLASS_BUFFER                      = 2 ,
++      PVRSRV_DEVICE_CLASS_VIDEO                       = 3 ,
++
++      PVRSRV_DEVICE_CLASS_FORCE_I32           = 0x7fffffff
++
++} PVRSRV_DEVICE_CLASS;
++
++
++ 
++typedef enum _PVRSRV_POWER_STATE_
++{
++      PVRSRV_POWER_Unspecified                        = -1,   
++      PVRSRV_POWER_STATE_D0                           = 0,    
++      PVRSRV_POWER_STATE_D1                           = 1,    
++      PVRSRV_POWER_STATE_D2                           = 2,    
++      PVRSRV_POWER_STATE_D3                           = 3,    
++      PVRSRV_POWER_STATE_D4                           = 4,    
++
++      PVRSRV_POWER_STATE_FORCE_I32 = 0x7fffffff
++
++} PVR_POWER_STATE, *PPVR_POWER_STATE;
++
++
++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE, PVR_POWER_STATE, PVR_POWER_STATE);
++typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE, PVR_POWER_STATE, PVR_POWER_STATE);
++
++
++typedef enum _PVRSRV_PIXEL_FORMAT_ {
++      PVRSRV_PIXEL_FORMAT_UNKNOWN                     =  0,
++      PVRSRV_PIXEL_FORMAT_RGB565                      =  1,
++      PVRSRV_PIXEL_FORMAT_RGB555                      =  2,
++      PVRSRV_PIXEL_FORMAT_RGB888                      =  3,
++      PVRSRV_PIXEL_FORMAT_BGR888                      =  4,
++      PVRSRV_PIXEL_FORMAT_YUV420                      =  5,
++      PVRSRV_PIXEL_FORMAT_YUV444                      =  6,
++      PVRSRV_PIXEL_FORMAT_VUY444                      =  7,
++      PVRSRV_PIXEL_FORMAT_GREY_SCALE          =  8,
++      PVRSRV_PIXEL_FORMAT_YUYV                        =  9,
++      PVRSRV_PIXEL_FORMAT_YVYU                        = 10,
++      PVRSRV_PIXEL_FORMAT_UYVY                        = 11, 
++      PVRSRV_PIXEL_FORMAT_VYUY                        = 12,
++      PVRSRV_PIXEL_FORMAT_PAL12                       = 13,
++      PVRSRV_PIXEL_FORMAT_PAL8                        = 14,
++      PVRSRV_PIXEL_FORMAT_PAL4                        = 15,
++      PVRSRV_PIXEL_FORMAT_PAL2                        = 16,
++      PVRSRV_PIXEL_FORMAT_PAL1                        = 17,
++      PVRSRV_PIXEL_FORMAT_ARGB1555            = 18,
++      PVRSRV_PIXEL_FORMAT_ARGB4444            = 19, 
++      PVRSRV_PIXEL_FORMAT_ARGB8888            = 20,
++      PVRSRV_PIXEL_FORMAT_ABGR8888            = 21,
++      PVRSRV_PIXEL_FORMAT_YV12                        = 22,
++      PVRSRV_PIXEL_FORMAT_I420                        = 23,
++      PVRSRV_PIXEL_FORMAT_DXT1                        = 24,
++    PVRSRV_PIXEL_FORMAT_IMC2            = 25,
++
++      PVRSRV_PIXEL_FORMAT_G16R16,
++      PVRSRV_PIXEL_FORMAT_G16R16F,
++      PVRSRV_PIXEL_FORMAT_ARGB8332,
++      PVRSRV_PIXEL_FORMAT_A2RGB10,
++      PVRSRV_PIXEL_FORMAT_A2BGR10,
++      PVRSRV_PIXEL_FORMAT_ABGR16,
++      PVRSRV_PIXEL_FORMAT_ABGR16F,
++      PVRSRV_PIXEL_FORMAT_ABGR32F,
++      PVRSRV_PIXEL_FORMAT_R32F,
++      PVRSRV_PIXEL_FORMAT_A8,
++      PVRSRV_PIXEL_FORMAT_L8,
++      PVRSRV_PIXEL_FORMAT_A8L8,
++      PVRSRV_PIXEL_FORMAT_L16,
++      PVRSRV_PIXEL_FORMAT_R16F,
++      PVRSRV_PIXEL_FORMAT_L6V5U5,
++      PVRSRV_PIXEL_FORMAT_V8U8,
++      PVRSRV_PIXEL_FORMAT_V16U16,
++      PVRSRV_PIXEL_FORMAT_QWVU8888,
++      PVRSRV_PIXEL_FORMAT_D16,
++      PVRSRV_PIXEL_FORMAT_D24S8,
++      PVRSRV_PIXEL_FORMAT_D24X8,
++      PVRSRV_PIXEL_FORMAT_D32F,
++      PVRSRV_PIXEL_FORMAT_R8G8_B8G8,
++      PVRSRV_PIXEL_FORMAT_G8R8_G8B8,
++      PVRSRV_PIXEL_FORMAT_YUY2,
++      PVRSRV_PIXEL_FORMAT_DXT23,
++      PVRSRV_PIXEL_FORMAT_DXT45,      
++      PVRSRV_PIXEL_FORMAT_G32R32F,    
++
++      PVRSRV_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff,
++} PVRSRV_PIXEL_FORMAT;
++
++typedef enum _PVRSRV_ALPHA_FORMAT_ {
++      PVRSRV_ALPHA_FORMAT_UNKNOWN             =  0x00000000,
++      PVRSRV_ALPHA_FORMAT_PRE                 =  0x00000001,
++      PVRSRV_ALPHA_FORMAT_NONPRE              =  0x00000002,
++      PVRSRV_ALPHA_FORMAT_MASK                =  0x0000000F,
++} PVRSRV_ALPHA_FORMAT;
++
++typedef enum _PVRSRV_COLOURSPACE_FORMAT_ {
++      PVRSRV_COLOURSPACE_FORMAT_UNKNOWN               =  0x00000000,
++      PVRSRV_COLOURSPACE_FORMAT_LINEAR                =  0x00010000,
++      PVRSRV_COLOURSPACE_FORMAT_NONLINEAR             =  0x00020000,
++      PVRSRV_COLOURSPACE_FORMAT_MASK                  =  0x000F0000,
++} PVRSRV_COLOURSPACE_FORMAT;
++
++#define PVRSRV_CREATE_SWAPCHAIN_SHARED                (1<<0)
++#define PVRSRV_CREATE_SWAPCHAIN_QUERY         (1<<1)
++
++typedef struct _PVRSRV_SYNC_DATA_
++{
++      
++      IMG_UINT32                                      ui32WriteOpsPending;
++      volatile IMG_UINT32                     ui32WriteOpsComplete;
++
++      
++      IMG_UINT32                                      ui32ReadOpsPending;
++      volatile IMG_UINT32                     ui32ReadOpsComplete;
++      
++      
++      IMG_UINT32                                      ui32LastOpDumpVal;
++      IMG_UINT32                                      ui32LastReadOpDumpVal;
++
++} PVRSRV_SYNC_DATA;
++
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++      
++      PVRSRV_SYNC_DATA                        *psSyncData;
++
++      
++
++
++      
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++
++      
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      IMG_HANDLE                                      hMappingInfo;
++
++      
++      IMG_HANDLE                                      hKernelSyncInfo;
++      
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++
++
++typedef struct PVRSRV_RESOURCE_TAG 
++{
++      volatile IMG_UINT32 ui32Lock;
++      IMG_UINT32                      ui32ID;
++}PVRSRV_RESOURCE;
++typedef PVRSRV_RESOURCE PVRSRV_RES_HANDLE;
++
++
++typedef IMG_VOID (*PFN_CMD_COMPLETE) (IMG_HANDLE);
++typedef IMG_VOID (**PPFN_CMD_COMPLETE) (IMG_HANDLE);
++
++typedef IMG_BOOL (*PFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); 
++typedef IMG_BOOL (**PPFN_CMD_PROC) (IMG_HANDLE, IMG_UINT32, IMG_VOID*); 
++
++
++typedef struct _IMG_RECT_
++{
++      IMG_INT32       x0;
++      IMG_INT32       y0;     
++      IMG_INT32       x1;     
++      IMG_INT32       y1;     
++}IMG_RECT;
++
++typedef struct _IMG_RECT_16_
++{
++      IMG_INT16       x0;
++      IMG_INT16       y0;     
++      IMG_INT16       x1;     
++      IMG_INT16       y1;     
++}IMG_RECT_16;
++
++
++typedef PVRSRV_ERROR (*PFN_GET_BUFFER_ADDR)(IMG_HANDLE, 
++                                                                                      IMG_HANDLE, 
++                                                                                      IMG_SYS_PHYADDR**, 
++                                                                                      IMG_UINT32*, 
++                                                                                      IMG_VOID**, 
++                                                                                      IMG_HANDLE*, 
++                                                                                      IMG_BOOL*);
++
++
++typedef struct DISPLAY_DIMS_TAG
++{
++      IMG_UINT32      ui32ByteStride;
++      IMG_UINT32      ui32Width;
++      IMG_UINT32      ui32Height;
++} DISPLAY_DIMS;
++
++
++typedef struct DISPLAY_FORMAT_TAG
++{
++      
++      PVRSRV_PIXEL_FORMAT             pixelformat;
++} DISPLAY_FORMAT;
++
++typedef struct DISPLAY_SURF_ATTRIBUTES_TAG
++{
++      
++      PVRSRV_PIXEL_FORMAT             pixelformat;
++      
++      DISPLAY_DIMS                    sDims;
++} DISPLAY_SURF_ATTRIBUTES;
++
++
++typedef struct DISPLAY_MODE_INFO_TAG
++{
++      
++      PVRSRV_PIXEL_FORMAT             pixelformat;
++      
++      DISPLAY_DIMS                    sDims;
++      
++      IMG_UINT32                              ui32RefreshHZ;
++      
++      IMG_UINT32                              ui32OEMFlags;
++} DISPLAY_MODE_INFO;
++
++
++
++#define MAX_DISPLAY_NAME_SIZE (50) 
++
++typedef struct DISPLAY_INFO_TAG
++{
++      IMG_UINT32 ui32MaxSwapChains;
++      
++      IMG_UINT32 ui32MaxSwapChainBuffers;
++
++      IMG_UINT32 ui32MinSwapInterval;
++
++      IMG_UINT32 ui32MaxSwapInterval;
++
++      IMG_CHAR        szDisplayName[MAX_DISPLAY_NAME_SIZE];
++
++#if defined(SUPPORT_HW_CURSOR)
++      IMG_UINT16      ui32CursorWidth;
++      IMG_UINT16      ui32CursorHeight;
++#endif
++      
++} DISPLAY_INFO;
++
++typedef struct ACCESS_INFO_TAG
++{
++      IMG_UINT32              ui32Size;
++      IMG_UINT32      ui32FBPhysBaseAddress;
++      IMG_UINT32              ui32FBMemAvailable;                     
++      IMG_UINT32      ui32SysPhysBaseAddress;
++      IMG_UINT32              ui32SysSize;
++      IMG_UINT32              ui32DevIRQ;
++}ACCESS_INFO; 
++
++
++typedef struct PVRSRV_CURSOR_SHAPE_TAG
++{
++      IMG_UINT16                      ui16Width;
++      IMG_UINT16                      ui16Height;
++      IMG_INT16                       i16XHot;
++      IMG_INT16                       i16YHot;
++      
++      
++      IMG_VOID*               pvMask;
++      IMG_INT16                       i16MaskByteStride;
++      
++      
++      IMG_VOID*                       pvColour;
++      IMG_INT16                       i16ColourByteStride;
++      PVRSRV_PIXEL_FORMAT     eColourPixelFormat; 
++} PVRSRV_CURSOR_SHAPE;
++
++#define PVRSRV_SET_CURSOR_VISIBILITY  (1<<0)
++#define PVRSRV_SET_CURSOR_POSITION            (1<<1)
++#define PVRSRV_SET_CURSOR_SHAPE                       (1<<2)
++#define PVRSRV_SET_CURSOR_ROTATION            (1<<3)
++
++typedef struct PVRSRV_CURSOR_INFO_TAG
++{
++      
++      IMG_UINT32 ui32Flags;
++      
++      
++      IMG_BOOL bVisible;
++      
++      
++      IMG_INT16 i16XPos;
++      IMG_INT16 i16YPos;
++      
++      
++      PVRSRV_CURSOR_SHAPE sCursorShape;
++      
++      
++      IMG_UINT32 ui32Rotation;
++ 
++} PVRSRV_CURSOR_INFO;
++
++
++typedef struct _PVRSRV_REGISTRY_INFO_
++{
++    IMG_UINT32                ui32DevCookie;
++    IMG_PCHAR         pszKey;
++    IMG_PCHAR         pszValue;
++    IMG_PCHAR         pszBuf;
++    IMG_UINT32                ui32BufSize;
++} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReadRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWriteRegistryString (PPVRSRV_REGISTRY_INFO psRegInfo);
++
++
++#define PVRSRV_BC_FLAGS_YUVCSC_CONFORMANT_RANGE       (0 << 0)
++#define PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE             (1 << 0)
++
++#define PVRSRV_BC_FLAGS_YUVCSC_BT601                  (0 << 1)
++#define PVRSRV_BC_FLAGS_YUVCSC_BT709                  (1 << 1)
++
++typedef struct BUFFER_INFO_TAG
++{
++      IMG_UINT32                      ui32BufferCount;
++      IMG_UINT32                      ui32BufferDeviceID;
++      PVRSRV_PIXEL_FORMAT     pixelformat;
++      IMG_UINT32                      ui32ByteStride;
++      IMG_UINT32                      ui32Width;
++      IMG_UINT32                      ui32Height;
++      IMG_UINT32                      ui32Flags;
++} BUFFER_INFO;
++
++typedef enum _OVERLAY_DEINTERLACE_MODE_
++{
++      WEAVE=0x0,
++      BOB_ODD,
++      BOB_EVEN,
++      BOB_EVEN_NONINTERLEAVED
++} OVERLAY_DEINTERLACE_MODE;
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/include4/services.h git-nokia/drivers/gpu/pvr/include4/services.h
+--- git/drivers/gpu/pvr/include4/services.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/services.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,790 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_H__
++#define __SERVICES_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "pdumpdefs.h"
++
++
++#if defined(SERVICES4)
++#define IMG_CONST const
++#else
++#define IMG_CONST
++#endif
++
++#define PVRSRV_MAX_CMD_SIZE           1024
++
++#define PVRSRV_MAX_DEVICES            16      
++
++#define PVRSRV_MEM_READ                                               (1<<0)
++#define PVRSRV_MEM_WRITE                                      (1<<1)
++#define PVRSRV_MEM_CACHE_CONSISTENT                   (1<<2)
++#define PVRSRV_MEM_NO_SYNCOBJ                         (1<<3)
++#define PVRSRV_MEM_INTERLEAVED                                (1<<4)
++#define PVRSRV_MEM_DUMMY                                      (1<<5)
++#define PVRSRV_MEM_EDM_PROTECT                                (1<<6)
++#define PVRSRV_MEM_ZERO                     (1<<7)
++#define PVRSRV_MEM_USER_SUPPLIED_DEVVADDR     (1<<8)
++#define PVRSRV_MEM_RAM_BACKED_ALLOCATION      (1<<9)
++#define PVRSRV_MEM_NO_RESMAN                          (1<<10)
++
++#define PVRSRV_HAP_CACHED                                     (1<<12)
++#define PVRSRV_HAP_UNCACHED                                   (1<<13)
++#define PVRSRV_HAP_WRITECOMBINE                               (1<<14)
++#define PVRSRV_HAP_CACHETYPE_MASK                     (PVRSRV_HAP_CACHED|PVRSRV_HAP_UNCACHED|PVRSRV_HAP_WRITECOMBINE)
++#define PVRSRV_HAP_KERNEL_ONLY                                (1<<15)
++#define PVRSRV_HAP_SINGLE_PROCESS                     (1<<16)
++#define PVRSRV_HAP_MULTI_PROCESS                      (1<<17)
++#define PVRSRV_HAP_FROM_EXISTING_PROCESS      (1<<18)
++#define PVRSRV_HAP_NO_CPU_VIRTUAL                     (1<<19)
++#define PVRSRV_HAP_MAPTYPE_MASK                               (PVRSRV_HAP_KERNEL_ONLY \
++                                            |PVRSRV_HAP_SINGLE_PROCESS \
++                                            |PVRSRV_HAP_MULTI_PROCESS \
++                                            |PVRSRV_HAP_FROM_EXISTING_PROCESS \
++                                            |PVRSRV_HAP_NO_CPU_VIRTUAL)
++#define PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT   (24)
++
++#define PVRSRV_MAP_NOUSERVIRTUAL            (1<<27)
++
++#define PVRSRV_NO_CONTEXT_LOSS                                        0               
++#define PVRSRV_SEVERE_LOSS_OF_CONTEXT                 1               
++#define PVRSRV_PRE_STATE_CHANGE_MASK                  0x80    
++
++
++#define PVRSRV_DEFAULT_DEV_COOKIE                     (1)      
++
++
++#define PVRSRVRESMAN_PROCESSID_FIND                   (0xffffffff) 
++
++
++#define PVRSRV_MISC_INFO_TIMER_PRESENT                        (1<<0)
++#define PVRSRV_MISC_INFO_CLOCKGATE_PRESENT            (1<<1)
++#define PVRSRV_MISC_INFO_MEMSTATS_PRESENT             (1<<2)
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE                        20
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE                 200
++
++
++#define PVRSRV_CHANGEDEVMEM_ATTRIBS_CACHECOHERENT             0x00000001
++
++#define PVRSRV_MAPEXTMEMORY_FLAGS_ALTERNATEVA                 0x00000001
++#define PVRSRV_MAPEXTMEMORY_FLAGS_PHYSCONTIG                  0x00000002
++
++typedef enum _PVRSRV_DEVICE_TYPE_
++{
++      PVRSRV_DEVICE_TYPE_UNKNOWN                      = 0 ,
++      PVRSRV_DEVICE_TYPE_MBX1                         = 1 ,
++      PVRSRV_DEVICE_TYPE_MBX1_LITE            = 2 ,
++
++      PVRSRV_DEVICE_TYPE_M24VA                        = 3,
++      PVRSRV_DEVICE_TYPE_MVDA2                        = 4,
++      PVRSRV_DEVICE_TYPE_MVED1                        = 5,
++      PVRSRV_DEVICE_TYPE_MSVDX                        = 6,
++
++      PVRSRV_DEVICE_TYPE_SGX                          = 7,
++
++      
++      PVRSRV_DEVICE_TYPE_EXT                          = 8,
++
++    PVRSRV_DEVICE_TYPE_LAST             = 8,
++
++      PVRSRV_DEVICE_TYPE_FORCE_I32            = 0x7fffffff
++
++} PVRSRV_DEVICE_TYPE;
++
++#define HEAP_ID( _dev_ , _dev_heap_idx_ )     (  ((_dev_)<<24) | ((_dev_heap_idx_)&((1<<24)-1))  )
++#define HEAP_IDX( _heap_id_ )                         ( (_heap_id_)&((1<<24) - 1 ) )
++#define HEAP_DEV( _heap_id_ )                         ( (_heap_id_)>>24 )
++
++typedef enum
++{
++      IMG_EGL        = 0x00000001,
++      IMG_OPENGLES1  = 0x00000002,
++      IMG_OPENGLES2  = 0x00000003,
++      IMG_D3DM           = 0x00000004,
++      IMG_SRV_UM         = 0x00000005,
++      IMG_OPENVG         = 0x00000006
++
++} IMG_MODULE_ID;
++
++
++#define APPHINT_MAX_STRING_SIZE       256
++
++typedef enum
++{
++      IMG_STRING_TYPE         = 1,
++      IMG_FLOAT_TYPE          ,
++      IMG_UINT_TYPE           ,
++      IMG_INT_TYPE            ,
++      IMG_FLAG_TYPE
++}IMG_DATA_TYPE;
++
++
++typedef enum _PVR_POWER_CONTROL_
++{
++      PVRSRV_POWER_CONTROL_SET                        = 0,    
++      PVRSRV_POWER_CONTROL_RETRY                      = 1,    
++      PVRSRV_POWER_CONTROL_QUERY                      = 2,    
++
++      PVRSRV_POWER_CONTROL_FORCE_I32 = 0x7fffffff
++
++} PVR_POWER_CONTROL, *PPVR_POWER_CONTROL;
++
++typedef struct _PVRSRV_CONNECTION_
++{
++      IMG_HANDLE hServices;                                   
++      IMG_UINT32 ui32ProcessID;                               
++}PVRSRV_CONNECTION;
++
++
++typedef struct _PVRSRV_DEV_DATA_
++{
++      PVRSRV_CONNECTION       sConnection;            
++      IMG_HANDLE                      hDevCookie;                     
++
++} PVRSRV_DEV_DATA, *PPVRSRV_DEV_DATA;
++
++typedef struct _PVRSRV_MEMUPDATE_
++{
++      IMG_UINT32                      ui32UpdateAddr;         
++      IMG_UINT32                      ui32UpdateVal;          
++} PVRSRV_MEMUPDATE;
++
++typedef struct _PVRSRV_HWREG_
++{
++      IMG_UINT32                      ui32RegAddr;    
++      IMG_UINT32                      ui32RegVal;             
++} PVRSRV_HWREG;
++
++typedef struct _PVRSRV_MEMBLK_  
++{
++      IMG_DEV_VIRTADDR        sDevVirtAddr;                   
++    IMG_HANDLE          hOSMemHandle;           
++      IMG_HANDLE                      hBuffer;                                
++      IMG_HANDLE                      hResItem;                               
++
++} PVRSRV_MEMBLK;
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_ *PPVRSRV_KERNEL_MEM_INFO;
++
++typedef struct _PVRSRV_CLIENT_MEM_INFO_
++{
++      
++      IMG_PVOID                               pvLinAddr;      
++
++#if defined(SERVICES4)
++    
++      IMG_PVOID                               pvLinAddrKM;
++#endif
++      
++      
++      IMG_DEV_VIRTADDR                sDevVAddr;
++
++      
++
++
++
++
++      IMG_CPU_PHYADDR                 sCpuPAddr;
++
++      
++      IMG_UINT32                              ui32Flags;
++
++      
++
++
++      IMG_UINT32                              ui32ClientFlags;
++      
++      
++      IMG_UINT32                              ui32AllocSize;          
++                                                                                              
++
++      
++      struct _PVRSRV_CLIENT_SYNC_INFO_        *psClientSyncInfo;
++
++      
++      IMG_HANDLE                                                      hMappingInfo;
++
++      
++      IMG_HANDLE                                                      hKernelMemInfo;
++      
++      
++      IMG_HANDLE                                                      hResItem;
++      
++      
++
++
++      struct _PVRSRV_CLIENT_MEM_INFO_         *psNext;
++      
++} PVRSRV_CLIENT_MEM_INFO, *PPVRSRV_CLIENT_MEM_INFO;
++
++
++#if 0
++typedef struct _PVRSRV_CLIENT_SYNC_INFO_
++{
++      
++      PVRSRV_SYNC_DATA                        *psSyncData;
++
++      
++
++
++      
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++
++      
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      IMG_HANDLE                                      hMappingInfo;
++
++      
++      IMG_HANDLE                                      hKernelSyncInfo;
++      
++} PVRSRV_CLIENT_SYNC_INFO, *PPVRSRV_CLIENT_SYNC_INFO;
++#endif
++
++#define PVRSRV_MAX_CLIENT_HEAPS (32)
++typedef struct _PVRSRV_HEAP_INFO_
++{
++      IMG_UINT32                      ui32HeapID;
++      IMG_HANDLE                      hDevMemHeap;
++      IMG_DEV_VIRTADDR        sDevVAddrBase;
++      IMG_UINT32                      ui32HeapByteSize;
++      IMG_UINT32                      ui32Attribs;
++}PVRSRV_HEAP_INFO;
++
++
++
++
++typedef struct _PVRSRV_DEVICE_IDENTIFIER_
++{
++      PVRSRV_DEVICE_TYPE              eDeviceType;            
++      PVRSRV_DEVICE_CLASS             eDeviceClass;           
++      IMG_UINT32                              ui32DeviceIndex;        
++
++} PVRSRV_DEVICE_IDENTIFIER;
++
++
++typedef struct _PVRSRV_MISC_INFO_
++{
++      IMG_UINT32      ui32StateRequest;               
++      IMG_UINT32      ui32StatePresent;               
++
++      
++      IMG_VOID        *pvSOCTimerRegisterKM;
++      IMG_VOID        *pvSOCTimerRegisterUM;
++
++      
++      IMG_VOID        *pvSOCClockGateRegs;    
++      IMG_UINT32      ui32SOCClockGateRegsSize;
++      
++      
++      IMG_CHAR        *pszMemoryStr;
++      IMG_UINT32      ui32MemoryStrLen;
++      
++      
++      
++} PVRSRV_MISC_INFO;
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVConnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDisconnect(PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevices(IMG_CONST PVRSRV_CONNECTION                  *psConnection,
++                                                                                                      IMG_UINT32                                      *puiNumDevices,
++                                                                                                      PVRSRV_DEVICE_IDENTIFIER        *puiDevIDs);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceData(IMG_CONST PVRSRV_CONNECTION         *psConnection,
++                                                                                                      IMG_UINT32                      uiDevIndex,
++                                                                                                      PVRSRV_DEV_DATA         *psDevData,
++                                                                                                      PVRSRV_DEVICE_TYPE      eDeviceType);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfo (IMG_CONST PVRSRV_CONNECTION *psConnection, PVRSRV_MISC_INFO *psMiscInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReleaseMiscInfo (PVRSRV_MISC_INFO *psMiscInfo);
++
++#if 1
++IMG_IMPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++
++IMG_IMPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++
++IMG_IMPORT IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR PollForValue (volatile IMG_UINT32 *pui32LinMemAddr,
++                                                                      IMG_UINT32 ui32Value,
++                                                                      IMG_UINT32 ui32Mask,
++                                                                      IMG_UINT32 ui32Waitus,
++                                                                      IMG_UINT32 ui32Tries);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_HANDLE *phDevMemContext,
++                                                                                      IMG_UINT32 *pui32SharedHeapCount,
++                                                                                      PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContext(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_HANDLE                      hDevMemContext);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapMemInfoToUser(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                               PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                               IMG_VOID* ppvUserLinAddr,
++                                                                                               IMG_HANDLE* phUserMappingInfo);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapMemInfoFromUser(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                                       PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                                       IMG_PVOID pvUserLinAddr,
++                                                                                                       IMG_HANDLE hUserMappingInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMem(IMG_CONST PVRSRV_DEV_DATA      *psDevData,
++                                                                      IMG_HANDLE              hDevMemHeap,
++                                                                      IMG_UINT32              ui32Attribs,
++                                                                      IMG_UINT32              ui32Size,
++                                                                      IMG_UINT32              ui32Alignment,
++                                                                      PVRSRV_CLIENT_MEM_INFO  **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMem(IMG_CONST PVRSRV_DEV_DATA       *psDevData,
++                                                              PVRSRV_CLIENT_MEM_INFO          *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_HANDLE                      hDevMemHeap,
++                                                                                      IMG_DEV_VIRTADDR        *psDevVAddr,
++                                                                                      IMG_UINT32                      ui32Size,
++                                                                                      IMG_UINT32                      ui32Alignment,
++                                                                                      PVRSRV_CLIENT_MEM_INFO          **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMem(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                                      PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                      PVRSRV_CLIENT_MEM_INFO *psSrcMemInfo,
++                                                                      IMG_HANDLE hDstDevMemHeap,
++                                                                      PVRSRV_CLIENT_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapExtMemory (IMG_CONST PVRSRV_DEV_DATA       *psDevData,
++                                                                      PVRSRV_CLIENT_MEM_INFO          *psMemInfo,
++                                                                      IMG_SYS_PHYADDR                         *psSysPAddr,
++                                                                      IMG_UINT32                                      ui32Flags);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                      PVRSRV_CLIENT_MEM_INFO          *psMemInfo,
++                                                                      IMG_UINT32                                      ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemory(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                              IMG_UINT32                              ui32ByteSize, 
++                                                                                              IMG_UINT32                              ui32PageOffset,
++                                                                                              IMG_BOOL                                bPhysContig,
++                                                                                              IMG_SYS_PHYADDR                 *psSysPAddr,
++                                                                                              PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++PVRSRV_ERROR PVRSRVChangeDeviceMemoryAttributes(IMG_CONST PVRSRV_DEV_DATA                     *psDevData,
++                                                                                              PVRSRV_CLIENT_MEM_INFO  *psClientMemInfo,
++                                                                                              IMG_UINT32                              ui32Attribs);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              IMG_HANDLE hDeviceClassBuffer,
++                                                                              PVRSRV_CLIENT_MEM_INFO **ppsMemInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemory (IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                        IMG_SYS_PHYADDR sSysPhysAddr,
++                                                                        IMG_UINT32 uiSizeInBytes,
++                                                                        IMG_PVOID *ppvUserAddr,
++                                                                        IMG_UINT32 *puiActualSize,
++                                                                        IMG_PVOID *ppvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapPhysToUserSpace(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                              IMG_PVOID pvUserAddr,
++                                                                              IMG_PVOID pvProcess);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPowerControl(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                       PVR_POWER_CONTROL eControlMode,
++                                                                                       PVR_POWER_STATE *pePVRPowerState);
++
++typedef enum _PVRSRV_SYNCVAL_MODE_
++{
++      PVRSRV_SYNCVAL_READ                             = IMG_TRUE,
++      PVRSRV_SYNCVAL_WRITE                    = IMG_FALSE,
++
++} PVRSRV_SYNCVAL_MODE, *PPVRSRV_SYNCVAL_MODE;
++
++typedef IMG_UINT32 PVRSRV_SYNCVAL;
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT PVRSRV_ERROR PVRSRVWaitForAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode, PVRSRV_SYNCVAL OpRequired);
++
++IMG_IMPORT IMG_BOOL PVRSRVTestAllOpsNotComplete(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++IMG_IMPORT PVRSRV_SYNCVAL PVRSRVGetPendingOpSyncVal(PPVRSRV_CLIENT_MEM_INFO psMemInfo,
++      PVRSRV_SYNCVAL_MODE eMode);
++
++
++IMG_IMPORT 
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDeviceClass(IMG_CONST PVRSRV_CONNECTION *psConnection, 
++                                                                                                      PVRSRV_DEVICE_CLASS DeviceClass, 
++                                                                                                      IMG_UINT32 *pui32DevCount,
++                                                                                                      IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenDCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseDCDevice(IMG_CONST PVRSRV_CONNECTION     *psConnection, IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCFormats (IMG_HANDLE hDevice,
++                                                                                      IMG_UINT32              *pui32Count, 
++                                                                                      DISPLAY_FORMAT  *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumDCDims (IMG_HANDLE hDevice,
++                                                                              IMG_UINT32              *pui32Count, 
++                                                                              DISPLAY_FORMAT  *psFormat,
++                                                                              DISPLAY_DIMS    *psDims);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCSystemBuffer(IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCInfo(IMG_HANDLE hDevice,
++                                                                              DISPLAY_INFO* psDisplayInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDCSwapChain (IMG_HANDLE                         hDevice,
++                                                                                                      IMG_UINT32                              ui32Flags,
++                                                                                                      DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++                                                                                                      DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++                                                                                                      IMG_UINT32                              ui32BufferCount, 
++                                                                                                      IMG_UINT32                              ui32OEMFlags, 
++                                                                                                      IMG_UINT32                              *pui32SwapChainID, 
++                                                                                                      IMG_HANDLE                              *phSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDCSwapChain (IMG_HANDLE hDevice,
++                                                                                      IMG_HANDLE              hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstRect (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE      hSwapChain,
++                                                                              IMG_RECT        *psDstRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcRect (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE      hSwapChain,
++                                                                              IMG_RECT        *psSrcRect);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCDstColourKey (IMG_HANDLE hDevice,
++                                                                                      IMG_HANDLE      hSwapChain,
++                                                                                      IMG_UINT32      ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSetDCSrcColourKey (IMG_HANDLE hDevice,
++                                                                                      IMG_HANDLE      hSwapChain,
++                                                                                      IMG_UINT32      ui32CKColour);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDCBuffers(IMG_HANDLE hDevice,
++                                                                      IMG_HANDLE hSwapChain,
++                                                                      IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCBuffer (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE hBuffer,
++                                                                              IMG_UINT32 ui32ClipRectCount,
++                                                                              IMG_RECT *psClipRect,
++                                                                              IMG_UINT32 ui32SwapInterval,
++                                                                              IMG_HANDLE hPrivateTag);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSwapToDCSystem (IMG_HANDLE hDevice,
++                                                                              IMG_HANDLE hSwapChain);
++
++IMG_IMPORT
++IMG_HANDLE IMG_CALLCONV PVRSRVOpenBCDevice(IMG_CONST PVRSRV_DEV_DATA *psDevData,
++                                                                                      IMG_UINT32 ui32DeviceID);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCloseBCDevice(IMG_CONST PVRSRV_CONNECTION *psConnection, 
++                                                                                              IMG_HANDLE hDevice);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBufferInfo(IMG_HANDLE hDevice,
++                                                                                              BUFFER_INFO     *psBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetBCBuffer(IMG_HANDLE hDevice,
++                                                                                              IMG_UINT32 ui32BufferIndex,
++                                                                                              IMG_HANDLE *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpInit(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                IMG_UINT32 ui32Offset,
++                                                                                IMG_UINT32 ui32Value,
++                                                                                IMG_UINT32 ui32Mask,
++                                                                                IMG_BOOL bLastFrame,
++                                                                                IMG_BOOL bOverwrite);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSyncPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++                                                                                IMG_BOOL bIsRead,
++                                                                                IMG_UINT32 ui32Value,
++                                                                                IMG_UINT32 ui32Mask);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMem(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                      IMG_PVOID pvAltLinAddr,
++                                                                      PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                      IMG_UINT32 ui32Offset,
++                                                                      IMG_UINT32 ui32Bytes,
++                                                                      IMG_UINT32 ui32Flags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSync(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                              IMG_PVOID pvAltLinAddr,
++                                                                              PVRSRV_CLIENT_SYNC_INFO *psClientSyncInfo,
++                                                                              IMG_UINT32 ui32Offset,
++                                                                              IMG_UINT32 ui32Bytes);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_UINT32 ui32RegAddr,
++                                                                                      IMG_UINT32 ui32RegValue,
++                                                                                      IMG_UINT32 ui32Flags);
++
++#ifdef SERVICES4
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPolWithFlags(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                                       IMG_UINT32 ui32RegAddr,
++                                                                                                       IMG_UINT32 ui32RegValue,
++                                                                                                       IMG_UINT32 ui32Mask,
++                                                                                                       IMG_UINT32 ui32Flags);
++#endif
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegPol(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_UINT32 ui32RegAddr,
++                                                                                      IMG_UINT32 ui32RegValue,
++                                                                                      IMG_UINT32 ui32Mask);
++
++#ifdef SERVICES4
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDReg(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_UINT32 ui32RegAddr,
++                                                                                      IMG_UINT32 ui32RegValue);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpPDDevPAddr(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              PVRSRV_CLIENT_MEM_INFO *psMemInfo,
++                                                                                              IMG_UINT32 ui32Offset,
++                                                                                              IMG_DEV_PHYADDR sPDDevPAddr);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpMemPages(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_HANDLE                      hKernelMemInfo,
++                                                                                              IMG_DEV_PHYADDR         *pPages,
++                                                                                              IMG_UINT32                      ui32NumPages,
++                                                                                              IMG_DEV_VIRTADDR        sDevAddr,
++                                                                                              IMG_UINT32                      ui32Start,
++                                                                                              IMG_UINT32                      ui32Length,
++                                                                                              IMG_BOOL                        bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpSetFrame(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                        IMG_UINT32 ui32Frame);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpComment(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                       IMG_CONST IMG_CHAR *pszComment,
++                                                                                       IMG_BOOL bContinuous);
++
++#if defined(SERVICES4)
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                        IMG_BOOL bContinuous,
++                                                                                        IMG_CONST IMG_CHAR *pszFormat, ...);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCommentWithFlagsf(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                                         IMG_UINT32 ui32Flags,
++                                                                                                         IMG_CONST IMG_CHAR *pszFormat, ...);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpDriverInfo(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_CHAR *pszString,
++                                                                                              IMG_BOOL bContinuous);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpIsCapturing(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_BOOL *pbIsCapturing);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpBitmap(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_CHAR *pszFileName,
++                                                                                      IMG_UINT32 ui32FileOffset,
++                                                                                      IMG_UINT32 ui32Width,
++                                                                                      IMG_UINT32 ui32Height,
++                                                                                      IMG_UINT32 ui32StrideInBytes,
++                                                                                      IMG_DEV_VIRTADDR sDevBaseAddr,
++                                                                                      IMG_UINT32 ui32Size,
++                                                                                      PDUMP_PIXEL_FORMAT ePixelFormat,
++                                                                                      PDUMP_MEM_FORMAT eMemFormat,
++                                                                                      IMG_UINT32 ui32PDumpFlags);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                      IMG_CONST IMG_CHAR *pszFileName,
++                                                                                      IMG_UINT32 ui32FileOffset,
++                                                                                      IMG_UINT32 ui32Address,
++                                                                                      IMG_UINT32 ui32Size,
++                                                                                      IMG_UINT32 ui32PDumpFlags);
++
++#ifdef SERVICES4
++IMG_IMPORT
++IMG_BOOL IMG_CALLCONV PVRSRVPDumpIsCapturingTest(IMG_CONST PVRSRV_CONNECTION *psConnection);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVPDumpCycleCountRegRead(IMG_CONST PVRSRV_CONNECTION *psConnection,
++                                                                                              IMG_UINT32 ui32RegOffset,
++                                                                                              IMG_BOOL bLastFrame);
++#endif
++
++IMG_IMPORT IMG_HANDLE PVRSRVLoadLibrary(IMG_CHAR *pszLibraryName);
++IMG_IMPORT PVRSRV_ERROR       PVRSRVUnloadLibrary(IMG_HANDLE hExtDrv);
++IMG_IMPORT PVRSRV_ERROR       PVRSRVGetLibFuncAddr(IMG_HANDLE hExtDrv, IMG_CHAR *pszFunctionName, IMG_VOID **ppvFuncAddr);
++
++IMG_IMPORT IMG_UINT32 PVRSRVClockus (void);
++IMG_IMPORT IMG_VOID PVRSRVWaitus (IMG_UINT32 ui32Timeus);
++IMG_IMPORT IMG_VOID PVRSRVReleaseThreadQuanta (void);
++IMG_IMPORT PVRSRV_ERROR PVRSRVLockResource (PVRSRV_RES_HANDLE *phResource, IMG_UINT32 ui32ID, IMG_BOOL bBlock);
++IMG_IMPORT PVRSRV_ERROR PVRSRVUnlockResource (PVRSRV_RES_HANDLE *phResource, IMG_UINT32 ui32ID);
++
++#ifdef DEBUG
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVIsResourceLocked (PVRSRV_RES_HANDLE *phResource, IMG_UINT32 ui32ID);
++#endif
++
++
++
++
++
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVCreateAppHintState(IMG_MODULE_ID eModuleID,
++                                                                                                              const IMG_CHAR *pszAppName,
++                                                                                                              IMG_VOID **ppvState);
++IMG_IMPORT IMG_VOID IMG_CALLCONV PVRSRVFreeAppHintState(IMG_MODULE_ID eModuleID,
++                                                                               IMG_VOID *pvHintState);
++
++IMG_IMPORT IMG_BOOL IMG_CALLCONV PVRSRVGetAppHint(IMG_VOID                    *pvHintState,
++                                                                                                const IMG_CHAR        *pszHintName,
++                                                                                                IMG_DATA_TYPE         eDataType,
++                                                                                                const IMG_VOID        *pvDefault,
++                                                                                                IMG_VOID                      *pvReturn);
++
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVAllocUserModeMem (IMG_UINT32 ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVCallocUserModeMem (IMG_UINT32 ui32Size);
++IMG_IMPORT IMG_PVOID IMG_CALLCONV PVRSRVReallocUserModeMem (IMG_PVOID pvBase, IMG_SIZE_T uNewSize);
++IMG_IMPORT IMG_VOID  IMG_CALLCONV PVRSRVFreeUserModeMem (IMG_PVOID pvMem);
++IMG_IMPORT IMG_VOID PVRSRVMemCopy(IMG_VOID *pvDst, const IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_IMPORT IMG_VOID PVRSRVMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateMutex(IMG_PVOID *ppvMutex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyMutex(IMG_PVOID pvMutex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVLockMutex(IMG_PVOID pvMutex);
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnlockMutex(IMG_PVOID pvMutex);
++
++#if (defined(DEBUG) && defined(__linux__))
++IMG_PVOID PVRSRVAllocUserModeMemTracking(IMG_UINT32 ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_PVOID PVRSRVCallocUserModeMemTracking(IMG_UINT32 ui32Size, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++IMG_VOID  PVRSRVFreeUserModeMemTracking(IMG_VOID *pvMem);
++IMG_PVOID PVRSRVReallocUserModeMemTracking(IMG_VOID *pvMem, IMG_UINT32 ui32NewSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32LineNumber);
++#endif 
++
++PVRSRV_ERROR PVRSRVEventObjectWait(PVRSRV_CONNECTION *psConnection, 
++                                                                      IMG_HANDLE hOSEvent, 
++                                                                      IMG_UINT32 ui32MSTimeout);
++
++#define TIME_NOT_PASSED_UINT32(a,b,c)         ((a - b) < c)
++
++#if defined (__cplusplus)
++}
++#endif
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/sgxapi_km.h git-nokia/drivers/gpu/pvr/include4/sgxapi_km.h
+--- git/drivers/gpu/pvr/include4/sgxapi_km.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/sgxapi_km.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,170 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXAPI_KM_H__
++#define __SGXAPI_KM_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "sgxdefs.h"
++#if defined(__linux__) && !defined(USE_CODE)
++      #if defined(__KERNEL__)
++              #include <asm/unistd.h>
++      #else
++              #include <unistd.h>
++      #endif
++#endif
++
++#define SGX_GENERAL_HEAP_ID                                   0
++#define SGX_TADATA_HEAP_ID                                    1
++#define SGX_KERNEL_CODE_HEAP_ID                               2
++#define SGX_VIDEO_CODE_HEAP_ID                                3
++#define SGX_KERNEL_VIDEO_DATA_HEAP_ID         4
++#define SGX_PIXELSHADER_HEAP_ID                               5
++#define SGX_VERTEXSHADER_HEAP_ID                      6
++#define SGX_PDSPIXEL_CODEDATA_HEAP_ID         7
++#define SGX_PDSVERTEX_CODEDATA_HEAP_ID                8
++#define SGX_SYNCINFO_HEAP_ID                          9
++#define SGX_3DPARAMETERS_HEAP_ID                      10
++#define SGX_GENERAL_MAPPING_HEAP_ID                   11
++#define SGX_UNDEFINED_HEAP_ID                         (-1)
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      #define SGX_2D_HEAP_ID                                  12
++      #define SGX_MAX_HEAP_ID                                 13
++#else
++      #define SGX_MAX_HEAP_ID                                 12
++#endif
++
++#define SGX_MAX_TA_STATUS_VALS        32
++#define SGX_MAX_3D_STATUS_VALS        2
++
++#define PFLAGS_POWERDOWN                      0x00000001
++#define PFLAGS_POWERUP                                0x00000002
++ 
++typedef struct _SGX_SLAVE_PORT_
++{
++      IMG_PVOID                               pvData;                                 
++      IMG_UINT32                              ui32DataRange;                  
++      IMG_PUINT32                             pui32Offset;                    
++      IMG_SYS_PHYADDR                 sPhysBase;                              
++}SGX_SLAVE_PORT;
++
++typedef enum _SGX_MISC_INFO_REQUEST_
++{
++      SGX_MISC_INFO_REQUEST_FORCE_I16                                 =  0x7fff
++} SGX_MISC_INFO_REQUEST;
++
++typedef struct _SGX_MISC_INFO_
++{
++      SGX_MISC_INFO_REQUEST   eRequest;       
++
++      union
++      {
++              IMG_UINT32      reserved;       
++      } uData;
++} SGX_MISC_INFO;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_MAX_BLT_SRC_SYNCS              3
++#endif
++
++#ifdef PDUMP
++
++#define PVR3DIF4_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH            256
++
++typedef struct _PVR3DIF4_KICKTA_DUMPBITMAP_
++{
++      IMG_DEV_VIRTADDR        sDevBaseAddr;
++      IMG_UINT32                      ui32Flags;
++      IMG_UINT32                      ui32Width;
++      IMG_UINT32                      ui32Height;
++      IMG_UINT32                      ui32Stride;
++      IMG_UINT32                      ui32PDUMPFormat;
++      IMG_UINT32                      ui32BytesPP;
++      IMG_CHAR                        pszName[PVR3DIF4_KICKTA_DUMPBITMAP_MAX_NAME_LENGTH];
++} PVR3DIF4_KICKTA_DUMPBITMAP, *PPVR3DIF4_KICKTA_DUMPBITMAP;
++
++#define PVRSRV_SGX_PDUMP_CONTEXT_MAX_BITMAP_ARRAY_SIZE        (16)
++
++typedef struct _PVRSRV_SGX_PDUMP_CONTEXT_
++{
++      
++      IMG_UINT32                                              ui32RenderNumForTA;
++
++      
++      IMG_UINT32                                              ui32CacheControl;
++
++} PVRSRV_SGX_PDUMP_CONTEXT;
++
++
++typedef struct _PVR3DIF4_KICKTA_DUMP_ROFF_
++{
++      IMG_HANDLE                      hKernelMemInfo;                                         
++      IMG_UINT32                      uiAllocIndex;                                           
++      IMG_UINT32                      ui32Offset;                                                     
++      IMG_UINT32                      ui32Value;                                                      
++      IMG_PCHAR                       pszName;                                                        
++} PVR3DIF4_KICKTA_DUMP_ROFF, *PPVR3DIF4_KICKTA_DUMP_ROFF;
++#endif        
++
++typedef struct _PVR3DIF4_KICKTA_DUMP_BUFFER_
++{
++      IMG_UINT32                      ui32SpaceUsed;
++      IMG_UINT32                      ui32Start;                                                      
++      IMG_UINT32                      ui32End;                                                        
++      IMG_UINT32                      ui32BufferSize;                                         
++      IMG_UINT32                      ui32BackEndLength;                                      
++      IMG_UINT32                      uiAllocIndex;
++      IMG_HANDLE                      hKernelMemInfo;
++      IMG_PCHAR                       pszName;                                                        
++} PVR3DIF4_KICKTA_DUMP_BUFFER, *PPVR3DIF4_KICKTA_DUMP_BUFFER;
++
++#ifdef PDUMP
++typedef struct _PVR3DIF4_KICKTA_PDUMP_
++{
++      
++      PPVR3DIF4_KICKTA_DUMPBITMAP             psPDumpBitmapArray;
++      IMG_UINT32                                              ui32PDumpBitmapSize;
++
++      
++      PPVR3DIF4_KICKTA_DUMP_BUFFER    psBufferArray;
++      IMG_UINT32                                              ui32BufferArraySize;
++
++      
++      PPVR3DIF4_KICKTA_DUMP_ROFF              psROffArray;
++      IMG_UINT32                                              ui32ROffArraySize;
++} PVR3DIF4_KICKTA_PDUMP, *PPVR3DIF4_KICKTA_PDUMP;
++#endif        
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/include4/sgxscript.h git-nokia/drivers/gpu/pvr/include4/sgxscript.h
+--- git/drivers/gpu/pvr/include4/sgxscript.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/include4/sgxscript.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,67 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXSCRIPT_H__
++#define __SGXSCRIPT_H__
++
++#define       SGX_MAX_INIT_COMMANDS   64
++#define       SGX_MAX_DEINIT_COMMANDS 16
++
++typedef       enum _SGX_INIT_OPERATION
++{
++      SGX_INIT_OP_ILLEGAL = 0,
++      SGX_INIT_OP_WRITE_HW_REG,
++#if defined(PDUMP)
++      SGX_INIT_OP_PDUMP_HW_REG,
++#endif
++      SGX_INIT_OP_HALT
++} SGX_INIT_OPERATION;
++
++typedef union _SGX_INIT_COMMAND
++{
++      SGX_INIT_OPERATION eOp;
++      struct {
++              SGX_INIT_OPERATION eOp;
++              IMG_UINT32 ui32Offset;
++              IMG_UINT32 ui32Value;
++      } sWriteHWReg;
++#if defined(PDUMP)
++      struct {
++              SGX_INIT_OPERATION eOp;
++              IMG_UINT32 ui32Offset;
++              IMG_UINT32 ui32Value;
++      } sPDumpHWReg;
++#endif
++} SGX_INIT_COMMAND;
++
++typedef struct _SGX_INIT_SCRIPTS_
++{
++      SGX_INIT_COMMAND asInitCommands[SGX_MAX_INIT_COMMANDS];
++      SGX_INIT_COMMAND asDeinitCommands[SGX_MAX_DEINIT_COMMANDS];
++} SGX_INIT_SCRIPTS;
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/Kconfig git-nokia/drivers/gpu/pvr/Kconfig
+--- git/drivers/gpu/pvr/Kconfig        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/Kconfig  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,255 @@
++#
++# PowerVR Services framework from Imagination Technologies
++#
++
++menuconfig PVR
++      tristate "PowerVR Services framework"
++      help
++        Kernel-level support for the Imagination PowerVR Services framework.
++        This module provides support for resource handling for a PVR
++        compatible 2D/3D graphics accelerator like the Imagination MBX and
++        SGX accelerator cores.
++
++if PVR
++
++config PVR_TRANSFER_QUEUE
++      depends on PVR_SERVICES4
++      bool
++
++config PVR_SUPPORT_SRVINIT
++      depends on PVR_SERVICES4
++      bool
++
++config PVR_SUPPORT_SECURE_HANDLES
++      bool
++      depends on PVR_SERVICES4
++      default y
++
++config PVR_NEW_TRANSFER_QUEUE
++      bool
++      depends on !PVR_SERVICES4
++      default y
++
++
++choice PVR_SERVICES
++      prompt "Services version"
++
++config PVR_SERVICES4
++      bool "Version 4"
++      select PVR_TRANSFER_QUEUE
++      select PVR_SUPPORT_SRVINIT
++
++endchoice
++
++
++choice PVR_SGXCORE
++      prompt "SGX core"
++
++config PVR_SGXCORE_530
++      bool "530"
++
++endchoice
++
++
++choice PVR_SGX_CORE_REV
++      prompt "SGX core revision"
++
++config PVR_SGX_CORE_REV_103
++      bool "103"
++
++endchoice
++
++config PVR_PVR2D_ALT_2DHW
++      bool
++      default y
++
++config PVR_NO_HARDWARE
++      bool
++
++choice PVR_SYSTEM
++      prompt "PVR system"
++
++config PVR_SYSTEM_OMAP3430
++      bool "OMAP3430"
++      select PVR_SGXCORE_530
++      select PVR_SGX_CORE_REV_103
++      select PVR_PVR2D_ALT_2DHW
++
++config PVR_SYSTEM_NO_HARDWARE
++      bool "No hardware"
++      select PVR_NO_HARDWARE
++endchoice
++
++config PVR_BUFFERCLASS_EXAMPLE
++      bool "Buffer class example"
++
++config PVR_USE_PTHREADS
++      bool
++      default y
++
++config PVR_OPTIMISE_NON_NPTL_SINGLE_THREAD_TLS_LOOKUP
++      bool
++
++config PVR_DISABLE_THREADS
++      bool
++
++config PVR_SUPPORT_DRI2
++      bool
++
++config PVR_PDUMP
++      bool
++
++config PVR_SUPPORT_XWS
++      bool
++
++config PVR_SUPPORT_POWER_MANAGEMENT
++      bool
++
++config PVR_SUPPORT_BUFFER_CLASS
++      bool
++
++
++config PVR_SUPPORT_DYNAMIC_PBRESIZE
++      bool
++
++config PVR_USE_FBDEV
++      bool
++
++config PVR_FBDEV_NAME
++      string
++      depends on PVR_USE_FBDEV
++
++config PVR_SUPPORT_DYNAMIC_3DCLOCKGATING
++      bool
++
++config PVR_REENTRANCY_PROTECTION
++      bool
++
++config PVR_SCHEDULER_CONTROL_SUPPORT
++      bool
++
++config PVR_USE_IMG_POWER_DOMAIN_FUNCTION
++      bool
++
++config PVR_USE_DMALLOC
++      bool
++
++config PVR_SUPPORT_LINUX_X86_WRITECOMBINE
++      bool
++
++config PVR_SGX_PDS_EVENTS_DISABLED
++      bool
++
++config PVR_USE_SUPPORT_NO_TA3D_OVERLAP
++      bool
++
++config PVR_SUPPORT_SGX_TILING
++      bool
++
++config PVR_TRANSFER_QUEUE
++      bool
++      default y
++
++config PVR_SUPPORT_SGX_EVENT_OBJECT
++      bool
++      default y if PVR_SERVICES4
++
++config PVR_SUPPORT_SGX_MMU_DUMMY_PAGE
++      bool
++
++config PVR_PVRSRV_USSE_EDM_STATUS_DEBUG
++      bool
++
++config PVR_SYS_USING_INTERRUPTS
++      depends on !PVR_NO_HARDWARE
++      default y
++      bool
++
++config PVR_SUPPORT_HW_RECOVERY
++      depends on !PVR_NO_HARDWARE
++      default y
++      bool
++
++config PVR_SUPPORT_ACTIVE_POWER_MANAGEMENT
++      depends on !PVR_NO_HARDWARE
++      default y
++      bool
++
++config PVR_USE_GCC__thread_KEYWORD
++      bool
++
++config PVR_SUPPORT_SGX_EVENT_OBJECT
++      default y
++      bool
++
++config PVR_LDM_PLATFORM
++      bool
++
++config PVR_LDM_PCI
++      bool
++
++config PVR_PVR_MANUAL_POWER_CONTROL
++      bool
++
++choice PVR_BUILD
++      bool "Type of build"
++
++config PVR_BUILD_RELEASE
++      bool "Release"
++      help
++        Release build.
++
++config PVR_BUILD_DEBUG
++      bool "Debugging"
++      help
++        Debug build.
++
++config PVR_BUILD_TIMING
++      bool "Timing"
++      help
++        Timing build.
++
++endchoice
++
++if PVR_BUILD_DEBUG
++
++config PVR_DEBUG_LINUX_MEMORY_ALLOCATIONS
++      bool "Debug memory allocations"
++      default y
++
++config PVR_DEBUG_LINUX_MEM_AREAS
++      bool "Debug memory areas"
++      default y
++
++config PVR_DEBUG_LINUX_MMAP_AREAS
++      bool "Debug mmap areas"
++      default y
++
++config PVR_DEBUG_LINUX_XML_PROC_FILES
++      bool "Debug XML proc files"
++      default n
++
++config PVR_DEBUG_LINUX_SLAB_ALLOCATIONS
++      bool "Debug SLAB allocations"
++      default n
++
++config PVR_DEBUG_BRIDGE_KM
++	bool "Debug bridge module"
++      default y
++
++config PVR_DEBUG_TRACE_BRIDGE_KM
++      bool "Debug trace bridge module"
++      default n
++
++config PVR_DEBUG_BEIDGE_KM_DISPATCH_TABLE
++      bool "Debug bridge module's dispatch table"
++      default n
++
++endif         # PVR_BUILD_DEBUG
++
++config PVR_SUPPORT_SGX1
++      bool
++      default y
++
++endif         # PVR
++
+diff -Nurd git/drivers/gpu/pvr/Makefile git-nokia/drivers/gpu/pvr/Makefile
+--- git/drivers/gpu/pvr/Makefile       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/Makefile 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,77 @@
++pvrobj :=
++pvrobj-$(CONFIG_PVR_SYSTEM_OMAP3430)          += omaplfb.o
++pvrobj-$(CONFIG_PVR_BUFFERCLASS_EXAMPLE)      += bc_example.o
++
++obj-$(CONFIG_PVR) += pvrsrvkm.o $(pvrobj-y)
++
++PVR_SYSTEM-$(CONFIG_PVR_SYSTEM_OMAP3430)=omap3430
++PVR_SYSTEM-$(CONFIG_PVR_SYSTEM_NO_HARDWARE)=no_hardware
++
++pvrsrvkm-objs:= services4/srvkm/env/linux/osfunc.o            \
++              services4/srvkm/env/linux/mmap.o                \
++              services4/srvkm/env/linux/module.o              \
++              services4/srvkm/env/linux/pdump.o               \
++              services4/srvkm/env/linux/proc.o                \
++              services4/srvkm/env/linux/pvr_bridge_k.o        \
++              services4/srvkm/env/linux/pvr_debug.o           \
++              services4/srvkm/env/linux/mm.o                  \
++              services4/srvkm/env/linux/mutex.o               \
++              services4/srvkm/common/buffer_manager.o         \
++              services4/srvkm/common/devicemem.o              \
++              services4/srvkm/common/deviceclass.o            \
++              services4/srvkm/common/handle.o                 \
++              services4/srvkm/common/hash.o                   \
++              services4/srvkm/common/metrics.o                \
++              services4/srvkm/common/pvrsrv.o                 \
++              services4/srvkm/common/queue.o                  \
++              services4/srvkm/common/ra.o                     \
++              services4/srvkm/common/resman.o                 \
++              services4/srvkm/common/power.o                  \
++              services4/srvkm/common/mem.o                    \
++              services4/srvkm/bridged/bridged_pvr_bridge.o    \
++              services4/srvkm/devices/sgx/sgxinit.o           \
++              services4/srvkm/devices/sgx/sgxutils.o          \
++              services4/srvkm/devices/sgx/sgxkick.o           \
++              services4/srvkm/devices/sgx/sgxtransfer.o       \
++              services4/srvkm/devices/sgx/mmu.o               \
++              services4/srvkm/devices/sgx/pb.o                \
++              services4/srvkm/devices/sgx/sgx2dcore.o         \
++              services4/srvkm/common/perproc.o
++
++pvrsrvkm-objs+=       services4/system/$(PVR_SYSTEM-y)/sysconfig.o    \
++              services4/system/$(PVR_SYSTEM-y)/sysutils.o
++
++omaplfb-objs :=       services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.o \
++              services4/3rdparty/dc_omap3430_linux/omaplfb_linux.o
++
++bc_example-objs :=                                                        \
++      services4/3rdparty/bufferclass_example/bufferclass_example.o        \
++      services4/3rdparty/bufferclass_example/bufferclass_example_linux.o  \
++      services4/3rdparty/bufferclass_example/bufferclass_example_private.o
++
++INCLUDES :=   -I$(src)/services4/srvkm/env/linux      \
++              -I$(src)/services4/srvkm/include        \
++              -I$(src)/services4/srvkm/bridged        \
++              -I$(src)/services4/srvkm/devices/sgx    \
++              -I$(src)/services4/srvkm/include        \
++              -I$(src)/services4/srvkm/hwdefs         \
++              -I$(src)/include4                       \
++              -I$(src)/services4/system/include       \
++              -I$(src)/services4/include
++
++INCLUDES +=   -I$(src)/services4/system/$(PVR_SYSTEM-y)
++
++# pvrconf.h translates kbuild options to PVR options.
++# We could do away with it by renaming the options in the source itself,
++# which would also result in finer grained option dependency, that is
++# avoiding the rebuild of files not affected by an option change.
++#
++PVR_OPTS := -include $(src)/pvrconf.h
++
++DATE := $(shell date "+%a %B %d %Z %Y")
++PVR_OPTS += -D"PVR_BUILD_DIR=KBUILD_STR($(src))" -D"PVR_BUILD_DATE=KBUILD_STR($(DATE))"
++
++PVR_OPTS += -DLINUX -D__linux__ -DLDM_PLATFORM
++
++ccflags-y :=  $(PVR_OPTS) $(INCLUDES)
++
+diff -Nurd git/drivers/gpu/pvr/pvrconf.h git-nokia/drivers/gpu/pvr/pvrconf.h
+--- git/drivers/gpu/pvr/pvrconf.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/pvrconf.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,118 @@
++#ifndef _PVRCONF_H
++#define _PVRCONF_H
++
++/* Define PVR equivalents of the kbuild config options */
++
++#ifdef CONFIG_PVR_TRANSFER_QUEUE
++#define TRANSFER_QUEUE
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SRVINIT
++#define SUPPORT_SRVINIT
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SECURE_HANDLES
++#define SUPPORT_SECURE_HANDLES
++#endif
++#ifdef CONFIG_PVR_SERVICES4
++#define SERVICES4
++#endif
++#ifdef CONFIG_PVR_SGXCORE_530
++#define SGXCORE_530
++#endif
++#ifdef CONFIG_PVR_SGX_CORE_REV_103
++#define SGX_CORE_REV_103
++#endif
++#ifdef CONFIG_PVR_PVR2D_ALT_2DHW
++#define PVR2D_ALT_2DHW
++#endif
++#ifdef CONFIG_PVR_SYSTEM_OMAP3430
++#define SYSTEM_OMAP3430
++#endif
++#ifdef CONFIG_PVR_SYSTEM_NO_HARDWARE
++#define SYSTEM_NO_HARDWARE
++#endif
++#ifdef CONFIG_PVR_BUFFERCLASS_EXAMPLE
++#define BUFFERCLASS_EXAMPLE
++#endif
++#ifdef CONFIG_PVR_USE_PTHREADS
++#define USE_PTHREADS
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SGX_EVENT_OBJECT
++#define SUPPORT_SGX_EVENT_OBJECT
++#endif
++#ifdef CONFIG_PVR_SYS_USING_INTERRUPTS
++#define SYS_USING_INTERRUPTS
++#endif
++#ifdef CONFIG_PVR_SUPPORT_HW_RECOVERY
++#define SUPPORT_HW_RECOVERY
++#endif
++#ifdef CONFIG_PVR_SUPPORT_ACTIVE_POWER_MANAGEMENT
++#define SUPPORT_ACTIVE_POWER_MANAGEMENT
++#endif
++#ifdef CONFIG_PVR_BUILD_RELEASE
++#define BUILD_RELEASE
++#endif
++#ifdef CONFIG_PVR_BUILD_DEBUG
++#define BUILD_DEBUG
++#endif
++#ifdef CONFIG_PVR_BUILD_TIMING
++#define BUILD_TIMING
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_MEMORY_ALLOCATIONS
++#define DEBUG_LINUX_MEMORY_ALLOCATIONS
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_MEM_AREAS
++#define DEBUG_LINUX_MEM_AREAS
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_MMAP_AREAS
++#define DEBUG_LINUX_MMAP_AREAS
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_XML_PROC_FILES
++#define DEBUG_LINUX_XML_PROC_FILES
++#endif
++#ifdef CONFIG_PVR_DEBUG_LINUX_SLAB_ALLOCATIONS
++#define DEBUG_LINUX_SLAB_ALLOCATIONS
++#endif
++#ifdef CONFIG_PVR_DEBUG_BRIDGE_KM
++#define DEBUG_BRIDGE_KM
++#endif
++#ifdef CONFIG_PVR_DEBUG_TRACE_BRIDGE_KM
++#define DEBUG_TRACE_BRIDGE_KM
++#endif
++#ifdef CONFIG_PVR_DEBUG_BEIDGE_KM_DISPATCH_TABLE
++#define DEBUG_BEIDGE_KM_DISPATCH_TABLE
++#endif
++#ifdef CONFIG_PVR_SUPPORT_SGX1
++#define SUPPORT_SGX1
++#endif
++
++#ifdef CONFIG_PVR_BUILD_DEBUG
++#define PVR_BUILD_TYPE        "debug"
++#define DEBUG
++#endif
++
++#ifdef CONFIG_PVR_BUILD_RELEASE
++#define PVR_BUILD_TYPE        "release"
++#define RELEASE
++#endif
++
++#ifdef CONFIG_PVR_BUILD_TIMING
++#define PVR_BUILD_TYPE        "timing"
++#define TIMING
++#endif
++
++#ifdef CONFIG_PVR_SERVICES4
++#define _XOPEN_SOURCE 600
++#endif
++
++#ifdef CONFIG_PVR_SGX_CORE_REV_103
++#define SGX_CORE_REV  103
++#endif
++
++#ifdef CONFIG_PVR_SGXCORE_530
++#define SGXCORE               530
++#define SGX530
++#define SUPPORT_SGX530
++#endif
++
++#endif                /* _PVRCONF_H */
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,307 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "bufferclass_example.h"
++
++
++static IMG_VOID *gpvAnchor = IMG_NULL;
++static PFN_BC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL;
++
++BC_EXAMPLE_DEVINFO * GetAnchorPtr(IMG_VOID)
++{
++      return (BC_EXAMPLE_DEVINFO *)gpvAnchor;
++}
++
++static IMG_VOID SetAnchorPtr(BC_EXAMPLE_DEVINFO *psDevInfo)
++{
++      gpvAnchor = (IMG_VOID*)psDevInfo;
++}
++
++
++static PVRSRV_ERROR OpenBCDevice(IMG_HANDLE *phDevice)
++{
++      BC_EXAMPLE_DEVINFO *psDevInfo;
++
++      psDevInfo = GetAnchorPtr();
++
++      
++      *phDevice = (IMG_HANDLE)psDevInfo;
++
++      return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR CloseBCDevice(IMG_HANDLE hDevice)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR GetBCBuffer(IMG_HANDLE                    hDevice,
++                                                              IMG_UINT32                      ui32BufferNumber,
++                                                              PVRSRV_SYNC_DATA        *psSyncData,
++                                                              IMG_HANDLE                      *phBuffer)
++{
++      BC_EXAMPLE_DEVINFO      *psDevInfo;
++
++      if(!hDevice || !phBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDevInfo = (BC_EXAMPLE_DEVINFO*)hDevice;
++
++      if( ui32BufferNumber < psDevInfo->sBufferInfo.ui32BufferCount )
++      {
++              psDevInfo->psSystemBuffer[ui32BufferNumber].psSyncData = psSyncData;
++              *phBuffer = (IMG_HANDLE)&psDevInfo->psSystemBuffer[ui32BufferNumber];
++      }
++      else
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR GetBCInfo(IMG_HANDLE hDevice, BUFFER_INFO *psBCInfo)
++{
++      BC_EXAMPLE_DEVINFO      *psDevInfo;
++
++      if(!hDevice || !psBCInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDevInfo = (BC_EXAMPLE_DEVINFO*)hDevice;
++
++      *psBCInfo = psDevInfo->sBufferInfo;
++
++      return PVRSRV_OK;
++}
++
++
++static PVRSRV_ERROR GetBCBufferAddr(IMG_HANDLE                hDevice,
++                                                                      IMG_HANDLE              hBuffer,
++                                                                      IMG_SYS_PHYADDR **ppsSysAddr,
++                                                                      IMG_UINT32              *pui32ByteSize,
++                                                                      IMG_VOID                **ppvCpuVAddr,
++                                                                      IMG_HANDLE              *phOSMapInfo,
++                                                                      IMG_BOOL                *pbIsContiguous)
++{
++      BC_EXAMPLE_BUFFER *psBuffer;
++
++      if(!hDevice || !hBuffer || !ppsSysAddr || !pui32ByteSize)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psBuffer = (BC_EXAMPLE_BUFFER *) hBuffer;
++
++      *ppsSysAddr = &psBuffer->sPageAlignSysAddr;
++      *ppvCpuVAddr = psBuffer->sCPUVAddr;
++
++      *pui32ByteSize = psBuffer->ui32Size;
++
++      *phOSMapInfo = IMG_NULL;
++      *pbIsContiguous = IMG_TRUE;
++
++      return PVRSRV_OK;
++}
++
++
++
++
++PVRSRV_ERROR  BC_Example_Init(IMG_VOID)
++{
++      BC_EXAMPLE_DEVINFO      *psDevInfo;
++      IMG_CPU_PHYADDR         sSystemBufferCPUPAddr;
++      IMG_UINT32 i;
++      
++
++
++
++      
++
++
++
++      
++
++      psDevInfo = GetAnchorPtr();
++
++      if (psDevInfo == IMG_NULL)
++      {
++              
++              psDevInfo = (BC_EXAMPLE_DEVINFO *)AllocKernelMem(sizeof(BC_EXAMPLE_DEVINFO));
++
++              if(!psDevInfo)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              SetAnchorPtr((IMG_VOID*)psDevInfo);
++
++              
++              psDevInfo->ui32RefCount = 0;
++
++      
++              if(OpenPVRServices(&psDevInfo->hPVRServices) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++              if(GetLibFuncAddr (psDevInfo->hPVRServices, "PVRGetBufferClassJTable", &pfnGetPVRJTable) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              
++              if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              
++
++              psDevInfo->ui32NumBuffers = 0;
++
++              psDevInfo->psSystemBuffer = AllocKernelMem(sizeof(BC_EXAMPLE_BUFFER) * BC_EXAMPLE_NUM_BUFFERS);
++
++              if(!psDevInfo->psSystemBuffer)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              for(i=0; i < BC_EXAMPLE_NUM_BUFFERS; i++)
++              {
++
++                      
++                      if (AllocContigMemory(BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE,
++                                                                &psDevInfo->psSystemBuffer[i].hMemHandle,
++                                                                &psDevInfo->psSystemBuffer[i].sCPUVAddr,
++                                                                &sSystemBufferCPUPAddr) != PVRSRV_OK)
++                      {
++                              break;
++                      }
++
++                      psDevInfo->ui32NumBuffers++;
++
++                      psDevInfo->psSystemBuffer[i].ui32Size = BC_EXAMPLE_HEIGHT * BC_EXAMPLE_STRIDE;
++                      psDevInfo->psSystemBuffer[i].sSysAddr = CpuPAddrToSysPAddr(sSystemBufferCPUPAddr);
++                      psDevInfo->psSystemBuffer[i].sPageAlignSysAddr.uiAddr = (psDevInfo->psSystemBuffer[i].sSysAddr.uiAddr & 0xFFFFF000);
++                      psDevInfo->psSystemBuffer[i].psSyncData = IMG_NULL;
++              }
++
++              
++
++              psDevInfo->sBCJTable.ui32TableSize = sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE);
++              psDevInfo->sBCJTable.pfnOpenBCDevice = OpenBCDevice;
++              psDevInfo->sBCJTable.pfnCloseBCDevice = CloseBCDevice;
++              psDevInfo->sBCJTable.pfnGetBCBuffer = GetBCBuffer;
++              psDevInfo->sBCJTable.pfnGetBCInfo = GetBCInfo;
++              psDevInfo->sBCJTable.pfnGetBufferAddr = GetBCBufferAddr;
++
++
++              
++              
++              if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterBCDevice (&psDevInfo->sBCJTable,
++                                                                                                                      &psDevInfo->ui32DeviceID ) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++              }
++
++              
++
++              psDevInfo->sBufferInfo.pixelformat = BC_EXAMPLE_PIXELFORMAT;
++              psDevInfo->sBufferInfo.ui32Width = BC_EXAMPLE_WIDTH;
++              psDevInfo->sBufferInfo.ui32Height = BC_EXAMPLE_HEIGHT;
++              psDevInfo->sBufferInfo.ui32ByteStride = BC_EXAMPLE_STRIDE;              
++              psDevInfo->sBufferInfo.ui32BufferDeviceID = BC_EXAMPLE_DEVICEID;
++              psDevInfo->sBufferInfo.ui32Flags = PVRSRV_BC_FLAGS_YUVCSC_FULL_RANGE | PVRSRV_BC_FLAGS_YUVCSC_BT601;
++              psDevInfo->sBufferInfo.ui32BufferCount = psDevInfo->ui32NumBuffers;
++      }
++
++      
++      psDevInfo->ui32RefCount++;
++
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BC_Example_Deinit(IMG_VOID)
++{
++      BC_EXAMPLE_DEVINFO *psDevInfo;
++      IMG_UINT32 i;
++      psDevInfo = GetAnchorPtr();
++
++      
++      if (psDevInfo == IMG_NULL)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++      
++      psDevInfo->ui32RefCount--;
++
++      if (psDevInfo->ui32RefCount == 0)
++      {
++              
++              PVRSRV_BC_BUFFER2SRV_KMJTABLE   *psJTable = &psDevInfo->sPVRJTable;
++
++
++              
++              if (psJTable->pfnPVRSRVRemoveBCDevice(psDevInfo->ui32DeviceID) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              if (ClosePVRServices(psDevInfo->hPVRServices) != PVRSRV_OK)
++              {
++                      psDevInfo->hPVRServices = IMG_NULL;
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              for(i=0; i < psDevInfo->ui32NumBuffers; i++)
++              {
++                      FreeContigMemory(psDevInfo->psSystemBuffer[i].ui32Size,
++                                                       psDevInfo->psSystemBuffer[i].hMemHandle,
++                                                       psDevInfo->psSystemBuffer[i].sCPUVAddr,
++                                                       SysPAddrToCpuPAddr(psDevInfo->psSystemBuffer[i].sSysAddr));
++              }
++
++              
++              FreeKernelMem(psDevInfo);
++
++              
++              SetAnchorPtr(IMG_NULL);
++      }
++
++      
++      return PVRSRV_OK;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,122 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_H__
++#define __BC_EXAMPLE_H__
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kernelbuffer.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++extern IMG_IMPORT IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++
++#define BC_EXAMPLE_NUM_BUFFERS        3
++
++#define BC_EXAMPLE_WIDTH              (160)
++#define BC_EXAMPLE_HEIGHT             (160)
++#define BC_EXAMPLE_STRIDE             (160*2)
++#define BC_EXAMPLE_PIXELFORMAT        (PVRSRV_PIXEL_FORMAT_YVYU)
++
++#define BC_EXAMPLE_DEVICEID            0
++
++
++typedef struct BC_EXAMPLE_BUFFER_TAG
++{
++      IMG_UINT32                                      ui32Size;
++      IMG_HANDLE                                      hMemHandle;
++      IMG_SYS_PHYADDR                         sSysAddr;
++      IMG_SYS_PHYADDR                         sPageAlignSysAddr;
++      IMG_CPU_VIRTADDR                        sCPUVAddr;
++      PVRSRV_SYNC_DATA                        *psSyncData;
++      struct BC_EXAMPLE_BUFFER_TAG    *psNext;
++} BC_EXAMPLE_BUFFER;
++
++
++typedef struct BC_EXAMPLE_DEVINFO_TAG
++{
++      IMG_UINT32                              ui32DeviceID;   
++
++      BC_EXAMPLE_BUFFER                       *psSystemBuffer;
++
++      BUFFER_INFO                             sBufferInfo;
++
++      
++      IMG_UINT32                              ui32NumBuffers;
++
++      
++      PVRSRV_BC_BUFFER2SRV_KMJTABLE   sPVRJTable;
++
++      
++      PVRSRV_BC_SRV2BUFFER_KMJTABLE   sBCJTable;
++
++      
++
++
++      IMG_HANDLE                              hPVRServices;
++
++      
++      IMG_UINT32                              ui32RefCount;
++
++}  BC_EXAMPLE_DEVINFO;
++
++
++PVRSRV_ERROR BC_Example_Init(IMG_VOID);
++PVRSRV_ERROR BC_Example_Deinit(IMG_VOID);
++
++PVRSRV_ERROR OpenPVRServices(IMG_HANDLE *phPVRServices);
++PVRSRV_ERROR ClosePVRServices(IMG_HANDLE hPVRServices);
++
++IMG_VOID *AllocKernelMem(IMG_UINT32 ui32Size);
++IMG_VOID FreeKernelMem(IMG_VOID *pvMem);
++
++PVRSRV_ERROR AllocContigMemory(IMG_UINT32 ui32Size,
++                                                         IMG_HANDLE * phMemHandle,
++                                                         IMG_CPU_VIRTADDR *pLinAddr,
++                                                         IMG_CPU_PHYADDR *pPhysAddr);
++IMG_VOID FreeContigMemory(IMG_UINT32 ui32Size, 
++                                                IMG_HANDLE hMemHandle,
++                                                IMG_CPU_VIRTADDR LinAddr, 
++                                                IMG_CPU_PHYADDR PhysAddr);
++
++IMG_SYS_PHYADDR CpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr);
++IMG_CPU_PHYADDR SysPAddrToCpuPAddr(IMG_SYS_PHYADDR sys_paddr);
++
++IMG_VOID *MapPhysAddr(IMG_SYS_PHYADDR sSysAddr, IMG_UINT32 ui32Size);
++IMG_VOID UnMapPhysAddr(IMG_VOID *pvAddr, IMG_UINT32 ui32Size);
++
++PVRSRV_ERROR GetLibFuncAddr (IMG_HANDLE hExtDrv, IMG_CHAR *szFunctionName, PFN_BC_GET_PVRJTABLE *ppfnFuncTable);
++BC_EXAMPLE_DEVINFO * GetAnchorPtr(IMG_VOID);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,372 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++
++#if defined(LMA)
++#include <linux/pci.h>
++#else
++#include <linux/dma-mapping.h>
++#endif
++
++#include "bufferclass_example.h"
++#include "bufferclass_example_linux.h"
++
++#define DEVNAME       "bc_example"
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++
++int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
++int FillBuffer(unsigned int ui32BufferIndex);
++int GetBufferCount(unsigned int *pui32BufferCount);
++
++static int AssignedMajorNumber;
++
++static struct file_operations bufferclass_example_fops = {
++      ioctl:BC_Example_Bridge,
++};
++
++
++#define unref__ __attribute__ ((unused))
++
++#if defined(LMA)
++
++#define PVR_BUFFERCLASS_MEMOFFSET (220 * 1024 * 1024) 
++#define PVR_BUFFERCLASS_MEMSIZE         (4 * 1024 * 1024)       
++
++unsigned int g_ui32MemBase = 0;
++unsigned int g_ui32MemCurrent = 0;
++
++typedef struct  
++{
++      union
++      {
++              IMG_UINT8       aui8PCISpace[256];
++              IMG_UINT16      aui16PCISpace[128];
++              IMG_UINT32      aui32PCISpace[64];
++
++              struct  
++              {
++                      IMG_UINT16      ui16VenID;
++                      IMG_UINT16      ui16DevID;
++                      IMG_UINT16      ui16PCICmd;
++                      IMG_UINT16      ui16PCIStatus;
++              }s;
++      }u;
++} PCICONFIG_SPACE, *PPCICONFIG_SPACE;
++
++#define VENDOR_ID_PVR                                 0x1010
++#define DEVICE_ID_PVR                                 0x1CF1
++
++#define PCI_BASEREG_OFFSET_DWORDS             4
++
++#define PVR_MEM_PCI_BASENUM                   2
++#define PVR_MEM_PCI_OFFSET                (PVR_MEM_PCI_BASENUM + PCI_BASEREG_OFFSET_DWORDS)
++
++
++
++IMG_UINT32 PCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg)
++{
++    struct pci_dev *dev;
++    IMG_UINT32 ui32Value;
++
++    dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++    if (dev)
++    {
++            pci_read_config_dword(dev, (int) ui32Reg, (u32 *) & ui32Value);
++            return (ui32Value);
++    }
++    else
++    {
++            return (0);
++    }
++}
++
++IMG_VOID PCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value)
++{
++    struct pci_dev *dev;
++
++    dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++    if (dev)
++    {
++            pci_write_config_dword(dev, (int) ui32Reg, (u32) ui32Value);
++    }
++}
++
++static IMG_UINT32 FindPCIDevice(IMG_UINT16 ui16VenID, IMG_UINT16 ui16DevID, PCICONFIG_SPACE *psPCISpace)
++{
++      IMG_UINT32 ui32BusNum;
++      IMG_UINT32 ui32DevNum;
++      IMG_UINT32 ui32VenDevID;
++
++      
++      for (ui32BusNum=0; ui32BusNum < 255; ui32BusNum++)
++      {
++              
++              for (ui32DevNum=0; ui32DevNum < 32; ui32DevNum++)
++              {
++                      
++                      ui32VenDevID=PCIReadDword(ui32BusNum, ui32DevNum, 0, 0);                        
++
++                      
++                      if (ui32VenDevID == (IMG_UINT32)((ui16DevID<<16)+ui16VenID))
++                      {
++                              IMG_UINT32 ui32Idx;
++
++                              
++                              PCIWriteDword(ui32BusNum, ui32DevNum, 0, 4, PCIReadDword(ui32BusNum, ui32DevNum, 0, 4) | 0x02);
++
++                              
++                              for (ui32Idx=0; ui32Idx < 64; ui32Idx++)
++                              {
++                                      psPCISpace->u.aui32PCISpace[ui32Idx] = PCIReadDword(ui32BusNum, ui32DevNum, 0, ui32Idx*4);
++                              }
++                              return PVRSRV_OK;
++                      }
++                                                      
++              }
++
++      }
++
++      return PVRSRV_ERROR_GENERIC;
++}
++#endif
++
++
++
++static int __init BC_Example_ModInit(void)
++{
++#if defined(LMA)
++      PCICONFIG_SPACE sPCISpace;
++#endif
++      
++      AssignedMajorNumber = register_chrdev(0, DEVNAME, &bufferclass_example_fops);
++
++      if (AssignedMajorNumber <= 0)
++      {
++              printk("BC_Example_ModInit: unable to get major number\n");
++
++              return -EBUSY;
++      }
++
++      printk("BC_Example_ModInit: major device %d\n", AssignedMajorNumber);
++
++#if defined(LMA)
++      if(FindPCIDevice(VENDOR_ID_PVR, DEVICE_ID_PVR, &sPCISpace) != PVRSRV_OK)
++      {
++              printk ("BC_Example_ModInit: can't init device (can't find PCI) \n");
++              unregister_chrdev(AssignedMajorNumber, DEVNAME);
++              return PVRSRV_ERROR_INVALID_DEVICE;
++      }
++      
++      g_ui32MemBase = sPCISpace.u.aui32PCISpace[PVR_MEM_PCI_OFFSET] + PVR_BUFFERCLASS_MEMOFFSET;
++#endif
++
++      if(BC_Example_Init() != PVRSRV_OK)
++      {
++              printk ("BC_Example_ModInit: can't init device\n");
++              unregister_chrdev(AssignedMajorNumber, DEVNAME);
++              return -ENODEV;
++      }
++
++      return 0;
++} 
++
++static void __exit BC_Example_ModCleanup(void)
++{    
++      unregister_chrdev(AssignedMajorNumber, DEVNAME);
++      
++      if(BC_Example_Deinit() != PVRSRV_OK)
++      {
++              printk ("BC_Example_ModCleanup: can't deinit device\n");
++      }
++
++} 
++
++
++IMG_VOID *AllocKernelMem(IMG_UINT32 ui32Size)
++{
++      return kmalloc(ui32Size, GFP_KERNEL);
++}
++
++IMG_VOID FreeKernelMem(IMG_VOID *pvMem)
++{
++      kfree(pvMem);
++}
++
++PVRSRV_ERROR AllocContigMemory(       IMG_UINT32 ui32Size,
++                                                              IMG_HANDLE unref__ *phMemHandle, 
++                                                              IMG_CPU_VIRTADDR *pLinAddr, 
++                                                              IMG_CPU_PHYADDR *pPhysAddr)
++{
++#if defined(LMA)
++      IMG_VOID *pvLinAddr;
++      
++      
++      if(g_ui32MemCurrent + ui32Size >= PVR_BUFFERCLASS_MEMSIZE)
++      {
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      pvLinAddr = ioremap(g_ui32MemBase + g_ui32MemCurrent, ui32Size);
++
++      if(pvLinAddr)
++      {
++              pPhysAddr->uiAddr = g_ui32MemBase + g_ui32MemCurrent;
++              *pLinAddr = pvLinAddr;  
++
++              
++              g_ui32MemCurrent += ui32Size;
++              return PVRSRV_OK;
++      }
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++#else
++      dma_addr_t dma;
++      IMG_VOID *pvLinAddr;
++      
++      pvLinAddr = dma_alloc_coherent(NULL, ui32Size, &dma, GFP_KERNEL);
++
++      if(pvLinAddr == IMG_NULL)
++      {
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      else
++      {
++              IMG_VOID *pvPage;
++              IMG_VOID *pvEnd = pvLinAddr + ui32Size;
++
++              for(pvPage = pvLinAddr; pvPage < pvEnd;  pvPage += PAGE_SIZE)
++              {
++                      SetPageReserved(virt_to_page(pvPage));
++              }
++
++              pPhysAddr->uiAddr = dma;
++              *pLinAddr = pvLinAddr;
++
++              return PVRSRV_OK;
++      }
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++#endif
++}
++
++void FreeContigMemory(  IMG_UINT32 ui32Size,
++                                          IMG_HANDLE unref__ hMemHandle, 
++                                              IMG_CPU_VIRTADDR LinAddr, 
++                                              IMG_CPU_PHYADDR PhysAddr)
++{
++#if defined(LMA)
++      g_ui32MemCurrent -= ui32Size;
++      iounmap(LinAddr);
++#else
++      dma_free_coherent(NULL, ui32Size, LinAddr, (dma_addr_t)PhysAddr.uiAddr);
++#endif
++}
++
++IMG_SYS_PHYADDR CpuPAddrToSysPAddr(IMG_CPU_PHYADDR cpu_paddr)
++{
++      IMG_SYS_PHYADDR sys_paddr;
++      
++      
++      sys_paddr.uiAddr = cpu_paddr.uiAddr;
++      return sys_paddr;
++}
++
++IMG_CPU_PHYADDR SysPAddrToCpuPAddr(IMG_SYS_PHYADDR sys_paddr)
++{
++      
++      IMG_CPU_PHYADDR cpu_paddr;
++      
++      cpu_paddr.uiAddr = sys_paddr.uiAddr;
++      return cpu_paddr;
++}
++
++PVRSRV_ERROR OpenPVRServices (IMG_HANDLE *phPVRServices)
++{
++      
++      *phPVRServices = 0;
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR ClosePVRServices (IMG_HANDLE unref__ hPVRServices)
++{
++      
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR GetLibFuncAddr (IMG_HANDLE unref__ hExtDrv, IMG_CHAR *szFunctionName, PFN_BC_GET_PVRJTABLE *ppfnFuncTable)
++{
++      if(strcmp("PVRGetBufferClassJTable", szFunctionName) != 0)
++              return PVRSRV_ERROR_INVALID_PARAMS;
++
++      
++      *ppfnFuncTable = PVRGetBufferClassJTable;
++
++      return PVRSRV_OK;
++}
++
++
++int BC_Example_Bridge(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
++{
++      int err = -EFAULT;
++      int command = _IOC_NR(cmd);
++      BC_Example_ioctl_package *psBridge = (BC_Example_ioctl_package *)arg;
++
++      if(!access_ok(VERIFY_WRITE, psBridge, sizeof(BC_Example_ioctl_package)))
++              return err;
++
++      switch(command)
++      {
++              case _IOC_NR(BC_Example_ioctl_fill_buffer):
++              {
++                      if(FillBuffer(psBridge->inputparam) == -1)
++                              return err;
++                      break;
++              }
++              case _IOC_NR(BC_Example_ioctl_get_buffer_count):
++              {       
++                      if(GetBufferCount(&psBridge->outputparam) == -1)
++                              return err;
++                      
++                      break;
++              }
++              default:
++                      return err;
++      }
++
++      return 0;
++}
++
++
++module_init(BC_Example_ModInit);
++module_exit(BC_Example_ModCleanup);
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_linux.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,47 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __BC_EXAMPLE_LINUX_H__
++#define __BC_EXAMPLE_LINUX_H__
++
++#include <linux/ioctl.h>
++
++typedef struct BC_Example_ioctl_package_TAG
++{
++      int inputparam;
++      int outputparam;
++
++}BC_Example_ioctl_package;
++
++#define BC_EXAMPLE_IOC_GID      'g'
++
++#define BC_EXAMPLE_IOWR(INDEX)  _IOWR(BC_EXAMPLE_IOC_GID, INDEX, BC_Example_ioctl_package)
++
++#define BC_Example_ioctl_fill_buffer          BC_EXAMPLE_IOWR(0)
++#define BC_Example_ioctl_get_buffer_count     BC_EXAMPLE_IOWR(1)
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c
+--- git/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/bufferclass_example/bufferclass_example_private.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,158 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "bufferclass_example.h"
++
++
++void FillYUV422Image(void *pvDest, int width, int height, int bytestride)
++{
++      static int iPhase = 0;
++      int x, y;
++      unsigned char u,v,y0,y1;
++      unsigned int *pui32yuv = (unsigned int *)pvDest;
++      unsigned int count = 0;
++
++      for(y=0;y<height;y++)
++      {
++              for(x=0;x<width >> 1;x++)
++              {
++                      u = (y<(height/2))? ((x<(width/4))? 0xFF:0x33) : ((x<(width/4))? 0x33:0xAA);
++                      v = (y<(height/2))? ((x<(width/4))? 0xAA:0x0) : ((x<(width/4))? 0x03:0xEE);
++
++                      y0 = y1 = (((x+iPhase)>>4)%(2)==0)? 0x7f:0x00;
++
++                      
++                      pui32yuv[count++] = (y1 << 24) | (v << 16) | (y0 << 8) | u;
++
++              }
++      }
++
++      iPhase++;
++}
++
++void FillRGB565Image(void *pvDest, int width, int height, int bytestride)
++{
++      int i, Count;
++      unsigned long *pui32Addr = (unsigned long *)pvDest;
++      unsigned short *pui16Addr = (unsigned short *)pvDest;
++      unsigned long Colour32;
++      unsigned short Colour16;
++      static  unsigned char   Colour8 = 0;
++      
++      Colour16 = (Colour8>>3) | ((Colour8>>2)<<5) | ((Colour8>>3)<<11);
++      Colour32 = Colour16 | Colour16 << 16;
++                      
++      Count = (height * bytestride)>>2;
++
++      for(i=0; i<Count; i++)
++      {
++              pui32Addr[i] = Colour32;
++      }
++
++      Count =  height;
++
++      pui16Addr = (unsigned short *)((unsigned char *)pvDest + (2 * Colour8));
++
++      for(i=0; i<Count; i++)
++      {
++              *pui16Addr = 0xF800;
++
++              pui16Addr = (unsigned short *)((unsigned char *)pui16Addr + bytestride);
++      }
++      Count = bytestride >> 2;
++      
++      pui32Addr = (unsigned long *)((unsigned char *)pvDest + (bytestride * (0xFF - Colour8)));
++
++      for(i=0; i<Count; i++)
++      {
++              pui32Addr[i] = 0x001F001F;
++      }
++
++      
++      Colour8 = (Colour8 + 1) % 0xFF;
++}
++
++
++int FillBuffer(unsigned int ui32BufferIndex)
++{
++      BC_EXAMPLE_DEVINFO              *psDevInfo = GetAnchorPtr();
++      BC_EXAMPLE_BUFFER               *psBuffer;
++      BUFFER_INFO                     *psBufferInfo;
++      PVRSRV_SYNC_DATA        *psSyncData;                    
++
++      
++      if(psDevInfo == IMG_NULL)
++      {
++              return -1;
++      }
++
++      psBuffer = &psDevInfo->psSystemBuffer[ui32BufferIndex];
++      psBufferInfo = &psDevInfo->sBufferInfo;
++
++      
++      psSyncData = psBuffer->psSyncData;
++
++      
++      if(psSyncData)
++      {
++              psSyncData->ui32WriteOpsPending++;
++      }
++
++      if(psBufferInfo->pixelformat == PVRSRV_PIXEL_FORMAT_RGB565)
++      {
++              FillRGB565Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++      }
++      else
++      {
++              FillYUV422Image(psBuffer->sCPUVAddr, BC_EXAMPLE_WIDTH, BC_EXAMPLE_HEIGHT, BC_EXAMPLE_STRIDE);
++      }
++
++      
++      if(psSyncData)
++      {
++              psSyncData->ui32WriteOpsComplete++;
++      }
++
++      return 0;
++}
++
++
++int GetBufferCount(unsigned int *pui32BufferCount)
++{
++      BC_EXAMPLE_DEVINFO *psDevInfo = GetAnchorPtr();
++
++      
++      if(psDevInfo == IMG_NULL)
++      {
++              return -1;
++      }
++
++      
++      *pui32BufferCount = psDevInfo->sBufferInfo.ui32BufferCount;
++      
++      return 0;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_displayclass.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1312 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/console.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/notifier.h>
++#include <linux/workqueue.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++#include "kerneldisplay.h"
++#include "omaplfb.h"
++
++#define DISPLAY_DEVICE_NAME "PowerVR OMAP Linux Display Driver"
++
++#define       DRIVER_PREFIX   "omaplfb"
++
++static IMG_VOID *gpvAnchor;
++
++static int fb_idx = 0;
++
++#define OMAPLFB_COMMAND_COUNT         1
++
++#if !defined(OMAPLFB_DEVICE_POWER)
++PVRSRV_ERROR OMAPLFBPrePower (IMG_HANDLE              hDevHandle,
++                                                PVR_POWER_STATE       eNewPowerState,
++                                                PVR_POWER_STATE       eCurrentPowerState);
++PVRSRV_ERROR OMAPLFBPostPower (IMG_HANDLE             hDevHandle,
++                                                 PVR_POWER_STATE      eNewPowerState,
++                                                 PVR_POWER_STATE      eCurrentPowerState);
++#endif
++
++extern void omap_dispc_set_plane_base(int plane, IMG_UINT32 phys_addr);
++
++static PFN_DC_GET_PVRJTABLE pfnGetPVRJTable = IMG_NULL;
++
++static OMAPLFB_DEVINFO * GetAnchorPtr(IMG_VOID)
++{
++      return (OMAPLFB_DEVINFO *)gpvAnchor;
++}
++
++static IMG_VOID SetAnchorPtr(OMAPLFB_DEVINFO *psDevInfo)
++{
++      gpvAnchor = (IMG_VOID*)psDevInfo;
++}
++
++static int FrameBufferEvents(struct notifier_block *psNotif,
++              unsigned long event, void *data)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      struct fb_event *psFBEvent = (struct fb_event *)data;
++
++      
++      if (event != FB_EVENT_BLANK)
++      {
++              return 0;
++      }
++
++      
++      if (*(int *)psFBEvent->data == 0)
++      {
++              return 0;
++      }
++
++      psDevInfo = GetAnchorPtr();
++
++      
++      schedule_work(&psDevInfo->sLINWork);
++
++      return 0;
++}
++
++static void unblank_display(OMAPLFB_DEVINFO *psDevInfo)
++{
++      acquire_console_sem();
++      (void) fb_blank(psDevInfo->psLINFBInfo, 0);
++      release_console_sem();
++}
++
++static void WorkHandler(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++                      void *
++#else
++                      struct work_struct *
++#endif
++                      data)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++
++      PVR_UNREFERENCED_PARAMETER(data);
++
++      psDevInfo = GetAnchorPtr();
++
++      unblank_display(psDevInfo);
++}
++
++static PVRSRV_ERROR Flip(OMAPLFB_SWAPCHAIN *psSwapChain,
++                                                IMG_UINT32 aPhyAddr)
++{
++      if (1 /* omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_LCD */)
++      {
++                omap_dispc_set_plane_base(0, aPhyAddr);
++              return PVRSRV_OK;
++      }
++      else
++      if (0 /*omap2_disp_get_output_dev(OMAP2_GRAPHICS) == OMAP2_OUTPUT_TV*/)
++      {
++                omap_dispc_set_plane_base(0, aPhyAddr);
++              return PVRSRV_OK;
++      }
++
++      return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++static IMG_VOID EnableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++
++}
++
++static IMG_VOID DisableVSyncInterrupt(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++}
++
++static PVRSRV_ERROR OpenDCDevice(IMG_UINT32 ui32DeviceID,
++                                 IMG_HANDLE *phDevice,
++                                 PVRSRV_SYNC_DATA* psSystemBufferSyncData)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      int res;
++
++      PVR_UNREFERENCED_PARAMETER(ui32DeviceID);
++      
++      psDevInfo = GetAnchorPtr();
++      
++      
++      psDevInfo->sSystemBuffer.psSyncData = psSystemBufferSyncData;
++      
++      
++      INIT_WORK(&psDevInfo->sLINWork, WorkHandler
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
++      , NULL
++#endif
++      );
++
++      memset(&psDevInfo->sLINNotifBlock, 0, sizeof(psDevInfo->sLINNotifBlock));
++
++      psDevInfo->sLINNotifBlock.notifier_call = FrameBufferEvents;
++
++      res = fb_register_client(&psDevInfo->sLINNotifBlock);
++      if (res != 0)
++      {
++              printk(KERN_INFO DRIVER_PREFIX
++                      ": Couldn't register for framebuffer events: %d\n",
++                      res);
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      unblank_display(psDevInfo);
++
++      
++      *phDevice = (IMG_HANDLE)psDevInfo;
++      
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR CloseDCDevice(IMG_HANDLE hDevice)
++{
++      OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      BUG_ON(psDevInfo->psSwapChain != IMG_NULL);
++
++      
++      fb_unregister_client(&psDevInfo->sLINNotifBlock);
++
++      
++      flush_scheduled_work();
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR EnumDCFormats(IMG_HANDLE hDevice,
++                                                                      IMG_UINT32 *pui32NumFormats, 
++                                                                      DISPLAY_FORMAT *psFormat)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !pui32NumFormats)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      *pui32NumFormats = 1;
++      
++      if(psFormat)
++      {
++              psFormat[0] = psDevInfo->sDisplayFormat;
++      }
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR EnumDCDims(IMG_HANDLE hDevice, 
++                                                      DISPLAY_FORMAT *psFormat, 
++                                                      IMG_UINT32 *pui32NumDims, 
++                                                      DISPLAY_DIMS *psDim)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !psFormat || !pui32NumDims)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      *pui32NumDims = 1;
++
++              
++      if(psDim)
++      {
++              psDim[0] = psDevInfo->sDisplayDim;
++      }
++      
++      return PVRSRV_OK;       
++}
++
++
++static PVRSRV_ERROR GetDCSystemBuffer(IMG_HANDLE hDevice, IMG_HANDLE *phBuffer)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !phBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      *phBuffer = (IMG_HANDLE)&psDevInfo->sSystemBuffer;
++
++      return PVRSRV_OK;       
++}
++
++
++static PVRSRV_ERROR GetDCInfo(IMG_HANDLE hDevice, DISPLAY_INFO *psDCInfo)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      if(!hDevice || !psDCInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++
++      *psDCInfo = psDevInfo->sDisplayInfo;
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR GetDCBufferAddr(IMG_HANDLE        hDevice,
++                                                                      IMG_HANDLE        hBuffer, 
++                                                                      IMG_SYS_PHYADDR   **ppsSysAddr,
++                                                                      IMG_UINT32        *pui32ByteSize, 
++                                                                      IMG_VOID          **ppvCpuVAddr,
++                                                                      IMG_HANDLE        *phOSMapInfo,
++                                                                      IMG_BOOL          *pbIsContiguous)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_BUFFER *psSystemBuffer;
++
++      if(!hDevice)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      if(!hBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++      psSystemBuffer = (OMAPLFB_BUFFER *)hBuffer;
++
++      if (!ppsSysAddr)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      *ppsSysAddr = &psSystemBuffer->sSysAddr;
++
++      if (!pui32ByteSize)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      *pui32ByteSize = psDevInfo->sFBInfo.ui32BufferSize;
++
++      if (ppvCpuVAddr)
++      {
++              *ppvCpuVAddr = psSystemBuffer->sCPUVAddr;
++      }
++
++      if (phOSMapInfo)
++      {
++              *phOSMapInfo = (IMG_HANDLE)0;
++      }
++
++      if (pbIsContiguous)
++      {
++              *pbIsContiguous = IMG_TRUE;
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR CreateDCSwapChain(IMG_HANDLE hDevice,
++                                                                              IMG_UINT32 ui32Flags, 
++                                                                              DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib, 
++                                                                              DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++                                                                              IMG_UINT32 ui32BufferCount, 
++                                                                              PVRSRV_SYNC_DATA **ppsSyncData,
++                                                                              IMG_UINT32 ui32OEMFlags,
++                                                                              IMG_HANDLE *phSwapChain, 
++                                                                              IMG_UINT32 *pui32SwapChainID)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      OMAPLFB_BUFFER *psBuffer;
++      OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++      PVR_UNREFERENCED_PARAMETER(ui32OEMFlags);       
++      PVR_UNREFERENCED_PARAMETER(pui32SwapChainID);
++      
++      if(!hDevice 
++      || !psDstSurfAttrib 
++      || !psSrcSurfAttrib 
++      || !ppsSyncData 
++      || !phSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }       
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      
++      if(psDevInfo->psSwapChain != IMG_NULL)
++      {
++              return PVRSRV_ERROR_FLIP_CHAIN_EXISTS;  
++      }
++      
++      
++      if(ui32BufferCount > psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers)
++      {
++              return PVRSRV_ERROR_TOOMANYBUFFERS;     
++      }
++      
++      if ((psDevInfo->sFBInfo.ui32RoundedBufferSize * ui32BufferCount) > psDevInfo->sFBInfo.ui32FBSize)
++      {
++              return PVRSRV_ERROR_TOOMANYBUFFERS;
++      }
++
++      
++
++
++      if(psDstSurfAttrib->pixelformat != psDevInfo->sDisplayFormat.pixelformat
++      || psDstSurfAttrib->sDims.ui32ByteStride != psDevInfo->sDisplayDim.ui32ByteStride
++      || psDstSurfAttrib->sDims.ui32Width != psDevInfo->sDisplayDim.ui32Width
++      || psDstSurfAttrib->sDims.ui32Height != psDevInfo->sDisplayDim.ui32Height)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }               
++
++      if(psDstSurfAttrib->pixelformat != psSrcSurfAttrib->pixelformat
++      || psDstSurfAttrib->sDims.ui32ByteStride != psSrcSurfAttrib->sDims.ui32ByteStride
++      || psDstSurfAttrib->sDims.ui32Width != psSrcSurfAttrib->sDims.ui32Width
++      || psDstSurfAttrib->sDims.ui32Height != psSrcSurfAttrib->sDims.ui32Height)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }               
++
++      
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      
++      
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_SWAPCHAIN));
++      if(!psSwapChain)
++      {
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      psBuffer = (OMAPLFB_BUFFER*)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_BUFFER) * ui32BufferCount);
++      if(!psBuffer)
++      {
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorFreeSwapChain;
++      }
++
++      psVSyncFlips = (OMAPLFB_VSYNC_FLIP_ITEM *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_VSYNC_FLIP_ITEM) * ui32BufferCount);
++      if (!psVSyncFlips)
++      {
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorFreeBuffers;
++      }
++
++      psSwapChain->ui32BufferCount = ui32BufferCount;
++      psSwapChain->psBuffer = psBuffer;
++      psSwapChain->psVSyncFlips = psVSyncFlips;
++      psSwapChain->ui32InsertIndex = 0;
++      psSwapChain->ui32RemoveIndex = 0;
++      psSwapChain->psPVRJTable = &psDevInfo->sPVRJTable;
++
++      
++      for(i=0; i<ui32BufferCount-1; i++)
++      {
++              psBuffer[i].psNext = &psBuffer[i+1];
++      }
++      
++      psBuffer[i].psNext = &psBuffer[0];
++
++      
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              psBuffer[i].psSyncData = ppsSyncData[i];
++
++              psBuffer[i].sSysAddr.uiAddr = psDevInfo->sFBInfo.sSysAddr.uiAddr + (i * psDevInfo->sFBInfo.ui32RoundedBufferSize);
++              psBuffer[i].sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr + (i * psDevInfo->sFBInfo.ui32RoundedBufferSize);
++      }
++
++      
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              psVSyncFlips[i].bValid = IMG_FALSE;
++              psVSyncFlips[i].bFlipped = IMG_FALSE;
++              psVSyncFlips[i].bCmdCompleted = IMG_FALSE;
++      }
++
++      
++      unblank_display(psDevInfo);
++
++      if (OMAPLFBInstallVSyncISR(psSwapChain) != PVRSRV_OK)
++      {
++              printk(KERN_WARNING DRIVER_PREFIX ": ISR handler failed to register\n");
++              goto ErrorFreeVSyncItems;
++      }
++              
++      EnableVSyncInterrupt(psSwapChain);
++              
++      
++      psDevInfo->psSwapChain = psSwapChain;
++
++      
++      *phSwapChain = (IMG_HANDLE)psSwapChain;
++
++      return PVRSRV_OK;
++
++ErrorFreeVSyncItems:
++      OMAPLFBFreeKernelMem(psVSyncFlips);
++ErrorFreeBuffers:
++      OMAPLFBFreeKernelMem(psBuffer);
++ErrorFreeSwapChain:
++      OMAPLFBFreeKernelMem(psSwapChain);
++
++      return eError;
++}
++
++      
++static IMG_VOID FlushInternalVSyncQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++      OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
++      IMG_UINT32               ui32MaxIndex;
++
++      
++      DisableVSyncInterrupt(psSwapChain);
++
++      
++      psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      ui32MaxIndex = psSwapChain->ui32BufferCount - 1;
++
++      while(psFlipItem->bValid)
++      {
++              if(psFlipItem->bFlipped == IMG_FALSE)
++              {
++                      
++                      Flip(psSwapChain, (IMG_UINT32)psFlipItem->sSysAddr);
++              }
++
++              
++              if(psFlipItem->bCmdCompleted == IMG_FALSE)
++              {
++                      psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(psFlipItem->hCmdComplete, IMG_TRUE);
++              }
++
++              
++              psSwapChain->ui32RemoveIndex++;
++              
++              if(psSwapChain->ui32RemoveIndex == ui32MaxIndex)
++              {
++                      psSwapChain->ui32RemoveIndex = 0;
++              }
++
++              
++              psFlipItem->bFlipped = IMG_FALSE;
++              psFlipItem->bCmdCompleted = IMG_FALSE;
++              psFlipItem->bValid = IMG_FALSE;
++              
++              
++              psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      }
++
++      psSwapChain->ui32InsertIndex = 0;
++      psSwapChain->ui32RemoveIndex = 0;
++
++      
++      EnableVSyncInterrupt(psSwapChain);
++}
++
++static PVRSRV_ERROR DestroyDCSwapChain(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      PVRSRV_ERROR    eError;
++
++      
++      if(!hDevice || !hSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
++      if (psSwapChain != psDevInfo->psSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++
++      
++      psDevInfo->psSwapChain = IMG_NULL;
++
++      
++      FlushInternalVSyncQueue(psSwapChain);
++
++      
++      eError =Flip(psSwapChain, psSwapChain->psBuffer[0].sSysAddr.uiAddr);
++
++      if(eError != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;    
++      }
++
++      DisableVSyncInterrupt(psSwapChain);
++      
++      
++      unblank_display(psDevInfo);
++      if(OMAPLFBUninstallVSyncISR(psSwapChain) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;            
++      }
++
++      
++      OMAPLFBFreeKernelMem(psSwapChain->psVSyncFlips);
++      OMAPLFBFreeKernelMem(psSwapChain->psBuffer);
++      OMAPLFBFreeKernelMem(psSwapChain);
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR SetDCDstRect(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_RECT *psRect)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(psRect);     
++
++      
++      
++      return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static PVRSRV_ERROR SetDCSrcRect(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_RECT *psRect)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(psRect);     
++
++      
++
++      return PVRSRV_ERROR_NOT_SUPPORTED;      
++}
++
++static PVRSRV_ERROR SetDCDstColourKey(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_UINT32 ui32CKColour)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(ui32CKColour);       
++
++      
++
++      return PVRSRV_ERROR_NOT_SUPPORTED;
++}
++
++static PVRSRV_ERROR SetDCSrcColourKey(IMG_HANDLE hDevice,
++                                                                      IMG_HANDLE hSwapChain,
++                                                                      IMG_UINT32 ui32CKColour)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevice);    
++      PVR_UNREFERENCED_PARAMETER(hSwapChain); 
++      PVR_UNREFERENCED_PARAMETER(ui32CKColour);       
++
++      
++
++      return PVRSRV_ERROR_NOT_SUPPORTED;      
++}
++
++static PVRSRV_ERROR GetDCBuffers(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain,
++      IMG_UINT32 *pui32BufferCount,
++      IMG_HANDLE *phBuffer)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      IMG_UINT32 i;
++      
++      
++      if(!hDevice 
++      || !hSwapChain
++      || !pui32BufferCount
++      || !phBuffer)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
++      if (psSwapChain != psDevInfo->psSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      
++      *pui32BufferCount = psSwapChain->ui32BufferCount;
++      
++      
++      for(i=0; i<psSwapChain->ui32BufferCount; i++)
++      {
++              phBuffer[i] = (IMG_HANDLE)&psSwapChain->psBuffer[i];
++      }
++      
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR SwapToDCBuffer(IMG_HANDLE hDevice,
++      IMG_HANDLE hBuffer,
++      IMG_UINT32 ui32SwapInterval,
++      IMG_HANDLE hPrivateTag,
++      IMG_UINT32 ui32ClipRectCount,
++      IMG_RECT *psClipRect)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++
++      PVR_UNREFERENCED_PARAMETER(ui32SwapInterval);
++      PVR_UNREFERENCED_PARAMETER(hPrivateTag);        
++      PVR_UNREFERENCED_PARAMETER(psClipRect);
++      
++      if(!hDevice 
++      || !hBuffer
++      || (ui32ClipRectCount != 0))
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      PVR_UNREFERENCED_PARAMETER(hBuffer);
++
++      
++
++      return PVRSRV_OK;       
++}
++
++static PVRSRV_ERROR SwapToDCSystem(IMG_HANDLE hDevice,
++      IMG_HANDLE hSwapChain)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      
++      if(!hDevice || !hSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      psSwapChain = (OMAPLFB_SWAPCHAIN*)hSwapChain;
++      if (psSwapChain != psDevInfo->psSwapChain)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;     
++      }
++      
++      
++      FlushInternalVSyncQueue(psSwapChain);
++      
++      
++      Flip(psSwapChain, psSwapChain->psBuffer[0].sSysAddr.uiAddr);
++
++      return PVRSRV_OK;               
++}
++
++static IMG_VOID SetDCState(IMG_HANDLE hDevice,
++                                       IMG_UINT32 ui32State)
++{
++      OMAPLFB_DEVINFO *psDevInfo;
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)hDevice;
++      
++      if (ui32State == DC_STATE_FLUSH_COMMANDS)
++      {
++              OMAPLFB_SWAPCHAIN *psSwapChain = psDevInfo->psSwapChain;
++              if (psSwapChain != IMG_NULL)
++              {
++                      FlushInternalVSyncQueue(psSwapChain);
++              }
++              
++              psDevInfo->bFlushCommands = IMG_TRUE;
++      }
++      else if (ui32State == DC_STATE_NO_FLUSH_COMMANDS)
++      {
++              psDevInfo->bFlushCommands = IMG_FALSE;
++      }
++}
++
++IMG_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++      IMG_BOOL                bStatus = IMG_FALSE;
++      OMAPLFB_VSYNC_FLIP_ITEM *psFlipItem;
++      IMG_UINT32 ui32MaxIndex;
++      
++      psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      ui32MaxIndex = psSwapChain->ui32BufferCount - 1;        
++
++      while(psFlipItem->bValid)
++      {       
++              
++              if(psFlipItem->bFlipped)
++              {
++                      
++                      if(!psFlipItem->bCmdCompleted)
++                      {
++                              
++                              psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(psFlipItem->hCmdComplete, IMG_TRUE);
++
++                              
++                              psFlipItem->bCmdCompleted = IMG_TRUE;                                   
++                      }
++
++                      
++                      psFlipItem->ui32SwapInterval--;                                 
++                                              
++                      
++                      if(psFlipItem->ui32SwapInterval == 0)
++                      {       
++                              
++                              psSwapChain->ui32RemoveIndex++;
++                              
++                              if(psSwapChain->ui32RemoveIndex == ui32MaxIndex)
++                              {
++                                      psSwapChain->ui32RemoveIndex = 0;
++                              }
++                              
++                              
++                              psFlipItem->bCmdCompleted = IMG_FALSE;  
++                              psFlipItem->bFlipped = IMG_FALSE;                       
++      
++                              
++                              psFlipItem->bValid = IMG_FALSE;
++                      }
++                      else
++                      {
++                              
++                              break;                                  
++                      }
++              }
++              else
++              {
++                      
++                      Flip(psSwapChain, (IMG_UINT32)psFlipItem->sSysAddr);
++                      
++                      
++                      psFlipItem->bFlipped = IMG_TRUE;
++                      
++                      
++                      break;
++              }
++              
++              
++              psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32RemoveIndex];
++      }
++              
++      return bStatus;
++}
++
++static IMG_BOOL ProcessFlip(IMG_HANDLE        hCmdCookie, 
++                                                      IMG_UINT32      ui32DataSize,
++                                                      IMG_VOID        *pvData)
++{
++      PVRSRV_ERROR eError;
++      DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++      OMAPLFB_DEVINFO *psDevInfo;
++      OMAPLFB_BUFFER *psBuffer;
++      OMAPLFB_SWAPCHAIN *psSwapChain;
++      OMAPLFB_VSYNC_FLIP_ITEM* psFlipItem;
++
++      
++      if(!hCmdCookie || !pvData)
++      {
++              return IMG_FALSE;                                               
++      }
++
++      
++      psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)pvData;
++
++      if (psFlipCmd == IMG_NULL || sizeof(DISPLAYCLASS_FLIP_COMMAND) != ui32DataSize)
++      {
++              return IMG_FALSE;                               
++      }
++
++      
++      psDevInfo = (OMAPLFB_DEVINFO*)psFlipCmd->hExtDevice;
++      psBuffer = (OMAPLFB_BUFFER*)psFlipCmd->hExtBuffer; 
++      psSwapChain = (OMAPLFB_SWAPCHAIN*) psFlipCmd->hExtSwapChain;
++
++      if (psDevInfo->bFlushCommands)
++      {
++              
++              psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++              return IMG_TRUE;
++      }
++       
++              
++              
++
++      if(psFlipCmd->ui32SwapInterval == 0)
++      {                       
++              
++              
++              
++
++              
++              eError = Flip(psSwapChain, psBuffer->sSysAddr.uiAddr);
++
++              if(eError != PVRSRV_OK)
++              {
++                      return IMG_FALSE;       
++              }
++
++              
++              psSwapChain->psPVRJTable->pfnPVRSRVCmdComplete(hCmdCookie, IMG_TRUE);
++
++              return IMG_TRUE;
++      }
++
++      psFlipItem = &psSwapChain->psVSyncFlips[psSwapChain->ui32InsertIndex];
++
++      
++      if(!psFlipItem->bValid)
++      {
++              IMG_UINT32 ui32MaxIndex = psSwapChain->ui32BufferCount - 1;
++              
++              if(psSwapChain->ui32InsertIndex == psSwapChain->ui32RemoveIndex)
++              {
++                      
++                      eError = Flip(psSwapChain, psBuffer->sSysAddr.uiAddr);
++                      if(eError != PVRSRV_OK)
++                      {
++                              return IMG_FALSE;       
++                      }
++
++                      psFlipItem->bFlipped = IMG_TRUE;
++              }
++              else
++              {
++                      psFlipItem->bFlipped = IMG_FALSE;
++              }
++
++              psFlipItem->hCmdComplete = hCmdCookie;
++              psFlipItem->ui32SwapInterval = psFlipCmd->ui32SwapInterval;
++              psFlipItem->sSysAddr = &psBuffer->sSysAddr;
++              psFlipItem->bValid = IMG_TRUE;
++
++              psSwapChain->ui32InsertIndex++;
++              if(psSwapChain->ui32InsertIndex >= ui32MaxIndex)
++              {
++                      psSwapChain->ui32InsertIndex = 0;
++              }
++
++              return IMG_TRUE;        
++      }
++      
++      return IMG_FALSE;
++}
++
++
++static PVRSRV_ERROR InitDev(OMAPLFB_DEVINFO *psDevInfo)
++{
++      struct fb_info *psLINFBInfo;
++      struct module *psLINFBOwner;
++      OMAPLFB_FBINFO *psPVRFBInfo = &psDevInfo->sFBInfo;
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++      unsigned long FBSize;
++
++      acquire_console_sem();
++
++      if (fb_idx < 0 || fb_idx >= num_registered_fb)
++      {
++              eError = PVRSRV_ERROR_INVALID_DEVICE;
++              goto errRelSem;
++      }
++
++      psLINFBInfo = registered_fb[fb_idx];
++
++      psLINFBOwner = psLINFBInfo->fbops->owner;
++      if (!try_module_get(psLINFBOwner))
++      {
++              printk(KERN_INFO DRIVER_PREFIX
++                      ": Couldn't get framebuffer module\n");
++
++              goto errRelSem;
++      }
++
++      if (psLINFBInfo->fbops->fb_open != NULL)
++      {
++              int res;
++
++              res = psLINFBInfo->fbops->fb_open(psLINFBInfo, 0);
++              if (res != 0)
++              {
++                      printk(KERN_INFO DRIVER_PREFIX
++                              ": Couldn't open framebuffer: %d\n", res);
++
++                      goto errModPut;
++              }
++      }
++
++      psDevInfo->psLINFBInfo = psLINFBInfo;
++
++      FBSize = (psLINFBInfo->screen_size) != 0 ?
++                                      psLINFBInfo->screen_size :
++                                      psLINFBInfo->fix.smem_len;
++#ifdef        DEBUG
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer physical address: 0x%lx\n",
++                      psLINFBInfo->fix.smem_start);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer virtual address: 0x%lx\n",
++                      (unsigned long)psLINFBInfo->screen_base);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer size: %lu\n",
++                      FBSize);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer virtual width: %u\n",
++                      psLINFBInfo->var.xres_virtual);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer virtual height: %u\n",
++                      psLINFBInfo->var.yres_virtual);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer width: %u\n",
++                      psLINFBInfo->var.xres);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer height: %u\n",
++                      psLINFBInfo->var.yres);
++      printk(KERN_INFO DRIVER_PREFIX
++                      ": Framebuffer stride: %u\n",
++                      psLINFBInfo->fix.line_length);
++#endif
++
++      
++      psPVRFBInfo->sSysAddr.uiAddr = psLINFBInfo->fix.smem_start;
++      psPVRFBInfo->sCPUVAddr = psLINFBInfo->screen_base;
++
++      psPVRFBInfo->ui32Width = psLINFBInfo->var.xres;
++      psPVRFBInfo->ui32Height = psLINFBInfo->var.yres;
++      psPVRFBInfo->ui32ByteStride =  psLINFBInfo->fix.line_length;
++      psPVRFBInfo->ui32FBSize = FBSize;
++      psPVRFBInfo->ui32BufferSize = psPVRFBInfo->ui32Height * psPVRFBInfo->ui32ByteStride;
++      
++      psPVRFBInfo->ui32RoundedBufferSize = OMAPLFB_PAGE_ROUNDUP(psPVRFBInfo->ui32BufferSize);
++
++      if(psLINFBInfo->var.bits_per_pixel == 16)
++      {
++              if((psLINFBInfo->var.red.length == 5) &&
++                      (psLINFBInfo->var.green.length == 6) && 
++                      (psLINFBInfo->var.blue.length == 5) && 
++                      (psLINFBInfo->var.red.offset == 11) &&
++                      (psLINFBInfo->var.green.offset == 5) && 
++                      (psLINFBInfo->var.blue.offset == 0) && 
++                      (psLINFBInfo->var.red.msb_right == 0))
++              {
++                      psPVRFBInfo->ePixelFormat = PVRSRV_PIXEL_FORMAT_RGB565;
++              }
++              else
++              {
++                      printk("Unknown FB format\n");
++              }
++      }
++      else
++      {
++              printk("Unknown FB format\n");
++      }
++
++      
++      psDevInfo->sFBInfo.sSysAddr.uiAddr = psPVRFBInfo->sSysAddr.uiAddr;
++      psDevInfo->sFBInfo.sCPUVAddr = psPVRFBInfo->sCPUVAddr;
++
++      eError = PVRSRV_OK;
++      goto errRelSem;
++
++errModPut:
++      module_put(psLINFBOwner);
++errRelSem:
++      release_console_sem();
++      return eError;
++}
++
++static IMG_VOID DeInitDev(OMAPLFB_DEVINFO *psDevInfo)
++{
++      struct fb_info *psLINFBInfo = psDevInfo->psLINFBInfo;
++      struct module *psLINFBOwner;
++
++      acquire_console_sem();
++
++      psLINFBOwner = psLINFBInfo->fbops->owner;
++
++      if (psLINFBInfo->fbops->fb_release != NULL) 
++      {
++              (void) psLINFBInfo->fbops->fb_release(psLINFBInfo, 0);
++      }
++
++      module_put(psLINFBOwner);
++
++      release_console_sem();
++}
++
++PVRSRV_ERROR OMAPLFBInit(IMG_VOID)
++{
++      OMAPLFB_DEVINFO         *psDevInfo;
++
++      psDevInfo = GetAnchorPtr();
++      
++      if (psDevInfo == IMG_NULL)
++      {
++              PFN_CMD_PROC                    pfnCmdProcList[OMAPLFB_COMMAND_COUNT];
++              IMG_UINT32                              aui32SyncCountList[OMAPLFB_COMMAND_COUNT][2];
++              
++              psDevInfo = (OMAPLFB_DEVINFO *)OMAPLFBAllocKernelMem(sizeof(OMAPLFB_DEVINFO));
++
++              if(!psDevInfo)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              memset(psDevInfo, 0, sizeof(OMAPLFB_DEVINFO));
++
++              
++              SetAnchorPtr((IMG_VOID*)psDevInfo);
++
++              
++              psDevInfo->ui32RefCount = 0;
++
++              
++              if(InitDev(psDevInfo) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              if(OMAPLFBGetLibFuncAddr ("PVRGetDisplayClassJTable", &pfnGetPVRJTable) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;       
++              }
++
++              
++              if(!(*pfnGetPVRJTable)(&psDevInfo->sPVRJTable))
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;       
++              }
++
++              
++
++              
++              psDevInfo->bFlushCommands = IMG_FALSE;
++              psDevInfo->psSwapChain = IMG_NULL;              
++              psDevInfo->sDisplayInfo.ui32MinSwapInterval = 0;
++              psDevInfo->sDisplayInfo.ui32MaxSwapInterval = 3;
++              psDevInfo->sDisplayInfo.ui32MaxSwapChains = 1;
++              psDevInfo->sDisplayInfo.ui32MaxSwapChainBuffers = psDevInfo->sFBInfo.ui32FBSize / psDevInfo->sFBInfo.ui32RoundedBufferSize;
++
++              strncpy(psDevInfo->sDisplayInfo.szDisplayName, DISPLAY_DEVICE_NAME, MAX_DISPLAY_NAME_SIZE);
++      
++              psDevInfo->sDisplayFormat.pixelformat = psDevInfo->sFBInfo.ePixelFormat;
++              psDevInfo->sDisplayDim.ui32Width =  psDevInfo->sFBInfo.ui32Width;
++              psDevInfo->sDisplayDim.ui32Height =  psDevInfo->sFBInfo.ui32Height;
++              psDevInfo->sDisplayDim.ui32ByteStride =  psDevInfo->sFBInfo.ui32ByteStride;
++
++              
++              psDevInfo->sSystemBuffer.sSysAddr = psDevInfo->sFBInfo.sSysAddr;
++              psDevInfo->sSystemBuffer.sCPUVAddr = psDevInfo->sFBInfo.sCPUVAddr;
++              psDevInfo->sSystemBuffer.ui32BufferSize = (psDevInfo->sFBInfo.ui32RoundedBufferSize);
++
++              
++
++              psDevInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
++              psDevInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
++              psDevInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
++              psDevInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
++              psDevInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
++              psDevInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
++              psDevInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
++              psDevInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
++              psDevInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
++              psDevInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
++              psDevInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
++              psDevInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
++              psDevInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
++              psDevInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
++              psDevInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
++              psDevInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
++              psDevInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
++              psDevInfo->sDCJTable.pfnSetDCState = SetDCState;
++
++              
++              if(psDevInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice (
++                      &psDevInfo->sDCJTable,
++                      &psDevInfo->ui32DeviceID ) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++              }
++              
++              
++              pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
++
++              
++              aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
++              aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;
++
++              
++
++
++
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList (psDevInfo->ui32DeviceID, 
++                                                                                                                              &pfnCmdProcList[0], 
++                                                                                                                              aui32SyncCountList,
++                                                                                                                              OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
++              {
++                      printk(KERN_WARNING DRIVER_PREFIX ": Can't register callback\n");
++                      return PVRSRV_ERROR_CANT_REGISTER_CALLBACK;
++              }
++
++#if defined(OMAPLFB_DEVICE_POWER)
++              
++
++
++#else
++              
++
++
++
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice(psDevInfo->ui32DeviceID,
++                                                                      OMAPLFBPrePower,
++                                                                      OMAPLFBPostPower,
++                                                                      psDevInfo,
++                                                                      PVRSRV_POWER_Unspecified,
++                                                                      PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_INIT_FAILURE;
++              }
++#endif 
++      }
++
++      
++      psDevInfo->ui32RefCount++;
++
++                      
++      return PVRSRV_OK;
++      
++      }
++
++PVRSRV_ERROR OMAPLFBDeinit(IMG_VOID)
++{
++      OMAPLFB_DEVINFO *psDevInfo, *psDevFirst;
++
++      psDevFirst = GetAnchorPtr();
++      psDevInfo = psDevFirst;
++
++      
++      if (psDevInfo == IMG_NULL)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psDevInfo->ui32RefCount--;
++
++      if (psDevInfo->ui32RefCount == 0)
++      {
++              
++              PVRSRV_DC_DISP2SRV_KMJTABLE     *psJTable = &psDevInfo->sPVRJTable;
++
++#if !defined(OMAPLFB_DEVICE_POWER)
++              
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRegisterPowerDevice(psDevInfo->ui32DeviceID,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL,
++                                                                     IMG_NULL) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;            
++              }
++#endif 
++
++              if (psDevInfo->sPVRJTable.pfnPVRSRVRemoveCmdProcList (psDevInfo->ui32DeviceID, OMAPLFB_COMMAND_COUNT) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++
++              
++              if (psJTable->pfnPVRSRVRemoveDCDevice(psDevInfo->ui32DeviceID) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              
++              DeInitDev(psDevInfo);
++
++              
++              OMAPLFBFreeKernelMem(psDevInfo);
++      }
++      
++      
++      SetAnchorPtr(IMG_NULL);
++
++      
++      return PVRSRV_OK;
++}
++
++
++#if !defined(OMAPLFB_DEVICE_POWER)
++PVRSRV_ERROR OMAPLFBPrePower (IMG_HANDLE              hDevHandle,
++                                                PVR_POWER_STATE       eNewPowerState,
++                                                PVR_POWER_STATE       eCurrentPowerState)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevHandle);
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++      PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OMAPLFBPostPower (IMG_HANDLE             hDevHandle,
++                                                 PVR_POWER_STATE      eNewPowerState,
++                                                 PVR_POWER_STATE      eCurrentPowerState)
++{
++      OMAPLFB_DEVINFO *psDevInfo = (OMAPLFB_DEVINFO *)hDevHandle;
++
++      if ((eNewPowerState != eCurrentPowerState) &&
++              (eCurrentPowerState == PVRSRV_POWER_STATE_D3))
++      {
++              
++              if (psDevInfo->psSwapChain != IMG_NULL)
++              {
++                      EnableVSyncInterrupt(psDevInfo->psSwapChain);                   
++              }
++      }
++
++      return PVRSRV_OK;
++}
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,206 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __OMAPLFB_H__
++#define __OMAPLFB_H__
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++
++#define OMAPLCD_IRQ                   25
++
++#define OMAPLCD_SYSCONFIG             0x0410
++#define OMAPLCD_CONFIG                        0x0444
++#define OMAPLCD_DEFAULT_COLOR0                0x044C
++#define OMAPLCD_TIMING_H              0x0464
++#define OMAPLCD_TIMING_V              0x0468
++#define OMAPLCD_POL_FREQ              0x046C
++#define OMAPLCD_DIVISOR                       0x0470
++#define OMAPLCD_SIZE_DIG              0x0478
++#define OMAPLCD_SIZE_LCD              0x047C
++#define OMAPLCD_GFX_POSITION          0x0488
++#define OMAPLCD_GFX_SIZE              0x048C
++#define OMAPLCD_GFX_ATTRIBUTES                0x04a0
++#define OMAPLCD_GFX_FIFO_THRESHOLD    0x04a4
++#define OMAPLCD_GFX_WINDOW_SKIP               0x04b4
++
++#define OMAPLCD_IRQSTATUS             0x0418
++#define OMAPLCD_IRQENABLE             0x041c
++#define OMAPLCD_CONTROL                       0x0440
++#define OMAPLCD_GFX_BA0                       0x0480
++#define OMAPLCD_GFX_BA1                       0x0484
++#define OMAPLCD_GFX_ROW_INC           0x04ac
++#define OMAPLCD_GFX_PIX_INC           0x04b0
++#define OMAPLCD_VID1_BA0              0x04bc
++#define OMAPLCD_VID1_BA1              0x04c0
++#define OMAPLCD_VID1_ROW_INC          0x04d8
++#define OMAPLCD_VID1_PIX_INC          0x04dc
++
++#define       OMAP_CONTROL_GODIGITAL          (1 << 6)
++#define       OMAP_CONTROL_GOLCD              (1 << 5)
++#define       OMAP_CONTROL_DIGITALENABLE      (1 << 1)
++#define       OMAP_CONTROL_LCDENABLE          (1 << 0)
++
++#define OMAPLCD_INTMASK_VSYNC         (1 << 1)
++#define OMAPLCD_INTMASK_OFF           0
++
++#define DISPC_IRQSTATUS_EVSYNC_ODD    (1 <<  3)
++#define DISPC_IRQSTATUS_EVSYNC_EVEN   (1 <<  2)
++
++/*
++ * from $(KERNELDIR)/include/asm-arm/arch/display.h
++ */
++#define OMAP2_GRAPHICS                0
++#define OMAP2_VIDEO1          1
++#define OMAP2_VIDEO2          2
++
++#define OMAP2_OUTPUT_LCD      4
++#define OMAP2_OUTPUT_TV               5
++
++typedef struct OMAPLFB_BUFFER_TAG
++{
++      IMG_SYS_PHYADDR                 sSysAddr;
++      IMG_CPU_VIRTADDR                sCPUVAddr;
++      IMG_UINT32                      ui32BufferSize;
++      PVRSRV_SYNC_DATA                *psSyncData;    
++      struct OMAPLFB_BUFFER_TAG       *psNext;
++} OMAPLFB_BUFFER;
++
++typedef struct OMAPLFB_VSYNC_FLIP_ITEM_TAG
++{     
++
++      IMG_HANDLE      hCmdComplete;
++      
++      IMG_SYS_PHYADDR* sSysAddr;
++      
++      IMG_UINT32      ui32SwapInterval;
++      
++      IMG_BOOL        bValid;
++      
++      IMG_BOOL        bFlipped;
++      
++      IMG_BOOL        bCmdCompleted;
++
++} OMAPLFB_VSYNC_FLIP_ITEM;
++
++typedef struct PVRPDP_SWAPCHAIN_TAG
++{
++      
++      IMG_UINT32 ui32BufferCount;
++      
++      OMAPLFB_BUFFER *psBuffer;
++      
++      OMAPLFB_VSYNC_FLIP_ITEM *psVSyncFlips;
++
++      
++      IMG_UINT32 ui32InsertIndex;
++      
++      
++      IMG_UINT32 ui32RemoveIndex;
++
++      
++      PVRSRV_DC_DISP2SRV_KMJTABLE     *psPVRJTable;
++} OMAPLFB_SWAPCHAIN;
++
++typedef struct OMAPLFB_FBINFO_TAG
++{
++      IMG_SYS_PHYADDR                 sSysAddr;
++      IMG_CPU_VIRTADDR                sCPUVAddr;
++      IMG_UINT32                      ui32FBSize;
++      IMG_UINT32                      ui32BufferSize;
++      IMG_UINT32                      ui32RoundedBufferSize;
++      IMG_UINT32                      ui32Width;
++      IMG_UINT32                      ui32Height;
++      IMG_UINT32                      ui32ByteStride;
++      
++      PVRSRV_PIXEL_FORMAT             ePixelFormat;
++}OMAPLFB_FBINFO;
++
++typedef struct OMAPLFB_DEVINFO_TAG
++{
++      IMG_UINT32                      ui32DeviceID;   
++      DISPLAY_INFO                    sDisplayInfo;
++
++      
++      OMAPLFB_BUFFER                  sSystemBuffer;
++
++      
++      DISPLAY_FORMAT                  sDisplayFormat;
++      
++      
++      DISPLAY_DIMS                    sDisplayDim;    
++      
++      
++      PVRSRV_DC_DISP2SRV_KMJTABLE     sPVRJTable;
++      
++      
++      PVRSRV_DC_SRV2DISP_KMJTABLE     sDCJTable;
++
++      
++      OMAPLFB_FBINFO                  sFBInfo;
++
++      
++      IMG_UINT32                      ui32RefCount;
++
++      
++      OMAPLFB_SWAPCHAIN               *psSwapChain;
++
++      
++      IMG_DEV_VIRTADDR                sDisplayDevVAddr;
++
++      
++      IMG_BOOL                        bFlushCommands;
++
++      
++      struct fb_info                  *psLINFBInfo;
++
++      
++      struct  notifier_block          sLINNotifBlock;
++
++      
++      struct  work_struct             sLINWork;
++}  OMAPLFB_DEVINFO;
++
++#define       OMAPLFB_PAGE_SIZE 4096
++#define       OMAPLFB_PAGE_MASK (OMAPLFB_PAGE_SIZE - 1)
++#define       OMAPLFB_PAGE_TRUNC (~OMAPLFB_PAGE_MASK)
++
++#define       OMAPLFB_PAGE_ROUNDUP(x) (((x) + OMAPLFB_PAGE_MASK) & OMAPLFB_PAGE_TRUNC)
++
++PVRSRV_ERROR OMAPLFBInit(IMG_VOID);
++PVRSRV_ERROR OMAPLFBDeinit(IMG_VOID);
++
++IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size);
++IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID *pvMem);
++IMG_VOID OMAPLFBWriteReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++IMG_UINT32 OMAPLFBReadReg(OMAPLFB_SWAPCHAIN *psSwapChain, IMG_UINT32 ui32Offset);
++PVRSRV_ERROR OMAPLFBGetLibFuncAddr(IMG_CHAR *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable);
++PVRSRV_ERROR OMAPLFBInstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain);
++PVRSRV_ERROR OMAPLFBUninstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain);
++IMG_BOOL OMAPLFBVSyncIHandler(OMAPLFB_SWAPCHAIN *psSwapChain);
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c
+--- git/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/3rdparty/dc_omap3430_linux/omaplfb_linux.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/workqueue.h>
++
++#include <linux/pci.h>
++#include <asm/uaccess.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++
++#include <asm/io.h>
++
++#include "img_defs.h"
++#include "servicesext.h"
++ #include "kerneldisplay.h"
++#include "omaplfb.h"
++
++#define DRVNAME "omaplfb"
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DRVNAME);
++
++
++extern int omap_dispc_request_irq(unsigned long, void (*)(void *), void *);
++extern void omap_dispc_free_irq(unsigned long, void (*)(void *), void *);
++
++
++#define unref__ __attribute__ ((unused))
++
++
++
++
++static int __init OMAPLFB_Init(void)
++{
++      if(OMAPLFBInit() != PVRSRV_OK)
++              return -ENODEV;
++
++      return 0;
++}
++
++static void __exit OMAPLFB_Cleanup(void)
++{    
++      if(OMAPLFBDeinit() != PVRSRV_OK)
++      {
++              printk ("OMAPLFB_Cleanup: can't deinit device\n");
++      }
++}
++
++
++IMG_VOID *OMAPLFBAllocKernelMem(IMG_UINT32 ui32Size)
++{
++      return kmalloc(ui32Size, GFP_KERNEL);
++}
++
++IMG_VOID OMAPLFBFreeKernelMem(IMG_VOID *pvMem)
++{
++      kfree(pvMem);
++}
++
++
++PVRSRV_ERROR OMAPLFBGetLibFuncAddr (IMG_CHAR *szFunctionName, PFN_DC_GET_PVRJTABLE *ppfnFuncTable)
++{
++      if(strcmp("PVRGetDisplayClassJTable", szFunctionName) != 0)
++              return PVRSRV_ERROR_INVALID_PARAMS;
++
++      
++      *ppfnFuncTable = PVRGetDisplayClassJTable;
++
++      return PVRSRV_OK;
++}
++
++static void
++OMAPLFBVSyncISR(void *arg)
++{
++      (void) OMAPLFBVSyncIHandler((OMAPLFB_SWAPCHAIN *)arg);
++}
++
++#define DISPC_IRQ_VSYNC 0x0002
++
++PVRSRV_ERROR OMAPLFBInstallVSyncISR(OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++
++        if (omap_dispc_request_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain) != 0)
++            return PVRSRV_ERROR_OUT_OF_MEMORY; /* not worth a proper mapping */
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OMAPLFBUninstallVSyncISR (OMAPLFB_SWAPCHAIN *psSwapChain)
++{
++        omap_dispc_free_irq(DISPC_IRQ_VSYNC, OMAPLFBVSyncISR, psSwapChain);
++
++      return PVRSRV_OK;
++}
++
++module_init(OMAPLFB_Init);
++module_exit(OMAPLFB_Cleanup);
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/kernelbuffer.h git-nokia/drivers/gpu/pvr/services4/include/kernelbuffer.h
+--- git/drivers/gpu/pvr/services4/include/kernelbuffer.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/kernelbuffer.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELBUFFER_H__)
++#define __KERNELBUFFER_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_BC_DEVICE)(IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_BC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_GET_BC_INFO)(IMG_HANDLE, BUFFER_INFO*);
++typedef PVRSRV_ERROR (*PFN_GET_BC_BUFFER)(IMG_HANDLE, IMG_UINT32, PVRSRV_SYNC_DATA*, IMG_HANDLE*);
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG
++{
++      IMG_UINT32                                                      ui32TableSize;
++      PFN_OPEN_BC_DEVICE                                      pfnOpenBCDevice;
++      PFN_CLOSE_BC_DEVICE                                     pfnCloseBCDevice;
++      PFN_GET_BC_INFO                                         pfnGetBCInfo;
++      PFN_GET_BC_BUFFER                                       pfnGetBCBuffer;
++      PFN_GET_BUFFER_ADDR                                     pfnGetBufferAddr;
++
++} PVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++
++typedef PVRSRV_ERROR (*PFN_BC_REGISTER_BUFFER_DEV)(PVRSRV_BC_SRV2BUFFER_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_BC_REMOVE_BUFFER_DEV)(IMG_UINT32); 
++
++typedef struct PVRSRV_BC_BUFFER2SRV_KMJTABLE_TAG
++{
++      IMG_UINT32                                                      ui32TableSize;
++      PFN_BC_REGISTER_BUFFER_DEV                      pfnPVRSRVRegisterBCDevice;
++      PFN_BC_REMOVE_BUFFER_DEV                        pfnPVRSRVRemoveBCDevice;
++
++} PVRSRV_BC_BUFFER2SRV_KMJTABLE, *PPVRSRV_BC_BUFFER2SRV_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_BC_GET_PVRJTABLE) (PPVRSRV_BC_BUFFER2SRV_KMJTABLE); 
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/include/kerneldisplay.h git-nokia/drivers/gpu/pvr/services4/include/kerneldisplay.h
+--- git/drivers/gpu/pvr/services4/include/kerneldisplay.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/kerneldisplay.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__KERNELDISPLAY_H__)
++#define __KERNELDISPLAY_H__
++
++typedef PVRSRV_ERROR (*PFN_OPEN_DC_DEVICE)(IMG_UINT32, IMG_HANDLE*, PVRSRV_SYNC_DATA*);
++typedef PVRSRV_ERROR (*PFN_CLOSE_DC_DEVICE)(IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_FORMATS)(IMG_HANDLE, IMG_UINT32*, DISPLAY_FORMAT*);
++typedef PVRSRV_ERROR (*PFN_ENUM_DC_DIMS)(IMG_HANDLE,
++                                                                               DISPLAY_FORMAT*,
++                                                                               IMG_UINT32*,
++                                                                               DISPLAY_DIMS*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_SYSTEMBUFFER)(IMG_HANDLE, IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_GET_DC_INFO)(IMG_HANDLE, DISPLAY_INFO*);
++typedef PVRSRV_ERROR (*PFN_CREATE_DC_SWAPCHAIN)(IMG_HANDLE,
++                                                                                              IMG_UINT32, 
++                                                                                              DISPLAY_SURF_ATTRIBUTES*, 
++                                                                                              DISPLAY_SURF_ATTRIBUTES*,
++                                                                                              IMG_UINT32, 
++                                                                                              PVRSRV_SYNC_DATA**,
++                                                                                              IMG_UINT32,
++                                                                                              IMG_HANDLE*, 
++                                                                                              IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DESTROY_DC_SWAPCHAIN)(IMG_HANDLE, 
++                                                                                               IMG_HANDLE);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCRECT)(IMG_HANDLE, IMG_HANDLE, IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SET_DC_DSTCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_SET_DC_SRCCK)(IMG_HANDLE, IMG_HANDLE, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_GET_DC_BUFFERS)(IMG_HANDLE,
++                                                                                 IMG_HANDLE,
++                                                                                 IMG_UINT32*,
++                                                                                 IMG_HANDLE*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_BUFFER)(IMG_HANDLE,
++                                                                                        IMG_HANDLE,
++                                                                                        IMG_UINT32,
++                                                                                        IMG_HANDLE,
++                                                                                        IMG_UINT32,
++                                                                                        IMG_RECT*);
++typedef PVRSRV_ERROR (*PFN_SWAP_TO_DC_SYSTEM)(IMG_HANDLE, IMG_HANDLE);
++typedef IMG_VOID (*PFN_SET_DC_STATE)(IMG_HANDLE, IMG_UINT32);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG
++{
++      IMG_UINT32                                              ui32TableSize;
++      PFN_OPEN_DC_DEVICE                              pfnOpenDCDevice;
++      PFN_CLOSE_DC_DEVICE                             pfnCloseDCDevice;
++      PFN_ENUM_DC_FORMATS                             pfnEnumDCFormats;
++      PFN_ENUM_DC_DIMS                                pfnEnumDCDims;
++      PFN_GET_DC_SYSTEMBUFFER                 pfnGetDCSystemBuffer;
++      PFN_GET_DC_INFO                                 pfnGetDCInfo;
++      PFN_GET_BUFFER_ADDR                             pfnGetBufferAddr;
++      PFN_CREATE_DC_SWAPCHAIN                 pfnCreateDCSwapChain;
++      PFN_DESTROY_DC_SWAPCHAIN                pfnDestroyDCSwapChain;
++      PFN_SET_DC_DSTRECT                              pfnSetDCDstRect;
++      PFN_SET_DC_SRCRECT                              pfnSetDCSrcRect;
++      PFN_SET_DC_DSTCK                                pfnSetDCDstColourKey;
++      PFN_SET_DC_SRCCK                                pfnSetDCSrcColourKey;
++      PFN_GET_DC_BUFFERS                              pfnGetDCBuffers;
++      PFN_SWAP_TO_DC_BUFFER                   pfnSwapToDCBuffer;
++      PFN_SWAP_TO_DC_SYSTEM                   pfnSwapToDCSystem;
++      PFN_SET_DC_STATE                                pfnSetDCState;
++
++} PVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef IMG_BOOL (*PFN_ISR_HANDLER)(IMG_VOID*);
++
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_DISPLAY_DEV)(PVRSRV_DC_SRV2DISP_KMJTABLE*, IMG_UINT32*);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_DISPLAY_DEV)(IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_OEM_FUNCTION)(IMG_UINT32, IMG_VOID*, IMG_UINT32, IMG_VOID*, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_COMMANDPROCLIST)(IMG_UINT32, PPFN_CMD_PROC,IMG_UINT32[][2], IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REMOVE_COMMANDPROCLIST)(IMG_UINT32, IMG_UINT32);
++typedef IMG_VOID (*PFN_DC_CMD_COMPLETE)(IMG_HANDLE, IMG_BOOL);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_SYS_ISR)(PFN_ISR_HANDLER, IMG_VOID*, IMG_UINT32, IMG_UINT32);
++typedef PVRSRV_ERROR (*PFN_DC_REGISTER_POWER)(IMG_UINT32, PFN_PRE_POWER, PFN_POST_POWER,
++                                                                                        IMG_HANDLE, PVR_POWER_STATE, PVR_POWER_STATE);
++
++typedef struct PVRSRV_DC_DISP2SRV_KMJTABLE_TAG
++{
++      IMG_UINT32                                              ui32TableSize;
++      PFN_DC_REGISTER_DISPLAY_DEV             pfnPVRSRVRegisterDCDevice;
++      PFN_DC_REMOVE_DISPLAY_DEV               pfnPVRSRVRemoveDCDevice;
++      PFN_DC_OEM_FUNCTION                             pfnPVRSRVOEMFunction;
++      PFN_DC_REGISTER_COMMANDPROCLIST pfnPVRSRVRegisterCmdProcList;
++      PFN_DC_REMOVE_COMMANDPROCLIST   pfnPVRSRVRemoveCmdProcList;
++      PFN_DC_CMD_COMPLETE                             pfnPVRSRVCmdComplete;
++      PFN_DC_REGISTER_SYS_ISR                 pfnPVRSRVRegisterSystemISRHandler;
++      PFN_DC_REGISTER_POWER                   pfnPVRSRVRegisterPowerDevice;
++} PVRSRV_DC_DISP2SRV_KMJTABLE, *PPVRSRV_DC_DISP2SRV_KMJTABLE;
++
++
++typedef struct DISPLAYCLASS_FLIP_COMMAND_TAG
++{
++      
++      IMG_HANDLE hExtDevice;
++
++      
++      IMG_HANDLE hExtSwapChain;
++
++      
++      IMG_HANDLE hExtBuffer;
++
++      
++      IMG_HANDLE hPrivateTag;
++
++      
++      IMG_UINT32 ui32ClipRectCount;
++
++      
++      IMG_RECT *psClipRect;
++
++      
++      IMG_UINT32      ui32SwapInterval;
++
++} DISPLAYCLASS_FLIP_COMMAND;
++
++#define DC_FLIP_COMMAND               0
++
++#define DC_STATE_NO_FLUSH_COMMANDS            0
++#define DC_STATE_FLUSH_COMMANDS                       1
++
++
++typedef IMG_BOOL (*PFN_DC_GET_PVRJTABLE)(PPVRSRV_DC_DISP2SRV_KMJTABLE);
++
++
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvr_bridge.h git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge.h
+--- git/drivers/gpu/pvr/services4/include/pvr_bridge.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1313 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_H__
++#define __PVR_BRIDGE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "servicesint.h"
++
++#ifdef __linux__
++
++              #include <linux/ioctl.h>
++    
++    #define PVRSRV_IOC_GID      'g'
++    #define PVRSRV_IO(INDEX)    _IO(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++    #define PVRSRV_IOW(INDEX)   _IOW(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++    #define PVRSRV_IOR(INDEX)   _IOR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++    #define PVRSRV_IOWR(INDEX)  _IOWR(PVRSRV_IOC_GID, INDEX, PVRSRV_BRIDGE_PACKAGE)
++
++#else 
++
++                      #error Unknown platform: Cannot define ioctls
++
++      #define PVRSRV_IO(INDEX)    (PVRSRV_IOC_GID + INDEX)
++      #define PVRSRV_IOW(INDEX)   (PVRSRV_IOC_GID + INDEX)
++      #define PVRSRV_IOR(INDEX)   (PVRSRV_IOC_GID + INDEX)
++      #define PVRSRV_IOWR(INDEX)  (PVRSRV_IOC_GID + INDEX)
++
++      #define PVRSRV_BRIDGE_BASE                  PVRSRV_IOC_GID
++#endif 
++
++
++#define PVRSRV_BRIDGE_CORE_CMD_FIRST                  0
++#define PVRSRV_BRIDGE_ENUM_DEVICES                            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+0)     
++#define PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO              PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+1)     
++#define PVRSRV_BRIDGE_RELEASE_DEVICEINFO              PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+2)     
++#define PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+3)     
++#define PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT           PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+4)     
++#define PVRSRV_BRIDGE_ALLOC_DEVICEMEM                 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+5)     
++#define PVRSRV_BRIDGE_FREE_DEVICEMEM                  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+6)     
++#define PVRSRV_BRIDGE_GETFREE_DEVICEMEM                       PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+7)     
++#define PVRSRV_BRIDGE_CREATE_COMMANDQUEUE             PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+8)     
++#define PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+9)     
++#define       PVRSRV_BRIDGE_KV_TO_MMAP_DATA           PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+10)   
++#define PVRSRV_BRIDGE_CONNECT_SERVICES                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+11)    
++#define PVRSRV_BRIDGE_DISCONNECT_SERVICES             PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+12)    
++#define PVRSRV_BRIDGE_WRAP_DEVICE_MEM                 PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+13)    
++#define PVRSRV_BRIDGE_GET_DEVICEMEMINFO                       PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+14)    
++#define PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM             PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+15)
++#define PVRSRV_BRIDGE_FREE_DEV_VIRTMEM                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_MAP_EXT_MEMORY                  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_UNMAP_EXT_MEMORY                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+18)
++#define PVRSRV_BRIDGE_MAP_DEV_MEMORY                  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+19)
++#define PVRSRV_BRIDGE_UNMAP_DEV_MEMORY                        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+20)
++#define PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY  PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+21)
++#define PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+22)
++#define PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER            PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+23)
++#define PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER        PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++#define PVRSRV_BRIDGE_CORE_CMD_LAST                           (PVRSRV_BRIDGE_CORE_CMD_FIRST+24)
++
++#define PVRSRV_BRIDGE_SIM_CMD_FIRST                           (PVRSRV_BRIDGE_CORE_CMD_LAST+1)
++#define PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT            PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+0)      
++#define PVRSRV_BRIDGE_REGISTER_SIM_PROCESS            PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+1)      
++#define PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS  PVRSRV_IOWR(PVRSRV_BRIDGE_SIM_CMD_FIRST+2)      
++#define PVRSRV_BRIDGE_SIM_CMD_LAST                            (PVRSRV_BRIDGE_SIM_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_MAPPING_CMD_FIRST                       (PVRSRV_BRIDGE_SIM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE              PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+0)  
++#define PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE            PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+1)  
++#define PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP           PVRSRV_IOWR(PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)  
++#define PVRSRV_BRIDGE_MAPPING_CMD_LAST                        (PVRSRV_BRIDGE_MAPPING_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_POWER_CMD_FIRST                 (PVRSRV_BRIDGE_MAPPING_CMD_LAST+1)
++#define PVRSRV_BRIDGE_POWER_CONTROL                           PVRSRV_IOWR(PVRSRV_BRIDGE_POWER_CMD_FIRST+0)    
++#if defined (SUPPORT_INT_POWER_MAN)
++#define PVRSRV_BRIDGE_INT_POWER_MAN                           PVRSRV_IOWR(PVRSRV_BRIDGE_POWER_CMD_FIRST+1)    
++#endif
++#define PVRSRV_BRIDGE_POWER_CMD_LAST                  (PVRSRV_BRIDGE_POWER_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_STATS_CMD_FIRST                 (PVRSRV_BRIDGE_POWER_CMD_LAST+1)
++#define       PVRSRV_BRIDGE_GET_FB_STATS                              PVRSRV_IOWR(PVRSRV_BRIDGE_STATS_CMD_FIRST+0)    
++#define PVRSRV_BRIDGE_STATS_CMD_LAST                  (PVRSRV_BRIDGE_STATS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_MISC_CMD_FIRST                  (PVRSRV_BRIDGE_STATS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_MISC_INFO                           PVRSRV_IOWR(PVRSRV_BRIDGE_MISC_CMD_FIRST+0)     
++#define PVRSRV_BRIDGE_MISC_CMD_LAST                           (PVRSRV_BRIDGE_MISC_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_OVERLAY_CMD_FIRST                       (PVRSRV_BRIDGE_MISC_CMD_LAST+1)
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++#define PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES             PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+0)  
++#define PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES           PVRSRV_IOWR(PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)  
++#endif
++#define PVRSRV_BRIDGE_OVERLAY_CMD_LAST                        (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++
++#if defined(PDUMP)
++#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST                 (PVRSRV_BRIDGE_OVERLAY_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_PDUMP_INIT                              PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0)    
++#define PVRSRV_BRIDGE_PDUMP_MEMPOL                            PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1)    
++#define PVRSRV_BRIDGE_PDUMP_DUMPMEM                           PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2)    
++#define PVRSRV_BRIDGE_PDUMP_REG                                       PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)    
++#define PVRSRV_BRIDGE_PDUMP_REGPOL                            PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+4)    
++#define PVRSRV_BRIDGE_PDUMP_COMMENT                           PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+5)    
++#define PVRSRV_BRIDGE_PDUMP_SETFRAME                  PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+6)    
++#define PVRSRV_BRIDGE_PDUMP_ISCAPTURING                       PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+7)    
++#define PVRSRV_BRIDGE_PDUMP_DUMPBITMAP                        PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+8)    
++#define PVRSRV_BRIDGE_PDUMP_DUMPREADREG                       PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+9)    
++#define PVRSRV_BRIDGE_PDUMP_SYNCPOL                           PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+10)   
++#define PVRSRV_BRIDGE_PDUMP_DUMPSYNC                  PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+11)   
++#define PVRSRV_BRIDGE_PDUMP_MEMPAGES                  PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+12)   
++#define PVRSRV_BRIDGE_PDUMP_DRIVERINFO                        PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+13)   
++#define PVRSRV_BRIDGE_PDUMP_PDREG                             PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+14)   
++#define PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR            PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+15)   
++#define PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY              PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+16)
++#define PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ              PVRSRV_IOWR(PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST                  (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+17)
++#else
++#define PVRSRV_BRIDGE_PDUMP_CMD_LAST                  PVRSRV_BRIDGE_OVERLAY_CMD_LAST
++#endif
++
++#define PVRSRV_BRIDGE_OEM_CMD_FIRST                           (PVRSRV_BRIDGE_PDUMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_GET_OEMJTABLE                           PVRSRV_IOWR(PVRSRV_BRIDGE_OEM_CMD_FIRST+0)      
++#define PVRSRV_BRIDGE_OEM_CMD_LAST                            (PVRSRV_BRIDGE_OEM_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST              (PVRSRV_BRIDGE_OEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ENUM_CLASS                              PVRSRV_IOWR(PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_DEVCLASS_CMD_LAST                       (PVRSRV_BRIDGE_DEVCLASS_CMD_FIRST+0)
++
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST             (PVRSRV_BRIDGE_DEVCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE  PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS  PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS             PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+4)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_INFO              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+5)
++#define PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+6)
++#define PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN             PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+7)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+8)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+9)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+10)
++#define PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY              PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+11)
++#define PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS           PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+12)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER        PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+13)
++#define PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM        PVRSRV_IOWR(PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++#define PVRSRV_BRIDGE_DISPCLASS_CMD_LAST              (PVRSRV_BRIDGE_DISPCLASS_CMD_FIRST+14)
++
++ 
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST              (PVRSRV_BRIDGE_DISPCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE        PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO            PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER  PVRSRV_IOWR(PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_BUFCLASS_CMD_LAST                       (PVRSRV_BRIDGE_BUFCLASS_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_WRAP_CMD_FIRST                  (PVRSRV_BRIDGE_BUFCLASS_CMD_LAST+1)
++#define PVRSRV_BRIDGE_WRAP_EXT_MEMORY                 PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY                       PVRSRV_IOWR(PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_WRAP_CMD_LAST                           (PVRSRV_BRIDGE_WRAP_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST             (PVRSRV_BRIDGE_WRAP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM            PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM             PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_MAP_MEMINFO_MEM                 PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_UNMAP_MEMINFO_MEM                       PVRSRV_IOWR(PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++#define PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST              (PVRSRV_BRIDGE_SHAREDMEM_CMD_FIRST+3)
++
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST (PVRSRV_BRIDGE_SHAREDMEM_CMD_LAST+1)
++#define PVRSRV_BRIDGE_POLLFORVALUE                        PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR        PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_GET_DEVMEMHEAPS                 PVRSRV_IOWR(PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST  (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_FIRST+2)
++
++#define PVRSRV_BRIDGE_INITSRV_CMD_FIRST                       (PVRSRV_BRIDGE_SERVICES4_TMP_CMD_LAST+1)
++#define PVRSRV_BRIDGE_INITSRV_CONNECT                 PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_INITSRV_DISCONNECT              PVRSRV_IOWR(PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_INITSRV_CMD_LAST                        (PVRSRV_BRIDGE_INITSRV_CMD_FIRST+1)
++
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST  (PVRSRV_BRIDGE_INITSRV_CMD_LAST+1)      
++#define PVRSRV_BRIDGE_EVENT_OBJECT_WAIT                       PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+0)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CONNECT            PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+1)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_DISCONNECT PVRSRV_IOWR(PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++#define PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST           (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_FIRST+2)
++      
++#define PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD             (PVRSRV_BRIDGE_EVENT_OBJECT_CMD_LAST+1)
++
++
++#define PVRSRV_KERNAL_MODE_CLIENT                             1
++
++typedef struct PVRSRV_BRIDGE_RETURN_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_VOID *pvData;
++
++}PVRSRV_BRIDGE_RETURN;
++
++
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++      IMG_UINT32                              ui32BridgeID;                   
++      IMG_UINT32                              ui32Size;                               
++      IMG_VOID                                *pvParamIn;                              
++      IMG_UINT32                              ui32InBufferSize;               
++      IMG_VOID                                *pvParamOut;                    
++      IMG_UINT32                              ui32OutBufferSize;              
++
++      IMG_HANDLE                              hKernelServices;                
++}PVRSRV_BRIDGE_PACKAGE;
++
++
++ 
++
++
++typedef struct PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      uiDevIndex;
++      PVRSRV_DEVICE_TYPE      eDeviceType;
++
++} PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUMCLASS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVRSRV_DEVICE_CLASS sDeviceClass;
++} PVRSRV_BRIDGE_IN_ENUMCLASS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO;
++
++#if defined (SUPPORT_INT_POWER_MAN)
++
++typedef struct PVRSRV_BRIDGE_IN_INT_POWER_MAN_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      ui32OSPowerState;
++} PVRSRV_BRIDGE_IN_INT_POWER_MAN;
++
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE;
++
++
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++
++} PVRSRV_BRIDGE_IN_RELEASE_DEVICEINFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVRSRV_DEVICE_CLASS DeviceClass;
++      IMG_VOID*                       pvDevInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_CLASSDEVICEINFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_DEVMEMHEAPS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++
++}PVRSRV_BRIDGE_IN_GET_DEVMEMHEAPS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++
++}PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hDevMemContext;
++
++}PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_HANDLE                      hDevMemHeap;
++      IMG_UINT32                      ui32Attribs;
++      IMG_UINT32                      ui32Size;
++      IMG_UINT32                      ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++
++}PVRSRV_BRIDGE_IN_MAPMEMINFOTOUSER;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_PVOID                                pvLinAddr;
++      IMG_HANDLE                               hMappingInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAPMEMINFOFROMUSER;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_FREEDEVICEMEM_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_FREEDEVICEMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      ui32Flags;
++
++} PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_UINT32                      ui32QueueSize;
++
++}PVRSRV_BRIDGE_IN_CREATECOMMANDQUEUE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      PVRSRV_QUEUE_INFO       *psQueueInfo;
++
++}PVRSRV_BRIDGE_IN_DESTROYCOMMANDQUEUE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_POWER_CONTROL_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVR_POWER_CONTROL       eControlMode;
++      PVR_POWER_STATE         ePVRPowerState;
++
++}PVRSRV_BRIDGE_IN_POWER_CONTROL;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_VOID                        *pvKVIndexAddress;
++    IMG_UINT32          ui32Bytes;
++} PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevMemHeap;
++      IMG_DEV_VIRTADDR        *psDevVAddr;
++      IMG_UINT32                      ui32Size;
++      IMG_UINT32                      ui32Alignment;
++
++}PVRSRV_BRIDGE_IN_RESERVE_DEV_VIRTMEM;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_SERVICES_TAG
++{
++      PVRSRV_ERROR                    eError;
++      IMG_HANDLE              hKernelServices;
++}PVRSRV_BRIDGE_OUT_CONNECT_SERVICES;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_RESERVE_DEV_VIRTMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_FREE_DEV_VIRTMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psSrcKernelMemInfo;
++      IMG_HANDLE                              hDstDevMemHeap;
++
++}PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psDstKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psDstKernelSyncInfo;   
++      PVRSRV_CLIENT_MEM_INFO  sDstClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sDstClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      PVRSRV_CLIENT_MEM_INFO          sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO         sClientSyncInfo;        
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEV_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_SYS_PHYADDR                 *psSysPAddr;
++      IMG_UINT32                              ui32Flags;
++
++}PVRSRV_BRIDGE_IN_MAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      PVRSRV_CLIENT_MEM_INFO          sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO         sClientSyncInfo;
++      IMG_UINT32                                      ui32Flags;
++
++}PVRSRV_BRIDGE_IN_UNMAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      IMG_HANDLE              hDeviceClassBuffer;
++
++}PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;      
++      IMG_HANDLE                              hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPOL_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32Value;
++      IMG_UINT32 ui32Mask;
++      IMG_BOOL bLastFrame;
++      IMG_BOOL bOverwrite;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPOL;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      IMG_BOOL bIsRead;
++      IMG_UINT32 ui32Value;
++      IMG_UINT32 ui32Mask;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_PVOID pvAltLinAddr;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32Bytes;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_PVOID pvAltLinAddr;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32Bytes;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPREG_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_HWREG sHWReg;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPREG;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_REGPOL_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_HWREG sHWReg;
++      IMG_UINT32 ui32Mask;
++      IMG_UINT32 ui32Flags;
++}PVRSRV_BRIDGE_IN_PDUMP_REGPOL;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_HWREG sHWReg;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hKernelMemInfo;
++      IMG_DEV_PHYADDR         *pPages;
++      IMG_UINT32                      ui32NumPages;
++      IMG_DEV_VIRTADDR        sDevAddr;
++      IMG_UINT32                      ui32Start;
++      IMG_UINT32                      ui32Length;
++      IMG_BOOL                        bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_MEMPAGES;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_COMMENT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_COMMENT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_SETFRAME_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_UINT32 ui32Frame;
++
++}PVRSRV_BRIDGE_IN_PDUMP_SETFRAME;
++
++
++ 
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BITMAP_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++      IMG_UINT32 ui32FileOffset;
++      IMG_UINT32 ui32Width;
++      IMG_UINT32 ui32Height;
++      IMG_UINT32 ui32StrideInBytes;
++      IMG_DEV_VIRTADDR sDevBaseAddr;
++      IMG_UINT32 ui32Size;
++      PDUMP_PIXEL_FORMAT ePixelFormat;
++      PDUMP_MEM_FORMAT eMemFormat;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_BITMAP;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_READREG_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szFileName[PVRSRV_PDUMP_MAX_FILENAME_SIZE];
++      IMG_UINT32 ui32FileOffset;
++      IMG_UINT32 ui32Address;
++      IMG_UINT32 ui32Size;
++      IMG_UINT32 ui32Flags;
++
++}PVRSRV_BRIDGE_IN_PDUMP_READREG;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_CHAR szString[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
++      IMG_BOOL bContinuous;
++
++}PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE *hKernelMemInfo;
++      IMG_UINT32 ui32Offset;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++}PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_PDUM_IN_CYCLE_COUNT_REG_READ_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_UINT32 ui32RegOffset;
++      IMG_BOOL bLastFrame;
++}PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUMDEVICE_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_UINT32 ui32NumDevices;
++      PVRSRV_DEVICE_IDENTIFIER asDeviceIdentifier[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMDEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO_TAG
++{
++
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDevCookie;
++
++} PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUMCLASS_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_UINT32 ui32NumDevices;
++      IMG_UINT32 ui32DevID[PVRSRV_MAX_DEVICES];
++
++}PVRSRV_BRIDGE_OUT_ENUMCLASS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_UINT32              ui32DeviceID;
++      IMG_HANDLE              hDevCookie;
++      
++}PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_HANDLE              hDeviceKM;
++
++}PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      IMG_UINT32              ui32ByteSize;
++      IMG_UINT32              ui32PageOffset;
++      IMG_BOOL                bPhysContig;
++      IMG_UINT32                              ui32NumPageTableEntries;
++      IMG_SYS_PHYADDR         *psSysPAddr;
++
++}PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY_TAG
++{
++      PVRSRV_ERROR    eError;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++}PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++
++}PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY;
++
++
++#define PVRSRV_MAX_DC_DISPLAY_FORMATS                 10
++#define PVRSRV_MAX_DC_DISPLAY_DIMENSIONS              10
++#define PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS                       4
++#define PVRSRV_MAX_DC_CLIP_RECTS                              32
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32Count;
++      DISPLAY_FORMAT  asFormat[PVRSRV_MAX_DC_DISPLAY_FORMATS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDeviceKM;
++      DISPLAY_FORMAT  sFormat;
++
++}PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32Count;
++      DISPLAY_DIMS    asDim[PVRSRV_MAX_DC_DISPLAY_DIMENSIONS];
++
++}PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO_TAG
++{
++      PVRSRV_ERROR    eError;
++      DISPLAY_INFO    sDisplayInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_HANDLE              hBuffer;
++
++}PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDeviceKM;
++      IMG_UINT32                              ui32Flags;
++      DISPLAY_SURF_ATTRIBUTES sDstSurfAttrib;
++      DISPLAY_SURF_ATTRIBUTES sSrcSurfAttrib;
++      IMG_UINT32                              ui32BufferCount;
++      IMG_UINT32                              ui32OEMFlags;
++      IMG_UINT32                              ui32SwapChainID;
++
++} PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hSwapChain;
++      IMG_UINT32                      ui32SwapChainID;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++
++} PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++      IMG_RECT                        sRect;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++      IMG_UINT32                      ui32CKColour;
++
++} PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++
++} PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_UINT32                      ui32BufferCount;
++      IMG_HANDLE                      ahBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++
++} PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hBuffer;
++      IMG_UINT32                      ui32SwapInterval;
++      IMG_HANDLE                      hPrivateTag;
++      IMG_UINT32                      ui32ClipRectCount;
++      IMG_RECT                        sClipRect[PVRSRV_MAX_DC_CLIP_RECTS];
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_HANDLE                      hSwapChain;
++
++} PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_UINT32                      ui32DeviceID;
++      IMG_HANDLE                      hDevCookie;
++      
++} PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDeviceKM;
++
++} PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO_TAG
++{
++      PVRSRV_ERROR            eError;
++      BUFFER_INFO                     sBufferInfo;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDeviceKM;
++      IMG_UINT32                      ui32BufferIndex;
++
++} PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hBuffer;
++
++} PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_DEVMEMHEAPS_TAG
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_HEAP_INFO        sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_GET_DEVMEMHEAPS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDevMemContext;
++      IMG_UINT32                      ui32ClientHeapCount;
++      PVRSRV_HEAP_INFO        sHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP_TAG
++{
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hDevMemHeap;
++
++} PVRSRV_BRIDGE_OUT_CREATE_DEVMEMHEAP;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++
++} PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER_TAG
++{
++      PVRSRV_ERROR                    eError;
++      IMG_PVOID                               pvLinAddr;
++      IMG_HANDLE                              hMappingInfo;
++
++}PVRSRV_BRIDGE_OUT_MAPMEMINFOTOUSER;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_UINT32 ui32Total;
++      IMG_UINT32 ui32Free;
++      IMG_UINT32 ui32LargestBlock;
++
++} PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM;
++
++
++#include "pvrmmap.h"
++typedef struct PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA_TAG
++{
++      PVRSRV_ERROR            eError;
++    
++    
++      IMG_UINT32                      ui32MMapOffset;
++
++    
++      IMG_UINT32                      ui32ByteOffset;
++
++    
++    IMG_UINT32          ui32RealByteSize;
++
++} PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA;
++ 
++typedef struct PVRSRV_BRIDGE_OUT_POWER_CONTROL_TAG
++{
++      PVRSRV_ERROR    eError;
++      PVR_POWER_STATE ePVRPowerState;
++
++}PVRSRV_BRIDGE_OUT_POWER_CONTROL;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_MISC_INFO_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      PVRSRV_MISC_INFO        sMiscInfo;
++
++}PVRSRV_BRIDGE_IN_GET_MISC_INFO;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GET_MISC_INFO_TAG
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_MISC_INFO        sMiscInfo;
++
++}PVRSRV_BRIDGE_OUT_GET_MISC_INFO;
++
++ 
++
++typedef struct PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_BOOL bIsCapturing;
++
++} PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GET_FB_STATS_TAG 
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_UINT32 ui32Total;
++      IMG_UINT32 ui32Available;
++
++} PVRSRV_BRIDGE_IN_GET_FB_STATS;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_SYS_PHYADDR         sSysPhysAddr;
++      IMG_UINT32                      uiSizeInBytes;
++
++} PVRSRV_BRIDGE_IN_MAPPHYSTOUSERSPACE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE_TAG
++{
++      IMG_PVOID                       pvUserAddr;
++      IMG_UINT32                      uiActualSize;
++      IMG_PVOID                       pvProcess;
++
++} PVRSRV_BRIDGE_OUT_MAPPHYSTOUSERSPACE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_PVOID                       pvUserAddr;
++      IMG_PVOID                       pvProcess;
++
++} PVRSRV_BRIDGE_IN_UNMAPPHYSTOUSERSPACE;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP_TAG
++{
++      IMG_PVOID                       *ppvTbl;
++      IMG_UINT32                      uiTblSize;
++
++} PVRSRV_BRIDGE_OUT_GETPHYSTOUSERSPACEMAP;
++
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_PVOID                       pvProcess;
++
++} PVRSRV_BRIDGE_IN_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS_TAG
++{
++      IMG_SYS_PHYADDR         sRegsPhysBase;                  
++      IMG_VOID                        *pvRegsBase;                    
++      IMG_PVOID                       pvProcess;
++      IMG_UINT32                      ulNoOfEntries;
++      IMG_PVOID                       pvTblLinAddr;
++
++} PVRSRV_BRIDGE_OUT_REGISTER_SIM_PROCESS;
++
++
++typedef struct PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_PVOID                       pvProcess;
++      IMG_VOID                        *pvRegsBase;                    
++
++} PVRSRV_BRIDGE_IN_UNREGISTER_SIM_PROCESS;
++
++typedef struct PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_HANDLE                      hDevCookie;
++      IMG_UINT32                      ui32StatusAndMask;
++      PVRSRV_ERROR            eError;
++
++} PVRSRV_BRIDGE_IN_PROCESS_SIMISR_EVENT;
++
++typedef struct PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT_TAG
++{
++      IMG_UINT32                      ui32BridgeFlags; 
++      IMG_BOOL                        bInitSuccesful;
++} PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT;
++
++
++typedef struct PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++    IMG_UINT32 ui32Flags;
++    IMG_UINT32 ui32Size;
++}PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM_TAG
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++}PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++}PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hKernelMemInfo;
++}PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM_TAG
++{
++      PVRSRV_CLIENT_MEM_INFO  sClientMemInfo;
++      PVRSRV_CLIENT_SYNC_INFO sClientSyncInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psKernelMemInfo;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVRSRV_CLIENT_MEM_INFO sClientMemInfo;
++}PVRSRV_BRIDGE_IN_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_UNMAP_MEMINFO_MEM;
++
++typedef struct PVRSRV_BRIDGE_IN_POLLFORVALUE_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++    IMG_UINT32 *pui32CpuVAddrKM;
++    IMG_UINT32 ui32Value;
++    IMG_UINT32 ui32Mask;
++    IMG_UINT32 ui32Waitus;
++    IMG_UINT32 ui32Tries;
++}PVRSRV_BRIDGE_IN_POLLFORVALUE;
++
++typedef struct PVRSRV_BRIDGE_OUT_POLLFORVALUE_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_POLLFORVALUE;
++
++typedef struct PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++    IMG_HANDLE hDevMemContext;
++}PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR_TAG
++{
++    IMG_DEV_PHYADDR sPDDevPAddr;
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR;
++
++typedef struct PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAI_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE      hOSEventKM;
++      IMG_UINT32  ui32MSTimeout;
++} PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT;
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvr_bridge_km.h git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge_km.h
+--- git/drivers/gpu/pvr/services4/include/pvr_bridge_km.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/pvr_bridge_km.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,260 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVR_BRIDGE_KM_H_
++#define __PVR_BRIDGE_KM_H_
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "pvr_bridge.h"
++
++#if defined(__linux__)
++PVRSRV_ERROR LinuxBridgeInit(IMG_VOID);
++IMG_VOID LinuxBridgeDeInit(IMG_VOID);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++                                                                                                 PVRSRV_DEVICE_IDENTIFIER *psDevIdList);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM(IMG_UINT32                        uiDevIndex,
++                                                                                                      PVRSRV_DEVICE_TYPE      eDeviceType,
++                                                                                                      IMG_HANDLE                      *phDevCookie);
++                                                      
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 ui32QueueSize,
++                                                                                                       PVRSRV_QUEUE_INFO **ppsQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++                                                                                                      PVRSRV_HEAP_INFO *psHeapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                               IMG_HANDLE *phDevMemContext,
++                                                                                                               IMG_UINT32 *pui32ClientHeapCount,
++                                                                                                               PVRSRV_HEAP_INFO *psHeapInfo,
++                                                                                                               IMG_BOOL *pbCreated
++#if defined(PVR_SECURE_HANDLES)
++                                                                                                         , IMG_BOOL *pbShared
++#endif
++                                      );
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                                IMG_HANDLE hDevMemContext,
++                                                                                                                IMG_BOOL *pbCreated);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemKM(IMG_HANDLE                   hDevCookie,
++                                                                                               IMG_HANDLE                     hDevMemHeap,
++                                                                                               IMG_UINT32                     ui32Flags,
++                                                                                               IMG_UINT32                     ui32Size,
++                                                                                               IMG_UINT32                     ui32Alignment,
++                                                                                               PVRSRV_KERNEL_MEM_INFO         **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE                    hDevCookie,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  *psMemInfo,
++                                                                                              IMG_BOOL                                bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE                      hDevCookie,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVReserveDeviceVirtualMemKM(IMG_HANDLE          hDevMemHeap,
++                                                                                                               IMG_DEV_VIRTADDR       *psDevVAddr,
++                                                                                                               IMG_UINT32                     ui32Size,
++                                                                                                               IMG_UINT32                     ui32Alignment,
++                                                                                                               PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceVirtualMemKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++                                                                                                IMG_HANDLE hDstDevMemHeap,
++                                                                                                PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                      IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE                            hDevCookie,
++                                                                                              IMG_UINT32                              ui32ByteSize, 
++                                                                                              IMG_UINT32                              ui32PageOffset,
++                                                                                              IMG_BOOL                                bPhysContig,
++                                                                                              IMG_SYS_PHYADDR                 *psSysAddr,
++                                                                                              PVRSRV_KERNEL_MEM_INFO **ppsMemInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM(PVRSRV_DEVICE_CLASS DeviceClass,
++                                                               IMG_UINT32 *pui32DevCount,
++                                                               IMG_UINT32 *pui32DevID );
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM(IMG_UINT32 ui32DeviceID,
++                                                                IMG_HANDLE hDevCookie,
++                                                                IMG_HANDLE *phDeviceKM);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM(IMG_HANDLE hDeviceKM,
++                                                                 IMG_UINT32 *pui32Count,
++                                                                 DISPLAY_FORMAT *psFormat);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM(IMG_HANDLE hDeviceKM,
++                                                              DISPLAY_FORMAT *psFormat,
++                                                              IMG_UINT32 *pui32Count,
++                                                              DISPLAY_DIMS *psDim);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM(IMG_HANDLE hDeviceKM,
++                                                                         IMG_HANDLE *phBuffer);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM(IMG_HANDLE hDeviceKM,
++                                                         DISPLAY_INFO *psDisplayInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM(IMG_HANDLE                             hDeviceKM,
++                                                                         IMG_UINT32                           ui32Flags,
++                                                                         DISPLAY_SURF_ATTRIBUTES      *psDstSurfAttrib,
++                                                                         DISPLAY_SURF_ATTRIBUTES      *psSrcSurfAttrib,
++                                                                         IMG_UINT32                           ui32BufferCount,
++                                                                         IMG_UINT32                           ui32OEMFlags,
++                                                                         IMG_HANDLE                           *phSwapChain,
++                                                                         IMG_UINT32                           *pui32SwapChainID);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE    hSwapChain,
++                                                                              IMG_BOOL bResManCallback);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT      *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT      *psRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                         IMG_HANDLE   hSwapChain,
++                                                                         IMG_UINT32   ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                      IMG_HANDLE              hSwapChain,
++                                                                      IMG_UINT32              ui32CKColour);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_UINT32    *pui32BufferCount,
++                                                                IMG_HANDLE    *phBuffer);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hBuffer,
++                                                                      IMG_UINT32      ui32SwapInterval,
++                                                                      IMG_HANDLE      hPrivateTag,
++                                                                      IMG_UINT32      ui32ClipRectCount,
++                                                                      IMG_RECT        *psClipRect);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hSwapChain);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM(IMG_UINT32  ui32DeviceID,
++                                                                IMG_HANDLE    hDevCookie,
++                                                                IMG_HANDLE    *phDeviceKM);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM(IMG_HANDLE hDeviceKM, IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM(IMG_HANDLE     hDeviceKM,
++                                                         BUFFER_INFO  *psBufferInfo);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM(IMG_HANDLE   hDeviceKM,
++                                                               IMG_UINT32     ui32BufferIndex,
++                                                               IMG_HANDLE     *phBuffer);
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(IMG_HANDLE hDeviceClassBuffer,
++                                                                                                         PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++                                                                                                         IMG_HANDLE *phOSMapInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                                      IMG_BOOL bResManCallback);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++                                                                                                 IMG_UINT32 *pui32Total,
++                                                                                                 IMG_UINT32 *pui32Free,
++                                                                                                 IMG_UINT32 *pui32LargestBlock);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE                                    hDevCookie,
++                                                                                              IMG_HANDLE                                      hDevMemContext,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO **ppsKernelSyncInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO        *psKernelSyncInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo);
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32    *pui32Total,
++                                                              IMG_UINT32      *pui32Available);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(IMG_UINT32 ui32Flags,
++                                                       IMG_UINT32 ui32Size,
++                                                       PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/pvrmmap.h git-nokia/drivers/gpu/pvr/services4/include/pvrmmap.h
+--- git/drivers/gpu/pvr/services4/include/pvrmmap.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/pvrmmap.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,38 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PVRMMAP_H__
++#define __PVRMMAP_H__
++
++
++IMG_VOID *PVRMMAPMapKernelPtr(IMG_HANDLE hModule, IMG_VOID *pvKVAddress, IMG_UINT32 ui32Bytes);
++
++
++IMG_BOOL PVRMMAPRemoveMapping(IMG_VOID *pvUserAddress, IMG_UINT32 ui32Bytes);
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/servicesint.h git-nokia/drivers/gpu/pvr/services4/include/servicesint.h
+--- git/drivers/gpu/pvr/services4/include/servicesint.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/servicesint.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,252 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__SERVICESINT_H__)
++#define __SERVICESINT_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "services.h"
++#include "sysinfo.h"
++
++#define HWREC_DEFAULT_TIMEOUT (500)
++
++#define DRIVERNAME_MAXLENGTH  (100)
++
++#define EVENTOBJNAME_MAXLENGTH (50)
++
++
++typedef struct _PVRSRV_EVENTOBJECT_
++{
++      
++      IMG_CHAR        szName[EVENTOBJNAME_MAXLENGTH];
++      
++      IMG_HANDLE      hOSEventKM;
++} PVRSRV_EVENTOBJECT;
++
++
++typedef struct _PVRSRV_KERNEL_MEM_INFO_
++{
++      
++      IMG_PVOID                               pvLinAddrKM;
++      
++      
++      IMG_DEV_VIRTADDR                sDevVAddr;
++                                                                              
++              
++      IMG_UINT32                              ui32Flags;
++                                                                               
++      
++      IMG_UINT32                              ui32AllocSize;          
++
++                                                                                                      
++      PVRSRV_MEMBLK                   sMemBlk;
++      
++      
++      IMG_PVOID                               pvSysBackupBuffer;      
++
++
++      
++      struct _PVRSRV_KERNEL_SYNC_INFO_        *psKernelSyncInfo;
++
++} PVRSRV_KERNEL_MEM_INFO;
++
++
++typedef struct _PVRSRV_KERNEL_SYNC_INFO_
++{
++      
++      PVRSRV_SYNC_DATA                *psSyncData;
++      
++      
++      IMG_DEV_VIRTADDR                sWriteOpsCompleteDevVAddr;
++
++      
++      IMG_DEV_VIRTADDR                sReadOpsCompleteDevVAddr;
++
++      
++      PVRSRV_KERNEL_MEM_INFO  *psSyncDataMemInfoKM;
++
++} PVRSRV_KERNEL_SYNC_INFO;
++
++
++typedef struct _PVRSRV_SYNC_OBJECT
++{
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfoKM;
++      IMG_UINT32                              ui32WriteOpsPending;
++      IMG_UINT32                              ui32ReadOpsPending;
++
++}PVRSRV_SYNC_OBJECT, *PPVRSRV_SYNC_OBJECT;
++
++typedef struct _PVRSRV_COMMAND
++{
++      IMG_UINT32                      ui32CmdSize;            
++      IMG_UINT32                      ui32DevIndex;           
++      IMG_UINT32                      CommandType;            
++      IMG_UINT32                      ui32DstSyncCount;       
++      IMG_UINT32                      ui32SrcSyncCount;       
++      PVRSRV_SYNC_OBJECT      *psDstSync;                     
++      PVRSRV_SYNC_OBJECT      *psSrcSync;                     
++      IMG_UINT32                      ui32DataSize;           
++      IMG_UINT32                      ui32ProcessID;          
++      IMG_VOID                        *pvData;                        
++}PVRSRV_COMMAND, *PPVRSRV_COMMAND;
++
++
++typedef struct _PVRSRV_QUEUE_INFO_
++{
++      IMG_VOID                        *pvLinQueueKM;                  
++      IMG_VOID                        *pvLinQueueUM;                  
++      volatile IMG_UINT32     ui32ReadOffset;                 
++      volatile IMG_UINT32     ui32WriteOffset;                
++      IMG_UINT32                      *pui32KickerAddrKM;             
++      IMG_UINT32                      *pui32KickerAddrUM;             
++      IMG_UINT32                      ui32QueueSize;                  
++
++      IMG_UINT32                      ui32ProcessID;                  
++
++      IMG_HANDLE                      hMemBlock[2];
++
++      struct _PVRSRV_QUEUE_INFO_ *psNextKM;            
++}PVRSRV_QUEUE_INFO;
++
++typedef PVRSRV_ERROR (*PFN_INSERT_CMD) (PVRSRV_QUEUE_INFO*, 
++                                                                              PVRSRV_COMMAND**,
++                                                                              IMG_UINT32,
++                                                                              IMG_UINT16,
++                                                                              IMG_UINT32,
++                                                                              PVRSRV_KERNEL_SYNC_INFO*[],
++                                                                              IMG_UINT32,
++                                                                              PVRSRV_KERNEL_SYNC_INFO*[],
++                                                                              IMG_UINT32); 
++typedef PVRSRV_ERROR (*PFN_SUBMIT_CMD) (PVRSRV_QUEUE_INFO*, PVRSRV_COMMAND*, IMG_BOOL);
++
++
++typedef struct PVRSRV_DEVICECLASS_BUFFER_TAG
++{     
++      PFN_GET_BUFFER_ADDR             pfnGetBufferAddr;
++      IMG_HANDLE                              hDevMemContext;
++      IMG_HANDLE                              hExtDevice;
++      IMG_HANDLE                              hExtBuffer;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++
++} PVRSRV_DEVICECLASS_BUFFER;
++
++              
++typedef struct PVRSRV_CLIENT_DEVICECLASS_INFO_TAG
++{
++      IMG_HANDLE hDeviceKM;
++      IMG_HANDLE      hServices;
++} PVRSRV_CLIENT_DEVICECLASS_INFO;
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetWriteOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++      IMG_UINT32 ui32WriteOpsPending;                 
++
++      if(bIsReadOp)
++      {
++              ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending;
++      }
++      else
++      {
++              
++
++
++              ui32WriteOpsPending = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++      }
++
++      return ui32WriteOpsPending;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetReadOpsPending)
++#endif
++static INLINE
++IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
++{
++      IMG_UINT32 ui32ReadOpsPending;                  
++
++      if(bIsReadOp)
++      {
++              ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending++;
++      }
++      else
++      {
++              ui32ReadOpsPending = psSyncInfo->psSyncData->ui32ReadOpsPending;
++      }
++
++      return ui32ReadOpsPending;
++}
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVQueueCommand(IMG_HANDLE hQueueInfo, 
++                                                              PVRSRV_COMMAND *psCommand);
++
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVPollForValue(const PVRSRV_CONNECTION *psConnection,
++                   IMG_UINT32* pui32LinMemAddr,
++                   IMG_UINT32 ui32Value,
++                   IMG_UINT32 ui32Mask,
++                   IMG_UINT32 ui32Waitus,
++                   IMG_UINT32 ui32Tries);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVGetMMUContextPDDevPAddr(const PVRSRV_CONNECTION *psConnection,
++                              IMG_HANDLE hDevMemContext,
++                              IMG_DEV_PHYADDR *sPDDevPAddr);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVAllocSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++                                              IMG_UINT32 ui32Flags,
++                                              IMG_UINT32 ui32Size,
++                                              PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVFreeSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++                                         PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++PVRSRVUnrefSharedSysMem(const PVRSRV_CONNECTION *psConnection,
++                        PVRSRV_CLIENT_MEM_INFO *psClientMemInfo);
++
++IMG_IMPORT PVRSRV_ERROR IMG_CALLCONV
++PVRSRVMapMemInfoMem(const PVRSRV_CONNECTION *psConnection,
++                    IMG_HANDLE hKernelMemInfo,
++                    PVRSRV_CLIENT_MEM_INFO **ppsClientMemInfo);
++
++
++#if defined (__cplusplus)
++}
++#endif
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge.h git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,357 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_H__)
++#define __SGX_BRIDGE_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "pvr_bridge.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#define PVRSRV_BRIDGE_SGX_CMD_BASE (PVRSRV_BRIDGE_LAST_NON_DEVICE_CMD+1)
++#define PVRSRV_BRIDGE_SGX_GETCLIENTINFO                       PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+0)
++#define PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO           PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+1)
++#define PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO  PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+2)
++#define PVRSRV_BRIDGE_SGX_DOKICK                              PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+3)
++#define PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR             PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+4)
++#define PVRSRV_BRIDGE_SGX_READREGISTRYDWORD           PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+5)
++#define PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND             PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+6)
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define PVRSRV_BRIDGE_SGX_2DQUEUEBLT            PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+7)
++#if defined(SGX2D_DIRECT_BLITS)
++#define PVRSRV_BRIDGE_SGX_2DDIRECTBLT           PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+8)
++#endif
++#endif 
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++#define PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE   PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+9)
++#endif 
++
++#define PVRSRV_BRIDGE_SGX_GETMMUPDADDR                PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+10)  
++
++#if defined(TRANSFER_QUEUE)
++#define PVRSRV_BRIDGE_SGX_SUBMITTRANSFER                      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+13)
++#endif
++#define PVRSRV_BRIDGE_SGX_GETMISCINFO                         PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+14)
++#define PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT                     PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+15)
++#define PVRSRV_BRIDGE_SGX_DEVINITPART2                                PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+16)
++
++#define PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC                    PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+17)
++#define PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC                   PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+18)
++#define PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC                     PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+19)
++#define PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT  PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+20)
++#define PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET      PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+21)
++#define PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT        PVRSRV_IOWR(PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++
++#define PVRSRV_BRIDGE_LAST_SGX_CMD (PVRSRV_BRIDGE_SGX_CMD_BASE+22)
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevMemHeap;
++      IMG_DEV_VIRTADDR sDevVAddr;
++}PVRSRV_BRIDGE_IN_GETPHYSPAGEADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR
++{
++      PVRSRV_ERROR            eError;
++      IMG_DEV_PHYADDR         DevPAddr;
++      IMG_CPU_PHYADDR         CpuPAddr;
++}PVRSRV_BRIDGE_OUT_GETPHYSPAGEADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_HANDLE                              hDevMemContext;
++}PVRSRV_BRIDGE_IN_SGX_GETMMU_PDADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR_TAG
++{
++      IMG_DEV_PHYADDR                 sPDDevPAddr;
++      PVRSRV_ERROR                    eError;
++}PVRSRV_BRIDGE_OUT_SGX_GETMMU_PDADDR;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETCLIENTINFO_TAG
++{
++      IMG_UINT32                                      ui32BridgeFlags; 
++      IMG_HANDLE                                      hDevCookie;
++}PVRSRV_BRIDGE_IN_GETCLIENTINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO_TAG
++{
++      PVR3DIF4_INTERNAL_DEVINFO       sSGXInternalDevInfo;
++      PVRSRV_ERROR                            eError;
++}PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++}PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_GETCLIENTINFO_TAG
++{
++      PVR3DIF4_CLIENT_INFO    sClientInfo;
++      PVRSRV_ERROR                    eError;
++}PVRSRV_BRIDGE_OUT_GETCLIENTINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_RELEASECLIENTINFO_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVR3DIF4_CLIENT_INFO    sClientInfo;
++}PVRSRV_BRIDGE_IN_RELEASECLIENTINFO;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_ISPBREAKPOLL_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++}PVRSRV_BRIDGE_IN_ISPBREAKPOLL;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_DOKICK_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVR3DIF4_CCB_KICK               sCCBKick;
++}PVRSRV_BRIDGE_IN_DOKICK;
++
++#if defined(TRANSFER_QUEUE)
++ 
++typedef struct PVRSRV_BRIDGE_IN_SUBMITTRANSFER_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_DEV_VIRTADDR                sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SUBMITTRANSFER;
++#endif
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_READREGDWORD_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++    IMG_PCHAR                         pszKey;
++    IMG_PCHAR                         pszValue;
++}PVRSRV_BRIDGE_IN_READREGDWORD;
++
++ 
++typedef struct PVRSRV_BRIDGE_OUT_READREGDWORD_TAG
++{
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              ui32Data;
++}PVRSRV_BRIDGE_OUT_READREGDWORD;
++
++ 
++typedef struct PVRSRV_BRIDGE_IN_SCHEDULECOMMAND_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      PVRSRV_SGX_COMMAND_TYPE eCommandType;
++      PVRSRV_SGX_COMMAND              *psCommandData;
++
++}PVRSRV_BRIDGE_IN_SCHEDULECOMMAND;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXGETMISCINFO_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      SGX_MISC_INFO   *psMiscInfo;
++}PVRSRV_BRIDGE_IN_SGXGETMISCINFO;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++}PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT_TAG
++{
++      PVRSRV_ERROR                    eError;
++      SGX_BRIDGE_INFO_FOR_SRVINIT     sInitInfo;
++}PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXDEVINITPART2_TAG
++{
++      IMG_UINT32              ui32BridgeFlags; 
++      IMG_HANDLE              hDevCookie;
++      SGX_BRIDGE_INIT_INFO    sInitInfo;
++}PVRSRV_BRIDGE_IN_SGXDEVINITPART2;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++ 
++typedef struct PVRSRV_BRIDGE_IN_2DQUEUEBLT_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_HANDLE                              hKernDstSync;
++      IMG_UINT32                              ui32NumSrcSyncs;
++      IMG_HANDLE                              ahKernSrcSync[PVRSRV_MAX_BLT_SRC_SYNCS];
++      IMG_UINT32                              ui32DataByteSize;
++      IMG_UINT32                              *pui32BltData;
++}PVRSRV_BRIDGE_IN_2DQUEUEBLT;
++
++#if defined(SGX2D_DIRECT_BLITS)
++ 
++typedef struct PVRSRV_BRIDGE_IN_2DDIRECTBLT_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_UINT32                              ui32DataByteSize;
++      IMG_UINT32                              *pui32BltData;
++}PVRSRV_BRIDGE_IN_2DDIRECTBLT;
++
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++ 
++typedef struct PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE_TAG
++{
++      IMG_UINT32                              ui32BridgeFlags; 
++      IMG_HANDLE                              hDevCookie;
++      IMG_HANDLE                              hKernSyncInfo;
++      IMG_BOOL                                bWaitForComplete;
++}PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE;
++#endif 
++
++
++#define PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS 10
++
++typedef struct PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++    IMG_UINT32 ui32TotalPBSize;
++}PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC_TAG
++{
++      IMG_HANDLE hKernelMemInfo;
++      IMG_HANDLE hSharedPBDesc;
++      IMG_HANDLE hSharedPBDescKernelMemInfoHandle;
++      IMG_HANDLE hHWPBDescKernelMemInfoHandle;
++      IMG_HANDLE hBlockKernelMemInfoHandle;
++      IMG_HANDLE ahSharedPBDescSubKernelMemInfoHandles[PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS];
++      IMG_UINT32 ui32SharedPBDescSubKernelMemInfoHandlesCount;
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC_TAG
++{
++      PVRSRV_ERROR eError;
++}PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC;
++
++
++typedef struct PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hSharedPBDescKernelMemInfo;
++      IMG_HANDLE hHWPBDescKernelMemInfo;
++      IMG_HANDLE hBlockKernelMemInfo;
++      IMG_UINT32 ui32TotalPBSize;
++      IMG_HANDLE *phKernelMemInfoHandles;
++      IMG_UINT32 ui32KernelMemInfoHandlesCount;
++}PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hSharedPBDesc;
++}PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC;
++
++
++#ifdef        PDUMP
++typedef struct PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      PVR3DIF4_KICKTA_DUMP_BUFFER *psBufferArray;
++      IMG_UINT32 ui32BufferArrayLength;
++      IMG_BOOL bDumpPolls;
++} PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY;
++#endif
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT_TAG
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr;
++}PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET;
++
++typedef struct PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT_TAG
++{
++      IMG_UINT32 ui32BridgeFlags; 
++      IMG_HANDLE hDevCookie;
++      IMG_HANDLE hHWRenderContext;
++}PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT;
++
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define       SGX2D_MAX_BLT_CMD_SIZ           256     
++#endif 
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge_km.h
+--- git/drivers/gpu/pvr/services4/include/sgx_bridge_km.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/sgx_bridge_km.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,139 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGX_BRIDGE_KM_H__)
++#define __SGX_BRIDGE_KM_H__
++
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgx_bridge.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
++                                                               IMG_DEV_VIRTADDR sHWRenderContextDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle,
++                                               PVR3DIF4_CCB_KICK *psCCBKick);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM(IMG_HANDLE hDevMemHeap,
++                                                                IMG_DEV_VIRTADDR sDevVAddr,
++                                                                IMG_DEV_PHYADDR *pDevPAddr,
++                                                                IMG_CPU_PHYADDR *pCpuPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV SGXGetMMUPDAddrKM(IMG_HANDLE                hDevCookie,
++                                                                                      IMG_HANDLE              hDevMemContext,
++                                                                                      IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                            hDevCookie,
++                                                              PVR3DIF4_CLIENT_INFO*   psClientInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        SGX_MISC_INFO                 *psMiscInfo);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
++                                                        IMG_UINT32            ui32NumSrcSyncs,
++                                                        PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                        IMG_UINT32            ui32DataByteSize,
++                                                        IMG_UINT32            *pui32BltData);
++
++#if defined(SGX2D_DIRECT_BLITS)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                         IMG_UINT32                   ui32DataByteSize,
++                                                         IMG_UINT32                   *pui32BltData);
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++IMG_IMPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO             *psDevInfo,
++                                                                         PVRSRV_KERNEL_SYNC_INFO      *psSyncInfo,
++                                                                         IMG_BOOL bWaitForComplete);
++#endif 
++
++IMG_IMPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle,
++                                                                      SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR DevInitSGXPart2KM(PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                                         IMG_HANDLE hDevHandle,
++                                                         SGX_BRIDGE_INIT_INFO *psInitInfo);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                        IMG_UINT32 ui32TotalPBSize,
++                                        IMG_HANDLE *phSharedPBDesc,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc);
++
++IMG_IMPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++                                       IMG_UINT32 ui32TotalPBSize,
++                                       IMG_HANDLE *phSharedPBDesc,
++                                       PVRSRV_KERNEL_MEM_INFO **psSharedPBDescSubKernelMemInfos,
++                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount);
++
++
++IMG_IMPORT PVRSRV_ERROR
++SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie,
++                                              PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo);
++
++ 
++#if defined(SGX_FEATURE_2D_HARDWARE)
++#define       SGX2D_MAX_BLT_CMD_SIZ           256     
++#endif 
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/include/sgxinfo.h git-nokia/drivers/gpu/pvr/services4/include/sgxinfo.h
+--- git/drivers/gpu/pvr/services4/include/sgxinfo.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/include/sgxinfo.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,375 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined (__SGXINFO_H__)
++#define __SGXINFO_H__
++
++#include "sgxscript.h"
++
++#include "servicesint.h"
++
++#include "services.h"
++#include "sgxapi_km.h"
++
++#define SGX_MAX_DEV_DATA              24
++#define       SGX_MAX_INIT_MEM_HANDLES        16
++
++typedef struct _SGX_BRIDGE_INFO_FOR_SRVINIT
++{
++      IMG_DEV_PHYADDR sPDDevPAddr;
++      PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];
++      
++      IMG_UINT32 ui32uKernelTimerClock;
++#if defined(SUPPORT_HW_RECOVERY)
++      IMG_UINT32 ui32HWRecoverySampleRate;
++#endif 
++      IMG_UINT32 ui32ActivePowManSampleRate;
++} SGX_BRIDGE_INFO_FOR_SRVINIT;
++
++typedef struct _SGX_BRIDGE_INIT_INFO_ {
++      IMG_HANDLE      hKernelCCBMemInfo;
++      IMG_HANDLE      hKernelCCBCtlMemInfo;
++      IMG_HANDLE      hKernelCCBEventKickerMemInfo;
++      IMG_HANDLE      hKernelSGXHostCtlMemInfo;
++      IMG_UINT32      ui32TAKickAddress;
++      IMG_UINT32      ui32VideoHandlerAddress;
++#if defined(SGX_SUPPORT_HWPROFILING)
++      IMG_HANDLE      hKernelHWProfilingMemInfo;
++#endif
++
++      IMG_UINT32 ui32EDMTaskReg0;
++      IMG_UINT32 ui32EDMTaskReg1;
++
++      IMG_UINT32 ui32ClockGateMask;
++
++      IMG_UINT32 ui32CacheControl;
++
++      IMG_UINT32      asInitDevData[SGX_MAX_DEV_DATA];        
++      IMG_HANDLE      asInitMemHandles[SGX_MAX_INIT_MEM_HANDLES];
++
++      SGX_INIT_SCRIPTS sScripts;
++
++} SGX_BRIDGE_INIT_INFO;
++
++typedef struct _PVRSRV_SGX_COMMAND_
++{
++      IMG_UINT32                              ui32ServiceAddress;             
++      IMG_UINT32                              ui32Data[7];                    
++} PVRSRV_SGX_COMMAND;
++
++
++typedef struct _PVRSRV_SGX_KERNEL_CCB_
++{
++      PVRSRV_SGX_COMMAND              asCommands[256];                
++} PVRSRV_SGX_KERNEL_CCB;
++
++
++typedef struct _PVRSRV_SGX_CCB_CTL_
++{
++      IMG_UINT32                              ui32WriteOffset;                
++      IMG_UINT32                              ui32ReadOffset;                 
++} PVRSRV_SGX_CCB_CTL;
++
++
++#define SGX_AUXCCBFLAGS_SHARED                                        0x00000001
++typedef struct _PVRSRV_SGX_CCB_INFO_ *PPVRSRV_SGX_CCB_INFO;
++
++typedef enum _PVRSRV_SGX_COMMAND_TYPE_
++{
++      PVRSRV_SGX_COMMAND_EDM_KICK             = 0,
++      PVRSRV_SGX_COMMAND_VIDEO_KICK   = 1,
++
++      PVRSRV_SGX_COMMAND_FORCE_I32    = 0xFFFFFFFF,
++
++}PVRSRV_SGX_COMMAND_TYPE;
++
++#define               SGX_HOSTPORT_PRESENT                    0x00000001
++
++#define PVRSRV_CCBFLAGS_RASTERCMD                     0x1
++#define PVRSRV_CCBFLAGS_TRANSFERCMD                   0x2
++#define PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD     0x3
++
++#define PVRSRV_KICKFLAG_RENDER                                0x1
++#define PVRSRV_KICKFLAG_PIXEL                         0x2
++
++
++#define       SGX_BIF_INVALIDATE_PTCACHE      0x1
++#define       SGX_BIF_INVALIDATE_PDCACHE      0x2
++
++typedef struct _PVR3DIF4_CCB_KICK_
++{
++      IMG_BOOL                        bKickRender;
++      PVRSRV_SGX_COMMAND_TYPE         eCommand;
++      PVRSRV_SGX_COMMAND              sCommand;
++      IMG_HANDLE                      hCCBKernelMemInfo;
++      IMG_HANDLE                      hDstKernelSyncInfo;
++      IMG_UINT32                      ui32DstReadOpsPendingOffset;
++      IMG_UINT32                      ui32DstWriteOpsPendingOffset;
++      IMG_UINT32      ui32NumTAStatusVals;
++      IMG_UINT32      aui32TAStatusValueOffset[SGX_MAX_TA_STATUS_VALS];
++      IMG_HANDLE      ahTAStatusSyncInfo[SGX_MAX_TA_STATUS_VALS];
++
++      IMG_UINT32      ui32Num3DStatusVals;
++      IMG_UINT32      aui323DStatusValueOffset[SGX_MAX_3D_STATUS_VALS];
++      IMG_HANDLE      ah3DStatusSyncInfo[SGX_MAX_3D_STATUS_VALS];
++#ifdef        NO_HARDWARE
++      IMG_BOOL        bTerminate;
++      IMG_HANDLE      hUpdateDstKernelSyncInfo;
++      IMG_UINT32      ui32WriteOpsPendingVal;
++#endif
++      IMG_UINT32                                      ui32KickFlags;
++} PVR3DIF4_CCB_KICK;
++
++
++typedef struct _PVRSRV_SGX_HOST_CTL_
++{     
++
++      volatile IMG_UINT32             ui32PowManFlags; 
++#if defined(SUPPORT_HW_RECOVERY)
++      IMG_UINT32                              ui32uKernelDetectedLockups;              
++      IMG_UINT32                              ui32HostDetectedLockups;                
++#endif
++      IMG_UINT32                              ui32InterruptFlags; 
++      IMG_UINT32                              ui32InterruptClearFlags; 
++
++      IMG_UINT32                              ui32ResManFlags;                
++      IMG_DEV_VIRTADDR                sResManCleanupData;             
++
++      IMG_DEV_VIRTADDR                sTAHWPBDesc;            
++      IMG_DEV_VIRTADDR                s3DHWPBDesc;
++
++} PVRSRV_SGX_HOST_CTL;
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++typedef struct _SGX_INIT_SCRIPT_DATA
++{
++      IMG_UINT32 asHWRecoveryData[SGX_MAX_DEV_DATA];
++} SGX_INIT_SCRIPT_DATA;
++#endif
++
++typedef struct _PVRSRV_SGXDEV_INFO_
++{
++      PVRSRV_DEVICE_TYPE              eDeviceType;
++      PVRSRV_DEVICE_CLASS             eDeviceClass;
++
++      IMG_UINT8                               ui8VersionMajor;
++      IMG_UINT8                               ui8VersionMinor;
++      IMG_UINT32                              ui32CoreConfig;
++      IMG_UINT32                              ui32CoreFlags;
++
++      
++      IMG_PVOID                               pvRegsBaseKM;
++      
++
++      
++      IMG_HANDLE                              hRegMapping;
++
++      
++      IMG_SYS_PHYADDR                 sRegsPhysBase;
++      
++      IMG_UINT32                              ui32RegSize;
++
++      
++      IMG_UINT32                              ui32CoreClockSpeed;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      
++      SGX_SLAVE_PORT                  s2DSlavePortKM;
++
++      
++      PVRSRV_RESOURCE                 s2DSlaveportResource;
++
++      
++      IMG_UINT32                      ui322DFifoSize;
++      IMG_UINT32                      ui322DFifoOffset;
++      
++      IMG_HANDLE                      h2DCmdCookie;
++      
++      IMG_HANDLE                      h2DQueue;
++      IMG_BOOL                        b2DHWRecoveryInProgress;
++      IMG_BOOL                        b2DHWRecoveryEndPending;
++      IMG_UINT32                      ui322DCompletedBlits;
++      IMG_BOOL                        b2DLockupSuspected;
++#endif
++      
++    
++      IMG_VOID                        *psStubPBDescListKM;
++
++
++      
++      IMG_DEV_PHYADDR                 sKernelPDDevPAddr;
++
++      IMG_VOID                                *pvDeviceMemoryHeap;
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBMemInfo;                     
++      PVRSRV_SGX_KERNEL_CCB   *psKernelCCB;                   
++      PPVRSRV_SGX_CCB_INFO    psKernelCCBInfo;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBCtlMemInfo;  
++      PVRSRV_SGX_CCB_CTL              *psKernelCCBCtl;                
++      PPVRSRV_KERNEL_MEM_INFO psKernelCCBEventKickerMemInfo; 
++      IMG_UINT32                              *pui32KernelCCBEventKicker; 
++      IMG_UINT32                              ui32TAKickAddress;              
++      IMG_UINT32                              ui32TexLoadKickAddress; 
++      IMG_UINT32                              ui32VideoHandlerAddress;
++#if defined(SGX_SUPPORT_HWPROFILING)
++      PPVRSRV_KERNEL_MEM_INFO psKernelHWProfilingMemInfo;
++#endif
++
++      
++      IMG_UINT32                              ui32ClientRefCount;
++
++      
++      IMG_UINT32                              ui32CacheControl;
++
++      
++
++
++      IMG_VOID                                *pvMMUContextList;
++
++      
++      IMG_BOOL                                bForcePTOff;
++
++      IMG_UINT32                              ui32EDMTaskReg0;
++      IMG_UINT32                              ui32EDMTaskReg1;
++
++      IMG_UINT32                              ui32ClockGateMask;
++      SGX_INIT_SCRIPTS                sScripts;
++#if defined(SUPPORT_HW_RECOVERY)
++      SGX_INIT_SCRIPT_DATA    sScriptData;
++#endif
++              
++      IMG_HANDLE                              hBIFResetPDOSMemHandle;
++      IMG_DEV_PHYADDR                 sBIFResetPDDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPTDevPAddr;
++      IMG_DEV_PHYADDR                 sBIFResetPageDevPAddr;
++      IMG_UINT32                              *pui32BIFResetPD;
++      IMG_UINT32                              *pui32BIFResetPT;
++
++
++
++#if defined(SUPPORT_HW_RECOVERY)
++      
++      IMG_HANDLE                              hTimer;
++      
++      IMG_UINT32                              ui32TimeStamp;
++#endif
++
++      
++      IMG_UINT32                              ui32NumResets;
++
++      PVRSRV_KERNEL_MEM_INFO                  *psKernelSGXHostCtlMemInfo;
++      PVRSRV_SGX_HOST_CTL                             *psSGXHostCtl; 
++
++      IMG_UINT32                              ui32Flags;
++
++      
++      IMG_UINT32                              ui32RegFlags;
++
++      #if defined(PDUMP)
++      PVRSRV_SGX_PDUMP_CONTEXT        sPDContext;
++      #endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      IMG_VOID                                *pvDummyPTPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyPTDevPAddr;
++      IMG_HANDLE                              hDummyPTPageOSMemHandle;
++      IMG_VOID                                *pvDummyDataPageCpuVAddr;
++      IMG_DEV_PHYADDR                 sDummyDataDevPAddr;
++      IMG_HANDLE                              hDummyDataPageOSMemHandle;
++#endif
++
++      IMG_UINT32                              asSGXDevData[SGX_MAX_DEV_DATA]; 
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      PVRSRV_EVENTOBJECT      *psSGXEventObject;
++#endif
++
++} PVRSRV_SGXDEV_INFO;
++
++typedef struct _PVR3DIF4_CLIENT_INFO_
++{
++      IMG_VOID                                        *pvRegsBase;                    
++      IMG_HANDLE                                      hBlockMapping;                  
++      SGX_SLAVE_PORT                          s2DSlavePort;                   
++      IMG_UINT32                                      ui32ProcessID;                  
++      IMG_VOID                                        *pvProcess;                             
++      PVRSRV_MISC_INFO                        sMiscInfo;                              
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      IMG_HANDLE                                      hOSEventKM;             
++#endif
++
++      IMG_UINT32                                      asDevData[SGX_MAX_DEV_DATA];
++
++} PVR3DIF4_CLIENT_INFO;
++
++typedef struct _PVR3DIF4_INTERNAL_DEVINFO_
++{
++      IMG_UINT32                      ui32Flags;
++      IMG_BOOL                        bTimerEnable;
++      IMG_HANDLE                      hCtlKernelMemInfoHandle;
++      IMG_BOOL                        bForcePTOff;
++      IMG_UINT32                      ui32RegFlags;
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      IMG_HANDLE                      hOSEvent;               
++#endif
++} PVR3DIF4_INTERNAL_DEVINFO;
++
++typedef struct _PVRSRV_SGX_SHARED_CCB_
++{
++      PVRSRV_CLIENT_MEM_INFO  *psCCBClientMemInfo;    
++      PVRSRV_CLIENT_MEM_INFO  *psCCBCtlClientMemInfo; 
++      IMG_UINT32                              *pui32CCBLinAddr;               
++      IMG_DEV_VIRTADDR                sCCBDevAddr;                    
++      IMG_UINT32                              *pui32WriteOffset;      
++      volatile IMG_UINT32             *pui32ReadOffset;               
++      IMG_UINT32                              ui32Size;                               
++      IMG_UINT32                              ui32AllocGran;                  
++
++      #ifdef PDUMP
++      IMG_UINT32                              ui32CCBDumpWOff;                
++      #endif
++}PVRSRV_SGX_SHARED_CCB;
++
++typedef struct _PVRSRV_SGX_CCB_
++{
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo;                  
++      PVRSRV_KERNEL_MEM_INFO  *psCCBCtlMemInfo;               
++      IMG_PUINT32                             pui32CCBLinAddr;                
++      IMG_DEV_VIRTADDR                sCCBDevAddr;                    
++      IMG_UINT32                              *pui32WriteOffset;              
++      volatile IMG_UINT32             *pui32ReadOffset;               
++      IMG_UINT32                              ui32Size;                               
++      IMG_UINT32                              ui32AllocGran;                  
++      
++      #ifdef PDUMP
++      IMG_UINT32                              ui32CCBDumpWOff;                
++      #endif
++}PVRSRV_SGX_CCB;
++
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c
+--- git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,4173 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++
++
++#include <stddef.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge_km.h"
++#include "pvr_debug.h"
++#include "ra.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge.h"
++#include "perproc.h"
++#include "sgx_bridge_km.h"
++#include "pdump_km.h"
++#include "sgxutils.h"
++#include "mmu.h"
++
++#include "bridged_pvr_bridge.h"
++#include "env_data.h"
++
++
++#if defined (__linux__)
++#include "mmap.h"
++#else
++#define PVRMMapKVIndexAddressToMMapData(A,B,C,D,E) PVRSRV_OK
++#endif
++
++#ifndef EFAULT
++#define EFAULT        14
++#endif
++#ifndef ENOTTY
++#define ENOTTY        25
++#endif
++
++#if defined(DEBUG)
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_ASSERT(X == PVRSRV_GET_BRIDGE_ID(Y))
++#else
++#define PVRSRV_BRIDGE_ASSERT_CMD(X, Y) PVR_UNREFERENCED_PARAMETER(X)
++#endif
++
++static IMG_BOOL gbInitServerRunning = IMG_FALSE;
++static IMG_BOOL gbInitServerRan = IMG_FALSE;
++static IMG_BOOL gbInitServerSuccessful = IMG_FALSE;
++
++PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++#if defined(DEBUG_BRIDGE_KM)
++PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++static IMG_UINT32 aui322DBltData[SGX2D_MAX_BLT_CMD_SIZ];
++#endif
++
++#if defined(PVR_SECURE_HANDLES)
++static IMG_BOOL abSharedDeviceMemHeap[PVRSRV_MAX_CLIENT_HEAPS];
++#endif
++
++
++#if defined(DEBUG_BRIDGE_KM)
++static PVRSRV_ERROR
++CopyFromUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, 
++                                      IMG_UINT32 ui32BridgeID,
++                                      IMG_VOID *pvDest,
++                                      IMG_VOID *pvSrc,
++                                      IMG_UINT32 ui32Size)
++{
++      g_BridgeDispatchTable[ui32BridgeID].ui32CopyFromUserTotalBytes+=ui32Size;
++      g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
++      return OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++static PVRSRV_ERROR
++CopyToUserWrapper(PVRSRV_PER_PROCESS_DATA *pProcData, 
++                                IMG_UINT32 ui32BridgeID,
++                                IMG_VOID *pvDest,
++                                IMG_VOID *pvSrc,
++                                IMG_UINT32 ui32Size)
++{
++      g_BridgeDispatchTable[ui32BridgeID].ui32CopyToUserTotalBytes+=ui32Size;
++      g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
++      return OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size);
++}
++#else
++#define CopyFromUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++      OSCopyFromUser(pProcData, pvDest, pvSrc, ui32Size)
++#define CopyToUserWrapper(pProcData, ui32BridgeID, pvDest, pvSrc, ui32Size) \
++      OSCopyToUser(pProcData, pvDest, pvSrc, ui32Size)
++#endif
++
++
++static int
++PVRSRVEnumerateDevicesBW(IMG_UINT32 ui32BridgeID,
++                                               IMG_VOID *psBridgeIn,
++                                               PVRSRV_BRIDGE_OUT_ENUMDEVICE *psEnumDeviceOUT,
++                                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DEVICES);
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      psEnumDeviceOUT->eError =
++              PVRSRVEnumerateDevicesKM(&psEnumDeviceOUT->ui32NumDevices,
++                                                               psEnumDeviceOUT->asDeviceIdentifier);
++
++      return 0;
++}
++
++static int
++PVRSRVAcquireDeviceDataBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_ACQUIRE_DEVICEINFO *psAcquireDevInfoIN,
++                                                PVRSRV_BRIDGE_OUT_ACQUIRE_DEVICEINFO *psAcquireDevInfoOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO);
++
++      psAcquireDevInfoOUT->eError =
++              PVRSRVAcquireDeviceDataKM(psAcquireDevInfoIN->uiDevIndex, 
++                                                                psAcquireDevInfoIN->eDeviceType, 
++                                                                &hDevCookieInt);
++      if(psAcquireDevInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psAcquireDevInfoOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psAcquireDevInfoOUT->hDevCookie,
++                                                hDevCookieInt,
++                                                PVRSRV_HANDLE_TYPE_DEV_NODE,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++      return 0;
++}
++
++static int
++PVRSRVGetDeviceMemHeapsBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_GET_DEVMEMHEAPS *psGetDevMemHeapsIN,
++                                                PVRSRV_BRIDGE_OUT_GET_DEVMEMHEAPS *psGetDevMemHeapsOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DEVMEMHEAPS);
++
++      psGetDevMemHeapsOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psGetDevMemHeapsIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psGetDevMemHeapsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDevMemHeapsOUT->eError =
++              PVRSRVGetDeviceMemHeapsKM(hDevCookieInt,
++                                                                &psGetDevMemHeapsOUT->sHeapInfo[0]);
++
++      if(psGetDevMemHeapsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++      {
++              if(psGetDevMemHeapsOUT->sHeapInfo[i].ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++              {
++                      IMG_HANDLE hDevMemHeapExt;
++
++                      if(psGetDevMemHeapsOUT->sHeapInfo[i].hDevMemHeap != IMG_NULL)
++                      {
++                              
++                              psGetDevMemHeapsOUT->eError =
++                                      PVRSRVAllocHandle(psPerProc->psHandleBase, &hDevMemHeapExt, 
++                                                                        psGetDevMemHeapsOUT->sHeapInfo[i].hDevMemHeap, 
++                                                                        PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                        PVRSRV_HANDLE_ALLOC_FLAG_SHARED); 
++                              if(psGetDevMemHeapsOUT->eError != PVRSRV_OK)
++                              {
++                                      return 0;
++                              }
++                              psGetDevMemHeapsOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static int
++SGXGetInfoForSrvinitBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitIN,
++                                         PVRSRV_BRIDGE_OUT_SGXINFO_FOR_SRVINIT *psSGXInfoForSrvinitOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_UINT32 i;
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT);
++
++      if(!psPerProc->bInitProcess)
++      {
++              psSGXInfoForSrvinitOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXInfoForSrvinitOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psSGXInfoForSrvinitIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXInfoForSrvinitOUT->eError =
++              SGXGetInfoForSrvinitKM(hDevCookieInt,
++                                                         &psSGXInfoForSrvinitOUT->sInitInfo);
++
++      if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      for(i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++      {
++              PVRSRV_HEAP_INFO *psHeapInfo;
++
++              psHeapInfo = &psSGXInfoForSrvinitOUT->sInitInfo.asHeapInfo[i];
++
++              if (psHeapInfo->ui32HeapID != (IMG_UINT32)SGX_UNDEFINED_HEAP_ID)
++              {
++                      IMG_HANDLE hDevMemHeapExt;
++
++                      if (psHeapInfo->hDevMemHeap != IMG_NULL)
++                      {
++                              
++                              psSGXInfoForSrvinitOUT->eError =
++                                      PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                                        &hDevMemHeapExt,
++                                                                        psHeapInfo->hDevMemHeap,
++                                                                        PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                        PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++                              if(psSGXInfoForSrvinitOUT->eError != PVRSRV_OK)
++                              {
++                                      return 0;
++                              }
++                              psHeapInfo->hDevMemHeap = hDevMemHeapExt;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static int
++PVRSRVCreateDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_CREATE_DEVMEMCONTEXT *psCreateDevMemContextIN,
++                                                         PVRSRV_BRIDGE_OUT_CREATE_DEVMEMCONTEXT *psCreateDevMemContextOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDevMemContextInt;
++      IMG_UINT32 i;
++      IMG_BOOL bCreated;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT);
++
++      psCreateDevMemContextOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psCreateDevMemContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psCreateDevMemContextOUT->eError = 
++              PVRSRVCreateDeviceMemContextKM(hDevCookieInt,
++                                                                         &hDevMemContextInt,
++                                                                         &psCreateDevMemContextOUT->ui32ClientHeapCount,
++                                                                         &psCreateDevMemContextOUT->sHeapInfo[0],
++                                                                         &bCreated
++#if defined(PVR_SECURE_HANDLES)
++                                                                         , abSharedDeviceMemHeap
++#endif
++                                                                        );
++
++      if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      
++      if(bCreated)
++      {
++              psCreateDevMemContextOUT->eError =
++                      PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                        &psCreateDevMemContextOUT->hDevMemContext,
++                                                        hDevMemContextInt,
++                                                        PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      }
++      else
++      {
++              psCreateDevMemContextOUT->eError =
++                      PVRSRVFindHandle(psPerProc->psHandleBase,
++                                                       &psCreateDevMemContextOUT->hDevMemContext,
++                                                       hDevMemContextInt,
++                                                       PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++      }
++
++      if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      for(i = 0; i < psCreateDevMemContextOUT->ui32ClientHeapCount; i++)
++      {
++              IMG_HANDLE hDevMemHeapExt;
++
++#if defined(PVR_SECURE_HANDLES)
++              if(abSharedDeviceMemHeap[i])
++#endif
++              {
++                      
++                      psCreateDevMemContextOUT->eError =
++                              PVRSRVAllocHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++                                                                psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++                                                                PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                PVRSRV_HANDLE_ALLOC_FLAG_SHARED); 
++              }
++#if defined(PVR_SECURE_HANDLES)
++              else
++              {
++                      
++                      if(bCreated)
++                      {
++                              psCreateDevMemContextOUT->eError =
++                                      PVRSRVAllocSubHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++                                                                               psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++                                                                               PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++                                                                               PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                                               psCreateDevMemContextOUT->hDevMemContext);
++                      }
++                      else
++                      {
++                              psCreateDevMemContextOUT->eError =
++                                      PVRSRVFindHandle(psPerProc->psHandleBase, &hDevMemHeapExt,
++                                                                       psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap,
++                                                                       PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++                      }
++              }
++#endif
++              if(psCreateDevMemContextOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++
++              psCreateDevMemContextOUT->sHeapInfo[i].hDevMemHeap = hDevMemHeapExt;
++      }
++
++      return 0;
++}
++
++static int
++PVRSRVDestroyDeviceMemContextBW(IMG_UINT32 ui32BridgeID,
++                                                              PVRSRV_BRIDGE_IN_DESTROY_DEVMEMCONTEXT *psDestroyDevMemContextIN,
++                                                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDevMemContextInt;
++      IMG_BOOL bDestroyed;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psDestroyDevMemContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt,
++                                                 psDestroyDevMemContextIN->hDevMemContext,
++                                                 PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVDestroyDeviceMemContextKM(hDevCookieInt, hDevMemContextInt, &bDestroyed);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(bDestroyed)
++      {
++              psRetOUT->eError =
++                      PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psDestroyDevMemContextIN->hDevMemContext,
++                                                              PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++      }
++
++      return 0;
++}
++
++
++
++static int
++PVRSRVAllocDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_ALLOCDEVICEMEM *psAllocDeviceMemIN,
++                                         PVRSRV_BRIDGE_OUT_ALLOCDEVICEMEM *psAllocDeviceMemOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDevMemHeapInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_DEVICEMEM);
++
++      psAllocDeviceMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psAllocDeviceMemIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psAllocDeviceMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemHeapInt,
++                                                 psAllocDeviceMemIN->hDevMemHeap,
++                                                 PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP);
++
++      if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psAllocDeviceMemOUT->eError = 
++              PVRSRVAllocDeviceMemKM(hDevCookieInt,
++                                                         hDevMemHeapInt,
++                                                         psAllocDeviceMemIN->ui32Attribs,
++                                                         psAllocDeviceMemIN->ui32Size,
++                                                         psAllocDeviceMemIN->ui32Alignment,
++                                                         &psMemInfo);
++
++      if(psAllocDeviceMemOUT->eError == PVRSRV_OK)
++      {
++              OSMemSet(&psAllocDeviceMemOUT->sClientMemInfo,
++                               0,
++                               sizeof(psAllocDeviceMemOUT->sClientMemInfo));
++
++              
++              if(psMemInfo->pvLinAddrKM)
++              {
++                      psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++                              psMemInfo->pvLinAddrKM;
++              }
++              else
++              {
++                      psAllocDeviceMemOUT->sClientMemInfo.pvLinAddrKM =
++                              psMemInfo->sMemBlk.hOSMemHandle;
++              }
++#if defined (__linux__)
++              psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = 0;
++#else
++              psAllocDeviceMemOUT->sClientMemInfo.pvLinAddr = psMemInfo->pvLinAddrKM;
++#endif
++              psAllocDeviceMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++              psAllocDeviceMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++              psAllocDeviceMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++              psAllocDeviceMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++
++              psAllocDeviceMemOUT->eError =
++                      PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                        &psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                        psMemInfo,
++                                                        PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++              if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++
++              if(psAllocDeviceMemIN->ui32Attribs & PVRSRV_MEM_NO_SYNCOBJ)
++              {
++                      
++                      OSMemSet(&psAllocDeviceMemOUT->sClientSyncInfo,
++                                       0,
++                                       sizeof (PVRSRV_CLIENT_SYNC_INFO));
++                      psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = IMG_NULL;
++                      psAllocDeviceMemOUT->psKernelSyncInfo = IMG_NULL;
++              }
++              else
++              {
++                      
++                      psAllocDeviceMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++                      psAllocDeviceMemOUT->sClientSyncInfo.psSyncData =
++                              psMemInfo->psKernelSyncInfo->psSyncData;
++                      psAllocDeviceMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++                      psAllocDeviceMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++                      psAllocDeviceMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++                      psAllocDeviceMemOUT->eError =
++                              PVRSRVAllocSubHandle(psPerProc->psHandleBase, 
++                                                                       &psAllocDeviceMemOUT->sClientSyncInfo.hKernelSyncInfo, 
++                                                                       psMemInfo->psKernelSyncInfo, 
++                                                                       PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                                       PVRSRV_HANDLE_ALLOC_FLAG_NONE, 
++                                                                       psAllocDeviceMemOUT->sClientMemInfo.hKernelMemInfo);
++                      if(psAllocDeviceMemOUT->eError != PVRSRV_OK)
++                      {
++                              return 0;
++                      }
++
++                      psAllocDeviceMemOUT->sClientMemInfo.psClientSyncInfo = 
++                              &psAllocDeviceMemOUT->sClientSyncInfo;
++
++              }
++      }
++
++      return 0;
++}
++
++
++static int
++PVRSRVFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_FREEDEVICEMEM *psFreeDeviceMemIN,
++                                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_VOID *pvKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_DEVICEMEM);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psFreeDeviceMemIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++                                                 psFreeDeviceMemIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, pvKernelMemInfo, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psFreeDeviceMemIN->psKernelMemInfo,
++                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      return 0;
++}
++
++static int
++PVRSRVMapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_MAP_DEVICECLASS_MEMORY *psMapDevClassMemIN,
++                                                       PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psMapDevClassMemOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      IMG_HANDLE hOSMapInfo;
++      IMG_HANDLE hDeviceClassBufferInt;
++      PVRSRV_HANDLE_TYPE eHandleType;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY);
++
++      
++      psMapDevClassMemOUT->eError =
++              PVRSRVLookupHandleAnyType(psPerProc->psHandleBase, &hDeviceClassBufferInt,
++                                                                &eHandleType,
++                                                                psMapDevClassMemIN->hDeviceClassBuffer);
++
++      if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      
++      switch(eHandleType)
++      {
++#if defined(PVR_SECURE_HANDLES)
++              case PVRSRV_HANDLE_TYPE_DISP_BUFFER:
++              case PVRSRV_HANDLE_TYPE_BUF_BUFFER:
++#else
++              case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++                      break;
++              default:
++                      psMapDevClassMemOUT->eError = PVRSRV_ERROR_GENERIC;
++                      return 0;
++      }
++
++      psMapDevClassMemOUT->eError = 
++              PVRSRVMapDeviceClassMemoryKM(hDeviceClassBufferInt,
++                                                                       &psMemInfo,
++                                                                       &hOSMapInfo);
++
++      if(psMapDevClassMemOUT->eError == PVRSRV_OK)
++      {
++              OSMemSet(&psMapDevClassMemOUT->sClientMemInfo,
++                               0,
++                               sizeof(psMapDevClassMemOUT->sClientMemInfo));
++              OSMemSet(&psMapDevClassMemOUT->sClientSyncInfo,
++                               0,
++                               sizeof(psMapDevClassMemOUT->sClientSyncInfo));
++
++              
++              if(psMemInfo->pvLinAddrKM)
++              {
++                      psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM = 
++                              psMemInfo->pvLinAddrKM;
++              }
++              else
++              {
++                      psMapDevClassMemOUT->sClientMemInfo.pvLinAddrKM =
++                              psMemInfo->sMemBlk.hOSMemHandle;
++              }
++              psMapDevClassMemOUT->sClientMemInfo.pvLinAddr = 0;
++              psMapDevClassMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++              psMapDevClassMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++              psMapDevClassMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++              psMapDevClassMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++
++              psMapDevClassMemOUT->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                        &psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                        psMemInfo,
++                                                        PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                        psMapDevClassMemIN->hDeviceClassBuffer);
++              if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++
++              psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo = IMG_NULL;
++              psMapDevClassMemOUT->psKernelSyncInfo = IMG_NULL;
++
++              
++              if(psMemInfo->psKernelSyncInfo)
++              {
++                      psMapDevClassMemOUT->psKernelSyncInfo = psMemInfo->psKernelSyncInfo;
++
++                      psMapDevClassMemOUT->sClientSyncInfo.psSyncData =
++                              psMemInfo->psKernelSyncInfo->psSyncData;
++                      psMapDevClassMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++                      psMapDevClassMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++                              psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++                      psMapDevClassMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++                      psMapDevClassMemOUT->sClientMemInfo.psClientSyncInfo = &psMapDevClassMemOUT->sClientSyncInfo;
++                      
++                      psMapDevClassMemOUT->eError =
++                              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                                &psMapDevClassMemOUT->sClientSyncInfo.hKernelSyncInfo,
++                                                                psMemInfo->psKernelSyncInfo,
++                                                                PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                                psMapDevClassMemOUT->sClientMemInfo.hKernelMemInfo);
++                      if(psMapDevClassMemOUT->eError != PVRSRV_OK)
++                      {
++                              return 0;
++                      }
++              }
++      }
++
++      return 0;
++}
++
++static int
++PVRSRVUnmapDeviceClassMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_UNMAP_DEVICECLASS_MEMORY *psUnmapDevClassMemIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvKernelMemInfo,
++                                                 psUnmapDevClassMemIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVUnmapDeviceClassMemoryKM(pvKernelMemInfo, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psUnmapDevClassMemIN->psKernelMemInfo,
++                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      return 0;
++}
++
++static int
++PVRSRVWrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_WRAP_EXT_MEMORY *psWrapExtMemIN,
++                                        PVRSRV_BRIDGE_OUT_WRAP_EXT_MEMORY *psWrapExtMemOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      IMG_UINT32 ui32PageTableSize;
++      IMG_SYS_PHYADDR *psSysPAddr;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_WRAP_EXT_MEMORY);
++
++      
++      psWrapExtMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt,
++                                                 psWrapExtMemIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psWrapExtMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      ui32PageTableSize = psWrapExtMemIN->ui32NumPageTableEntries
++                                              * sizeof(IMG_SYS_PHYADDR);
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                ui32PageTableSize,
++                                (IMG_VOID **)&psSysPAddr, 0)
++         != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                                 ui32BridgeID,
++                                                 psSysPAddr,
++                                                 psWrapExtMemIN->psSysPAddr,
++                                                 ui32PageTableSize) != PVRSRV_OK)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,      ui32PageTableSize, (IMG_VOID *)psSysPAddr, 0);
++              return -EFAULT;
++      }
++
++      psWrapExtMemOUT->eError =
++              PVRSRVWrapExtMemoryKM(hDevCookieInt,
++                                                        psWrapExtMemIN->ui32ByteSize,
++                                                        psWrapExtMemIN->ui32PageOffset,
++                                                        psWrapExtMemIN->bPhysContig,
++                                                        psSysPAddr,
++                                                        &psMemInfo);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        ui32PageTableSize,
++                        (IMG_VOID *)psSysPAddr, 0);
++
++      if(psWrapExtMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      
++      if(psMemInfo->pvLinAddrKM)
++      {
++              psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psMemInfo->pvLinAddrKM;
++      }
++      else
++      {
++              psWrapExtMemOUT->sClientMemInfo.pvLinAddrKM = 
++                      psMemInfo->sMemBlk.hOSMemHandle;
++      }
++
++      
++      psWrapExtMemOUT->sClientMemInfo.pvLinAddr = 0;
++      psWrapExtMemOUT->sClientMemInfo.sDevVAddr = psMemInfo->sDevVAddr;
++      psWrapExtMemOUT->sClientMemInfo.ui32Flags = psMemInfo->ui32Flags;
++      psWrapExtMemOUT->sClientMemInfo.ui32AllocSize = psMemInfo->ui32AllocSize;
++      psWrapExtMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++
++      psWrapExtMemOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase, 
++                                                &psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo, 
++                                                psMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      if(psWrapExtMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      
++      psWrapExtMemOUT->sClientSyncInfo.psSyncData =
++              psMemInfo->psKernelSyncInfo->psSyncData;
++      psWrapExtMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++              psMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++      psWrapExtMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++              psMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++      psWrapExtMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++      psWrapExtMemOUT->sClientMemInfo.psClientSyncInfo = &psWrapExtMemOUT->sClientSyncInfo;
++
++      psWrapExtMemOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase, 
++                                                &psWrapExtMemOUT->sClientSyncInfo.hKernelSyncInfo, 
++                                                (IMG_HANDLE)psMemInfo->psKernelSyncInfo,
++                                                PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                psWrapExtMemOUT->sClientMemInfo.hKernelMemInfo);
++
++      return 0;
++}
++
++static int
++PVRSRVUnwrapExtMemoryBW(IMG_UINT32 ui32BridgeID,
++                                              PVRSRV_BRIDGE_IN_UNWRAP_EXT_MEMORY *psUnwrapExtMemIN,
++                                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvMemInfo,
++                                                 psUnwrapExtMemIN->hKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVUnwrapExtMemoryKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++                                                              IMG_FALSE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                 psUnwrapExtMemIN->hKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++      return 0;
++}
++
++static int
++PVRSRVGetFreeDeviceMemBW(IMG_UINT32 ui32BridgeID,
++                                               PVRSRV_BRIDGE_IN_GETFREEDEVICEMEM *psGetFreeDeviceMemIN,
++                                               PVRSRV_BRIDGE_OUT_GETFREEDEVICEMEM *psGetFreeDeviceMemOUT,
++                                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETFREE_DEVICEMEM);
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psGetFreeDeviceMemOUT->eError = 
++              PVRSRVGetFreeDeviceMemKM(psGetFreeDeviceMemIN->ui32Flags,
++                                                               &psGetFreeDeviceMemOUT->ui32Total,
++                                                               &psGetFreeDeviceMemOUT->ui32Free,
++                                                               &psGetFreeDeviceMemOUT->ui32LargestBlock);
++
++      return 0;
++}
++
++static int
++PVRMMapKVIndexAddressToMMapDataBW(IMG_UINT32 ui32BridgeID,
++                                                                PVRSRV_BRIDGE_IN_KV_TO_MMAP_DATA *psMMapDataIN,
++                                                                PVRSRV_BRIDGE_OUT_KV_TO_MMAP_DATA *psMMapDataOUT,
++                                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_KV_TO_MMAP_DATA);
++      PVR_UNREFERENCED_PARAMETER(psMMapDataIN);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psMMapDataOUT->eError =
++              PVRMMapKVIndexAddressToMMapData(psMMapDataIN->pvKVIndexAddress,
++                                                                              psMMapDataIN->ui32Bytes,
++                                                                              &psMMapDataOUT->ui32MMapOffset,
++                                                                              &psMMapDataOUT->ui32ByteOffset,
++                                                                              &psMMapDataOUT->ui32RealByteSize);
++
++      return 0;
++}
++
++
++#ifdef PDUMP
++static int
++PDumpIsCaptureFrameBW(IMG_UINT32 ui32BridgeID,
++                                        IMG_VOID *psBridgeIn,
++                                        PVRSRV_BRIDGE_OUT_PDUMP_ISCAPTURING *psPDumpIsCapturingOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_ISCAPTURING);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psPDumpIsCapturingOUT->bIsCapturing = PDumpIsCaptureFrameKM();
++      psPDumpIsCapturingOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PDumpCommentBW(IMG_UINT32 ui32BridgeID,
++                         PVRSRV_BRIDGE_IN_PDUMP_COMMENT *psPDumpCommentIN,
++                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_COMMENT);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError = PDumpCommentKM(&psPDumpCommentIN->szComment[0],
++                                                                        psPDumpCommentIN->ui32Flags);
++      return 0;
++}
++
++static int
++PDumpSetFrameBW(IMG_UINT32 ui32BridgeID,
++                              PVRSRV_BRIDGE_IN_PDUMP_SETFRAME *psPDumpSetFrameIN,
++                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SETFRAME);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError = PDumpSetFrameKM(psPDumpSetFrameIN->ui32Frame);
++
++      return 0;
++}
++
++static int
++PDumpRegWithFlagsBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_PDUMP_DUMPREG *psPDumpRegDumpIN,
++                                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REG);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError =
++              PDumpRegWithFlagsKM(psPDumpRegDumpIN->sHWReg.ui32RegAddr,
++                                                      psPDumpRegDumpIN->sHWReg.ui32RegVal,
++                                                      psPDumpRegDumpIN->ui32Flags);
++
++      return 0;
++}
++
++static int
++PDumpRegPolBW(IMG_UINT32 ui32BridgeID,
++                        PVRSRV_BRIDGE_IN_PDUMP_REGPOL *psPDumpRegPolIN,
++                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_REGPOL);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError = 
++              PDumpRegPolWithFlagsKM(psPDumpRegPolIN->sHWReg.ui32RegAddr,     
++                                                         psPDumpRegPolIN->sHWReg.ui32RegVal,
++                                                         psPDumpRegPolIN->ui32Mask,
++                                                         psPDumpRegPolIN->ui32Flags);
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_MEMPOL: resolves the caller's
++ * mem-info handle and emits a memory-poll (equality operator) into the
++ * parameter dump.  Handle-lookup failure is reported via psRetOUT->eError
++ * with a 0 return, matching the other wrappers in this file. */
++static int
++PDumpMemPolBW(IMG_UINT32 ui32BridgeID,
++                        PVRSRV_BRIDGE_IN_PDUMP_MEMPOL *psPDumpMemPolIN,
++                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_MEMPOL);
++
++      /* Translate the user-visible handle into the kernel mem-info pointer. */
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvMemInfo,
++                                                 psPDumpMemPolIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PDumpMemPolKM(((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo),
++                                        psPDumpMemPolIN->ui32Offset,
++                                        psPDumpMemPolIN->ui32Value,
++                                        psPDumpMemPolIN->ui32Mask,
++                                        PDUMP_POLL_OPERATOR_EQUAL,
++                                        psPDumpMemPolIN->bLastFrame,
++                                        psPDumpMemPolIN->bOverwrite,
++                                        MAKEUNIQUETAG(pvMemInfo));
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_DUMPMEM: dumps the contents of a
++ * memory allocation.  If the caller supplies an alternate linear address,
++ * ui32Bytes of user data are copied into a temporary pageable kernel
++ * buffer first (freed on every exit path below).  Returns -EFAULT only
++ * for kernel alloc/copy failures; service errors go via psRetOUT. */
++static int
++PDumpMemBW(IMG_UINT32 ui32BridgeID,
++                 PVRSRV_BRIDGE_IN_PDUMP_DUMPMEM *psPDumpMemDumpIN,
++                 PVRSRV_BRIDGE_RETURN *psRetOUT,
++                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++      IMG_VOID *pvAltLinAddrKM = IMG_NULL;
++      IMG_UINT32 ui32Bytes = psPDumpMemDumpIN->ui32Bytes;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPMEM);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvMemInfo,
++                                                 psPDumpMemDumpIN->psKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Optional user-supplied source buffer: stage it in kernel memory. */
++      if(psPDumpMemDumpIN->pvAltLinAddr)
++      {
++              if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                        ui32Bytes, 
++                                        &pvAltLinAddrKM, 0) != PVRSRV_OK)
++              {
++                      return -EFAULT;
++              }
++
++              if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID,
++                                                         pvAltLinAddrKM,
++                                                         psPDumpMemDumpIN->pvAltLinAddr,
++                                                         ui32Bytes) != PVRSRV_OK)
++              {
++                      /* Copy failed: release the staging buffer before bailing. */
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++                      return -EFAULT;
++              }
++      }
++
++      psRetOUT->eError =
++              PDumpMemKM(pvAltLinAddrKM,
++                                 pvMemInfo,
++                                 psPDumpMemDumpIN->ui32Offset,
++                                 ui32Bytes,
++                                 psPDumpMemDumpIN->ui32Flags,
++                                 MAKEUNIQUETAG(pvMemInfo));
++
++      if(pvAltLinAddrKM)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++      }
++
++      return 0;
++}             
++
++/* Bridge wrapper: dumps a framebuffer/bitmap region to a pdump file.
++ * NOTE(review): unlike its siblings this wrapper has no
++ * PVRSRV_BRIDGE_ASSERT_CMD check — ui32BridgeID is marked unreferenced
++ * instead.  Presumably intentional, but worth confirming upstream. */
++static int
++PDumpBitmapBW(IMG_UINT32 ui32BridgeID,
++                        PVRSRV_BRIDGE_IN_PDUMP_BITMAP *psPDumpBitmapIN,
++                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++      psRetOUT->eError =
++              PDumpBitmapKM(&psPDumpBitmapIN->szFileName[0],
++                                        psPDumpBitmapIN->ui32FileOffset,
++                                        psPDumpBitmapIN->ui32Width,
++                                        psPDumpBitmapIN->ui32Height,
++                                        psPDumpBitmapIN->ui32StrideInBytes,
++                                        psPDumpBitmapIN->sDevBaseAddr,
++                                        psPDumpBitmapIN->ui32Size,
++                                        psPDumpBitmapIN->ePixelFormat,
++                                        psPDumpBitmapIN->eMemFormat,
++                                        psPDumpBitmapIN->ui32Flags);
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_DUMPREADREG: records a register
++ * read-to-file command in the parameter dump stream. */
++static int
++PDumpReadRegBW(IMG_UINT32 ui32BridgeID,
++                         PVRSRV_BRIDGE_IN_PDUMP_READREG *psPDumpReadRegIN,
++                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPREADREG);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      psRetOUT->eError =
++              PDumpReadRegKM(&psPDumpReadRegIN->szFileName[0],
++                                         psPDumpReadRegIN->ui32FileOffset,
++                                         psPDumpReadRegIN->ui32Address,
++                                         psPDumpReadRegIN->ui32Size,
++                                         psPDumpReadRegIN->ui32Flags);
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_DRIVERINFO: writes a free-form
++ * driver-info string into the dump, optionally flagged CONTINUOUS so it
++ * is captured regardless of the active frame range. */
++static int
++PDumpDriverInfoBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_PDUMP_DRIVERINFO *psPDumpDriverInfoIN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_UINT32 ui32PDumpFlags;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DRIVERINFO);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      /* Map the caller's boolean onto the pdump flag word. */
++      ui32PDumpFlags = 0;
++      if(psPDumpDriverInfoIN->bContinuous)
++      {
++              ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS;
++      }
++      psRetOUT->eError =
++              PDumpDriverInfoKM(&psPDumpDriverInfoIN->szString[0],
++                                                ui32PDumpFlags);
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_DUMPSYNC: dumps the sync data
++ * backing a sync-info object.  Mirrors PDumpMemBW's staging pattern: an
++ * optional user buffer is copied into pageable kernel memory and freed
++ * on all exit paths.  Note the dump is made with flags fixed to 0. */
++static int
++PDumpSyncDumpBW(IMG_UINT32 ui32BridgeID,
++                              PVRSRV_BRIDGE_IN_PDUMP_DUMPSYNC *psPDumpSyncDumpIN,
++                              PVRSRV_BRIDGE_RETURN *psRetOUT,
++                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvAltLinAddrKM = IMG_NULL;
++      IMG_UINT32 ui32Bytes = psPDumpSyncDumpIN->ui32Bytes;
++      IMG_VOID *pvSyncInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPSYNC);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++                                                 psPDumpSyncDumpIN->psKernelSyncInfo,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Optional user-supplied source buffer: stage it in kernel memory. */
++      if(psPDumpSyncDumpIN->pvAltLinAddr)
++      {
++              if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                        ui32Bytes, 
++                                        &pvAltLinAddrKM, 0) != PVRSRV_OK)
++              {
++                      return -EFAULT;
++              }
++
++              if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID,
++                                                         pvAltLinAddrKM,
++                                                         psPDumpSyncDumpIN->pvAltLinAddr,
++                                                         ui32Bytes) != PVRSRV_OK)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++                      return -EFAULT;
++              }
++      }
++
++      /* Dump the sync-data mem-info that the sync object wraps. */
++      psRetOUT->eError =
++              PDumpMemKM(pvAltLinAddrKM,
++                                 ((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++                                 psPDumpSyncDumpIN->ui32Offset,
++                                 ui32Bytes,
++                                 0,
++                                 MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++      if(pvAltLinAddrKM)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32Bytes, pvAltLinAddrKM, 0);
++      }
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_SYNCPOL: polls a sync object's
++ * read- or write-ops-complete counter.  The field offset within
++ * PVRSRV_SYNC_DATA is selected with offsetof based on bIsRead, then the
++ * poll is emitted as a plain memory poll on the sync-data mem-info. */
++static int
++PDumpSyncPolBW(IMG_UINT32 ui32BridgeID,
++                         PVRSRV_BRIDGE_IN_PDUMP_SYNCPOL *psPDumpSyncPolIN,
++                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_UINT32 ui32Offset;
++      IMG_VOID *pvSyncInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_SYNCPOL);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
++                                                 psPDumpSyncPolIN->psKernelSyncInfo,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* Choose which ops-complete counter to poll. */
++      if(psPDumpSyncPolIN->bIsRead)
++      {
++              ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++      }
++      else
++      {
++              ui32Offset = offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++      }
++
++      psRetOUT->eError =
++              PDumpMemPolKM(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM,
++                                        ui32Offset,
++                                        psPDumpSyncPolIN->ui32Value,
++                                        psPDumpSyncPolIN->ui32Mask,
++                                        PDUMP_POLL_OPERATOR_EQUAL,
++                                        IMG_FALSE,
++                                        IMG_FALSE,
++                                        MAKEUNIQUETAG(((PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo)->psSyncDataMemInfoKM));
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_PDREG: dumps a page-directory
++ * register write.  PDumpPDReg returns no status, so eError is set to
++ * PVRSRV_OK unconditionally. */
++static int
++PDumpPDRegBW(IMG_UINT32 ui32BridgeID,
++                       PVRSRV_BRIDGE_IN_PDUMP_DUMPPDREG *psPDumpPDRegDumpIN,
++                       PVRSRV_BRIDGE_RETURN *psRetOUT,
++                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_PDREG);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PDumpPDReg(psPDumpPDRegDumpIN->sHWReg.ui32RegAddr,
++                         psPDumpPDRegDumpIN->sHWReg.ui32RegVal,
++                         PDUMP_PD_UNIQUETAG);
++
++      psRetOUT->eError = PVRSRV_OK;
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ: records a
++ * cycle-counter register read.  As with PDumpPDRegBW, the KM call has no
++ * status, so eError is always PVRSRV_OK. */
++static int
++PDumpCycleCountRegReadBW(IMG_UINT32 ui32BridgeID,
++                                               PVRSRV_BRIDGE_IN_PDUMP_CYCLE_COUNT_REG_READ *psPDumpCycleCountRegReadIN,
++                                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PDumpCycleCountRegRead(psPDumpCycleCountRegReadIN->ui32RegOffset,
++                                                 psPDumpCycleCountRegReadIN->bLastFrame);
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR: dumps a device
++ * physical address written into a page-directory entry of the given
++ * mem-info at the given offset. */
++static int
++PDumpPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_PDUMP_DUMPPDDEVPADDR *psPDumpPDDevPAddrIN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR);
++
++      psRetOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvMemInfo,
++                                                 psPDumpPDDevPAddrIN->hKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PDumpPDDevPAddrKM((PVRSRV_KERNEL_MEM_INFO *)pvMemInfo,
++                                                psPDumpPDDevPAddrIN->ui32Offset,
++                                                psPDumpPDDevPAddrIN->sPDDevPAddr,
++                                                MAKEUNIQUETAG(pvMemInfo),
++                                                PDUMP_PD_UNIQUETAG);
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY: copies an array of
++ * kick-TA dump-buffer descriptors from user space, translates each entry's
++ * hKernelMemInfo handle to its kernel pointer IN PLACE, and dumps the
++ * array if every lookup succeeded.  The temporary array is freed on all
++ * paths.  NOTE(review): if a lookup fails mid-loop, entries translated so
++ * far hold raw kernel pointers, but the buffer is freed and never dumped,
++ * so nothing leaks back to user space. */
++static int
++PDumpBufferArrayBW(IMG_UINT32 ui32BridgeID,
++                                 PVRSRV_BRIDGE_IN_PDUMP_BUFFER_ARRAY *psPDumpBufferArrayIN,
++                                 IMG_VOID *psBridgeOut,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_UINT32 i;
++      PVR3DIF4_KICKTA_DUMP_BUFFER *psKickTADumpBuffer;
++      IMG_UINT32 ui32BufferArrayLength =
++              psPDumpBufferArrayIN->ui32BufferArrayLength;
++      IMG_UINT32 ui32BufferArraySize =
++              ui32BufferArrayLength * sizeof(PVR3DIF4_KICKTA_DUMP_BUFFER);
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++      PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY);
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                ui32BufferArraySize, 
++                                (IMG_PVOID *)&psKickTADumpBuffer, 0) != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID,
++                                                 psKickTADumpBuffer,
++                                                 psPDumpBufferArrayIN->psBufferArray,
++                                                 ui32BufferArraySize) != PVRSRV_OK)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++              return -EFAULT;
++      }
++
++      /* Resolve every user handle to its kernel mem-info pointer. */
++      for(i = 0; i < ui32BufferArrayLength; i++)
++      {
++              IMG_VOID *pvMemInfo;
++
++              eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                                      &pvMemInfo,
++                                                                      psKickTADumpBuffer[i].hKernelMemInfo,
++                                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY: "
++                                       "PVRSRVLookupHandle failed (%d)", eError));
++                      break;
++              }
++              psKickTADumpBuffer[i].hKernelMemInfo = pvMemInfo;
++      }
++
++      /* Only dump if all handles resolved (and the array was non-empty;
++       * eError starts as PVRSRV_ERROR_GENERIC so an empty array skips this). */
++      if(eError == PVRSRV_OK)
++      {
++              DumpBufferArray(psKickTADumpBuffer,
++                                              ui32BufferArrayLength,
++                                              psPDumpBufferArrayIN->bDumpPolls);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32BufferArraySize, psKickTADumpBuffer, 0);
++
++      return 0;
++}
++
++#endif 
++
++#if defined(SUPPORT_SGX1)
++/* Bridge wrapper for PVRSRV_BRIDGE_SGX_GETCLIENTINFO: resolves the device
++ * cookie handle and fills in the client info structure via
++ * SGXGetClientInfoKM. */
++static int
++SGXGetClientInfoBW(IMG_UINT32 ui32BridgeID,
++                                 PVRSRV_BRIDGE_IN_GETCLIENTINFO *psGetClientInfoIN,
++                                 PVRSRV_BRIDGE_OUT_GETCLIENTINFO *psGetClientInfoOUT,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETCLIENTINFO);
++
++      psGetClientInfoOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psGetClientInfoIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psGetClientInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetClientInfoOUT->eError =
++              SGXGetClientInfoKM(hDevCookieInt, 
++                                                 &psGetClientInfoOUT->sClientInfo);
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO: decrements the
++ * SGX device's client reference count.  The count is asserted positive
++ * before the decrement (debug builds only, per the usual PVR_ASSERT
++ * convention). */
++static int
++SGXReleaseClientInfoBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_RELEASECLIENTINFO *psReleaseClientInfoIN,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO);
++
++      psRetOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psReleaseClientInfoIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* The device cookie is the device node; its pvDevice is the SGX info. */
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      PVR_ASSERT(psDevInfo->ui32ClientRefCount > 0);
++
++      psDevInfo->ui32ClientRefCount--;
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++
++/* Bridge wrapper for PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO: fetches the
++ * internal device info and wraps its hCtlKernelMemInfoHandle in a shared
++ * per-process handle for the caller. */
++static int
++SGXGetInternalDevInfoBW(IMG_UINT32 ui32BridgeID,
++                                              PVRSRV_BRIDGE_IN_GETINTERNALDEVINFO *psSGXGetInternalDevInfoIN,
++                                              PVRSRV_BRIDGE_OUT_GETINTERNALDEVINFO *psSGXGetInternalDevInfoOUT,
++                                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO);
++      
++      psSGXGetInternalDevInfoOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 psSGXGetInternalDevInfoIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXGetInternalDevInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXGetInternalDevInfoOUT->eError =
++              SGXGetInternalDevInfoKM(hDevCookieInt, 
++                                                              &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo);
++      
++      /* NOTE(review): the eError from SGXGetInternalDevInfoKM above is
++       * overwritten here without being checked — a failure from the KM call
++       * would be masked if PVRSRVAllocHandle succeeds.  Left as-is to keep
++       * the patch intact; flag for upstream. */
++      psSGXGetInternalDevInfoOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hCtlKernelMemInfoHandle,
++                                                psSGXGetInternalDevInfoOUT->sSGXInternalDevInfo.hCtlKernelMemInfoHandle,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_SHARED);
++
++      return 0;
++}
++
++
++/* Bridge wrapper for PVRSRV_BRIDGE_SGX_DOKICK: translates every handle
++ * embedded in the caller's CCB kick structure (CCB mem-info, optional
++ * destination sync, and the TA/3D status sync arrays) to kernel pointers
++ * IN PLACE, then submits the kick via SGXDoKickKM.  Any lookup failure
++ * aborts early with the error in psRetOUT->eError and return value 0. */
++static int
++SGXDoKickBW(IMG_UINT32 ui32BridgeID,
++                      PVRSRV_BRIDGE_IN_DOKICK *psDoKickIN,
++                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DOKICK);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psDoKickIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      /* In-place translation: the handle field is overwritten with the
++       * looked-up kernel pointer. */
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++                                                 psDoKickIN->sCCBKick.hCCBKernelMemInfo,
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO); 
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psDoKickIN->sCCBKick.hDstKernelSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.hDstKernelSyncInfo,
++                                                         psDoKickIN->sCCBKick.hDstKernelSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++#if defined (NO_HARDWARE)
++      /* Extra update-destination sync handle only exists in no-hardware builds. */
++      if(psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo != IMG_NULL)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
++                                                         psDoKickIN->sCCBKick.hUpdateDstKernelSyncInfo,
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++#endif
++      for (i = 0; i < psDoKickIN->sCCBKick.ui32NumTAStatusVals; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++                                                         psDoKickIN->sCCBKick.ahTAStatusSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      for(i = 0; i < psDoKickIN->sCCBKick.ui32Num3DStatusVals; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++                                                         psDoKickIN->sCCBKick.ah3DStatusSyncInfo[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO); 
++
++              if(psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      psRetOUT->eError =
++              SGXDoKickKM(hDevCookieInt, 
++                                      &psDoKickIN->sCCBKick);
++
++      return 0;
++}
++
++
++#if defined(TRANSFER_QUEUE)
++/* Bridge wrapper for PVRSRV_BRIDGE_SGX_SUBMITTRANSFER (transfer-queue
++ * builds only): resolves the device cookie and forwards the render
++ * context address to SGXSubmitTransferKM.
++ * NOTE(review): PVR_UNREFERENCED_PARAMETER(ui32BridgeID) after the
++ * ASSERT_CMD use is redundant — presumably to silence warnings in
++ * release builds where the assert compiles away. */
++static int
++SGXSubmitTransferBW(IMG_UINT32 ui32BridgeID,
++                      PVRSRV_BRIDGE_IN_SUBMITTRANSFER *psSubmitTransferIN,
++                      PVRSRV_BRIDGE_RETURN *psRetOUT,
++                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_SUBMITTRANSFER);
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psSubmitTransferIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              SGXSubmitTransferKM(hDevCookieInt,
++                                                      psSubmitTransferIN->sHWRenderContextDevVAddr);
++
++      return 0;
++}
++#endif
++
++/* Bridge wrapper for PVRSRV_BRIDGE_SGX_GETMISCINFO: copies a SGX_MISC_INFO
++ * request from user space into the scratch area that the bridge lays out
++ * immediately after the IN structure, services it via SGXGetMiscInfoKM,
++ * and copies the result back.  Returns -EFAULT on copy failures.
++ * NOTE(review): the two empty switch statements look like placeholders
++ * for per-request pre/post handle fixups; no cases are implemented in
++ * this version. */
++static int
++SGXGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++                               PVRSRV_BRIDGE_IN_SGXGETMISCINFO *psSGXGetMiscInfoIN,
++                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      SGX_MISC_INFO *psMiscInfo;
++
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_GETMISCINFO);
++
++      /* Scratch SGX_MISC_INFO lives directly after the IN structure in the
++       * bridge buffer — assumes the dispatcher sized the buffer accordingly. */
++      psMiscInfo =
++              (SGX_MISC_INFO *)((IMG_UINT8 *)psSGXGetMiscInfoIN
++                                                + sizeof(PVRSRV_BRIDGE_IN_SGXGETMISCINFO));
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt, 
++                                                 psSGXGetMiscInfoIN->hDevCookie, 
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID,
++                                                 psMiscInfo,
++                                                 psSGXGetMiscInfoIN->psMiscInfo,
++                                                 sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      /* Placeholder: per-request pre-processing (no cases implemented). */
++      switch(psMiscInfo->eRequest)
++      {
++              default:
++                      break;
++      }
++
++      
++      psRetOUT->eError = SGXGetMiscInfoKM(psDevInfo, psMiscInfo);
++
++      /* Placeholder: per-request post-processing (no cases implemented). */
++      switch(psMiscInfo->eRequest)
++      {
++              default:
++                      break;
++      }
++
++      if(CopyToUserWrapper(psPerProc,
++                                   ui32BridgeID,
++                                               psSGXGetMiscInfoIN->psMiscInfo,
++                                               psMiscInfo,
++                                               sizeof(SGX_MISC_INFO)) != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_INITSRV_CONNECT: marks this process as
++ * THE services init process, provided the caller is privileged and no
++ * init server is running or has already run.
++ * NOTE(review): PVR_UNREFERENCED_PARAMETER(psBridgeIn) appears twice —
++ * harmless duplication.  Also gbInitServerRunning is only set under
++ * __linux__ but tested unconditionally above; confirm intent for other
++ * platforms. */
++static int
++PVRSRVInitSrvConnectBW(IMG_UINT32 ui32BridgeID,
++                                         IMG_VOID *psBridgeIn,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_CONNECT);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      /* Only a privileged process may connect, and only once. */
++      if(!OSProcHasPrivSrvInit() || gbInitServerRunning || gbInitServerRan)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++#if defined (__linux__)
++      gbInitServerRunning = IMG_TRUE;
++#endif
++      psPerProc->bInitProcess = IMG_TRUE;
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++/* Bridge wrapper for PVRSRV_BRIDGE_INITSRV_DISCONNECT: ends the init
++ * server session started by PVRSRVInitSrvConnectBW.  Records whether
++ * initialisation succeeded, closes the pdump init phase, and flips the
++ * global init-server state flags.  Rejected if the caller was not the
++ * init process. */
++static int
++PVRSRVInitSrvDisconnectBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_INITSRV_DISCONNECT *psInitSrvDisconnectIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_INITSRV_DISCONNECT);
++
++      if(!psPerProc->bInitProcess)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      PDUMPENDINITPHASE();
++
++      gbInitServerSuccessful = psInitSrvDisconnectIN->bInitSuccesful;
++
++      psPerProc->bInitProcess = IMG_FALSE;
++      gbInitServerRunning = IMG_FALSE;
++      gbInitServerRan = IMG_TRUE;
++
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++
++/* Bridge wrapper for PVRSRV_BRIDGE_EVENT_OBJECT_WAIT: blocks the caller
++ * on an OS event object with the requested millisecond timeout; the wait
++ * status (signalled/timeout/error) is returned in psRetOUT->eError. */
++static int
++PVRSRVEventObjectWaitBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_EVENT_OBJECT_WAIT *psEventObjectWaitIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_EVENT_OBJECT_WAIT);
++
++      psRetOUT->eError = OSEventObjectWait(psEventObjectWaitIN->hOSEventKM, psEventObjectWaitIN->ui32MSTimeout);
++
++      return 0;
++}
++
++
++static int
++SGXDevInitPart2BW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_SGXDEVINITPART2 *psSGXDevInitPart2IN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_ERROR eError;
++      IMG_BOOL bDissociateFailed = IMG_FALSE;
++      IMG_BOOL bLookupFailed = IMG_FALSE;
++      IMG_BOOL bReleaseFailed = IMG_FALSE;
++      IMG_HANDLE hDummy;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_DEVINITPART2);
++
++      if(!psPerProc->bInitProcess)
++      {
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt, 
++                                                 psSGXDevInitPart2IN->hDevCookie, 
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      
++      
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDummy,
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDummy, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bLookupFailed |= (eError != PVRSRV_OK);
++#endif
++
++
++
++      for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++      {
++              IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++              if (hHandle == IMG_NULL)
++              {
++                      continue;
++              }
++
++              eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                         &hDummy, 
++                                                         hHandle, 
++                                                         PVRSRV_HANDLE_TYPE_MEM_INFO);
++              bLookupFailed |= (eError != PVRSRV_OK);
++      }
++
++      if (bLookupFailed)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle lookup failed"));
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                 &psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, 
++                                                 psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo, 
++                                                 PVRSRV_HANDLE_TYPE_MEM_INFO);
++      bReleaseFailed |= (eError != PVRSRV_OK);
++#endif
++
++
++
++      for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++      {
++              IMG_HANDLE *phHandle = &psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++              if (*phHandle == IMG_NULL)
++                      continue;
++
++              eError = PVRSRVLookupAndReleaseHandle(psPerProc->psHandleBase, 
++                                                         phHandle, 
++                                                         *phHandle, 
++                                                         PVRSRV_HANDLE_TYPE_MEM_INFO);
++              bReleaseFailed |= (eError != PVRSRV_OK);
++      }
++
++      if (bReleaseFailed)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A handle release failed"));
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++              
++              PVR_DBG_BREAK;
++              return 0;
++      }
++
++      
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBEventKickerMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelHWProfilingMemInfo);
++      bDissociateFailed |= (eError != PVRSRV_OK);
++#endif
++
++
++
++      for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++      {
++              IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++              if (hHandle == IMG_NULL)
++                      continue;
++
++              eError = PVRSRVDissociateDeviceMemKM(hDevCookieInt, hHandle);
++              bDissociateFailed |= (eError != PVRSRV_OK);
++      }
++
++       
++      if(bDissociateFailed)
++      {
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBMemInfo, IMG_FALSE);
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelCCBCtlMemInfo, IMG_FALSE);
++              PVRSRVFreeDeviceMemKM(hDevCookieInt, psSGXDevInitPart2IN->sInitInfo.hKernelSGXHostCtlMemInfo, IMG_FALSE);
++
++              for (i = 0; i < SGX_MAX_INIT_MEM_HANDLES; i++)
++              {
++                      IMG_HANDLE hHandle = psSGXDevInitPart2IN->sInitInfo.asInitMemHandles[i];
++
++                      if (hHandle == IMG_NULL)
++                              continue;
++
++                      PVRSRVFreeDeviceMemKM(hDevCookieInt, (PVRSRV_KERNEL_MEM_INFO *)hHandle, IMG_FALSE);
++
++              }
++
++              PVR_DPF((PVR_DBG_ERROR, "DevInitSGXPart2BW: A dissociate failed"));
++
++              psRetOUT->eError = PVRSRV_ERROR_GENERIC;
++
++              
++              PVR_DBG_BREAK;
++              return 0;
++      }
++
++      psRetOUT->eError =
++              DevInitSGXPart2KM(psPerProc,
++                                                hDevCookieInt,
++                                                &psSGXDevInitPart2IN->sInitInfo);
++
++      return 0;
++}
++
++static int
++SGXRegisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextIN,
++                                                       PVRSRV_BRIDGE_OUT_SGX_REGISTER_HW_RENDER_CONTEXT *psSGXRegHWRenderContextOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_HANDLE hHWRenderContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT);
++
++      psSGXRegHWRenderContextOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXRegHWRenderContextIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXRegHWRenderContextOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      hHWRenderContextInt =
++              SGXRegisterHWRenderContextKM(psDevInfo,
++                                                                       &psSGXRegHWRenderContextIN->sHWRenderContextDevVAddr);
++
++      if (hHWRenderContextInt == IMG_NULL)
++      {
++              psSGXRegHWRenderContextOUT->eError = PVRSRV_ERROR_GENERIC;
++              return 0;
++      }
++
++      psSGXRegHWRenderContextOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXRegHWRenderContextOUT->hHWRenderContext,
++                                                hHWRenderContextInt,
++                                                PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++SGXFlushHWRenderTargetBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SGX_FLUSH_HW_RENDER_TARGET *psSGXFlushHWRenderTargetIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt,
++                                                 psSGXFlushHWRenderTargetIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      SGXFlushHWRenderTargetKM(psDevInfo, psSGXFlushHWRenderTargetIN->sHWRTDataSetDevVAddr);
++
++      return 0;
++}
++
++static int
++SGXUnregisterHWRenderContextBW(IMG_UINT32 ui32BridgeID,
++                                                         PVRSRV_BRIDGE_IN_SGX_UNREGISTER_HW_RENDER_CONTEXT *psSGXUnregHWRenderContextIN,
++                                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hHWRenderContextInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hHWRenderContextInt,
++                                                 psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                 PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = SGXUnregisterHWRenderContextKM(hHWRenderContextInt);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psSGXUnregHWRenderContextIN->hHWRenderContext,
++                                                      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT);
++      
++      return 0;
++}
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++static int
++SGX2DQueueBlitBW(IMG_UINT32 ui32BridgeID,
++                               PVRSRV_BRIDGE_IN_2DQUEUEBLT *ps2DQueueBltIN,
++                               PVRSRV_BRIDGE_RETURN *psRetOUT,
++                               PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[PVRSRV_MAX_BLT_SRC_SYNCS];
++      IMG_UINT32 i;
++      IMG_HANDLE hDevCookieInt;
++      IMG_VOID *pvSyncInfo;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUEUEBLT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 ps2DQueueBltIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(ps2DQueueBltIN->ui32DataByteSize > sizeof(aui322DBltData))
++      {
++              psRetOUT->eError = PVRSRV_ERROR_CMD_TOO_BIG;
++              return 0;
++      }
++
++      if(CopyFromUserWrapper(psPerProc,
++                                     ui32BridgeID,
++                                                 aui322DBltData,
++                                                 ps2DQueueBltIN->pui32BltData,
++                                                 ps2DQueueBltIN->ui32DataByteSize)
++        != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      for(i = 0; i < ps2DQueueBltIN->ui32NumSrcSyncs; i++)
++      {
++              psRetOUT->eError =
++                      PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, 
++                                                         ps2DQueueBltIN->ahKernSrcSync[i],
++                                                         PVRSRV_HANDLE_TYPE_SYNC_INFO);
++              if( psRetOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++              apsSrcSync[i] = (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, 
++                                                 ps2DQueueBltIN->hKernDstSync,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if( psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      psRetOUT->eError =
++              SGX2DQueueBlitKM(psDevInfo,
++                                               (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++                                               ps2DQueueBltIN->ui32NumSrcSyncs,
++                                               apsSrcSync,
++                                               ps2DQueueBltIN->ui32DataByteSize,
++                                               aui322DBltData);
++
++      return 0;
++}
++
++#if defined(SGX2D_DIRECT_BLITS)
++static int
++SGX2DDirectBlitBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_2DDIRECTBLT *ps2DDirectBltIN,
++                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DDIRECTBLT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 ps2DDirectBltIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(ps2DDirectBltIN->ui32DataByteSize > sizeof(aui322DBltData))
++      {
++              psRetOUT->eError = PVRSRV_ERROR_CMD_TOO_BIG;
++              return 0;
++      }
++
++      if(CopyFromUserWrapper(psPerProc,
++                                     ui32BridgeID,
++                                                 aui322DBltData,
++                                                 ps2DDirectBltIN->pui32BltData,
++                                                 ps2DDirectBltIN->ui32DataByteSize)
++        != PVRSRV_OK)
++      {
++              return -EFAULT;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      psRetOUT->eError =
++              SGX2DDirectBlitKM(psDevInfo,
++                                                ps2DDirectBltIN->ui32DataByteSize,
++                                                ps2DDirectBltIN->pui32BltData);
++
++      return 0;
++}
++#endif 
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++static int
++SGX2DQueryBlitsCompleteBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_2DQUERYBLTSCOMPLETE *ps2DQueryBltsCompleteIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_VOID *pvSyncInfo;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevCookieInt, 
++                                                 ps2DQueryBltsCompleteIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo, 
++                                                 ps2DQueryBltsCompleteIN->hKernSyncInfo,
++                                                 PVRSRV_HANDLE_TYPE_SYNC_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookieInt)->pvDevice;
++
++      psRetOUT->eError =
++              SGX2DQueryBlitsCompleteKM(psDevInfo,
++                                                                (PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
++                                                                ps2DQueryBltsCompleteIN->bWaitForComplete);
++
++      return 0;
++}
++#endif 
++
++static int
++SGXFindSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescIN,
++                                        PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos = IMG_NULL;
++      IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount = 0;
++      IMG_UINT32 i;
++      IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC);
++
++      psSGXFindSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psSGXFindSharedPBDescIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      psSGXFindSharedPBDescOUT->eError =
++              SGXFindSharedPBDescKM(hDevCookieInt,
++                                                        psSGXFindSharedPBDescIN->ui32TotalPBSize,
++                                                        &hSharedPBDesc,
++                                                        &psSharedPBDescKernelMemInfo,
++                                                        &psHWPBDescKernelMemInfo,
++                                                        &psBlockKernelMemInfo,
++                                                        &ppsSharedPBDescSubKernelMemInfos,
++                                                        &ui32SharedPBDescSubKernelMemInfosCount);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      PVR_ASSERT(ui32SharedPBDescSubKernelMemInfosCount
++                         <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++      psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount =
++              ui32SharedPBDescSubKernelMemInfosCount;
++
++      if(hSharedPBDesc == IMG_NULL)
++      {   
++              psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++              
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++      }
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hSharedPBDesc,
++                                                hSharedPBDesc,
++                                                PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle,
++                                                psSharedPBDescKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                psSGXFindSharedPBDescOUT->hSharedPBDesc);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hHWPBDescKernelMemInfoHandle,
++                                                psHWPBDescKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                psSGXFindSharedPBDescOUT->hSharedPBDesc);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      psSGXFindSharedPBDescOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psSGXFindSharedPBDescOUT->hBlockKernelMemInfoHandle,
++                                                psBlockKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                psSGXFindSharedPBDescOUT->hSharedPBDesc);
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++              goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++
++      
++      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++      {
++              PVRSRV_BRIDGE_OUT_SGXFINDSHAREDPBDESC *psSGXFindSharedPBDescOut =
++                      psSGXFindSharedPBDescOUT;
++
++              psSGXFindSharedPBDescOut->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                        &psSGXFindSharedPBDescOut->ahSharedPBDescSubKernelMemInfoHandles[i],
++                                                        ppsSharedPBDescSubKernelMemInfos[i],
++                                                        PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                        PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                        psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle);
++              if(psSGXFindSharedPBDescOut->eError != PVRSRV_OK)
++                      goto PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT;
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                        * ui32SharedPBDescSubKernelMemInfosCount,
++                        ppsSharedPBDescSubKernelMemInfos,
++                        IMG_NULL);
++
++PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC_EXIT:
++      if(psSGXFindSharedPBDescOUT->eError != PVRSRV_OK)
++      {
++              psSGXFindSharedPBDescOUT->hSharedPBDescKernelMemInfoHandle = 0;
++              psSGXFindSharedPBDescOUT->ui32SharedPBDescSubKernelMemInfoHandlesCount = 0;
++
++              if(hSharedPBDesc != IMG_NULL)
++              {
++                      SGXUnrefSharedPBDescKM(hSharedPBDesc);
++              }
++              if (psSGXFindSharedPBDescOUT->hSharedPBDesc != IMG_NULL)
++              {
++                              PVRSRV_ERROR eError;
++
++                              eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                      psSGXFindSharedPBDescOUT->hSharedPBDesc,
++                                        PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescBW: Couldn't free shared PB description handle (%d)", eError));
++                              }
++              }
++      }
++
++      return 0;
++}
++
++static int
++SGXUnrefSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescIN,
++                                         PVRSRV_BRIDGE_OUT_SGXUNREFSHAREDPBDESC *psSGXUnrefSharedPBDescOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hSharedPBDesc;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC);
++
++      psSGXUnrefSharedPBDescOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hSharedPBDesc,
++                                                 psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++                                                 PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++      if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXUnrefSharedPBDescOUT->eError =
++              SGXUnrefSharedPBDescKM(hSharedPBDesc);
++
++      if(psSGXUnrefSharedPBDescOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psSGXUnrefSharedPBDescOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                 psSGXUnrefSharedPBDescIN->hSharedPBDesc,
++                                                 PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++      
++      return 0;
++}
++
++static int
++SGXAddSharedPBDescBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescIN,
++                                       PVRSRV_BRIDGE_OUT_SGXADDSHAREDPBDESC *psSGXAddSharedPBDescOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo;
++      IMG_UINT32 ui32KernelMemInfoHandlesCount =
++              psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount;
++      IMG_BOOL bFault=IMG_FALSE;
++      IMG_HANDLE *phKernelMemInfoHandles = IMG_NULL;
++      PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfos = IMG_NULL;
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError=PVRSRV_OK;
++      IMG_HANDLE hSharedPBDesc = IMG_NULL;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC);
++
++      psSGXAddSharedPBDescOUT->hSharedPBDesc = IMG_NULL;
++
++      PVR_ASSERT(ui32KernelMemInfoHandlesCount 
++                         <= PVRSRV_BRIDGE_SGX_SHAREDPBDESC_MAX_SUBMEMINFOS);
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              &hDevCookieInt,
++                                                              psSGXAddSharedPBDescIN->hDevCookie,
++                                                              PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              (IMG_VOID **)&psSharedPBDescKernelMemInfo,
++                                                              psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              (IMG_VOID **)&psHWPBDescKernelMemInfo,
++                                                              psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                              (IMG_VOID **)&psBlockKernelMemInfo,
++                                                              psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++      
++      if(!OSAccessOK(PVR_VERIFY_READ,
++                                 psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++                                 ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE)))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC:"
++                               " Invalid phKernelMemInfos pointer", __FUNCTION__));
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE),
++                                (IMG_VOID **)&phKernelMemInfoHandles,
++                                0) != PVRSRV_OK)
++      {
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      if(CopyFromUserWrapper(psPerProc, 
++                                     ui32BridgeID, 
++                                     phKernelMemInfoHandles,
++                                                 psSGXAddSharedPBDescIN->phKernelMemInfoHandles,
++                                                 ui32KernelMemInfoHandlesCount * sizeof(IMG_HANDLE))
++         != PVRSRV_OK)
++      {
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                ui32KernelMemInfoHandlesCount * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++                                (IMG_VOID **)&ppsKernelMemInfos,
++                                0) != PVRSRV_OK)
++      {
++              bFault=IMG_TRUE;
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++      {
++              eError = PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                                      (IMG_VOID **)&ppsKernelMemInfos[i],
++                                                                      phKernelMemInfoHandles[i],
++                                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++              if(eError != PVRSRV_OK)
++              {
++                      goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++              }
++      }
++
++      eError = SGXAddSharedPBDescKM(hDevCookieInt,
++                                                                psSharedPBDescKernelMemInfo,
++                                                                psHWPBDescKernelMemInfo,
++                                                                psBlockKernelMemInfo,
++                                                                psSGXAddSharedPBDescIN->ui32TotalPBSize,
++                                                                &hSharedPBDesc,
++                                                                ppsKernelMemInfos,
++                                                                ui32KernelMemInfoHandlesCount);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psSGXAddSharedPBDescOUT->hSharedPBDesc,
++                                                hSharedPBDesc,
++                                                PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psSGXAddSharedPBDescIN->hSharedPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psSGXAddSharedPBDescIN->hHWPBDescKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                              psSGXAddSharedPBDescIN->hBlockKernelMemInfo,
++                                                              PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      if(eError != PVRSRV_OK)
++      {
++              goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++      }
++
++      for(i=0; i<ui32KernelMemInfoHandlesCount; i++)
++      {
++              eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                                      phKernelMemInfoHandles[i],
++                                                                      PVRSRV_HANDLE_TYPE_MEM_INFO);
++              if(eError != PVRSRV_OK)
++              {
++                      goto PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT;
++              }
++      }
++
++PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC_RETURN_RESULT:
++
++      psSGXAddSharedPBDescOUT->eError = eError;
++
++      if(phKernelMemInfoHandles)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount
++                                * sizeof(IMG_HANDLE),
++                                (IMG_VOID *)phKernelMemInfoHandles, 0);
++      }
++      if(ppsKernelMemInfos)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                psSGXAddSharedPBDescIN->ui32KernelMemInfoHandlesCount
++                                * sizeof(PVRSRV_KERNEL_MEM_INFO *),
++                                (IMG_VOID *)ppsKernelMemInfos, 0);
++      }
++
++      if(bFault || eError != PVRSRV_OK)
++      {
++              if(hSharedPBDesc != IMG_NULL)
++              {
++                      SGXUnrefSharedPBDescKM(hSharedPBDesc);
++              }
++
++              if(psSGXAddSharedPBDescOUT->hSharedPBDesc != IMG_NULL)
++              {
++                              PVRSRV_ERROR eError;
++
++                              eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                                                       psSGXAddSharedPBDescOUT->hSharedPBDesc,
++                                                                                       PVRSRV_HANDLE_TYPE_SHARED_PB_DESC);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR,
++                                                      "SGXAddSharedPBDescBW: Couldn't free shared PB description handle (%d)",
++                                                      eError));
++                              }
++              }
++      }
++
++      if(bFault)
++              return -EFAULT;
++      else
++              return 0;
++}
++
++#endif 
++
++
++static int
++PVRSRVGetMiscInfoBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_GET_MISC_INFO *psGetMiscInfoIN,
++                                      PVRSRV_BRIDGE_OUT_GET_MISC_INFO *psGetMiscInfoOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_MISC_INFO);
++      
++      OSMemCopy(&psGetMiscInfoOUT->sMiscInfo,
++                        &psGetMiscInfoIN->sMiscInfo,
++                        sizeof(PVRSRV_MISC_INFO));
++
++      psGetMiscInfoOUT->eError = PVRSRVGetMiscInfoKM(&psGetMiscInfoIN->sMiscInfo);
++      psGetMiscInfoOUT->sMiscInfo = psGetMiscInfoIN->sMiscInfo;
++
++      return 0;
++}
++
++static int
++PVRSRVConnectBW(IMG_UINT32 ui32BridgeID,
++                              IMG_VOID *psBridgeIn,
++                              PVRSRV_BRIDGE_OUT_CONNECT_SERVICES *psConnectServicesOUT,
++                              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CONNECT_SERVICES);
++      
++      psConnectServicesOUT->hKernelServices = psPerProc->hPerProcData;
++      psConnectServicesOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PVRSRVDisconnectBW(IMG_UINT32 ui32BridgeID,
++                                 IMG_VOID *psBridgeIn,
++                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++      
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DISCONNECT_SERVICES);
++
++      
++      psRetOUT->eError = PVRSRV_OK;
++
++      return 0;
++}
++
++static int
++PVRSRVEnumerateDCBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_ENUMCLASS *psEnumDispClassIN,
++                                      PVRSRV_BRIDGE_OUT_ENUMCLASS *psEnumDispClassOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_CLASS);
++
++      psEnumDispClassOUT->eError =
++              PVRSRVEnumerateDCKM(psEnumDispClassIN->sDeviceClass,
++                                                      &psEnumDispClassOUT->ui32NumDevices,
++                                                      &psEnumDispClassOUT->ui32DevID[0]);
++
++      return 0;
++}
++
++static int
++PVRSRVOpenDCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceIN,
++                                       PVRSRV_BRIDGE_OUT_OPEN_DISPCLASS_DEVICE *psOpenDispClassDeviceOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE);
++
++      psOpenDispClassDeviceOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &hDevCookieInt, 
++                                                 psOpenDispClassDeviceIN->hDevCookie, 
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenDispClassDeviceOUT->eError =
++              PVRSRVOpenDCDeviceKM(psOpenDispClassDeviceIN->ui32DeviceID, 
++                                                       hDevCookieInt,
++                                                       &hDispClassInfoInt);
++
++      if(psOpenDispClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenDispClassDeviceOUT->eError = 
++              PVRSRVAllocHandle(psPerProc->psHandleBase, 
++                                                &psOpenDispClassDeviceOUT->hDeviceKM, 
++                                                hDispClassInfoInt, 
++                                                PVRSRV_HANDLE_TYPE_DISP_INFO, 
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      return 0;
++} 
++
++static int
++PVRSRVCloseDCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_CLOSE_DISPCLASS_DEVICE *psCloseDispClassDeviceIN,
++                                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psCloseDispClassDeviceIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVCloseDCDeviceKM(pvDispClassInfoInt, IMG_FALSE);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psCloseDispClassDeviceIN->hDeviceKM,
++                                                      PVRSRV_HANDLE_TYPE_DISP_INFO);
++      return 0;
++} 
++
++static int
++PVRSRVEnumDCFormatsBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsIN,
++                                        PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_FORMATS *psEnumDispClassFormatsOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS);
++
++      psEnumDispClassFormatsOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psEnumDispClassFormatsIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psEnumDispClassFormatsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psEnumDispClassFormatsOUT->eError = 
++              PVRSRVEnumDCFormatsKM(pvDispClassInfoInt,
++                                                        &psEnumDispClassFormatsOUT->ui32Count,
++                                                        psEnumDispClassFormatsOUT->asFormat);
++
++      return 0;
++} 
++
++static int
++PVRSRVEnumDCDimsBW(IMG_UINT32 ui32BridgeID,
++                                 PVRSRV_BRIDGE_IN_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsIN,
++                                 PVRSRV_BRIDGE_OUT_ENUM_DISPCLASS_DIMS *psEnumDispClassDimsOUT,
++                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS);
++
++      psEnumDispClassDimsOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psEnumDispClassDimsIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++      if(psEnumDispClassDimsOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psEnumDispClassDimsOUT->eError = 
++              PVRSRVEnumDCDimsKM(pvDispClassInfoInt, 
++                                                 &psEnumDispClassDimsIN->sFormat, 
++                                                 &psEnumDispClassDimsOUT->ui32Count,
++                                                 psEnumDispClassDimsOUT->asDim);
++
++      return 0;
++} 
++
++static int
++PVRSRVGetDCSystemBufferBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferIN,  
++                                                PVRSRV_BRIDGE_OUT_GET_DISPCLASS_SYSBUFFER *psGetDispClassSysBufferOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hBufferInt;
++      IMG_VOID *pvDispClassInfoInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER);
++
++      psGetDispClassSysBufferOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfoInt, 
++                                                 psGetDispClassSysBufferIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassSysBufferOUT->eError = 
++              PVRSRVGetDCSystemBufferKM(pvDispClassInfoInt, 
++                                                                &hBufferInt);
++
++      if(psGetDispClassSysBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassSysBufferOUT->eError = 
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                       &psGetDispClassSysBufferOUT->hBuffer,
++                                                       hBufferInt,
++                                                       PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++                                                       (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++                                                       psGetDispClassSysBufferIN->hDeviceKM);
++
++      return 0;
++} 
++
++static int
++PVRSRVGetDCInfoBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_GET_DISPCLASS_INFO *psGetDispClassInfoIN,
++                                PVRSRV_BRIDGE_OUT_GET_DISPCLASS_INFO *psGetDispClassInfoOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_INFO);
++
++      psGetDispClassInfoOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psGetDispClassInfoIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psGetDispClassInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassInfoOUT->eError =
++              PVRSRVGetDCInfoKM(pvDispClassInfo,
++                                                &psGetDispClassInfoOUT->sDisplayInfo);
++
++      return 0;
++} 
++
++static int
++PVRSRVCreateDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainIN,
++                                                PVRSRV_BRIDGE_OUT_CREATE_DISPCLASS_SWAPCHAIN *psCreateDispClassSwapChainOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_HANDLE hSwapChainInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN);
++
++      psCreateDispClassSwapChainOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfo, 
++                                                 psCreateDispClassSwapChainIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++
++      if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psCreateDispClassSwapChainOUT->eError = 
++              PVRSRVCreateDCSwapChainKM(pvDispClassInfo, 
++                                                                psCreateDispClassSwapChainIN->ui32Flags,
++                                                                &psCreateDispClassSwapChainIN->sDstSurfAttrib,
++                                                                &psCreateDispClassSwapChainIN->sSrcSurfAttrib,
++                                                                psCreateDispClassSwapChainIN->ui32BufferCount,
++                                                                psCreateDispClassSwapChainIN->ui32OEMFlags,
++                                                                &hSwapChainInt,
++                                                                &psCreateDispClassSwapChainOUT->ui32SwapChainID);
++
++      if(psCreateDispClassSwapChainOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psCreateDispClassSwapChainOUT->eError = 
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase, 
++                                                &psCreateDispClassSwapChainOUT->hSwapChain, 
++                                                hSwapChainInt,
++                                                PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++                                                psCreateDispClassSwapChainIN->hDeviceKM);
++
++      return 0;
++}
++
++static int
++PVRSRVDestroyDCSwapChainBW(IMG_UINT32 ui32BridgeID,
++                                                 PVRSRV_BRIDGE_IN_DESTROY_DISPCLASS_SWAPCHAIN *psDestroyDispClassSwapChainIN,
++                                                 PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                 PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSwapChain, 
++                                                 psDestroyDispClassSwapChainIN->hSwapChain, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = 
++              PVRSRVDestroyDCSwapChainKM(pvSwapChain, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase, 
++                                                      psDestroyDispClassSwapChainIN->hSwapChain, 
++                                                      PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCDstRectBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassDstRectIN,
++                                       PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassDstRectIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassDstRectIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCDstRectKM(pvDispClassInfo,
++                                                       pvSwapChain,
++                                                       &psSetDispClassDstRectIN->sRect);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCSrcRectBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_SET_DISPCLASS_RECT *psSetDispClassSrcRectIN,
++                                       PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassSrcRectIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassSrcRectIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCSrcRectKM(pvDispClassInfo,
++                                                       pvSwapChain,
++                                                       &psSetDispClassSrcRectIN->sRect);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCDstColourKeyBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassColKeyIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassColKeyIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCDstColourKeyKM(pvDispClassInfo,
++                                                                pvSwapChain,
++                                                                psSetDispClassColKeyIN->ui32CKColour);
++
++      return 0;
++} 
++
++static int
++PVRSRVSetDCSrcColourKeyBW(IMG_UINT32 ui32BridgeID,
++                                                PVRSRV_BRIDGE_IN_SET_DISPCLASS_COLOURKEY *psSetDispClassColKeyIN,
++                                                PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psSetDispClassColKeyIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSetDispClassColKeyIN->hSwapChain, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVSetDCSrcColourKeyKM(pvDispClassInfo,
++                                                                pvSwapChain,
++                                                                psSetDispClassColKeyIN->ui32CKColour);
++
++      return 0;
++} 
++
++static int
++PVRSRVGetDCBuffersBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersIN,
++                                       PVRSRV_BRIDGE_OUT_GET_DISPCLASS_BUFFERS *psGetDispClassBuffersOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++      IMG_UINT32 i;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS);
++
++      psGetDispClassBuffersOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &pvDispClassInfo,
++                                                 psGetDispClassBuffersIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassBuffersOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvSwapChain, 
++                                                 psGetDispClassBuffersIN->hSwapChain, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN);
++      if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetDispClassBuffersOUT->eError = 
++              PVRSRVGetDCBuffersKM(pvDispClassInfo, 
++                                                       pvSwapChain,
++                                                       &psGetDispClassBuffersOUT->ui32BufferCount,
++                                                       psGetDispClassBuffersOUT->ahBuffer);
++
++      PVR_ASSERT(psGetDispClassBuffersOUT->ui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++      for(i = 0; i < psGetDispClassBuffersOUT->ui32BufferCount; i++)
++      {
++              IMG_HANDLE hBufferExt;
++
++              psGetDispClassBuffersOUT->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                               &hBufferExt,
++                                                               psGetDispClassBuffersOUT->ahBuffer[i],
++                                                               PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++                                                               (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE | PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++                                                               psGetDispClassBuffersIN->hSwapChain);
++              if(psGetDispClassBuffersOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++              psGetDispClassBuffersOUT->ahBuffer[i] = hBufferExt;
++      }
++
++      return 0;
++} 
++
++static int
++PVRSRVSwapToDCBufferBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_BUFFER *psSwapDispClassBufferIN,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChainBuf;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfo, 
++                                                 psSwapDispClassBufferIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupSubHandle(psPerProc->psHandleBase, 
++                                                 &pvSwapChainBuf, 
++                                                 psSwapDispClassBufferIN->hBuffer, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++                                                 psSwapDispClassBufferIN->hDeviceKM);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = 
++              PVRSRVSwapToDCBufferKM(pvDispClassInfo, 
++                                                         pvSwapChainBuf,
++                                                         psSwapDispClassBufferIN->ui32SwapInterval,
++                                                         psSwapDispClassBufferIN->hPrivateTag,
++                                                         psSwapDispClassBufferIN->ui32ClipRectCount,
++                                                         psSwapDispClassBufferIN->sClipRect);
++
++      return 0;
++}
++
++static int
++PVRSRVSwapToDCSystemBW(IMG_UINT32 ui32BridgeID,
++                                         PVRSRV_BRIDGE_IN_SWAP_DISPCLASS_TO_SYSTEM *psSwapDispClassSystemIN,
++                                         PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                         PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvDispClassInfo;
++      IMG_VOID *pvSwapChain;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvDispClassInfo, 
++                                                 psSwapDispClassSystemIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_DISP_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError =
++              PVRSRVLookupSubHandle(psPerProc->psHandleBase,
++                                                 &pvSwapChain,
++                                                 psSwapDispClassSystemIN->hSwapChain,
++                                                 PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++                                                 psSwapDispClassSystemIN->hDeviceKM);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      psRetOUT->eError = 
++              PVRSRVSwapToDCSystemKM(pvDispClassInfo, 
++                                                         pvSwapChain);
++
++      return 0;
++}
++
++static int
++PVRSRVOpenBCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceIN,
++                                       PVRSRV_BRIDGE_OUT_OPEN_BUFFERCLASS_DEVICE *psOpenBufferClassDeviceOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevCookieInt;
++      IMG_HANDLE hBufClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE);
++
++      psOpenBufferClassDeviceOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 &hDevCookieInt,
++                                                 psOpenBufferClassDeviceIN->hDevCookie,
++                                                 PVRSRV_HANDLE_TYPE_DEV_NODE);
++      if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenBufferClassDeviceOUT->eError = 
++              PVRSRVOpenBCDeviceKM(psOpenBufferClassDeviceIN->ui32DeviceID,
++                                                       hDevCookieInt,
++                                                       &hBufClassInfo);
++      if(psOpenBufferClassDeviceOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psOpenBufferClassDeviceOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psOpenBufferClassDeviceOUT->hDeviceKM,
++                                                hBufClassInfo,
++                                                PVRSRV_HANDLE_TYPE_BUF_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++PVRSRVCloseBCDeviceBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_CLOSE_BUFFERCLASS_DEVICE *psCloseBufferClassDeviceIN,
++                                        PVRSRV_BRIDGE_RETURN *psRetOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvBufClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE);
++
++      psRetOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvBufClassInfo, 
++                                                 psCloseBufferClassDeviceIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = 
++              PVRSRVCloseBCDeviceKM(pvBufClassInfo, IMG_FALSE);
++
++      if(psRetOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psRetOUT->eError = PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                                                 psCloseBufferClassDeviceIN->hDeviceKM,
++                                                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++
++      return 0;
++}
++
++static int
++PVRSRVGetBCInfoBW(IMG_UINT32 ui32BridgeID,
++                                PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_INFO *psGetBufferClassInfoIN,
++                                PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_INFO *psGetBufferClassInfoOUT,
++                                PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvBufClassInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO);
++
++      psGetBufferClassInfoOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvBufClassInfo, 
++                                                 psGetBufferClassInfoIN->hDeviceKM,
++                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++      if(psGetBufferClassInfoOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetBufferClassInfoOUT->eError =
++              PVRSRVGetBCInfoKM(pvBufClassInfo, 
++                                                &psGetBufferClassInfoOUT->sBufferInfo);
++      return 0;
++}
++
++static int
++PVRSRVGetBCBufferBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferIN,
++                                      PVRSRV_BRIDGE_OUT_GET_BUFFERCLASS_BUFFER *psGetBufferClassBufferOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_VOID *pvBufClassInfo;
++      IMG_HANDLE hBufferInt;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER);
++
++      psGetBufferClassBufferOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase, 
++                                                 &pvBufClassInfo, 
++                                                 psGetBufferClassBufferIN->hDeviceKM, 
++                                                 PVRSRV_HANDLE_TYPE_BUF_INFO);
++      if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetBufferClassBufferOUT->eError = 
++              PVRSRVGetBCBufferKM(pvBufClassInfo, 
++                                                      psGetBufferClassBufferIN->ui32BufferIndex,
++                                                      &hBufferInt);
++
++      if(psGetBufferClassBufferOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetBufferClassBufferOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                       &psGetBufferClassBufferOUT->hBuffer,
++                                                       hBufferInt,
++                                                       PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++                                                       (PVRSRV_HANDLE_ALLOC_FLAG)(PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE |  PVRSRV_HANDLE_ALLOC_FLAG_SHARED),
++                                                       psGetBufferClassBufferIN->hDeviceKM);
++
++      return 0;
++}
++
++static int
++PVRSRVPowerControlBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_POWER_CONTROL *psPowerControlIN,
++                                       PVRSRV_BRIDGE_OUT_POWER_CONTROL *psPowerControlOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{     
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_POWER_CONTROL);
++
++      psPowerControlOUT->eError =
++              PVRSRVPowerControlKM(psPowerControlIN->eControlMode, 
++                                                       &psPowerControlIN->ePVRPowerState);
++      psPowerControlOUT->ePVRPowerState = psPowerControlIN->ePVRPowerState;
++      return 0;
++}
++
++
++static int
++PVRSRVAllocSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                       PVRSRV_BRIDGE_IN_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemIN,
++                                                       PVRSRV_BRIDGE_OUT_ALLOC_SHARED_SYS_MEM *psAllocSharedSysMemOUT,
++                                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM);
++
++      psAllocSharedSysMemOUT->eError =
++              PVRSRVAllocSharedSysMemoryKM(psAllocSharedSysMemIN->ui32Flags,
++                                                                       psAllocSharedSysMemIN->ui32Size,
++                                                                       &psKernelMemInfo);
++      if(psAllocSharedSysMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      OSMemSet(&psAllocSharedSysMemOUT->sClientMemInfo,
++                       0,
++                       sizeof(psAllocSharedSysMemOUT->sClientMemInfo));
++
++      
++      if(psKernelMemInfo->pvLinAddrKM)
++      {
++              psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->pvLinAddrKM;
++      }
++      else
++      {
++              psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->sMemBlk.hOSMemHandle;
++      }
++      psAllocSharedSysMemOUT->sClientMemInfo.pvLinAddr = 0;
++      psAllocSharedSysMemOUT->sClientMemInfo.ui32Flags =
++              psKernelMemInfo->ui32Flags;
++      psAllocSharedSysMemOUT->sClientMemInfo.ui32AllocSize =
++              psKernelMemInfo->ui32AllocSize; 
++      psAllocSharedSysMemOUT->eError =
++              PVRSRVAllocHandle(psPerProc->psHandleBase,
++                                                &psAllocSharedSysMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                psKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++
++      return 0;
++}
++
++static int
++PVRSRVFreeSharedSysMemoryBW(IMG_UINT32 ui32BridgeID,
++                                                      PVRSRV_BRIDGE_IN_FREE_SHARED_SYS_MEM *psFreeSharedSysMemIN,
++                                                      PVRSRV_BRIDGE_OUT_FREE_SHARED_SYS_MEM *psFreeSharedSysMemOUT,
++                                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM);
++
++      psFreeSharedSysMemOUT->eError =
++              PVRSRVLookupHandle(psPerProc->psHandleBase,
++                                                 (IMG_VOID **)&psKernelMemInfo,
++                                                 psFreeSharedSysMemIN->psKernelMemInfo,
++                                                                                                                                 PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++
++      if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++              return 0;
++      
++      psFreeSharedSysMemOUT->eError =
++              PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++      if(psFreeSharedSysMemOUT->eError != PVRSRV_OK)
++              return 0;
++
++      psFreeSharedSysMemOUT->eError =
++              PVRSRVReleaseHandle(psPerProc->psHandleBase,
++                                                      psFreeSharedSysMemIN->psKernelMemInfo,
++                                                      PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO);
++      return 0;
++}
++
++static int
++PVRSRVMapMemInfoMemBW(IMG_UINT32 ui32BridgeID,
++                                        PVRSRV_BRIDGE_IN_MAP_MEMINFO_MEM *psMapMemInfoMemIN,
++                                        PVRSRV_BRIDGE_OUT_MAP_MEMINFO_MEM *psMapMemInfoMemOUT,
++                                        PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++      PVRSRV_HANDLE_TYPE eHandleType;
++      IMG_HANDLE      hParent;
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_MAP_MEMINFO_MEM);
++
++      psMapMemInfoMemOUT->eError =
++              PVRSRVLookupHandleAnyType(psPerProc->psHandleBase,
++                                                 (IMG_VOID **)&psKernelMemInfo,
++                                                 &eHandleType,
++                                                 psMapMemInfoMemIN->hKernelMemInfo);
++      if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      switch (eHandleType)
++      {
++#if defined(PVR_SECURE_HANDLES)
++              case PVRSRV_HANDLE_TYPE_MEM_INFO:
++              case PVRSRV_HANDLE_TYPE_MEM_INFO_REF:
++              case PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO:
++#else
++              case PVRSRV_HANDLE_TYPE_NONE:
++#endif
++                      break;
++              default:
++                      psMapMemInfoMemOUT->eError = PVRSRV_ERROR_GENERIC;
++                      return 0;
++      }
++
++      
++      psMapMemInfoMemOUT->eError =
++              PVRSRVGetParentHandle(psPerProc->psHandleBase,
++                                      &hParent,
++                                      psMapMemInfoMemIN->hKernelMemInfo,
++                                      eHandleType);
++      if (psMapMemInfoMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++      if (hParent == IMG_NULL)
++      {
++              hParent = psMapMemInfoMemIN->hKernelMemInfo;
++      }
++
++      OSMemSet(&psMapMemInfoMemOUT->sClientMemInfo,
++                       0,
++                       sizeof(psMapMemInfoMemOUT->sClientMemInfo));
++
++      
++      if(psKernelMemInfo->pvLinAddrKM)
++      {
++              psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->pvLinAddrKM;
++      }
++      else
++      {
++              psMapMemInfoMemOUT->sClientMemInfo.pvLinAddrKM =
++                      psKernelMemInfo->sMemBlk.hOSMemHandle;
++      }
++
++      psMapMemInfoMemOUT->sClientMemInfo.pvLinAddr = 0;
++      psMapMemInfoMemOUT->sClientMemInfo.sDevVAddr =
++              psKernelMemInfo->sDevVAddr;
++      psMapMemInfoMemOUT->sClientMemInfo.ui32Flags =
++              psKernelMemInfo->ui32Flags;
++      psMapMemInfoMemOUT->sClientMemInfo.ui32AllocSize =
++              psKernelMemInfo->ui32AllocSize; 
++      psMapMemInfoMemOUT->sClientMemInfo.hMappingInfo = IMG_NULL;
++      psMapMemInfoMemOUT->eError =
++              PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                &psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo,
++                                                psKernelMemInfo,
++                                                PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++                                                PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                hParent);
++
++      if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      if(psKernelMemInfo->ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++      {
++              
++              OSMemSet(&psMapMemInfoMemOUT->sClientSyncInfo,
++                               0,
++                               sizeof (PVRSRV_CLIENT_SYNC_INFO));
++              psMapMemInfoMemOUT->psKernelSyncInfo = IMG_NULL;
++      }
++      else
++      {
++              
++              psMapMemInfoMemOUT->sClientSyncInfo.psSyncData =
++                      psKernelMemInfo->psKernelSyncInfo->psSyncData;
++              psMapMemInfoMemOUT->sClientSyncInfo.sWriteOpsCompleteDevVAddr =
++                      psKernelMemInfo->psKernelSyncInfo->sWriteOpsCompleteDevVAddr;
++              psMapMemInfoMemOUT->sClientSyncInfo.sReadOpsCompleteDevVAddr =
++                      psKernelMemInfo->psKernelSyncInfo->sReadOpsCompleteDevVAddr;
++              psMapMemInfoMemOUT->sClientSyncInfo.hMappingInfo = IMG_NULL;
++
++              psMapMemInfoMemOUT->sClientMemInfo.psClientSyncInfo = &psMapMemInfoMemOUT->sClientSyncInfo;
++
++              psMapMemInfoMemOUT->eError =
++                      PVRSRVAllocSubHandle(psPerProc->psHandleBase,
++                                                               &psMapMemInfoMemOUT->sClientSyncInfo.hKernelSyncInfo,
++                                                               psKernelMemInfo->psKernelSyncInfo,
++                                                               PVRSRV_HANDLE_TYPE_SYNC_INFO,
++                                                               PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++                                                               psMapMemInfoMemOUT->sClientMemInfo.hKernelMemInfo);
++              if(psMapMemInfoMemOUT->eError != PVRSRV_OK)
++              {
++                      return 0;
++              }
++      }
++
++      return 0;
++}
++
++static int
++PVRSRVPollForValueBW(IMG_UINT32 ui32BridgeID,
++                                       PVRSRV_BRIDGE_IN_POLLFORVALUE *psPollForValueIN,
++                                       PVRSRV_BRIDGE_OUT_POLLFORVALUE *psPollForValueOUT,
++                                       PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_POLLFORVALUE);
++
++      psPollForValueOUT->eError =
++              PollForValueKM(psPollForValueIN->pui32CpuVAddrKM,
++                                         psPollForValueIN->ui32Value,
++                                         psPollForValueIN->ui32Mask,
++                                         psPollForValueIN->ui32Waitus,
++                                         psPollForValueIN->ui32Tries
++                                        );
++      return 0;
++}
++
++static int
++MMU_GetPDDevPAddrBW(IMG_UINT32 ui32BridgeID,
++                                      PVRSRV_BRIDGE_IN_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrIN,
++                                      PVRSRV_BRIDGE_OUT_GETMMU_PD_DEVPADDR *psGetMmuPDDevPAddrOUT,
++                                      PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      IMG_HANDLE hDevMemContextInt;
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++      PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR);
++
++      psGetMmuPDDevPAddrOUT->eError = 
++              PVRSRVLookupHandle(psPerProc->psHandleBase, &hDevMemContextInt, 
++                                                 psGetMmuPDDevPAddrIN->hDevMemContext,
++                                                 PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT);
++      if(psGetMmuPDDevPAddrOUT->eError != PVRSRV_OK)
++      {
++              return 0;
++      }
++
++      psGetMmuPDDevPAddrOUT->sPDDevPAddr =
++              MMU_GetPDDevPAddr(BM_GetMMUContextFromMemContext(hDevMemContextInt));
++      if(psGetMmuPDDevPAddrOUT->sPDDevPAddr.uiAddr)
++      {
++              psGetMmuPDDevPAddrOUT->eError = PVRSRV_OK;
++      }
++      else
++      {
++              psGetMmuPDDevPAddrOUT->eError = PVRSRV_ERROR_GENERIC;
++      }
++      return 0;
++}
++
++
++static int
++DummyBW(IMG_UINT32 ui32BridgeID,
++              IMG_VOID *psBridgeIn,
++              IMG_VOID *psBridgeOut,
++              PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++#if !defined(DEBUG)
++      PVR_UNREFERENCED_PARAMETER(ui32BridgeID);
++#endif
++      PVR_UNREFERENCED_PARAMETER(psBridgeIn);
++      PVR_UNREFERENCED_PARAMETER(psBridgeOut);
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++
++#if defined(DEBUG_BRIDGE_KM)
++      PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu (%s) mapped to "
++                       "Dummy Wrapper (probably not what you want!)",
++                       __FUNCTION__, ui32BridgeID, g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#else
++      PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: BridgeID %lu mapped to "
++                       "Dummy Wrapper (probably not what you want!)",
++                       __FUNCTION__, ui32BridgeID));
++#endif
++      return -ENOTTY;
++}
++
++
++#define SetDispatchTableEntry(ui32Index, pfFunction) \
++      _SetDispatchTableEntry(PVRSRV_GET_BRIDGE_ID(ui32Index), #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction)
++#define DISPATCH_TABLE_GAP_THRESHOLD 5
++static IMG_VOID
++_SetDispatchTableEntry(IMG_UINT32 ui32Index,
++                                         const IMG_CHAR *pszIOCName,
++                                         BridgeWrapperFunction pfFunction,
++                                         const IMG_CHAR *pszFunctionName)
++{
++      static IMG_UINT32 ui32PrevIndex = (IMG_UINT32)-1;
++#if !defined(DEBUG)
++      PVR_UNREFERENCED_PARAMETER(pszIOCName);
++#endif
++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
++      PVR_UNREFERENCED_PARAMETER(pszFunctionName);
++#endif
++
++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
++      
++      PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName));
++#endif
++
++      
++      if(g_BridgeDispatchTable[ui32Index].pfFunction)
++      {
++#if defined(DEBUG_BRIDGE_KM)
++              PVR_DPF((PVR_DBG_ERROR,
++                               "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry for %s",
++                               __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName));
++#else
++              PVR_DPF((PVR_DBG_ERROR,
++                               "%s: BUG!: Adding dispatch table entry for %s clobbers an existing entry (index=%lu)",
++                               __FUNCTION__, pszIOCName, ui32Index));
++#endif
++              PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++                              __FUNCTION__));
++      }
++
++      
++      if((ui32PrevIndex != (IMG_UINT32)-1) &&
++         (ui32Index >= ui32PrevIndex+DISPATCH_TABLE_GAP_THRESHOLD ||
++              ui32Index <= ui32PrevIndex))
++      {
++#if defined(DEBUG_BRIDGE_KM)
++              PVR_DPF((PVR_DBG_WARNING,
++                               "%s: There is a gap in the dispatch table between indices %lu (%s) and %lu (%s)",
++                               __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
++                               ui32Index, pszIOCName));
++#else
++              PVR_DPF((PVR_DBG_WARNING,
++                               "%s: There is a gap in the dispatch table between indices %lu and %lu (%s)",
++                               __FUNCTION__, ui32PrevIndex, ui32Index, pszIOCName));
++#endif
++              PVR_DPF((PVR_DBG_ERROR, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.",
++                              __FUNCTION__));
++      }
++
++      g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
++#if defined(DEBUG_BRIDGE_KM)
++      g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
++      g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
++      g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
++      g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
++#endif
++
++      ui32PrevIndex = ui32Index;
++}
++
++
++PVRSRV_ERROR
++CommonBridgeInit(IMG_VOID)
++{
++      IMG_UINT32 i;
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DEVICES, PVRSRVEnumerateDevicesBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ACQUIRE_DEVICEINFO, PVRSRVAcquireDeviceDataBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_RELEASE_DEVICEINFO, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DEVMEMCONTEXT, PVRSRVCreateDeviceMemContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DEVMEMCONTEXT, PVRSRVDestroyDeviceMemContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_DEVICEMEM, PVRSRVAllocDeviceMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEVICEMEM, PVRSRVFreeDeviceMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GETFREE_DEVICEMEM, PVRSRVGetFreeDeviceMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_COMMANDQUEUE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_COMMANDQUEUE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_KV_TO_MMAP_DATA, PVRMMapKVIndexAddressToMMapDataBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CONNECT_SERVICES, PVRSRVConnectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DISCONNECT_SERVICES, PVRSRVDisconnectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_DEVICE_MEM, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVICEMEMINFO, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_RESERVE_DEV_VIRTMEM , DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_DEV_VIRTMEM, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_EXT_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_EXT_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEV_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEV_MEMORY, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY, PVRSRVMapDeviceClassMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_DEVICECLASS_MEMORY, PVRSRVUnmapDeviceClassMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEM_INFO_TO_USER, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAP_MEM_INFO_FROM_USER, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PROCESS_SIMISR_EVENT, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_REGISTER_SIM_PROCESS, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNREGISTER_SIM_PROCESS, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAPPHYSTOUSERSPACE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNMAPPHYSTOUSERSPACE, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GETPHYSTOUSERSPACEMAP, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_POWER_CONTROL, PVRSRVPowerControlBW);
++
++      
++#if defined (SUPPORT_INT_POWER_MAN)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INT_POWER_MAN, DummyBW);
++#endif
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_FB_STATS, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_MISC_INFO, PVRSRVGetMiscInfoBW);
++
++      
++#if defined (SUPPORT_OVERLAY_ROTATE_BLIT)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INIT_3D_OVL_BLT_RES, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DEINIT_3D_OVL_BLT_RES, DummyBW);
++#endif
++
++
++      
++#if defined(PDUMP)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_INIT, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_MEMPOL, PDumpMemPolBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPMEM, PDumpMemBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REG, PDumpRegWithFlagsBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_REGPOL, PDumpRegPolBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_COMMENT, PDumpCommentBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SETFRAME, PDumpSetFrameBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_ISCAPTURING, PDumpIsCaptureFrameBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPBITMAP, PDumpBitmapBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPREADREG, PDumpReadRegBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_SYNCPOL, PDumpSyncPolBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPSYNC, PDumpSyncDumpBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DRIVERINFO, PDumpDriverInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_PDREG, PDumpPDRegBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_DUMPPDDEVPADDR, PDumpPDDevPAddrBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_BUFFER_ARRAY, PDumpBufferArrayBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP_CYCLE_COUNT_REG_READ, PDumpCycleCountRegReadBW);
++#endif 
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_OEMJTABLE, DummyBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_CLASS, PVRSRVEnumerateDCBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_DISPCLASS_DEVICE, PVRSRVOpenDCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_DISPCLASS_DEVICE, PVRSRVCloseDCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_FORMATS, PVRSRVEnumDCFormatsBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ENUM_DISPCLASS_DIMS, PVRSRVEnumDCDimsBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_SYSBUFFER, PVRSRVGetDCSystemBufferBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_INFO, PVRSRVGetDCInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CREATE_DISPCLASS_SWAPCHAIN, PVRSRVCreateDCSwapChainBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_DESTROY_DISPCLASS_SWAPCHAIN, PVRSRVDestroyDCSwapChainBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTRECT, PVRSRVSetDCDstRectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCRECT, PVRSRVSetDCSrcRectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_DSTCOLOURKEY, PVRSRVSetDCDstColourKeyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SET_DISPCLASS_SRCCOLOURKEY, PVRSRVSetDCSrcColourKeyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DISPCLASS_BUFFERS, PVRSRVGetDCBuffersBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_BUFFER, PVRSRVSwapToDCBufferBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SWAP_DISPCLASS_TO_SYSTEM, PVRSRVSwapToDCSystemBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_OPEN_BUFFERCLASS_DEVICE, PVRSRVOpenBCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_CLOSE_BUFFERCLASS_DEVICE, PVRSRVCloseBCDeviceBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_INFO, PVRSRVGetBCInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_BUFFERCLASS_BUFFER, PVRSRVGetBCBufferBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_WRAP_EXT_MEMORY, PVRSRVWrapExtMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_UNWRAP_EXT_MEMORY, PVRSRVUnwrapExtMemoryBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_ALLOC_SHARED_SYS_MEM, PVRSRVAllocSharedSysMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_FREE_SHARED_SYS_MEM, PVRSRVFreeSharedSysMemoryBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_MAP_MEMINFO_MEM, PVRSRVMapMemInfoMemBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_POLLFORVALUE, PVRSRVPollForValueBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GETMMU_PD_DEVPADDR, MMU_GetPDDevPAddrBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_GET_DEVMEMHEAPS, PVRSRVGetDeviceMemHeapsBW);
++
++      
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_CONNECT, PVRSRVInitSrvConnectBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_INITSRV_DISCONNECT, PVRSRVInitSrvDisconnectBW);
++
++              
++      SetDispatchTableEntry(PVRSRV_BRIDGE_EVENT_OBJECT_WAIT, PVRSRVEventObjectWaitBW);
++
++
++#if defined(SUPPORT_SGX1)
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETCLIENTINFO, SGXGetClientInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_RELEASECLIENTINFO, SGXReleaseClientInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETINTERNALDEVINFO, SGXGetInternalDevInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DOKICK, SGXDoKickBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETPHYSPAGEADDR, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_READREGISTRYDWORD, DummyBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SCHEDULECOMMAND, DummyBW);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUEUEBLT, SGX2DQueueBlitBW);
++#if defined(SGX2D_DIRECT_BLITS)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DDIRECTBLT, SGX2DDirectBlitBW);
++#endif 
++#endif 
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE, SGX2DQueryBlitsCompleteBW);
++#endif 
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMMUPDADDR, DummyBW);
++
++#if defined(TRANSFER_QUEUE)
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_SUBMITTRANSFER, SGXSubmitTransferBW);
++#endif        
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_GETMISCINFO, SGXGetMiscInfoBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGXINFO_FOR_SRVINIT , SGXGetInfoForSrvinitBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_DEVINITPART2, SGXDevInitPart2BW);
++
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FINDSHAREDPBDESC, SGXFindSharedPBDescBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREFSHAREDPBDESC, SGXUnrefSharedPBDescBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_ADDSHAREDPBDESC, SGXAddSharedPBDescBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_REGISTER_HW_RENDER_CONTEXT, SGXRegisterHWRenderContextBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_FLUSH_HW_RENDER_TARGET, SGXFlushHWRenderTargetBW);
++      SetDispatchTableEntry(PVRSRV_BRIDGE_SGX_UNREGISTER_HW_RENDER_CONTEXT, SGXUnregisterHWRenderContextBW);
++
++#endif 
++
++
++      
++      
++      for(i=0;i<BRIDGE_DISPATCH_TABLE_ENTRY_COUNT;i++)
++      {
++              if(!g_BridgeDispatchTable[i].pfFunction)
++              {
++                      g_BridgeDispatchTable[i].pfFunction = DummyBW;
++#if defined(DEBUG_BRIDGE_KM)
++                      g_BridgeDispatchTable[i].pszIOCName = "_PVRSRV_BRIDGE_DUMMY";
++                      g_BridgeDispatchTable[i].pszFunctionName = "DummyBW";
++                      g_BridgeDispatchTable[i].ui32CallCount = 0;
++                      g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
++                      g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
++#endif
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++
++int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++					  PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM)
++ {
++
++	IMG_VOID   * psBridgeIn;
++	IMG_VOID   * psBridgeOut;
++	BridgeWrapperFunction pfBridgeHandler;
++	IMG_UINT32   ui32BridgeID = psBridgePackageKM->ui32BridgeID;
++	int          err          = -EFAULT;
++
++	if(ui32BridgeID >= (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT))
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: ui32BridgeID = %d is out of range!",
++				 __FUNCTION__, ui32BridgeID));
++		goto return_fault;
++	}
++#if defined(DEBUG_TRACE_BRIDGE_KM)
++	PVR_DPF((PVR_DBG_ERROR, "%s: %s",
++			 __FUNCTION__,
++			 g_BridgeDispatchTable[ui32BridgeID].pszIOCName));
++#endif
++
++#if defined(DEBUG_BRIDGE_KM)
++	g_BridgeDispatchTable[ui32BridgeID].ui32CallCount++;
++	g_BridgeGlobalStats.ui32IOCTLCount++;
++#endif
++
++	if(!psPerProc->bInitProcess)
++	{
++		if(gbInitServerRan)
++		{
++			if(!gbInitServerSuccessful)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation failed.  Driver unusable.",
++						 __FUNCTION__));
++				goto return_fault;
++			}
++		}
++		else
++		{
++			if(gbInitServerRunning)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation is in progress",
++						 __FUNCTION__));
++				goto return_fault;
++			}
++			else
++			{
++				
++				switch(ui32BridgeID)
++				{
++					case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES):
++					case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_DISCONNECT_SERVICES):
++					case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_CONNECT):
++					case PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_INITSRV_DISCONNECT):
++						break;
++					default:
++						PVR_DPF((PVR_DBG_ERROR, "%s: Driver initialisation not completed yet.",
++								 __FUNCTION__));
++						goto return_fault;
++				}
++			}
++		}
++	}
++
++
++
++#if defined(__linux__)
++	{
++		
++		SYS_DATA *psSysData;
++
++		if(SysAcquireData(&psSysData) != PVRSRV_OK)
++		{
++			goto return_fault;
++		}
++
++		
++		psBridgeIn = ((ENV_DATA *)psSysData->pvEnvSpecificData)->pvBridgeData;
++		psBridgeOut = (IMG_PVOID)((IMG_PBYTE)psBridgeIn + PVRSRV_MAX_BRIDGE_IN_SIZE);
++
++		if(psBridgePackageKM->ui32InBufferSize > 0)
++		{
++			if(!OSAccessOK(PVR_VERIFY_READ,
++							psBridgePackageKM->pvParamIn,
++							psBridgePackageKM->ui32InBufferSize))
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pvParamIn pointer", __FUNCTION__));
++			}
++
++			if(CopyFromUserWrapper(psPerProc,
++						   ui32BridgeID,
++							   psBridgeIn,
++							   psBridgePackageKM->pvParamIn,
++							   psBridgePackageKM->ui32InBufferSize)
++			  != PVRSRV_OK)
++			{
++				goto return_fault;
++			}
++		}
++	}
++#else
++	psBridgeIn  = psBridgePackageKM->pvParamIn;
++	psBridgeOut = psBridgePackageKM->pvParamOut;
++#endif
++
++	pfBridgeHandler =
++		(BridgeWrapperFunction)g_BridgeDispatchTable[ui32BridgeID].pfFunction;
++	err = pfBridgeHandler(ui32BridgeID,
++						  psBridgeIn,
++						  psBridgeOut,
++						  psPerProc);
++	if(err < 0)
++	{
++		goto return_fault;
++	}
++
++
++#if defined(__linux__)	
++	
++	if(CopyToUserWrapper(psPerProc, 
++						 ui32BridgeID,
++						 psBridgePackageKM->pvParamOut,
++						 psBridgeOut,
++						 psBridgePackageKM->ui32OutBufferSize)
++	   != PVRSRV_OK)
++	{
++		goto return_fault;
++	}
++#endif
++
++	return 0;
++
++return_fault:
++	return err;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h
+--- git/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/bridged/bridged_pvr_bridge.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,91 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __BRIDGED_PVR_BRIDGE_H__
++#define __BRIDGED_PVR_BRIDGE_H__
++
++#include "pvr_bridge.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(__linux__)
++#define PVRSRV_GET_BRIDGE_ID(X)	_IOC_NR(X)
++#else
++#define PVRSRV_GET_BRIDGE_ID(X)	((X) - PVRSRV_IOWR(PVRSRV_BRIDGE_CORE_CMD_FIRST))
++#endif
++
++typedef int (*BridgeWrapperFunction)(IMG_UINT32 ui32BridgeID,
++                                                                       IMG_VOID *psBridgeIn,
++                                                                       IMG_VOID *psBridgeOut,
++                                                                       PVRSRV_PER_PROCESS_DATA *psPerProc);
++
++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
++{
++      BridgeWrapperFunction pfFunction; 
++#if defined(DEBUG_BRIDGE_KM)
++      const IMG_CHAR *pszIOCName; 
++      const IMG_CHAR *pszFunctionName; 
++      IMG_UINT32 ui32CallCount; 
++      IMG_UINT32 ui32CopyFromUserTotalBytes; 
++      IMG_UINT32 ui32CopyToUserTotalBytes; 
++#endif
++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
++
++
++#if defined(SUPPORT_SGX1)
++#define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_LAST_SGX_CMD+1)
++#else
++#error "FIXME: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT unset"
++#endif
++
++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
++
++
++#if defined(DEBUG_BRIDGE_KM)
++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
++{
++      IMG_UINT32 ui32IOCTLCount;
++      IMG_UINT32 ui32TotalCopyFromUserBytes;
++      IMG_UINT32 ui32TotalCopyToUserBytes;
++}PVRSRV_BRIDGE_GLOBAL_STATS;
++
++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
++#endif
++
++
++PVRSRV_ERROR CommonBridgeInit(IMG_VOID);
++
++int BridgedDispatchKM(PVRSRV_PER_PROCESS_DATA * psPerProc,
++                                        PVRSRV_BRIDGE_PACKAGE   * psBridgePackageKM);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/buffer_manager.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1761 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#include "sysconfig.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++
++#define MIN(a,b)       (((a) > (b)) ? (b) : (a))
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags);
++static void
++BM_FreeMemory (void *pH, IMG_UINTPTR_T base, BM_MAPPING *psMapping);
++static IMG_BOOL
++BM_ImportMemory(void *pH, IMG_SIZE_T uSize,
++                                      IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
++                                      IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase);
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++                              BM_MAPPING *pMapping, 
++                              IMG_SIZE_T *pActualSize,
++                              IMG_UINT32 uFlags,
++                              IMG_UINT32 dev_vaddr_alignment,
++                              IMG_DEV_VIRTADDR *pDevVAddr);
++static void
++DevMemoryFree (BM_MAPPING *pMapping);
++
++static IMG_BOOL
++AllocMemory (BM_CONTEXT                               *pBMContext,
++                              BM_HEAP                         *psBMHeap,
++                              IMG_DEV_VIRTADDR        *psDevVAddr,
++                              IMG_SIZE_T                      uSize,
++                              IMG_UINT32                      uFlags,
++                              IMG_UINT32                      uDevVAddrAlignment,
++                              BM_BUF                          *pBuf)
++{
++      BM_MAPPING                      *pMapping;
++      IMG_UINTPTR_T           uOffset;
++      RA_ARENA                        *pArena = IMG_NULL;
++
++      PVR_UNREFERENCED_PARAMETER(pBMContext);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "AllocMemory (pBMContext=%08X, uSize=0x%x, uFlags=0x%x, align=0x%x, pBuf=%08X)",
++                        pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf));
++
++      
++
++
++      if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++      {
++              if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));                  
++                      return IMG_FALSE;
++              }
++
++              
++
++              
++              if(psBMHeap->ui32Attribs
++                 &    (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++                 |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++              {
++                      
++                      pArena = psBMHeap->pImportArena;
++              }
++              else
++              {
++                      PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
++                      return IMG_FALSE;
++              }
++
++              
++              if (!RA_Alloc(pArena,
++                                        uSize,
++                                        IMG_NULL,
++                                        (void*) &pMapping,
++                                        uFlags,
++                                        uDevVAddrAlignment,
++                                        0,
++                                        (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
++                      return IMG_FALSE;
++              }
++
++              uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
++              if(pMapping->CpuVAddr)
++              {
++                      pBuf->CpuVAddr = (void*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
++              }
++              else
++              {
++                      pBuf->CpuVAddr = IMG_NULL;
++              }
++
++              if(uSize == pMapping->uSize)
++              {
++                      pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++              }
++              else
++              {
++                      if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++                                                               uOffset,
++                                                               uSize,
++                                                               psBMHeap->ui32Attribs,
++                                                               &pBuf->hOSMemHandle)!=PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
++                              return IMG_FALSE;
++                      }
++              }
++
++              
++              pBuf->CpuPAddr = pMapping->CpuPAddr;
++
++              if(uFlags & PVRSRV_MEM_ZERO)
++              {
++                      if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
++                      {
++                              return IMG_FALSE;
++                      }
++              }
++      }
++      else
++      {
++              if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++              {
++                      
++                      PVR_ASSERT(psDevVAddr != IMG_NULL);
++
++                      
++                      pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++                                                                                                      uSize,
++                                                                                                      IMG_NULL,
++                                                                                                      PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
++                                                                                                      uDevVAddrAlignment,
++                                                                                                      psDevVAddr);
++
++                      
++                      pBuf->DevVAddr = *psDevVAddr;
++              }
++              else
++              {
++                      
++
++                      
++                      pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
++                                                                                                      uSize,
++                                                                                                      IMG_NULL,
++                                                                                                      0,
++                                                                                                      uDevVAddrAlignment,
++                                                                                                      &pBuf->DevVAddr);
++              }
++
++              
++              if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                      sizeof (struct _BM_MAPPING_),
++                                                      (IMG_PVOID *)&pMapping, IMG_NULL) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED"));
++                      return IMG_FALSE;
++              }
++
++              
++              pBuf->CpuVAddr = IMG_NULL;
++              pBuf->hOSMemHandle = 0;
++              pBuf->CpuPAddr.uiAddr = 0;
++
++              
++              pMapping->CpuVAddr = IMG_NULL;
++              pMapping->CpuPAddr.uiAddr = 0;
++              pMapping->DevVAddr = pBuf->DevVAddr;
++              pMapping->psSysAddr = IMG_NULL;
++              pMapping->uSize = uSize;
++              pMapping->hOSMemHandle = 0;
++      }
++
++      
++      pMapping->pArena = pArena;
++
++      
++      pMapping->pBMHeap = psBMHeap;
++      pBuf->pMapping = pMapping;
++
++      
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "AllocMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pMapping,
++                              pMapping->DevVAddr.uiAddr,
++                              pMapping->CpuVAddr,
++                              pMapping->CpuPAddr.uiAddr,
++                              pMapping->uSize));
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "AllocMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pBuf,
++                              pBuf->DevVAddr.uiAddr,
++                              pBuf->CpuVAddr,
++                              pBuf->CpuPAddr.uiAddr,
++                              uSize));
++
++      
++      PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);
++
++      return IMG_TRUE;
++}
++
++
++static IMG_BOOL
++WrapMemory (BM_HEAP *psBMHeap,
++                      IMG_SIZE_T uSize,
++                      IMG_UINT32 ui32BaseOffset,
++                      IMG_BOOL bPhysContig,
++                      IMG_SYS_PHYADDR *psAddr,
++                      IMG_VOID *pvCPUVAddr,
++                      IMG_UINT32 uFlags,
++                      BM_BUF *pBuf)
++{
++      IMG_DEV_VIRTADDR DevVAddr = {0};
++      BM_MAPPING *pMapping;
++      IMG_BOOL bResult;
++      IMG_UINT32 const ui32PageSize = HOST_PAGESIZE();
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
++                        psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr, uFlags, pBuf));
++
++      PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);
++      
++      PVR_ASSERT(((IMG_UINT32)pvCPUVAddr & (ui32PageSize - 1)) == 0);
++
++      uSize += ui32BaseOffset;
++      uSize = HOST_PAGEALIGN (uSize);
++
++      
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(*pMapping),
++                                              (IMG_PVOID *)&pMapping, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
++              return IMG_FALSE;
++      }
++
++      OSMemSet(pMapping, 0, sizeof (*pMapping));
++      
++      pMapping->uSize = uSize;
++      pMapping->pBMHeap = psBMHeap;
++
++      if(!bPhysContig)
++      {
++              pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
++              pMapping->psSysAddr = psAddr;
++              
++              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: Non phys-contig mapping starting at %p",psAddr[0]));
++      }
++      else
++      {
++              pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);
++
++              if(pvCPUVAddr)
++              {
++                      pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
++                      pMapping->CpuVAddr = pvCPUVAddr;
++              
++                      if(OSRegisterMem(pMapping->CpuPAddr, 
++                                                      pMapping->CpuVAddr,
++                                                      pMapping->uSize,
++                                                      uFlags,
++                                                      &pMapping->hOSMemHandle) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: RegisterMem Phys=0x%08X, CpuVAddr = 0x%08X, Size=%d) failed",
++                                      pMapping->CpuPAddr, pMapping->CpuVAddr, pMapping->uSize));
++                              goto fail_cleanup;
++                      }
++
++              }
++              else
++              {
++                      pMapping->eCpuMemoryOrigin = hm_wrapped;
++
++                      if(OSReservePhys(pMapping->CpuPAddr,
++                                                       pMapping->uSize,
++                                                       uFlags,
++                                                       &pMapping->CpuVAddr,
++                                                       &pMapping->hOSMemHandle) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "WrapMemory: Reserve/Map Phys=0x%08X, Size=%d) failed",
++                                      pMapping->CpuPAddr, pMapping->uSize));
++                              goto fail_cleanup;
++                      }
++              }
++      }
++
++
++      
++      bResult = DevMemoryAlloc(psBMHeap->pBMContext,
++                                                       pMapping,
++                                                       IMG_NULL,
++                                                       uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
++                                                       ui32PageSize,
++                                                       &DevVAddr);
++      if (!bResult)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "WrapMemory: DevMemoryAlloc(0x%x) failed",
++                              pMapping->uSize));
++              goto fail_cleanup;
++      }
++
++      
++      pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
++      if(!ui32BaseOffset)
++      {
++              pBuf->hOSMemHandle = pMapping->hOSMemHandle;
++      }
++      else
++      {
++              if(OSGetSubMemHandle(pMapping->hOSMemHandle,
++                                                       ui32BaseOffset,
++                                                       (pMapping->uSize-ui32BaseOffset),
++                                                       uFlags,
++                                                       &pBuf->hOSMemHandle)!=PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
++                      goto fail_cleanup;
++              }
++      }
++      if(pMapping->CpuVAddr)
++      {
++              pBuf->CpuVAddr = (void*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
++      }
++      pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + ui32BaseOffset;
++
++      if(uFlags & PVRSRV_MEM_ZERO)
++      {
++              if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
++              {
++                      return IMG_FALSE;
++              }
++      }
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "WrapMemory: pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pMapping, pMapping->DevVAddr.uiAddr,
++                              pMapping->CpuVAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                              "WrapMemory: pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
++                              pBuf, pBuf->DevVAddr.uiAddr,
++                              pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr, uSize));
++
++      pBuf->pMapping = pMapping;
++      return IMG_TRUE;
++
++fail_cleanup:
++      if(ui32BaseOffset && pBuf->hOSMemHandle)
++      {
++              OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
++      }
++
++      if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++      {
++              if(pMapping->eCpuMemoryOrigin == hm_wrapped)
++              {
++                      OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++              }
++              else if(pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++              {
++                      OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++
++      return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags)
++{
++      IMG_VOID *pvCpuVAddr;
++
++      if(pBuf->CpuVAddr)
++      {
++              OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
++      }
++      else if(pMapping->eCpuMemoryOrigin == hm_contiguous
++                      || pMapping->eCpuMemoryOrigin == hm_wrapped)
++      {
++              pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
++                                                                      ui32Bytes,
++                                                                      PVRSRV_HAP_KERNEL_ONLY
++                                                                      | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                                                      IMG_NULL);
++              if(!pvCpuVAddr)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
++                      return IMG_FALSE;
++              }
++              OSMemSet(pvCpuVAddr, 0, ui32Bytes);
++              OSUnMapPhysToLin(pvCpuVAddr,
++                                               ui32Bytes,
++                                               PVRSRV_HAP_KERNEL_ONLY
++                                               | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                               IMG_NULL);
++      }
++      else
++      {
++              IMG_UINT32 ui32BytesRemaining = ui32Bytes;
++              IMG_UINT32 ui32CurrentOffset = 0;
++              IMG_CPU_PHYADDR CpuPAddr;
++
++              
++              PVR_ASSERT(pBuf->hOSMemHandle);
++
++              CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, 0);
++
++              while(ui32BytesRemaining > 0)
++              {
++                      IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
++                      CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
++                      
++                      if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
++                      {
++                              ui32BlockBytes =
++                                      MIN(ui32BytesRemaining, HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++                      }
++
++                      pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
++                                                                              ui32BlockBytes,
++                                                                              PVRSRV_HAP_KERNEL_ONLY
++                                                                              | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                                                              IMG_NULL);
++                      if(!pvCpuVAddr)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
++                              return IMG_FALSE;
++                      }
++                      OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
++                      OSUnMapPhysToLin(pvCpuVAddr,
++                                                       ui32BlockBytes,
++                                                       PVRSRV_HAP_KERNEL_ONLY
++                                                       | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
++                                                       IMG_NULL);
++
++                      ui32BytesRemaining -= ui32BlockBytes;
++                      ui32CurrentOffset += ui32BlockBytes;
++              }
++              PVR_ASSERT(ui32BytesRemaining == 0);
++      }
++
++      return IMG_TRUE;
++}
++
++/* FreeBuf
++ *
++ * Release a BM_BUF and the backing resources selected by ui32Flags,
++ * then free the BM_BUF structure itself.
++ *
++ * NOTE(review): pBuf->pMapping is dereferenced unconditionally -
++ * presumably every BM_BUF is given a mapping by AllocMemory/WrapMemory;
++ * confirm.
++ */
++static void
++FreeBuf (BM_BUF *pBuf, IMG_UINT32 ui32Flags)
++{
++	BM_MAPPING *pMapping;
++
++	PVR_DPF ((PVR_DBG_MESSAGE,
++			"FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
++			pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));
++
++	/* Pick up the mapping bookkeeping before any of it is freed. */
++	pMapping = pBuf->pMapping;
++
++	if(ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
++	{
++		/* The caller manages the device virtual address itself. */
++		if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++		{
++			/* Unsupported combination: report rather than guess. */
++			PVR_DPF ((PVR_DBG_ERROR, "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
++		}
++		else
++		{
++			/* Only the mapping structure was allocated in this mode. */
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++		}
++	}
++	else
++	{
++		/* Sub-allocations carry their own OS memory handle. */
++		if(pBuf->hOSMemHandle != pMapping->hOSMemHandle)
++		{
++			OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
++		}
++		if(ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
++		{
++			/* RAM-backed: hand the device-virtual range back to the
++			 * resource arena; the import keeps the mapping alive. */
++			RA_Free (pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr, IMG_FALSE);
++		}
++		else
++		{
++			if(pMapping->eCpuMemoryOrigin == hm_wrapped)
++			{
++				OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++			}
++			else if(pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++			{
++				OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, ui32Flags, pMapping->hOSMemHandle);
++			}
++			/* Unmap from the device MMU, then drop the mapping. */
++			DevMemoryFree (pMapping);
++
++			
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++		}
++	}
++
++	/* The buffer structure itself is always freed last. */
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
++}
++
++
++
++/* BM_DestroyContext
++ *
++ * Tear down a buffer-manager context: delete each heap's import arena
++ * and MMU heap, finalise the MMU context, delete the buffer hash, and
++ * free the context storage.  The kernel context is freed directly;
++ * per-process contexts are unlinked from the device list and, unless
++ * this call IS the ResMan callback, released through ResMan.
++ *
++ * hBMContext      - context to destroy.
++ * bKernelContext  - IMG_TRUE for the device's singleton kernel context.
++ * bResManCallback - IMG_TRUE when invoked from the ResMan cleanup hook.
++ * pbDestroyed     - optional; set IMG_TRUE only if the context was
++ *                   actually destroyed (a refcounted context may survive).
++ *
++ * Returns PVRSRV_OK, or an error code on invalid handle / heap /
++ * ResMan failure.
++ */
++PVRSRV_ERROR
++BM_DestroyContext(IMG_HANDLE hBMContext,
++				  IMG_BOOL bKernelContext,
++				  IMG_BOOL bResManCallback,
++				  IMG_BOOL *pbDestroyed)
++{
++	BM_CONTEXT **ppBMContext;
++	BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++	BM_HEAP *psBMHeap, *psTmpBMHeap;
++	DEVICE_MEMORY_INFO *psDevMemoryInfo;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++
++	PVR_DPF ((PVR_DBG_MESSAGE, "BM_DestroyContext"));
++
++	if (pbDestroyed != IMG_NULL)
++	{
++		*pbDestroyed = IMG_FALSE;
++	}
++
++	/* Validate the handle before touching it. */
++
++	if (pBMContext == IMG_NULL)
++	{
++		PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: Invalid handle"));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	
++
++	psDeviceNode = pBMContext->psDeviceNode;
++
++	/* Per-process contexts are refcounted: only the last reference
++	 * (or the ResMan callback) proceeds with the teardown. */
++	if((!bKernelContext) && (pBMContext->ui32RefCount))
++	{
++		pBMContext->ui32RefCount--;
++
++		if(pBMContext->ui32RefCount > 0 && !bResManCallback)
++		{
++			/* Other owners remain; nothing to destroy yet. */
++			return PVRSRV_OK;
++		}
++	}
++
++	/* Walk the heap list, releasing each heap's resources. */
++
++	psBMHeap = pBMContext->psBMHeap;
++	while(psBMHeap)
++	{
++		
++		if(psBMHeap->ui32Attribs 
++		&	(PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++			|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++		{
++			if (psBMHeap->pImportArena)
++			{
++				RA_Delete (psBMHeap->pImportArena);
++			}
++		}
++		else
++		{
++			/* NOTE(review): returning mid-walk leaves the remaining
++			 * heaps, the MMU context and the buffer hash un-freed -
++			 * confirm this partial teardown is intended. */
++			PVR_DPF((PVR_DBG_ERROR, "BM_DestroyContext: backing store type unsupported"));
++			return PVRSRV_ERROR_GENERIC;
++		}
++
++		/* Drop the heap's MMU page tables. */
++		psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++
++		
++		psTmpBMHeap = psBMHeap;
++
++		/* Advance before freeing the node we stand on. */
++		psBMHeap = psBMHeap->psNext;
++
++		
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psTmpBMHeap, IMG_NULL);
++	}
++
++	/* Release the MMU context itself. */
++
++	if (pBMContext->psMMUContext)
++	{
++		psDeviceNode->pfnMMUFinalise(pBMContext->psMMUContext);
++	}
++	
++	/* Only the kernel context allocates a buffer hash (see
++	 * BM_CreateContext), but guard anyway. */
++
++	if (pBMContext->pBufferHash)
++	{
++		HASH_Delete (pBMContext->pBufferHash);
++	}
++
++	
++
++	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++	/* Finally free the context storage itself. */
++	if(bKernelContext)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDevMemoryInfo->pBMKernelContext, IMG_NULL);
++		psDevMemoryInfo->pBMKernelContext = IMG_NULL;
++	}
++	else
++	{
++		ppBMContext = &psDevMemoryInfo->pBMContext;
++		while(*ppBMContext)
++		{
++			if(*ppBMContext == pBMContext)
++			{
++				/* Unlink from the device's context list. */
++				*ppBMContext = pBMContext->psNext;
++
++				/* Outside the ResMan callback, release via ResMan
++				 * (IMG_FALSE: do not re-enter the callback). */
++				if(!bResManCallback && pBMContext->hResItem)
++				{
++					PVRSRV_ERROR eError;
++
++					eError = ResManFreeResByPtr(pBMContext->hResItem, IMG_FALSE);
++
++					if (eError != PVRSRV_OK)
++					{
++						PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyContext: ResManFreeResByPtr failed %d",eError));
++						return eError;
++					}
++				}
++
++				OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pBMContext, IMG_NULL);
++				break;
++			}
++			ppBMContext = &((*ppBMContext)->psNext);
++		}
++	}
++	
++	if (pbDestroyed != IMG_NULL)
++	{
++		*pbDestroyed = IMG_TRUE;
++	}
++
++	return PVRSRV_OK;
++}
++
++/* ResMan cleanup hook: forwards to BM_DestroyContext for the context
++ * carried in pvParam.  The process ID and extra parameter are unused. */
++static PVRSRV_ERROR BM_DestroyContextCallBack(IMG_UINT32 ui32ProcessID,
++										  IMG_PVOID pvParam,
++										  IMG_UINT32 ui32Param)
++{
++	PVR_UNREFERENCED_PARAMETER (ui32Param);
++	PVR_UNREFERENCED_PARAMETER (ui32ProcessID);
++
++	return BM_DestroyContext((BM_CONTEXT *)pvParam, IMG_FALSE, IMG_TRUE, IMG_NULL);
++}
++
++
++/* BM_CreateContext
++ *
++ * Create (or look up) a buffer-manager context for a device.
++ *  - kernel context: a singleton, returned directly if it exists;
++ *  - per-process context: the device's context list is scanned via
++ *    ResManFindResourceByPtr and a context owned by the caller is
++ *    re-used with its refcount bumped.
++ * A newly created context gets an MMU context, inherits the kernel
++ * context's shared heaps into its page directory, and (non-kernel)
++ * registers with ResMan for cleanup.
++ *
++ * Returns the context handle, or IMG_NULL on failure.  *pbCreated
++ * (optional) reports whether a new context was created vs re-used.
++ */
++IMG_HANDLE 
++BM_CreateContext(PVRSRV_DEVICE_NODE *psDeviceNode,
++				 IMG_DEV_PHYADDR *psPDDevPAddr,
++				 IMG_BOOL bKernelContext,
++				 IMG_BOOL *pbCreated)
++{
++	BM_CONTEXT *pBMContext;
++	BM_HEAP *psBMHeap;
++	DEVICE_MEMORY_INFO *psDevMemoryInfo;
++
++	PVR_DPF ((PVR_DBG_MESSAGE, "BM_CreateContext"));
++
++	if (pbCreated != IMG_NULL)
++	{
++		*pbCreated = IMG_FALSE;
++	}
++
++	
++	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++
++	/* The kernel context is a singleton: hand back the existing one. */
++
++
++	if(bKernelContext && psDevMemoryInfo->pBMKernelContext)
++	{
++		
++		return (IMG_HANDLE)psDevMemoryInfo->pBMKernelContext;
++	}
++	
++	pBMContext = psDevMemoryInfo->pBMContext;
++
++	/* Re-use a context already owned by this process, if any. */
++	while(pBMContext)
++	{
++		if(ResManFindResourceByPtr(pBMContext->hResItem) == PVRSRV_OK)
++		{
++			
++			pBMContext->ui32RefCount++;
++
++			return (IMG_HANDLE)pBMContext;
++		}
++
++		pBMContext = pBMContext->psNext;
++	}
++
++	
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++					 sizeof (struct _BM_CONTEXT_),
++					 (IMG_PVOID *)&pBMContext, IMG_NULL) != PVRSRV_OK)
++	{
++		PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: Alloc failed"));
++		return IMG_NULL;
++	}
++	OSMemSet (pBMContext, 0, sizeof (BM_CONTEXT));
++
++	
++	pBMContext->psDeviceNode = psDeviceNode;
++
++	/* Only the kernel context owns the wrap-dedup buffer hash. */
++	if(bKernelContext)
++	{
++		
++		pBMContext->pBufferHash = HASH_Create (32);
++		if (pBMContext->pBufferHash==IMG_NULL)
++		{
++			PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: HASH_Create failed"));
++			goto cleanup;
++		}
++	}
++
++	if(psDeviceNode->pfnMMUInitialise(psDeviceNode,
++										&pBMContext->psMMUContext,
++										psPDDevPAddr) != PVRSRV_OK)
++	{
++		PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: MMUInitialise failed"));
++		goto cleanup;
++	}
++
++	if(bKernelContext)
++	{
++		
++		psDevMemoryInfo->pBMKernelContext = pBMContext;
++	}
++	else
++	{
++		/* The kernel context (and its heaps) must exist before any
++		 * per-process context can share them. */
++
++
++
++
++		PVR_ASSERT(psDevMemoryInfo->pBMKernelContext);
++		PVR_ASSERT(psDevMemoryInfo->pBMKernelContext->psBMHeap);
++
++		/* Shared heaps are borrowed from the kernel context, not
++		 * duplicated. */
++
++
++
++		pBMContext->psBMSharedHeap = psDevMemoryInfo->pBMKernelContext->psBMHeap;
++		
++		/* Map every shared heap into this context's page directory. */
++
++
++		psBMHeap = pBMContext->psBMSharedHeap;
++		while(psBMHeap)
++		{
++			switch(psBMHeap->sDevArena.DevMemHeapType)
++			{
++				case DEVICE_MEMORY_HEAP_SHARED:
++				case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++				{
++					
++					psDeviceNode->pfnMMUInsertHeap(pBMContext->psMMUContext, psBMHeap->pMMUHeap);
++					break;
++				}
++			}
++			
++			psBMHeap = psBMHeap->psNext;
++		}
++
++		/* Register for automatic cleanup on process death. */
++		pBMContext->hResItem = ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_CONTEXT,
++												pBMContext,
++												0,
++												BM_DestroyContextCallBack,
++												0);
++		if (pBMContext->hResItem == IMG_NULL)
++		{
++			PVR_DPF ((PVR_DBG_ERROR, "BM_CreateContext: ResManRegisterRes failed"));
++			goto cleanup;
++		}
++
++		
++		pBMContext->ui32RefCount++;
++
++		/* Link into the device's context list. */
++		pBMContext->psNext = psDevMemoryInfo->pBMContext;
++		psDevMemoryInfo->pBMContext = pBMContext;
++	}
++
++	if (pbCreated != IMG_NULL)
++	{
++		*pbCreated = IMG_TRUE;
++	}
++	return (IMG_HANDLE)pBMContext;
++
++cleanup:
++	/* NOTE(review): BM_DestroyContext can free context storage on some
++	 * of its paths; the unconditional OSFreeMem below looks like a
++	 * potential double free - confirm the failure paths reaching here. */
++
++
++	BM_DestroyContext(pBMContext, bKernelContext, IMG_FALSE, IMG_NULL);
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pBMContext, IMG_NULL);
++
++	return IMG_NULL;
++}
++
++
++/* BM_CreateHeap
++ *
++ * Create a device-memory heap inside a buffer-manager context, or
++ * return the already-created heap with the same heap ID when the
++ * context is shared (ui32RefCount > 1).
++ *
++ * hBMContext       - buffer manager context handle.
++ * psDevMemHeapInfo - static description of the heap to instantiate.
++ *
++ * Returns the heap handle, or IMG_NULL on failure.
++ *
++ * Fix: pBMContext->psDeviceNode was dereferenced in the declarations
++ * BEFORE the NULL check on pBMContext, so a NULL context handle
++ * crashed before it could be rejected; the device node is now read
++ * only after the guard.
++ */
++IMG_HANDLE
++BM_CreateHeap (IMG_HANDLE hBMContext,
++			   DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
++{
++	BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++	BM_HEAP *psBMHeap;
++
++	PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));
++
++	if(!pBMContext)
++	{
++		return IMG_NULL;
++	}
++
++	psDeviceNode = pBMContext->psDeviceNode;
++
++	/* A shared context may already carry this heap: re-use it. */
++	if(pBMContext->ui32RefCount > 1)
++	{
++		psBMHeap = pBMContext->psBMHeap;
++
++		while(psBMHeap)
++		{
++			if(psBMHeap->sDevArena.ui32HeapID ==  psDevMemHeapInfo->ui32HeapID)
++			
++			{
++				
++				return psBMHeap;
++			}
++			psBMHeap = psBMHeap->psNext;
++		}
++	}
++
++
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++						sizeof (BM_HEAP),
++						(IMG_PVOID *)&psBMHeap, IMG_NULL) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
++		return IMG_NULL;
++	}
++
++	OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));
++
++	/* Copy the static heap description into the arena descriptor. */
++	psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
++	psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
++	psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
++	psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
++	psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
++	psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
++	psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;
++
++	
++	psBMHeap->pBMContext = pBMContext;
++
++	psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
++													&psBMHeap->sDevArena,
++													&psBMHeap->pVMArena);
++	if (!psBMHeap->pMMUHeap)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
++		goto ErrorExit;
++	}
++
++	/* Import arena: backs the heap via BM_ImportMemory/BM_FreeMemory
++	 * in host-page-sized quanta. */
++	psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
++										0, 0, IMG_NULL,
++										HOST_PAGESIZE(),
++										BM_ImportMemory, 
++										BM_FreeMemory, 
++										IMG_NULL,
++										psBMHeap);
++	if(psBMHeap->pImportArena == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
++		goto ErrorExit;
++	}
++
++	if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++	{
++		/* Local-memory heaps draw pages from the device-local arena
++		 * supplied by the system layer; it must exist. */
++		psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
++		if(psBMHeap->pLocalDevMemArena == IMG_NULL)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
++			goto ErrorExit;
++		}
++	}
++
++	/* Link into the context's heap list. */
++	psBMHeap->psNext = pBMContext->psBMHeap;
++	pBMContext->psBMHeap = psBMHeap;
++
++	return (IMG_HANDLE)psBMHeap;
++
++	
++ErrorExit:
++
++	/* Undo the MMU heap if it was created. */
++	if (psBMHeap->pMMUHeap != IMG_NULL)
++	{
++		psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++		psDeviceNode->pfnMMUFinalise (pBMContext->psMMUContext);
++	}
++
++	
++	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBMHeap, IMG_NULL);
++
++	return IMG_NULL;
++}
++
++/* BM_DestroyHeap
++ *
++ * Destroy a heap created by BM_CreateHeap: delete its import arena
++ * and MMU heap, then unlink and free the BM_HEAP structure.
++ *
++ * Fix: psBMHeap->pBMContext->psDeviceNode was dereferenced in the
++ * declarations BEFORE the NULL check on psBMHeap, so the invalid-handle
++ * path below could never be reached without crashing first; the device
++ * node is now read only inside the guard.
++ */
++IMG_VOID
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap)
++{
++	BM_HEAP* psBMHeap = (BM_HEAP*)hDevMemHeap;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++
++	PVR_DPF((PVR_DBG_MESSAGE, "BM_DestroyHeap"));
++
++	if(psBMHeap)
++	{
++		BM_HEAP **ppsBMHeap;
++
++		psDeviceNode = psBMHeap->pBMContext->psDeviceNode;
++
++		/* Release the backing-store import arena. */
++		if(psBMHeap->ui32Attribs 
++		&	(PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
++			|PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
++		{
++			if (psBMHeap->pImportArena)
++			{
++				RA_Delete (psBMHeap->pImportArena);
++			}
++		}
++		else
++		{
++			PVR_DPF((PVR_DBG_ERROR, "BM_DestroyHeap: backing store type unsupported"));
++			return;
++		}
++
++		/* Drop the heap's MMU page tables. */
++		psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
++		
++		/* Unlink from the owning context's heap list and free. */
++		ppsBMHeap = &psBMHeap->pBMContext->psBMHeap;
++		while(*ppsBMHeap)
++		{
++			if(*ppsBMHeap == psBMHeap)
++			{
++				
++				*ppsBMHeap = psBMHeap->psNext;
++				OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBMHeap, IMG_NULL);
++				break;
++			}
++			ppsBMHeap = &((*ppsBMHeap)->psNext);
++		}
++	}
++	else
++	{
++		PVR_DPF ((PVR_DBG_ERROR, "BM_DestroyHeap: invalid heap handle"));	
++	}
++}
++
++
++/* BM_Reinitialise
++ *
++ * Intended to re-enable the MMU for every context on the device after
++ * a reinitialisation; the walk is currently compiled out behind
++ * "#ifdef FIXME", so the function is a no-op that always succeeds.
++ *
++ * NOTE(review): inside the FIXME block the BM_CONTEXT declaration
++ * follows statements, which is invalid C90 if the block is ever
++ * enabled - move the declaration up when un-FIXME-ing.
++ */
++IMG_BOOL 
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++
++	PVR_DPF((PVR_DBG_MESSAGE, "BM_Reinitialise"));
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++#ifdef FIXME
++	BM_CONTEXT *pBMContext;
++
++	pBMContext = psDeviceNode->sDevMemoryInfo.pBMContext;
++
++	while(pBMContext)
++	{
++		MMU_Enable (pBMContext);
++		pBMContext = pBMContext->psNext;
++	}
++#endif
++
++	return IMG_TRUE;
++}
++
++/* BM_Alloc
++ *
++ * Allocate uSize bytes of device memory from the given heap, creating
++ * the buffer bookkeeping (BM_BUF) and the underlying allocation via
++ * AllocMemory.
++ *
++ * pui32Flags is optional: on entry it supplies allocation flags (0 is
++ * assumed when NULL), on successful exit it receives those flags
++ * combined with the heap's attributes.  *phBuf receives the buffer
++ * handle; *psDevVAddr the device virtual address.
++ *
++ * Returns IMG_TRUE on success, IMG_FALSE on any failure.
++ *
++ * Fix: the flag write-back at the end was unconditional even though
++ * pui32Flags is NULL-checked on entry, so a caller passing IMG_NULL
++ * dereferenced NULL on the success path; the write-back is now guarded.
++ */
++IMG_BOOL
++BM_Alloc (  IMG_HANDLE			hDevMemHeap,
++			IMG_DEV_VIRTADDR	*psDevVAddr,
++			IMG_SIZE_T			uSize,
++			IMG_UINT32			*pui32Flags,
++			IMG_UINT32			uDevVAddrAlignment,
++			BM_HANDLE			*phBuf)
++{
++	BM_BUF *pBuf;
++	BM_CONTEXT *pBMContext;
++	BM_HEAP *psBMHeap;
++	SYS_DATA *psSysData;
++	IMG_UINT32 uFlags = 0;
++
++	if(pui32Flags)
++		uFlags = *pui32Flags;
++
++	PVR_DPF ((PVR_DBG_MESSAGE,
++		  "BM_Alloc (uSize=0x%x, uFlags=0x%x, uDevVAddrAlignment=0x%x)",
++			uSize, uFlags, uDevVAddrAlignment));
++
++	if (SysAcquireData(&psSysData) != PVRSRV_OK)
++		return IMG_FALSE;
++
++	psBMHeap = (BM_HEAP*)hDevMemHeap;
++	pBMContext = psBMHeap->pBMContext;
++
++	/* Normalise a zero alignment request to byte alignment. */
++	if(uDevVAddrAlignment == 0)
++		uDevVAddrAlignment = 1;
++
++	
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				   sizeof (BM_BUF),
++				   (IMG_PVOID *)&pBuf, IMG_NULL) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: BM_Buf alloc FAILED"));
++		return IMG_FALSE;
++	}
++	OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++	
++	if (AllocMemory(pBMContext,
++					psBMHeap,
++					psDevVAddr,
++					uSize,
++					uFlags,
++					uDevVAddrAlignment,
++					pBuf) != IMG_TRUE)
++	{
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++		PVR_DPF((PVR_DBG_ERROR, "BM_Alloc: AllocMemory FAILED"));
++		return IMG_FALSE;
++	}
++
++	PVR_DPF ((PVR_DBG_MESSAGE,
++		  "BM_Alloc (uSize=0x%x, uFlags=0x%x)=%08X",
++		  uSize, uFlags, pBuf));
++
++	
++	pBuf->ui32RefCount = 1;
++	*phBuf = (BM_HANDLE)pBuf;
++
++	/* Fix: only write back combined flags when the caller supplied
++	 * a flags pointer (it is optional - see entry check above). */
++	if(pui32Flags)
++	{
++		*pui32Flags = uFlags | psBMHeap->ui32Attribs;
++	}
++
++	return IMG_TRUE;
++}
++
++
++
++/* BM_Wrap
++ *
++ * Wrap caller-provided system memory into the device address space.
++ * A hash keyed on (first system page address + ui32Offset) detects a
++ * previous wrap of the same memory with the same page-aligned size;
++ * in that case the existing buffer's refcount is bumped and returned.
++ * Otherwise a new BM_BUF is built via WrapMemory and, for wrapped
++ * (contiguous) origins, inserted into the context's buffer hash so
++ * later wraps can be de-duplicated.
++ *
++ * Returns IMG_TRUE with *phBuf set on success; *pui32Flags (optional)
++ * receives the effective flags (heap attributes merged with caller
++ * flags).
++ */
++IMG_BOOL
++BM_Wrap (	IMG_HANDLE hDevMemHeap,
++			IMG_UINT32 ui32Size,
++			IMG_UINT32 ui32Offset,
++			IMG_BOOL bPhysContig,
++			IMG_SYS_PHYADDR *psSysAddr,
++			IMG_VOID *pvCPUVAddr,
++			IMG_UINT32 *pui32Flags,
++			BM_HANDLE *phBuf)
++{
++	BM_BUF *pBuf;
++	BM_CONTEXT *psBMContext;
++	BM_HEAP *psBMHeap;
++	SYS_DATA *psSysData;
++	IMG_SYS_PHYADDR sHashAddress;
++	IMG_UINT32 uFlags;
++
++	psBMHeap = (BM_HEAP*)hDevMemHeap;
++	psBMContext = psBMHeap->pBMContext;
++
++	/* Start from the heap's cache/map attributes, then OR in caller flags. */
++	uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);
++
++	if(pui32Flags)
++		uFlags |= *pui32Flags;
++
++	PVR_DPF ((PVR_DBG_MESSAGE,
++		  "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
++			ui32Size, ui32Offset, bPhysContig, pvCPUVAddr, uFlags));
++
++	if(SysAcquireData (&psSysData) != PVRSRV_OK)
++		return IMG_FALSE;
++
++	/* Hash key: first page's system address plus the byte offset. */
++	sHashAddress = psSysAddr[0];
++	
++	
++	sHashAddress.uiAddr += ui32Offset;
++
++	/* Look for an existing wrap of this memory. */
++	pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr);
++
++	if(pBuf)
++	{
++		IMG_UINT32 ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);
++
++		/* Only re-use when the mapping size matches and the buffer
++		 * really is a wrapped origin. */
++		if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
++														pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
++		{
++			PVR_DPF((PVR_DBG_MESSAGE,
++					"BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
++					ui32Size, ui32Offset, sHashAddress.uiAddr));
++
++			pBuf->ui32RefCount++;
++			*phBuf = (BM_HANDLE)pBuf;
++			if(pui32Flags)
++				*pui32Flags = uFlags;
++
++			return IMG_TRUE;
++		}
++	}
++
++	/* No match: build a fresh buffer. */
++	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++						sizeof (BM_BUF),
++						(IMG_PVOID *)&pBuf, IMG_NULL) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
++		return IMG_FALSE;
++	}
++	OSMemSet(pBuf, 0, sizeof (BM_BUF));
++
++	
++	if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++		return IMG_FALSE;
++	}
++
++	/* Register wrapped buffers in the hash for future de-duplication. */
++	if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++	{
++		/* The hash key must agree with the CPU physical address the
++		 * wrap recorded (BM_Free removes by that address). */
++		PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);
++
++		if (!HASH_Insert (psBMContext->pBufferHash, (IMG_UINTPTR_T) sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
++		{
++			FreeBuf (pBuf, uFlags);
++			OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
++			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
++			return IMG_FALSE;
++		}
++	}
++
++	PVR_DPF ((PVR_DBG_MESSAGE,
++			"BM_Wrap (uSize=0x%x, uFlags=0x%x)=%08X(devVAddr=%08X)",
++			ui32Size, uFlags, pBuf, pBuf->DevVAddr.uiAddr));
++
++	
++	pBuf->ui32RefCount = 1;
++	*phBuf = (BM_HANDLE)pBuf;
++	if(pui32Flags)
++		*pui32Flags = uFlags;
++
++	return IMG_TRUE;
++}
++
++
++/* BM_Free
++ *
++ * Drop one reference on a buffer; on the last reference, remove any
++ * wrap-hash entry keyed by the buffer's system physical address and
++ * destroy the buffer via FreeBuf.
++ *
++ * NOTE(review): the refcount is decremented with no underflow guard;
++ * a second BM_Free on an already-freed handle would wrap it past zero
++ * - confirm callers cannot do this.
++ */
++void
++BM_Free (BM_HANDLE hBuf,
++		IMG_UINT32 ui32Flags)
++{
++	BM_BUF *pBuf = (BM_BUF *)hBuf;
++	SYS_DATA *psSysData;
++	IMG_SYS_PHYADDR sHashAddr;
++
++	PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf));
++	PVR_ASSERT (pBuf!=IMG_NULL);
++
++	if(SysAcquireData (&psSysData) != PVRSRV_OK)
++		return;
++
++	pBuf->ui32RefCount--;
++
++	if(pBuf->ui32RefCount == 0)
++	{
++		/* Wrapped buffers were entered into the context's buffer hash
++		 * by BM_Wrap; remove the entry before freeing. */
++		if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
++		{
++			sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
++
++			HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash,	(IMG_UINTPTR_T)sHashAddr.uiAddr);
++		}
++		FreeBuf (pBuf, ui32Flags);
++	}
++}
++
++
++/* Return the CPU (kernel) virtual address recorded for a buffer handle. */
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf)
++{
++	BM_BUF *psBuf = (BM_BUF *)hBuf;
++
++	PVR_ASSERT (psBuf != IMG_NULL);
++	PVR_DPF ((PVR_DBG_MESSAGE,
++				"BM_HandleToCpuVaddr(h=%08X)=%08X",
++				hBuf, psBuf->CpuVAddr));
++	return psBuf->CpuVAddr;
++}
++
++
++/* Return the device virtual address recorded for a buffer handle. */
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf)
++{
++	BM_BUF *psBuf = (BM_BUF *)hBuf;
++
++	PVR_ASSERT (psBuf != IMG_NULL);
++	PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToDevVaddr(h=%08X)=%08X", hBuf, psBuf->DevVAddr));
++	return psBuf->DevVAddr;
++}
++
++
++/* Translate a buffer handle's recorded CPU physical address into a
++ * system physical address. */
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf)
++{
++	BM_BUF *psBuf = (BM_BUF *)hBuf;
++
++	PVR_ASSERT (psBuf != IMG_NULL);
++	PVR_DPF ((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, psBuf->CpuPAddr.uiAddr));
++	return SysCpuPAddrToSysPAddr (psBuf->CpuPAddr);
++}
++
++/* Return the OS memory handle backing a buffer handle. */
++IMG_HANDLE
++BM_HandleToOSMemHandle(BM_HANDLE hBuf)
++{
++	BM_BUF *psBuf = (BM_BUF *)hBuf;
++
++	PVR_ASSERT (psBuf != IMG_NULL);
++
++	PVR_DPF ((PVR_DBG_MESSAGE,
++				"BM_HandleToOSMemHandle(h=%08X)=%08X",
++				hBuf, psBuf->hOSMemHandle));
++	return psBuf->hOSMemHandle;
++}
++
++/* BM_ContiguousStatistics
++ *
++ * Not implemented: always returns IMG_FALSE and reports no statistics.
++ *
++ * Fix: the empty "if (...);" statement that silenced unused-parameter
++ * warnings is replaced with the PVR_UNREFERENCED_PARAMETER macro used
++ * elsewhere in this file for the same purpose.
++ */
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++						 IMG_UINT32 *pTotalBytes,
++						 IMG_UINT32 *pAvailableBytes)
++{
++	PVR_UNREFERENCED_PARAMETER(uFlags);
++	PVR_UNREFERENCED_PARAMETER(pTotalBytes);
++	PVR_UNREFERENCED_PARAMETER(pAvailableBytes);
++	return IMG_FALSE;
++}
++
++
++static IMG_BOOL
++DevMemoryAlloc (BM_CONTEXT *pBMContext,
++                              BM_MAPPING *pMapping, 
++                              IMG_SIZE_T *pActualSize,
++                              IMG_UINT32 uFlags,
++                              IMG_UINT32 dev_vaddr_alignment,
++                              IMG_DEV_VIRTADDR *pDevVAddr)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++      IMG_UINT32 ui32PDumpSize = pMapping->uSize;
++#endif
++
++      psDeviceNode = pBMContext->psDeviceNode;
++
++      if(uFlags & PVRSRV_MEM_INTERLEAVED)
++      {
++              
++              pMapping->uSize *= 2;
++      }
++      
++#ifdef PDUMP
++      if(uFlags & PVRSRV_MEM_DUMMY)
++      {
++              
++              ui32PDumpSize = HOST_PAGESIZE();
++      }
++#endif
++
++      
++      if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap, 
++                                                                      pMapping->uSize, 
++                                                                      pActualSize, 
++                                                                      0,
++                                                                      dev_vaddr_alignment, 
++                                                                      &(pMapping->DevVAddr)))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
++              return IMG_FALSE;
++      }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(pBMContext->psMMUContext);
++#endif
++
++      
++      
++      PDUMPMALLOCPAGES(psDeviceNode->sDevId.eDeviceType, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr, pMapping->hOSMemHandle, ui32PDumpSize, (IMG_HANDLE)pMapping);
++
++      switch (pMapping->eCpuMemoryOrigin)
++      {
++              case hm_wrapped:
++              case hm_wrapped_virtaddr:
++              case hm_contiguous:
++              {
++                      psDeviceNode->pfnMMUMapPages (  pMapping->pBMHeap->pMMUHeap,
++                                                      pMapping->DevVAddr,
++                                                      SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
++                                                      pMapping->uSize,
++                                                      uFlags,
++                                                      (IMG_HANDLE)pMapping);
++
++                      *pDevVAddr = pMapping->DevVAddr;
++                      break;
++              }
++              case hm_env:
++              {
++                      psDeviceNode->pfnMMUMapShadow ( pMapping->pBMHeap->pMMUHeap,
++                                                      pMapping->DevVAddr,
++                                                      pMapping->uSize,
++                                                      pMapping->CpuVAddr,
++                                                      pMapping->hOSMemHandle,
++                                                      pDevVAddr,
++                                                      uFlags,
++                                                      (IMG_HANDLE)pMapping);
++                      break;
++              }
++              case hm_wrapped_scatter:
++              {
++                      psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
++                                                      pMapping->DevVAddr,
++                                                      pMapping->psSysAddr,
++                                                      pMapping->uSize,
++                                                      uFlags,
++                                                      (IMG_HANDLE)pMapping);
++
++                      *pDevVAddr = pMapping->DevVAddr;
++                      break;
++              }
++              default:
++                      PVR_DPF((PVR_DBG_ERROR,
++                              "Illegal value %d for pMapping->eCpuMemoryOrigin",
++                              pMapping->eCpuMemoryOrigin));
++                      return IMG_FALSE;
++      }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(pBMContext->psMMUContext);
++#endif
++
++      return IMG_TRUE;
++}
++
++/* DevMemoryFree
++ *
++ * Unmap a mapping's device-virtual range from the device MMU.  Under
++ * PDUMP the freed size is also recorded; dummy allocations are logged
++ * as a single host page since only one real page backs them (mirrors
++ * the sizing in DevMemoryAlloc).
++ */
++static void
++DevMemoryFree (BM_MAPPING *pMapping)
++{
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++#ifdef PDUMP
++	IMG_UINT32 ui32PSize;
++#endif
++
++#ifdef PDUMP
++	/* Log the size actually backed, not the virtual span. */
++	if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++	{
++		
++		ui32PSize = HOST_PAGESIZE();
++	}
++	else
++	{
++		ui32PSize = pMapping->uSize;
++	}
++
++	PDUMPFREEPAGES(pMapping->pBMHeap, pMapping->DevVAddr,
++				   ui32PSize, (IMG_HANDLE)pMapping,
++				   (IMG_BOOL)(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED));
++#endif
++
++	psDeviceNode = pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++	psDeviceNode->pfnMMUFree (pMapping->pBMHeap->pMMUHeap, pMapping->DevVAddr, pMapping->uSize);
++}
++
++static IMG_BOOL
++BM_ImportMemory (void *pH,
++                        IMG_SIZE_T uRequestSize,
++                        IMG_SIZE_T *pActualSize,
++                        BM_MAPPING **ppsMapping,
++                        IMG_UINT32 uFlags,
++                        IMG_UINTPTR_T *pBase)
++{
++      BM_MAPPING *pMapping;
++      BM_HEAP *pBMHeap = pH;
++      BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
++      IMG_BOOL bResult;
++      IMG_SIZE_T uSize;
++      IMG_SIZE_T uPSize;
++      IMG_UINT32 uDevVAddrAlignment = 0;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
++                        pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));
++
++      PVR_ASSERT (ppsMapping != IMG_NULL);
++      PVR_ASSERT (pBMContext != IMG_NULL);
++
++      uSize = HOST_PAGEALIGN (uRequestSize);
++      PVR_ASSERT (uSize >= uRequestSize);
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof (BM_MAPPING),
++                                              (IMG_PVOID *)&pMapping, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
++              goto fail_exit;
++      }
++
++      pMapping->hOSMemHandle = 0;
++      pMapping->CpuVAddr = 0;
++      pMapping->DevVAddr.uiAddr = 0;
++      pMapping->CpuPAddr.uiAddr = 0;
++      pMapping->uSize = uSize;
++      pMapping->pBMHeap = pBMHeap;
++      pMapping->ui32Flags = uFlags;
++
++      
++      if (pActualSize)
++      {
++              *pActualSize = uSize;
++      }
++      
++      
++      if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++      {
++              uPSize = HOST_PAGESIZE();
++      }
++      else
++      {
++              uPSize = pMapping->uSize;
++      }
++
++      
++
++      if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++      {
++              
++              if (OSAllocPages(pBMHeap->ui32Attribs,
++                                               uPSize, 
++                                               (IMG_VOID **)&pMapping->CpuVAddr,
++                                               &pMapping->hOSMemHandle) != PVRSRV_OK) 
++              {
++                      PVR_DPF((PVR_DBG_ERROR,
++                                      "BM_ImportMemory: OSAllocPages(0x%x) failed",
++                                      uPSize));
++                      goto fail_mapping_alloc;
++              }
++
++              
++              pMapping->eCpuMemoryOrigin = hm_env;
++      }
++      else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++
++              
++              PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);
++
++              if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
++                                         uPSize,
++                                         IMG_NULL,
++                                         IMG_NULL,
++                                         0,
++                                         HOST_PAGESIZE(),
++                                         0,
++                                         (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
++                      goto fail_mapping_alloc;
++              }
++
++              
++              pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++              if(OSReservePhys(pMapping->CpuPAddr,
++                                               uPSize,
++                                               pBMHeap->ui32Attribs,
++                                               &pMapping->CpuVAddr,
++                                               &pMapping->hOSMemHandle) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
++                      goto fail_dev_mem_alloc;
++              }
++
++              
++              pMapping->eCpuMemoryOrigin = hm_contiguous;
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
++              goto fail_mapping_alloc;
++      }
++
++      
++      bResult = DevMemoryAlloc (pBMContext, pMapping, IMG_NULL, uFlags,
++                                                        uDevVAddrAlignment, &pMapping->DevVAddr);
++      if (!bResult)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
++                              pMapping->uSize));
++              goto fail_dev_mem_alloc;
++      }
++
++      PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);
++
++      *pBase = pMapping->DevVAddr.uiAddr;
++      *ppsMapping = pMapping;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
++      return IMG_TRUE;
++
++fail_dev_mem_alloc:
++      if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
++      {
++              
++              if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++              {
++                      pMapping->uSize /= 2;
++              }
++
++              if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++              {
++                      uPSize = HOST_PAGESIZE();
++              }
++              else
++              {
++                      uPSize = pMapping->uSize;
++              }
++
++              if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++              {
++                      OSFreePages(pBMHeap->ui32Attribs, 
++                                                uPSize, 
++                                                (void *) pMapping->CpuVAddr,
++                                                pMapping->hOSMemHandle);
++              }
++              else
++              {
++                      IMG_SYS_PHYADDR sSysPAddr;
++
++                      if(pMapping->CpuVAddr)
++                      {
++                              OSUnReservePhys(pMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, pMapping->hOSMemHandle);
++                      }
++                      sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);          
++                      RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);      
++              }
++      }
++fail_mapping_alloc:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
++fail_exit:
++      return IMG_FALSE;
++}
++
++
++static void
++BM_FreeMemory (void *h, IMG_UINTPTR_T _base, BM_MAPPING *psMapping)
++{
++      BM_HEAP *pBMHeap = h;
++      IMG_SIZE_T uPSize;
++
++      PVR_UNREFERENCED_PARAMETER (_base);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base, psMapping));
++
++      PVR_ASSERT (psMapping != IMG_NULL);
++
++      DevMemoryFree (psMapping);
++
++      
++      if(psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
++      {
++              psMapping->uSize /= 2;
++      }
++      
++      if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
++      {
++              uPSize = HOST_PAGESIZE();
++      }
++      else
++      {
++              uPSize = psMapping->uSize;
++      }
++      
++      if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
++      {
++              OSFreePages(pBMHeap->ui32Attribs,
++                                              uPSize,
++                                              (void *) psMapping->CpuVAddr,
++                                              psMapping->hOSMemHandle);
++      }
++      else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++
++              OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
++
++              sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
++
++              RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: Invalid backing store type"));
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping, IMG_NULL);
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
++                      h, _base, psMapping));
++}
++
++PVRSRV_ERROR BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                              IMG_DEV_VIRTADDR sDevVPageAddr,
++                                                              IMG_DEV_PHYADDR *psDevPAddr)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "BM_GetPhysPageAddr"));
++
++      if(!psMemInfo || !psDevPAddr)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "BM_GetPhysPageAddr: Invalid params"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);
++
++      psDeviceNode = ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++      *psDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->pBMHeap->pMMUHeap, 
++                                                                                              sDevVPageAddr);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, PVRSRV_HEAP_INFO *psHeapInfo)
++{
++      BM_HEAP *psBMHeap = (BM_HEAP *)hDevMemHeap;
++
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetHeapInfo"));
++
++      psHeapInfo->hDevMemHeap = hDevMemHeap;
++      psHeapInfo->sDevVAddrBase = psBMHeap->sDevArena.BaseDevVAddr;
++      psHeapInfo->ui32HeapByteSize = psBMHeap->sDevArena.ui32Size;
++      psHeapInfo->ui32Attribs = psBMHeap->ui32Attribs;
++
++      return PVRSRV_OK;
++}
++
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap)
++{
++      BM_HEAP *pBMHeap = (BM_HEAP*)hDevMemHeap;
++
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUContext"));
++
++      return pBMHeap->pBMContext->psMMUContext;
++}
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext)
++{
++      BM_CONTEXT *pBMContext = (BM_CONTEXT*)hDevMemContext;
++
++      PVR_DPF ((PVR_DBG_VERBOSE, "BM_GetMMUContextFromMemContext"));
++
++      return pBMContext->psMMUContext;
++}
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap)
++{
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMMUHeap"));
++
++      return (IMG_HANDLE)((BM_HEAP*)hDevMemHeap)->pMMUHeap;
++}
++
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext)
++{
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetDeviceNode"));
++
++      return ((BM_CONTEXT*)hDevMemContext)->psDeviceNode;
++}
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++      PVR_DPF((PVR_DBG_VERBOSE, "BM_GetMappingHandle"));
++
++      return ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping->hOSMemHandle;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/deviceclass.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1734 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/module.h>
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "kernelbuffer.h"
++#include "pvr_bridge_km.h"
++
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID);
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID);
++
++typedef struct PVRSRV_DC_SRV2DISP_KMJTABLE_TAG *PPVRSRV_DC_SRV2DISP_KMJTABLE;
++
++typedef struct PVRSRV_DC_BUFFER_TAG
++{
++      
++      PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++      struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++      struct PVRSRV_DC_SWAPCHAIN_TAG *psSwapChain;
++} PVRSRV_DC_BUFFER;
++
++typedef struct PVRSRV_DC_SWAPCHAIN_TAG
++{
++      IMG_HANDLE                                                      hExtSwapChain;
++      PVRSRV_QUEUE_INFO                                       *psQueue;
++      PVRSRV_DC_BUFFER                                        asBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++      IMG_UINT32                                                      ui32BufferCount;
++      PVRSRV_DC_BUFFER                                        *psLastFlipBuffer;
++      struct PVRSRV_DISPLAYCLASS_INFO_TAG *psDCInfo;
++      IMG_HANDLE                                                      hResItem;
++} PVRSRV_DC_SWAPCHAIN;
++
++typedef struct PVRSRV_DISPLAYCLASS_INFO_TAG
++{
++      IMG_UINT32                                                      ui32RefCount;
++      IMG_UINT32                                                      ui32DeviceID;
++      IMG_HANDLE                                                      hExtDevice;
++      PPVRSRV_DC_SRV2DISP_KMJTABLE            psFuncTable;
++      IMG_HANDLE                                                      hDevMemContext;
++      PVRSRV_DC_BUFFER                                        sSystemBuffer;
++} PVRSRV_DISPLAYCLASS_INFO;
++
++
++typedef struct PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO_TAG
++{
++      PVRSRV_DISPLAYCLASS_INFO                        *psDCInfo;
++      IMG_HANDLE                                                      hResItem;
++} PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO;
++
++
++typedef struct PVRSRV_BC_SRV2BUFFER_KMJTABLE_TAG *PPVRSRV_BC_SRV2BUFFER_KMJTABLE;
++
++typedef struct PVRSRV_BC_BUFFER_TAG
++{
++      
++      PVRSRV_DEVICECLASS_BUFFER sDeviceClassBuffer;
++
++      struct PVRSRV_BUFFERCLASS_INFO_TAG *psBCInfo;
++} PVRSRV_BC_BUFFER;
++
++
++typedef struct PVRSRV_BUFFERCLASS_INFO_TAG
++{
++      IMG_UINT32                                                      ui32RefCount;
++      IMG_UINT32                                                      ui32DeviceID;
++      IMG_HANDLE                                                      hExtDevice;
++      PPVRSRV_BC_SRV2BUFFER_KMJTABLE          psFuncTable;
++      IMG_HANDLE                                                      hDevMemContext;
++      
++      IMG_UINT32                                                      ui32BufferCount;
++      PVRSRV_BC_BUFFER                                        *psBuffer;
++
++} PVRSRV_BUFFERCLASS_INFO;
++
++
++typedef struct PVRSRV_BUFFERCLASS_PERCONTEXT_INFO_TAG
++{
++      PVRSRV_BUFFERCLASS_INFO                         *psBCInfo;
++      IMG_HANDLE                                                      hResItem;
++} PVRSRV_BUFFERCLASS_PERCONTEXT_INFO;
++
++
++static PVRSRV_DISPLAYCLASS_INFO* DCDeviceHandleToDCInfo (IMG_HANDLE hDeviceKM)
++{
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++      psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      return psDCPerContextInfo->psDCInfo;
++}
++
++
++static PVRSRV_BUFFERCLASS_INFO* BCDeviceHandleToBCInfo (IMG_HANDLE hDeviceKM)
++{
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++      psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      return psBCPerContextInfo->psBCInfo;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumerateDCKM (PVRSRV_DEVICE_CLASS DeviceClass,
++                                                                IMG_UINT32 *pui32DevCount,
++                                                                IMG_UINT32 *pui32DevID )
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      IMG_UINT                        ui32DevCount = 0;
++      SYS_DATA                        *psSysData;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              if      ((psDeviceNode->sDevId.eDeviceClass == DeviceClass)
++              &&      (psDeviceNode->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_EXT))
++              {
++                      ui32DevCount++;
++                      if(pui32DevID)
++                      {
++                              *pui32DevID++ = psDeviceNode->sDevId.ui32DeviceIndex;
++                      }
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      if(pui32DevCount)
++      {
++              *pui32DevCount = ui32DevCount;
++      }
++      else if(pui32DevID == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDCKM: Invalid parameters"));
++              return (PVRSRV_ERROR_INVALID_PARAMS);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterDCDeviceKM (PVRSRV_DC_SRV2DISP_KMJTABLE *psFuncTable,
++                                                                         IMG_UINT32 *pui32DeviceID)
++{
++      PVRSRV_DISPLAYCLASS_INFO        *psDCInfo = IMG_NULL;
++      PVRSRV_DEVICE_NODE                      *psDeviceNode;
++      SYS_DATA                                        *psSysData;
++
++      
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++
++
++      
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(*psDCInfo),
++                                       (IMG_VOID **)&psDCInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDCInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet (psDCInfo, 0, sizeof(*psDCInfo));
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE),
++                                       (IMG_VOID **)&psDCInfo->psFuncTable, IMG_NULL) != PVRSRV_OK)
++      {               
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psFuncTable alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psDCInfo->psFuncTable, 0, sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE));
++
++      
++      *psDCInfo->psFuncTable = *psFuncTable;
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DEVICE_NODE),
++                                       (IMG_VOID **)&psDeviceNode, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDCDeviceKM: Failed psDeviceNode alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++      psDeviceNode->pvDevice = (IMG_VOID*)psDCInfo;
++      psDeviceNode->ui32pvDeviceSize = sizeof(*psDCInfo);
++      psDeviceNode->ui32RefCount = 1;
++      psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++      psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_DISPLAY;
++      psDeviceNode->psSysData = psSysData;
++
++      
++      AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++      psDCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      if (pui32DeviceID)
++      {
++              *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      }
++      
++      
++      SysRegisterExternalDevice(psDeviceNode);
++
++      
++      psDeviceNode->psNext = psSysData->psDeviceNodeList;
++      psSysData->psDeviceNodeList = psDeviceNode;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      if(psDCInfo->psFuncTable)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo->psFuncTable, IMG_NULL);
++      }
++      
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo, IMG_NULL);
++
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++}
++
++
++PVRSRV_ERROR PVRSRVRemoveDCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++      SYS_DATA                                        *psSysData;
++      PVRSRV_DEVICE_NODE                      **ppsDeviceNode, *psDeviceNode;
++      PVRSRV_DISPLAYCLASS_INFO        *psDCInfo;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      ppsDeviceNode = &psSysData->psDeviceNodeList;
++      while(*ppsDeviceNode)
++      {
++              switch((*ppsDeviceNode)->sDevId.eDeviceClass)
++              {
++                      case PVRSRV_DEVICE_CLASS_DISPLAY :
++                      {
++                              if((*ppsDeviceNode)->sDevId.ui32DeviceIndex == ui32DevIndex)
++                              {
++                                      goto FoundDevice;
++                              }
++                              break;
++                      }
++                      default:
++                      {
++                              break;
++                      }
++              }
++              ppsDeviceNode = &((*ppsDeviceNode)->psNext);
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveDCDeviceKM: requested device %d not present", ui32DevIndex));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++      
++      psDeviceNode = *ppsDeviceNode;
++      *ppsDeviceNode = psDeviceNode->psNext;
++
++      
++      SysRemoveExternalDevice(psDeviceNode);
++      
++      
++
++
++      psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++      PVR_ASSERT(psDCInfo->ui32RefCount == 0);
++      FreeDeviceID(psSysData, ui32DevIndex);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo->psFuncTable, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCInfo, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDeviceNode, IMG_NULL);
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterBCDeviceKM (PVRSRV_BC_SRV2BUFFER_KMJTABLE *psFuncTable,
++                                                                         IMG_UINT32   *pui32DeviceID)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo = IMG_NULL;
++      PVRSRV_DEVICE_NODE              *psDeviceNode;
++      SYS_DATA                                *psSysData;
++      
++
++
++
++
++
++
++
++
++
++
++
++
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(*psBCInfo),
++                                       (IMG_VOID **)&psBCInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psBCInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet (psBCInfo, 0, sizeof(*psBCInfo));      
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE),
++                                       (IMG_VOID **)&psBCInfo->psFuncTable, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psFuncTable alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psBCInfo->psFuncTable, 0, sizeof(PVRSRV_BC_SRV2BUFFER_KMJTABLE));
++
++      
++      *psBCInfo->psFuncTable = *psFuncTable;
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DEVICE_NODE),
++                                       (IMG_VOID **)&psDeviceNode, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterBCDeviceKM: Failed psDeviceNode alloc"));
++              goto ErrorExit;
++      }
++      OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE));
++
++      psDeviceNode->pvDevice = (IMG_VOID*)psBCInfo;
++      psDeviceNode->ui32pvDeviceSize = sizeof(*psBCInfo);
++      psDeviceNode->ui32RefCount = 1;
++      psDeviceNode->sDevId.eDeviceType = PVRSRV_DEVICE_TYPE_EXT;
++      psDeviceNode->sDevId.eDeviceClass = PVRSRV_DEVICE_CLASS_BUFFER;
++      psDeviceNode->psSysData = psSysData;
++
++      
++      AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++      psBCInfo->ui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      if (pui32DeviceID)
++      {
++              *pui32DeviceID = psDeviceNode->sDevId.ui32DeviceIndex;
++      }
++
++      
++      psDeviceNode->psNext = psSysData->psDeviceNodeList;
++      psSysData->psDeviceNodeList = psDeviceNode;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      if(psBCInfo->psFuncTable)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psFuncTable, IMG_NULL);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo, IMG_NULL);
++
++      return PVRSRV_ERROR_OUT_OF_MEMORY;      
++}
++
++
++PVRSRV_ERROR PVRSRVRemoveBCDeviceKM(IMG_UINT32 ui32DevIndex)
++{
++      SYS_DATA                                        *psSysData;
++      PVRSRV_DEVICE_NODE                      **ppsDevNode, *psDevNode;
++      PVRSRV_BUFFERCLASS_INFO         *psBCInfo;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      ppsDevNode = &psSysData->psDeviceNodeList;
++      while(*ppsDevNode)
++      {
++              switch((*ppsDevNode)->sDevId.eDeviceClass)
++              {
++                      case PVRSRV_DEVICE_CLASS_BUFFER :
++                      {
++                              if((*ppsDevNode)->sDevId.ui32DeviceIndex == ui32DevIndex)
++                              {
++                                      goto FoundDevice;
++                              }
++                              break;
++                      }
++                      default:
++                      {
++                              break;
++                      }
++              }
++              ppsDevNode = &(*ppsDevNode)->psNext;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveBCDeviceKM: requested device %d not present", ui32DevIndex));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++      
++      psDevNode = *(ppsDevNode);
++      *ppsDevNode = psDevNode->psNext;
++
++      
++
++
++      FreeDeviceID(psSysData, ui32DevIndex);
++      psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDevNode->pvDevice;
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psFuncTable, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo, IMG_NULL);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDevNode, IMG_NULL);
++      return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseDCDeviceKM (IMG_HANDLE        hDeviceKM,
++                                                                      IMG_BOOL        bResManCallback)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++
++      PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++      psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      
++      eError = ResManFreeResByPtr(psDCPerContextInfo->hResItem, IMG_TRUE);
++
++      return eError;
++}
++
++
++static PVRSRV_ERROR CloseDCDeviceCallBack(IMG_UINT32  ui32ProcessID,
++                                                                                IMG_PVOID             pvParam,
++                                                                                IMG_UINT32    ui32Param)
++{
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      psDCPerContextInfo = (PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *)pvParam;
++      psDCInfo = psDCPerContextInfo->psDCInfo;
++
++      psDCInfo->ui32RefCount--;
++      if(psDCInfo->ui32RefCount == 0)
++      {       
++              
++              psDCInfo->psFuncTable->pfnCloseDCDevice(psDCInfo->hExtDevice);
++
++              PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++              
++              psDCInfo->hDevMemContext = IMG_NULL;
++              psDCInfo->hExtDevice = IMG_NULL;
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psDCPerContextInfo, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVOpenDCDeviceKM
++ *
++ * Opens a per-context handle onto the display-class device with index
++ * ui32DeviceID.  On the first open (ui32RefCount 0 -> 1) a kernel sync
++ * object is allocated for the system buffer and the external display
++ * driver is opened via pfnOpenDCDevice.  The per-context info is
++ * registered with the resource manager so CloseDCDeviceCallBack runs on
++ * cleanup, and is returned to the caller through *phDeviceKM.
++ *
++ * Returns PVRSRV_OK, PVRSRV_ERROR_GENERIC (bad params / unknown device),
++ * PVRSRV_ERROR_OUT_OF_MEMORY, or the external driver's error code.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenDCDeviceKM (IMG_UINT32 ui32DeviceID,
++                                                                 IMG_HANDLE hDevCookie,
++                                                                 IMG_HANDLE *phDeviceKM)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DISPLAYCLASS_PERCONTEXT_INFO *psDCPerContextInfo;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                        *psSysData;
++
++      if(!phDeviceKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Invalid params"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      
++      /* Walk the device node list for a display-class node with the
++         requested device index. */
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              if ((psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY) &&
++                      (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID))
++              {
++                      
++
++
++                      psDCInfo = (PVRSRV_DISPLAYCLASS_INFO*)psDeviceNode->pvDevice;
++                      goto FoundDevice;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: no devnode matching index %d", ui32DeviceID));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++      
++
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(*psDCPerContextInfo),
++                                (IMG_VOID **)&psDCPerContextInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed psDCPerContextInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet(psDCPerContextInfo, 0, sizeof(*psDCPerContextInfo));
++
++      /* First opener performs the one-time device setup. */
++      if(psDCInfo->ui32RefCount++ == 0)
++      {
++              PVRSRV_ERROR eError;
++
++              psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++              PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++              
++              psDCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++              /* Sync object covering the display's system buffer. */
++              eError = PVRSRVAllocSyncInfoKM(IMG_NULL, 
++                                                                      (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext,
++                                                                      &psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed sync info alloc"));
++                      psDCInfo->ui32RefCount--;
++                      /* NOTE(review): psDCPerContextInfo allocated above is not
++                         freed on this error path - memory leak. */
++                      return eError;
++              }
++
++              
++              eError = psDCInfo->psFuncTable->pfnOpenDCDevice(ui32DeviceID,
++                                                              &psDCInfo->hExtDevice,
++                                                              (PVRSRV_SYNC_DATA*)psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenDCDeviceKM: Failed to open external DC device"));
++                      psDCInfo->ui32RefCount--;
++                      PVRSRVFreeSyncInfoKM(psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo);
++                      /* NOTE(review): psDCPerContextInfo also leaks here. */
++                      return eError;
++              }
++      }
++
++      psDCPerContextInfo->psDCInfo = psDCInfo;
++      /* NOTE(review): ResManRegisterRes result is not checked for NULL
++         before being stored - confirm it cannot fail here. */
++      psDCPerContextInfo->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DISPLAYCLASS_DEVICE,
++                                                                                                                               psDCPerContextInfo,
++                                                                                                                              0,
++                                                                                                                              CloseDCDeviceCallBack,
++                                                                                                                              0);
++
++      
++      *phDeviceKM = (IMG_HANDLE)psDCPerContextInfo;
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVEnumDCFormatsKM
++ *
++ * Enumerates the pixel formats supported by the external display driver
++ * by forwarding to its pfnEnumDCFormats entry point.  Returns the
++ * external driver's error code, or PVRSRV_ERROR_INVALID_PARAMS on a
++ * NULL handle / output pointer.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCFormatsKM (IMG_HANDLE hDeviceKM,
++                                                                      IMG_UINT32 *pui32Count,
++                                                                      DISPLAY_FORMAT *psFormat)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++      if(!hDeviceKM || !pui32Count || !psFormat)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCFormatsKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Delegate to the external display driver. */
++      return psDCInfo->psFuncTable->pfnEnumDCFormats(psDCInfo->hExtDevice, pui32Count, psFormat);
++}
++
++
++
++/*
++ * PVRSRVEnumDCDimsKM
++ *
++ * Enumerates the display dimensions available for a given pixel format
++ * by forwarding to the external driver's pfnEnumDCDims.  psDim is
++ * deliberately not NULL-checked - presumably a NULL psDim queries the
++ * count only; confirm against the external driver contract.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVEnumDCDimsKM (IMG_HANDLE hDeviceKM,
++                                                               DISPLAY_FORMAT *psFormat,
++                                                               IMG_UINT32 *pui32Count,
++                                                               DISPLAY_DIMS *psDim)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++
++      if(!hDeviceKM || !pui32Count || !psFormat)      
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumDCDimsKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Delegate to the external display driver. */
++      return psDCInfo->psFuncTable->pfnEnumDCDims(psDCInfo->hExtDevice, psFormat, pui32Count, psDim);
++}
++
++
++/*
++ * PVRSRVGetDCSystemBufferKM
++ *
++ * Retrieves the external driver's system (primary) buffer handle and
++ * caches it, together with the buffer-address callback and memory
++ * context, inside psDCInfo->sSystemBuffer.  *phBuffer receives a pointer
++ * to that embedded PVRSRV_DC_BUFFER - no allocation is made, so the
++ * handle is valid for the lifetime of psDCInfo.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCSystemBufferKM (IMG_HANDLE hDeviceKM,
++                                                                              IMG_HANDLE *phBuffer)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      IMG_HANDLE hExtBuffer;
++
++      if(!hDeviceKM || !phBuffer)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      /* Ask the external driver for its system buffer handle. */
++      eError = psDCInfo->psFuncTable->pfnGetDCSystemBuffer(psDCInfo->hExtDevice, &hExtBuffer);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCSystemBufferKM: Failed to get valid buffer handle from external driver"));
++              return eError;          
++      }
++
++      /* Populate the embedded system-buffer descriptor. */
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++      psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer = hExtBuffer;
++
++      psDCInfo->sSystemBuffer.psDCInfo = psDCInfo;
++
++      /* Hand back a pointer to the embedded descriptor, not a copy. */
++      *phBuffer = (IMG_HANDLE)&(psDCInfo->sSystemBuffer);
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVGetDCInfoKM
++ *
++ * Queries display information from the external driver via pfnGetDCInfo
++ * and clamps the reported swap-chain buffer count to the services
++ * maximum (PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS) so later per-buffer arrays
++ * cannot overflow.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCInfoKM (IMG_HANDLE hDeviceKM,
++                                                              DISPLAY_INFO *psDisplayInfo)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_ERROR eError;
++
++      if(!hDeviceKM || !psDisplayInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCInfoKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      
++      eError = psDCInfo->psFuncTable->pfnGetDCInfo(psDCInfo->hExtDevice, psDisplayInfo);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      /* Clamp to the services-side fixed array size. */
++      if (psDisplayInfo->ui32MaxSwapChainBuffers > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++      {
++              psDisplayInfo->ui32MaxSwapChainBuffers = PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVDestroyDCSwapChainKM
++ *
++ * Tears down a display-class swap chain: if called directly (not from
++ * the resource manager) it first releases the resman entry, which
++ * re-enters this function via DestroyDCSwapChainCallBack.  Then it
++ * destroys the chain's command queue, asks the external driver to
++ * destroy its swap chain, frees every per-buffer sync object, and frees
++ * the chain itself.  Returns the external driver's destroy result.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVDestroyDCSwapChainKM(IMG_HANDLE hSwapChain, 
++                                                                              IMG_BOOL bResManCallback)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++      IMG_UINT32 i;
++
++      if(!hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyDCSwapChainKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++      psDCInfo = psSwapChain->psDCInfo;
++
++      /* Direct call: route through resman so the registered resource is
++         released; resman invokes this function again with
++         bResManCallback == IMG_TRUE to do the real teardown. */
++      if(!bResManCallback && psSwapChain->hResItem)
++      {
++              PVRSRV_ERROR eError;
++              
++              eError = ResManFreeResByPtr(psSwapChain->hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      /* NOTE(review): return value of PVRSRVDestroyCommandQueueKM is
++         ignored here. */
++      PVRSRVDestroyCommandQueueKM(psSwapChain->psQueue);
++
++      
++      eError = psDCInfo->psFuncTable->pfnDestroyDCSwapChain(psDCInfo->hExtDevice,
++                                                                                                                      psSwapChain->hExtSwapChain);
++
++      /* Free the per-buffer kernel sync objects. */
++      for(i=0; i<psSwapChain->ui32BufferCount; i++)
++      {
++              if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++              {
++                      PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psSwapChain, IMG_NULL);
++
++      return eError;
++}
++
++
++/* Resource-manager callback: forwards to PVRSRVDestroyDCSwapChainKM with
++   bResManCallback == IMG_TRUE so the real teardown runs. */
++static PVRSRV_ERROR DestroyDCSwapChainCallBack(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      IMG_HANDLE hSwapChain = (IMG_HANDLE)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVDestroyDCSwapChainKM(hSwapChain, IMG_TRUE);
++}
++
++
++
++/*
++ * PVRSRVCreateDCSwapChainKM
++ *
++ * Creates a display-class swap chain: allocates the chain structure and
++ * a 1KB command queue, allocates one kernel sync object per buffer,
++ * asks the external driver to create its swap chain (passing the array
++ * of sync-data pointers), and registers the chain with the resource
++ * manager for cleanup.  On success *phSwapChain receives the chain and
++ * *pui32SwapChainID the external driver's chain ID.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCreateDCSwapChainKM (IMG_HANDLE                            hDeviceKM,
++                                                                              IMG_UINT32                              ui32Flags,
++                                                                              DISPLAY_SURF_ATTRIBUTES *psDstSurfAttrib,
++                                                                              DISPLAY_SURF_ATTRIBUTES *psSrcSurfAttrib,
++                                                                              IMG_UINT32                              ui32BufferCount,
++                                                                              IMG_UINT32                              ui32OEMFlags,
++                                                                              IMG_HANDLE                              *phSwapChain,
++                                                                              IMG_UINT32                              *pui32SwapChainID)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain = IMG_NULL;
++      PVRSRV_SYNC_DATA *apsSyncData[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++      PVRSRV_QUEUE_INFO *psQueue = IMG_NULL;
++      PVRSRV_ERROR eError;
++      IMG_UINT32 i;
++
++
++      if(!hDeviceKM
++      || !psDstSurfAttrib
++      || !psSrcSurfAttrib
++      || !phSwapChain
++      || !pui32SwapChainID)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      /* Bound check against the fixed-size sync array above. */
++      if (ui32BufferCount > PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Too many buffers"));
++              return PVRSRV_ERROR_TOOMANYBUFFERS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_DC_SWAPCHAIN),
++                                       (IMG_VOID **)&psSwapChain, IMG_NULL) != PVRSRV_OK)     
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed psSwapChain alloc"));
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              /* NOTE(review): psSwapChain is still IMG_NULL here, but the
++                 ErrorExit cleanup loop below dereferences
++                 psSwapChain->asBuffer[i] unconditionally - NULL pointer
++                 dereference on this path (the loop runs whenever
++                 ui32BufferCount > 0). */
++              goto ErrorExit;
++      }
++      OSMemSet (psSwapChain, 0, sizeof(PVRSRV_DC_SWAPCHAIN));
++
++      
++      eError = PVRSRVCreateCommandQueueKM(1024, &psQueue);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create CmdQueue"));
++              goto ErrorExit;
++      }
++
++      
++      psSwapChain->psQueue = psQueue;
++
++      /* Allocate a kernel sync object per swap-chain buffer and record
++         the callbacks/contexts the buffer mapping code needs. */
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++                                                                              psDCInfo->hDevMemContext,
++                                                                              &psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to alloc syninfo for psSwapChain"));
++                      goto ErrorExit;
++              }
++
++              
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psDCInfo->psFuncTable->pfnGetBufferAddr;
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.hDevMemContext = psDCInfo->hDevMemContext;
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtDevice = psDCInfo->hExtDevice;
++
++              
++              psSwapChain->asBuffer[i].psDCInfo = psDCInfo;
++              psSwapChain->asBuffer[i].psSwapChain = psSwapChain;
++
++              /* Sync data pointer handed to the external driver below. */
++              apsSyncData[i] = (PVRSRV_SYNC_DATA*)psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++      }
++
++      psSwapChain->ui32BufferCount = ui32BufferCount;
++      psSwapChain->psDCInfo = psDCInfo;
++
++      
++      eError =  psDCInfo->psFuncTable->pfnCreateDCSwapChain(psDCInfo->hExtDevice,
++                                                                                                              ui32Flags,
++                                                                                                              psDstSurfAttrib,
++                                                                                                              psSrcSurfAttrib,
++                                                                                                              ui32BufferCount,
++                                                                                                              apsSyncData,
++                                                                                                              ui32OEMFlags,
++                                                                                                              &psSwapChain->hExtSwapChain,
++                                                                                                              pui32SwapChainID);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDCSwapChainKM: Failed to create 3rd party SwapChain"));
++              goto ErrorExit;
++      }
++      
++      
++      *phSwapChain = (IMG_HANDLE)psSwapChain;
++
++
++      /* Register for automatic teardown via DestroyDCSwapChainCallBack.
++         NOTE(review): ResManRegisterRes result is not checked. */
++      psSwapChain->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN,
++                                                                                                                              psSwapChain,
++                                                                                                                              0,
++                                                                                                                              DestroyDCSwapChainCallBack,
++                                                                                                                              0);
++
++      return eError;
++
++ErrorExit:
++
++      /* Undo partial construction: free any syncs, the queue, the chain. */
++      for(i=0; i<ui32BufferCount; i++)
++      {
++              if(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++              {
++                      PVRSRVFreeSyncInfoKM(psSwapChain->asBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              }
++      }
++
++      if(psQueue)
++      {
++              PVRSRVDestroyCommandQueueKM(psQueue);
++      }
++
++      if(psSwapChain)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psSwapChain, IMG_NULL);
++      }
++
++      return eError;
++}
++
++
++
++
++/* Forwards a destination-rectangle update for a swap chain to the
++   external display driver (pfnSetDCDstRect).  psRect is passed through
++   unvalidated. */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT              *psRect)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstRectKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCDstRect(psDCInfo->hExtDevice,
++                                                                                                      psSwapChain->hExtSwapChain,
++                                                                                                      psRect);
++}
++
++
++/* Forwards a source-rectangle update for a swap chain to the external
++   display driver (pfnSetDCSrcRect). */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcRectKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_RECT              *psRect)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcRectKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCSrcRect(psDCInfo->hExtDevice,
++                                                                                                      psSwapChain->hExtSwapChain,
++                                                                                                      psRect);
++}
++
++
++/* Forwards a destination colour-key value for a swap chain to the
++   external display driver (pfnSetDCDstColourKey). */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCDstColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                         IMG_HANDLE   hSwapChain,
++                                                                         IMG_UINT32   ui32CKColour)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCDstColourKeyKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCDstColourKey(psDCInfo->hExtDevice,
++                                                                                                              psSwapChain->hExtSwapChain,
++                                                                                                              ui32CKColour);
++}
++
++
++/* Forwards a source colour-key value for a swap chain to the external
++   display driver (pfnSetDCSrcColourKey). */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDCSrcColourKeyKM(IMG_HANDLE     hDeviceKM,
++                                                                         IMG_HANDLE   hSwapChain,
++                                                                         IMG_UINT32   ui32CKColour)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCSrcColourKeyKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      return psDCInfo->psFuncTable->pfnSetDCSrcColourKey(psDCInfo->hExtDevice,
++                                                                                                              psSwapChain->hExtSwapChain,
++                                                                                                              ui32CKColour);
++}
++
++
++/*
++ * PVRSRVGetDCBuffersKM
++ *
++ * Queries the external driver for the swap chain's buffer handles,
++ * caches each external handle in the chain's asBuffer[] array, and
++ * returns services-side buffer handles in phBuffer.  phBuffer must have
++ * room for up to PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS entries.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetDCBuffersKM(IMG_HANDLE  hDeviceKM,
++                                                                IMG_HANDLE    hSwapChain,
++                                                                IMG_UINT32    *pui32BufferCount,
++                                                                IMG_HANDLE    *phBuffer)
++{
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++      IMG_HANDLE ahExtBuffer[PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS];
++      PVRSRV_ERROR eError;
++      IMG_UINT32 i;
++
++      /* NOTE(review): pui32BufferCount is dereferenced below but is not
++         included in this NULL check. */
++      if(!hDeviceKM || !hSwapChain || !phBuffer)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetDCBuffersKM: Invalid parameters"));    
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      
++      eError = psDCInfo->psFuncTable->pfnGetDCBuffers(psDCInfo->hExtDevice,
++                                                                                                      psSwapChain->hExtSwapChain,
++                                                                                                      pui32BufferCount,
++                                                                                                      ahExtBuffer);
++
++      /* NOTE(review): eError is not checked before *pui32BufferCount and
++         ahExtBuffer[] are consumed below; on failure this may read
++         uninitialised data.  The error is only propagated at return. */
++      PVR_ASSERT(*pui32BufferCount <= PVRSRV_MAX_DC_SWAPCHAIN_BUFFERS);
++
++      /* Cache external handles and hand back services-side handles
++         (pointers into the chain's embedded buffer array). */
++
++
++      for(i=0; i<*pui32BufferCount; i++)
++      {
++              psSwapChain->asBuffer[i].sDeviceClassBuffer.hExtBuffer = ahExtBuffer[i];
++              phBuffer[i] = (IMG_HANDLE)&psSwapChain->asBuffer[i];
++      }
++
++      return eError;
++}
++
++
++/*
++ * PVRSRVSwapToDCBufferKM
++ *
++ * Queues a flip to the given swap-chain buffer: inserts a DC_FLIP_COMMAND
++ * (with the clip rectangles appended inline) into the chain's command
++ * queue, submits it, then busy-waits (polling PVRSRVProcessQueues with
++ * OSWaitus back-off) until the queues are processed or MAX_HW_TIME_US
++ * elapses.  The flip synchronises against this buffer's sync object and,
++ * if present, the previously flipped buffer's.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCBufferKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hBuffer,
++                                                                      IMG_UINT32      ui32SwapInterval,
++                                                                      IMG_HANDLE      hPrivateTag,
++                                                                      IMG_UINT32      ui32ClipRectCount,
++                                                                      IMG_RECT        *psClipRect)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_BUFFER *psBuffer;
++      PVRSRV_QUEUE_INFO *psQueue;
++      DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++      IMG_UINT32 i;
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0;
++      IMG_UINT32 ui32NumSrcSyncs = 1;
++      PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++      PVRSRV_COMMAND *psCommand;
++
++      if(!hDeviceKM || !hBuffer || !psClipRect)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++#if defined(SUPPORT_LMA)
++      eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif 
++      
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psBuffer = (PVRSRV_DC_BUFFER*)hBuffer;
++
++      /* NOTE(review): assumes hBuffer belongs to a swap chain; a system
++         buffer handle (from PVRSRVGetDCSystemBufferKM) has no psSwapChain
++         set here and would dereference NULL - confirm callers. */
++      psQueue = psBuffer->psSwapChain->psQueue;
++
++      /* Sync against this buffer, plus the last flipped buffer if any. */
++      apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++      if(psBuffer->psSwapChain->psLastFlipBuffer)
++      {
++              apsSrcSync[1] = psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++              ui32NumSrcSyncs++;
++      }
++
++      /* Reserve queue space for the flip command plus inline clip rects. */
++      eError = PVRSRVInsertCommandKM (psQueue,
++                                                                      &psCommand,
++                                                                      psDCInfo->ui32DeviceID,
++                                                                      DC_FLIP_COMMAND,
++                                                                      0,
++                                                                      IMG_NULL,
++                                                                      ui32NumSrcSyncs,
++                                                                      apsSrcSync,
++                                                                      sizeof(DISPLAYCLASS_FLIP_COMMAND) + (sizeof(IMG_RECT) * ui32ClipRectCount));
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to get space in queue"));
++              goto Exit;
++      }
++      
++      
++      psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++      
++      psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++      
++      psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
++
++      
++      psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
++
++      
++      psFlipCmd->hPrivateTag = hPrivateTag;
++
++      
++      psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
++      /* Clip rects live immediately after the command struct in pvData. */
++      psFlipCmd->psClipRect = (IMG_RECT*)((IMG_UINT8*)psFlipCmd + sizeof(DISPLAYCLASS_FLIP_COMMAND));
++      
++      for(i=0; i<ui32ClipRectCount; i++)
++      {
++              psFlipCmd->psClipRect[i] = psClipRect[i];
++      }
++
++      
++      psFlipCmd->ui32SwapInterval = ui32SwapInterval;
++
++              
++      eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to submit command"));
++              goto Exit;
++      }
++      
++      /* Poll the queues until processing succeeds or MAX_HW_TIME_US
++         elapses; the timer only starts after the first blocked attempt. */
++
++
++
++
++
++
++      do
++      {
++              if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++              {
++                      goto ProcessedQueues;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);     
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCBufferKM: Failed to process queues"));
++
++      eError = PVRSRV_ERROR_GENERIC;
++      goto Exit;
++
++ProcessedQueues:
++      /* Remember this buffer so the next flip can sync against it. */
++      psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;
++
++Exit:
++#if defined(SUPPORT_LMA)
++      PVRSRVPowerUnlock(KERNEL_ID);
++#endif        
++      return eError;
++}
++
++
++/*
++ * PVRSRVSwapToDCSystemKM
++ *
++ * Queues a flip back to the display's system (primary) buffer: inserts a
++ * DC_FLIP_COMMAND with no clip rectangles and a fixed swap interval of
++ * 1 into the swap chain's queue, submits it, then busy-waits for queue
++ * processing exactly as PVRSRVSwapToDCBufferKM does.  On success the
++ * system buffer becomes the chain's psLastFlipBuffer.
++ */
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE        hDeviceKM,
++                                                                      IMG_HANDLE      hSwapChain)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_QUEUE_INFO *psQueue;
++      PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
++      PVRSRV_DC_SWAPCHAIN *psSwapChain;
++      DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0;
++      IMG_UINT32 ui32NumSrcSyncs = 1;
++      PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
++      PVRSRV_COMMAND *psCommand;
++
++      if(!hDeviceKM || !hSwapChain)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++#if defined(SUPPORT_LMA)
++      eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif 
++      
++      psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
++      psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
++
++      
++      psQueue = psSwapChain->psQueue;
++
++      /* Sync against the system buffer, plus the last flipped buffer. */
++      apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
++      if(psSwapChain->psLastFlipBuffer)
++      {
++              apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
++              ui32NumSrcSyncs++;
++      }
++
++      
++      eError = PVRSRVInsertCommandKM (psQueue,
++                                                                      &psCommand,
++                                                                      psDCInfo->ui32DeviceID,
++                                                                      DC_FLIP_COMMAND,
++                                                                      0,
++                                                                      IMG_NULL,
++                                                                      ui32NumSrcSyncs,
++                                                                      apsSrcSync,
++                                                                      sizeof(DISPLAYCLASS_FLIP_COMMAND));
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
++              goto Exit;
++      }
++
++      
++      psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
++
++      
++      psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
++
++      
++      psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
++
++      /* Target is the system buffer, not a swap-chain buffer. */
++      psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
++
++      
++      psFlipCmd->hPrivateTag = IMG_NULL;
++
++      
++      psFlipCmd->ui32ClipRectCount = 0;
++
++      psFlipCmd->ui32SwapInterval = 1;
++
++      
++      eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
++              goto Exit;
++      }
++
++      /* Poll the queues until processing succeeds or MAX_HW_TIME_US
++         elapses; same back-off scheme as PVRSRVSwapToDCBufferKM. */
++
++
++
++
++
++
++      do
++      {
++              if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++              {
++                      goto ProcessedQueues;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to process queues"));
++      eError = PVRSRV_ERROR_GENERIC;
++      goto Exit;
++
++ProcessedQueues:
++      /* The system buffer is now the most recently flipped buffer. */
++      psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
++
++      eError = PVRSRV_OK;
++      
++Exit:
++#if defined(SUPPORT_LMA)
++      PVRSRVPowerUnlock(KERNEL_ID);
++#endif        
++      return eError;
++}
++
++
++/*
++ * PVRSRVRegisterSystemISRHandler
++ *
++ * Installs pfnISRHandler (with its private data) as the device ISR on
++ * the device node whose index matches ui32DeviceID.
++ * ui32ISRSourceMask is accepted but unused.
++ */
++PVRSRV_ERROR PVRSRVRegisterSystemISRHandler (PFN_ISR_HANDLER  pfnISRHandler,
++                                                                                       IMG_VOID                       *pvISRHandlerData,
++                                                                                       IMG_UINT32                     ui32ISRSourceMask,
++                                                                                       IMG_UINT32                     ui32DeviceID)
++{
++      SYS_DATA                        *psSysData;
++      PVRSRV_DEVICE_NODE      *psDevNode;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ISRSourceMask);
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterSystemISRHandler: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      /* Find the device node with the matching index. */
++      psDevNode = psSysData->psDeviceNodeList;
++      while(psDevNode)
++      {
++              if(psDevNode->sDevId.ui32DeviceIndex == ui32DeviceID)
++              {
++                      break;
++              }
++              psDevNode = psDevNode->psNext;
++      }
++
++      /* NOTE(review): if no node matched, psDevNode is NULL here and the
++         stores below dereference it - NULL pointer dereference instead of
++         an error return. */
++      psDevNode->pvISRData = (IMG_VOID*) pvISRHandlerData;
++
++      
++      psDevNode->pfnDeviceISR = pfnISRHandler;
++
++      return PVRSRV_OK;
++}
++
++
++/*
++ * PVRSRVSetDCState
++ *
++ * Broadcasts a state change to every opened display-class device by
++ * calling its optional pfnSetDCState hook (skipped when the hook or the
++ * external device handle is absent).  Failure to acquire SysData is
++ * logged and silently ignored - the function returns void.
++ */
++IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State)
++{
++      PVRSRV_DISPLAYCLASS_INFO        *psDCInfo;
++      PVRSRV_DEVICE_NODE                      *psDeviceNode;
++      SYS_DATA                                        *psSysData;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVSetDCState: Failed to get SysData"));
++              return;
++      }
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
++              {
++                      psDCInfo = (PVRSRV_DISPLAYCLASS_INFO *)psDeviceNode->pvDevice;
++                      if (psDCInfo->psFuncTable->pfnSetDCState && psDCInfo->hExtDevice)
++                      {
++                              psDCInfo->psFuncTable->pfnSetDCState(psDCInfo->hExtDevice, ui32State);
++                      }
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable)
++{
++      psJTable->ui32TableSize = sizeof(PVRSRV_DC_DISP2SRV_KMJTABLE);
++      psJTable->pfnPVRSRVRegisterDCDevice = PVRSRVRegisterDCDeviceKM;
++      psJTable->pfnPVRSRVRemoveDCDevice = PVRSRVRemoveDCDeviceKM;
++      psJTable->pfnPVRSRVOEMFunction = SysOEMFunction;
++      psJTable->pfnPVRSRVRegisterCmdProcList = PVRSRVRegisterCmdProcListKM;
++      psJTable->pfnPVRSRVRemoveCmdProcList = PVRSRVRemoveCmdProcListKM;
++      psJTable->pfnPVRSRVCmdComplete = PVRSRVCommandCompleteKM;
++      psJTable->pfnPVRSRVRegisterSystemISRHandler = PVRSRVRegisterSystemISRHandler;
++      psJTable->pfnPVRSRVRegisterPowerDevice = PVRSRVRegisterPowerDevice;
++
++      return IMG_TRUE;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVCloseBCDeviceKM (IMG_HANDLE        hDeviceKM,
++                                                                      IMG_BOOL        bResManCallback)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++
++      PVR_UNREFERENCED_PARAMETER(bResManCallback);
++
++      psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)hDeviceKM;
++
++      
++      eError = ResManFreeResByPtr(psBCPerContextInfo->hResItem, IMG_TRUE);
++                      
++                              return eError;
++                      }
++
++
++static PVRSRV_ERROR CloseBCDeviceCallBack(IMG_UINT32  ui32ProcessID,
++                                                                                IMG_PVOID             pvParam,
++                                                                                IMG_UINT32    ui32Param)
++{
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *psBCPerContextInfo;
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      psBCPerContextInfo = (PVRSRV_BUFFERCLASS_PERCONTEXT_INFO *)pvParam;
++      psBCInfo = psBCPerContextInfo->psBCInfo;
++
++      psBCInfo->ui32RefCount--;
++      if(psBCInfo->ui32RefCount == 0)
++      {
++              IMG_UINT32 i;
++
++              
++              psBCInfo->psFuncTable->pfnCloseBCDevice(psBCInfo->hExtDevice);
++
++              
++              for(i=0; i<psBCInfo->ui32BufferCount; i++)
++              {
++                      if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++                      {
++                              PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++                      }
++              }
++
++              
++              if(psBCInfo->psBuffer)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psBuffer, IMG_NULL);
++              }
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCPerContextInfo, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVOpenBCDeviceKM (IMG_UINT32 ui32DeviceID,
++                                                                 IMG_HANDLE hDevCookie,
++                                                                 IMG_HANDLE *phDeviceKM)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++      PVRSRV_BUFFERCLASS_PERCONTEXT_INFO      *psBCPerContextInfo;
++      PVRSRV_DEVICE_NODE              *psDeviceNode;
++      SYS_DATA                                *psSysData;
++      IMG_UINT32                              i;
++      PVRSRV_ERROR                    eError = PVRSRV_OK;
++
++      if(!phDeviceKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Invalid params"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get SysData"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              if ((psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_BUFFER) &&
++                      (psDeviceNode->sDevId.ui32DeviceIndex == ui32DeviceID))
++              {
++                      
++
++
++                      psBCInfo = (PVRSRV_BUFFERCLASS_INFO*)psDeviceNode->pvDevice;
++                      goto FoundDevice;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: No devnode matching index %d", ui32DeviceID));
++
++      return PVRSRV_ERROR_GENERIC;
++
++FoundDevice:
++
++      
++
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(*psBCPerContextInfo),
++                                (IMG_VOID **)&psBCPerContextInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed psBCPerContextInfo alloc"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      OSMemSet(psBCPerContextInfo, 0, sizeof(*psBCPerContextInfo));
++
++      if(psBCInfo->ui32RefCount++ == 0)
++      {
++              BUFFER_INFO sBufferInfo;
++
++              psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++              PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++              
++              psBCInfo->hDevMemContext = (IMG_HANDLE)psDeviceNode->sDevMemoryInfo.pBMKernelContext;
++
++              
++              eError = psBCInfo->psFuncTable->pfnOpenBCDevice(&psBCInfo->hExtDevice);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to open external BC device"));
++                      return eError;
++              }
++
++              
++              eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, &sBufferInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM : Failed to get BC Info"));
++                      return eError;
++              }
++
++              
++              psBCInfo->ui32BufferCount = sBufferInfo.ui32BufferCount;
++              
++
++              
++              eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                        sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount,
++                                                        (IMG_VOID **)&psBCInfo->psBuffer, 
++                                                        IMG_NULL);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to allocate BC buffers"));
++                      return eError;
++              }
++              OSMemSet (psBCInfo->psBuffer,
++                                      0,
++                                      sizeof(PVRSRV_BC_BUFFER) * sBufferInfo.ui32BufferCount);
++      
++              for(i=0; i<psBCInfo->ui32BufferCount; i++)
++              {
++                      
++                      eError = PVRSRVAllocSyncInfoKM(IMG_NULL,
++                                                                              psBCInfo->hDevMemContext,
++                                                                              &psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++                      if(eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed sync info alloc"));
++                              goto ErrorExit;
++                      }
++                      
++                      
++
++
++                      eError = psBCInfo->psFuncTable->pfnGetBCBuffer(psBCInfo->hExtDevice,
++                                                                                                                      i,
++                                                                                                                      psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo->psSyncData,
++                                                                                                                      &psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtBuffer);
++                      if(eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOpenBCDeviceKM: Failed to get BC buffers"));
++                              goto ErrorExit;
++                      }
++
++                      
++                      psBCInfo->psBuffer[i].sDeviceClassBuffer.pfnGetBufferAddr = psBCInfo->psFuncTable->pfnGetBufferAddr;
++                      psBCInfo->psBuffer[i].sDeviceClassBuffer.hDevMemContext = psBCInfo->hDevMemContext;
++                      psBCInfo->psBuffer[i].sDeviceClassBuffer.hExtDevice = psBCInfo->hExtDevice;
++              }
++      }
++
++      psBCPerContextInfo->psBCInfo = psBCInfo;
++      psBCPerContextInfo->hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_BUFFERCLASS_DEVICE,
++                                                                                                                               psBCPerContextInfo,
++                                                                                                                              0,
++                                                                                                                              CloseBCDeviceCallBack,
++                                                                                                                              0);
++      
++      
++      *phDeviceKM = (IMG_HANDLE)psBCPerContextInfo;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      
++      for(i=0; i<psBCInfo->ui32BufferCount; i++)
++      {
++              if(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo)
++              {
++                      PVRSRVFreeSyncInfoKM(psBCInfo->psBuffer[i].sDeviceClassBuffer.psKernelSyncInfo);
++              }
++      }
++
++      
++      if(psBCInfo->psBuffer)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psBCInfo->psBuffer, IMG_NULL);
++      }
++
++      return eError;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCInfoKM (IMG_HANDLE hDeviceKM,
++                                                              BUFFER_INFO *psBufferInfo)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++      PVRSRV_ERROR                    eError = PVRSRV_OK;
++
++      if(!hDeviceKM || !psBufferInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM: Invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++      eError = psBCInfo->psFuncTable->pfnGetBCInfo(psBCInfo->hExtDevice, psBufferInfo);
++
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCInfoKM : Failed to get BC Info"));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVGetBCBufferKM (IMG_HANDLE hDeviceKM,
++                                                                IMG_UINT32 ui32BufferIndex,
++                                                                IMG_HANDLE *phBuffer)
++{
++      PVRSRV_BUFFERCLASS_INFO *psBCInfo;
++
++      if(!hDeviceKM || !phBuffer)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Invalid parameters"));     
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      psBCInfo = BCDeviceHandleToBCInfo(hDeviceKM);
++
++      if(ui32BufferIndex < psBCInfo->ui32BufferCount)
++      {
++              *phBuffer = (IMG_HANDLE)&psBCInfo->psBuffer[ui32BufferIndex];
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetBCBufferKM: Buffer index %d out of range (%d)", ui32BufferIndex,psBCInfo->ui32BufferCount));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable)
++{
++      psJTable->ui32TableSize = sizeof(PVRSRV_BC_BUFFER2SRV_KMJTABLE);
++
++      psJTable->pfnPVRSRVRegisterBCDevice = PVRSRVRegisterBCDeviceKM;
++      psJTable->pfnPVRSRVRemoveBCDevice = PVRSRVRemoveBCDeviceKM;
++
++      return IMG_TRUE;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/devicemem.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/devicemem.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/devicemem.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1055 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "pdump_km.h"
++#include "sgxmmu.h"
++#include "sgxapi_km.h"
++
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE         hDevCookie,
++                                                                      IMG_HANDLE              hDevMemHeap,
++                                                                      IMG_UINT32              ui32Flags,
++                                                                      IMG_UINT32              ui32Size,
++                                                                      IMG_UINT32              ui32Alignment,
++                                                                      PVRSRV_KERNEL_MEM_INFO  **ppsMemInfo);
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetDeviceMemHeapsKM(IMG_HANDLE hDevCookie,
++                                                                                                               PVRSRV_HEAP_INFO *psHeapInfo
++                                                                                                               )
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_UINT32 ui32HeapCount;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++      IMG_UINT32 i;
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++      PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++      
++      ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++      psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++      
++      PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++      
++      for(i=0; i<ui32HeapCount; i++)
++      {
++              
++              psHeapInfo[i].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++              psHeapInfo[i].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++              psHeapInfo[i].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++              psHeapInfo[i].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++              psHeapInfo[i].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++      }
++
++      for(; i < PVRSRV_MAX_CLIENT_HEAPS; i++)
++      {
++              OSMemSet(psHeapInfo + i, 0, sizeof(*psHeapInfo));
++              psHeapInfo[i].ui32HeapID = (IMG_UINT32)SGX_UNDEFINED_HEAP_ID;
++      }
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                               IMG_HANDLE *phDevMemContext,
++                                                                                                               IMG_UINT32 *pui32ClientHeapCount,
++                                                                                                               PVRSRV_HEAP_INFO *psHeapInfo,
++                                                                                                               IMG_BOOL *pbCreated
++#if defined(PVR_SECURE_HANDLES)
++                                                                                                               , IMG_BOOL *pbShared
++#endif
++                                                                                                               )
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_UINT32 ui32HeapCount, ui32ClientHeapCount=0;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++      IMG_HANDLE hDevMemContext;
++      IMG_HANDLE hDevMemHeap;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++      IMG_UINT32 i;
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevCookie;
++      PVR_ASSERT(psDeviceNode != IMG_NULL);
++
++      
++
++      ui32HeapCount = psDeviceNode->sDevMemoryInfo.ui32HeapCount;
++      psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++
++      
++
++      PVR_ASSERT(ui32HeapCount <= PVRSRV_MAX_CLIENT_HEAPS);
++
++      
++
++      hDevMemContext = BM_CreateContext(psDeviceNode,
++                                                                        &sPDDevPAddr,
++                                                                        IMG_FALSE,
++                                                                        pbCreated);
++      if (hDevMemContext == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateDeviceMemContextKM: Failed BM_CreateContext"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      for(i=0; i<ui32HeapCount; i++)
++      {
++              switch(psDeviceMemoryHeap[i].DevMemHeapType)
++              {
++                      case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                      {
++                              
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++                              psHeapInfo[ui32ClientHeapCount].hDevMemHeap = psDeviceMemoryHeap[i].hDevMemHeap;
++                              psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++                              psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++                              pbShared[ui32ClientHeapCount] = IMG_TRUE;
++#endif
++                              ui32ClientHeapCount++;
++                              break;
++                      }
++                      case DEVICE_MEMORY_HEAP_PERCONTEXT:
++                      {
++                              hDevMemHeap = BM_CreateHeap(hDevMemContext,
++                                                                                      &psDeviceMemoryHeap[i]);
++
++                              
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapID = psDeviceMemoryHeap[i].ui32HeapID;
++                              psHeapInfo[ui32ClientHeapCount].hDevMemHeap = hDevMemHeap;
++                              psHeapInfo[ui32ClientHeapCount].sDevVAddrBase = psDeviceMemoryHeap[i].sDevVAddrBase;
++                              psHeapInfo[ui32ClientHeapCount].ui32HeapByteSize = psDeviceMemoryHeap[i].ui32HeapSize;
++                              psHeapInfo[ui32ClientHeapCount].ui32Attribs = psDeviceMemoryHeap[i].ui32Attribs;
++#if defined(PVR_SECURE_HANDLES)
++                              pbShared[ui32ClientHeapCount] = IMG_FALSE;
++#endif
++
++                              ui32ClientHeapCount++;
++                              break;
++                      }
++              }
++      }
++
++      
++      *pui32ClientHeapCount = ui32ClientHeapCount;
++      *phDevMemContext = hDevMemContext;
++      
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyDeviceMemContextKM(IMG_HANDLE hDevCookie,
++                                                                                                                IMG_HANDLE hDevMemContext,
++                                                                                                                IMG_BOOL *pbDestroyed)
++{
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      return BM_DestroyContext(hDevMemContext, IMG_FALSE, IMG_FALSE, pbDestroyed);
++}
++
++
++
++
++
++
++static PVRSRV_ERROR AllocDeviceMem(IMG_HANDLE         hDevCookie,
++                                                                      IMG_HANDLE              hDevMemHeap,
++                                                                      IMG_UINT32              ui32Flags,
++                                                                      IMG_UINT32              ui32Size,
++                                                                      IMG_UINT32              ui32Alignment,
++                                                                      PVRSRV_KERNEL_MEM_INFO  **ppsMemInfo)
++{
++      PVRSRV_KERNEL_MEM_INFO  *psMemInfo;
++      BM_HANDLE               hBuffer;
++      
++      PVRSRV_MEMBLK   *psMemBlock;
++      IMG_BOOL                bBMError;
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      *ppsMemInfo = IMG_NULL;
++
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                       (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: Failed to alloc memory for block"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      
++      psMemInfo->ui32Flags = ui32Flags | PVRSRV_MEM_RAM_BACKED_ALLOCATION;
++
++      bBMError = BM_Alloc (hDevMemHeap,
++                                                      IMG_NULL,
++                                                      ui32Size,
++                                                      &psMemInfo->ui32Flags,
++                                                      ui32Alignment,
++                                                      &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"AllocDeviceMem: BM_Alloc Failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++      
++
++      psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++
++      psMemInfo->ui32AllocSize = ui32Size;
++
++      
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++      
++      *ppsMemInfo = psMemInfo;
++
++
++      
++      return (PVRSRV_OK);
++}
++
++
++static PVRSRV_ERROR FreeDeviceMem(PVRSRV_KERNEL_MEM_INFO *psMemInfo)
++{
++      BM_HANDLE               hBuffer;
++
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      hBuffer = psMemInfo->sMemBlk.hBuffer;
++
++      
++      BM_Free(hBuffer, psMemInfo->ui32Flags);
++
++      if(psMemInfo->pvSysBackupBuffer)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo->pvSysBackupBuffer, IMG_NULL);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++
++      return(PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocSyncInfoKM(IMG_HANDLE                                    hDevCookie,
++                                                                                              IMG_HANDLE                                      hDevMemContext,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO         **ppsKernelSyncInfo)
++{
++      IMG_HANDLE hSyncDevMemHeap;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      BM_CONTEXT *pBMContext;
++      PVRSRV_ERROR eError;
++      PVRSRV_KERNEL_SYNC_INFO *psKernelSyncInfo;
++      PVRSRV_SYNC_DATA *psSyncData;
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_SYNC_INFO ),
++                                                (IMG_VOID **)&psKernelSyncInfo, IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      pBMContext = (BM_CONTEXT*)hDevMemContext;
++      psDevMemoryInfo = &pBMContext->psDeviceNode->sDevMemoryInfo;
++
++      
++      hSyncDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32SyncHeapID].hDevMemHeap;
++
++      eError = AllocDeviceMem(hDevCookie,
++                                                      hSyncDevMemHeap,
++                                                      0,
++                                                      sizeof(PVRSRV_SYNC_DATA),
++                                                      sizeof(IMG_UINT32),
++                                                      &psKernelSyncInfo->psSyncDataMemInfoKM);
++
++      if (eError != PVRSRV_OK)
++      {
++
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSyncInfoKM: Failed to alloc memory"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psKernelSyncInfo, IMG_NULL);
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psKernelSyncInfo->psSyncData = psKernelSyncInfo->psSyncDataMemInfoKM->pvLinAddrKM;
++      psSyncData = psKernelSyncInfo->psSyncData;
++
++      psSyncData->ui32WriteOpsPending = 0;
++      psSyncData->ui32WriteOpsComplete = 0;
++      psSyncData->ui32ReadOpsPending = 0;
++      psSyncData->ui32ReadOpsComplete = 0;
++      psSyncData->ui32LastOpDumpVal = 0;
++
++      psKernelSyncInfo->sWriteOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32WriteOpsComplete);
++      psKernelSyncInfo->sReadOpsCompleteDevVAddr.uiAddr = psKernelSyncInfo->psSyncDataMemInfoKM->sDevVAddr.uiAddr + offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete);
++
++      
++      psKernelSyncInfo->psSyncDataMemInfoKM->psKernelSyncInfo = IMG_NULL;
++
++      
++      *ppsKernelSyncInfo = psKernelSyncInfo;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeSyncInfoKM(PVRSRV_KERNEL_SYNC_INFO        *psKernelSyncInfo)
++{
++      FreeDeviceMem(psKernelSyncInfo->psSyncDataMemInfoKM);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psKernelSyncInfo, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVFreeDeviceMemKM(IMG_HANDLE                            hDevCookie,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  *psMemInfo, 
++                                                                                              IMG_BOOL                                bResManCallback)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      if (psMemInfo->psKernelSyncInfo)
++      {
++              PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++      }
++
++      if (eError == PVRSRV_OK)
++      {
++              eError = FreeDeviceMem(psMemInfo);
++      }
++
++      return eError;
++}
++
++
++static PVRSRV_ERROR FreeDeviceMemCallBack(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVFreeDeviceMemKM(IMG_NULL, psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAllocDeviceMemKM(IMG_HANDLE                   hDevCookie,
++                                                                                               IMG_HANDLE                     hDevMemHeap,
++                                                                                               IMG_UINT32                     ui32Flags,
++                                                                                               IMG_UINT32                     ui32Size,
++                                                                                               IMG_UINT32                     ui32Alignment,
++                                                                                               PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
++{
++      PVRSRV_KERNEL_MEM_INFO  *psMemInfo;
++      PVRSRV_ERROR                    eError;
++      BM_HEAP                                 *psBMHeap;
++      IMG_HANDLE                              hDevMemContext;
++
++      if (!hDevMemHeap)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      eError = AllocDeviceMem(hDevCookie,
++                                                      hDevMemHeap,
++                                                      ui32Flags,
++                                                      ui32Size,
++                                                      ui32Alignment,
++                                                      &psMemInfo);
++
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
++      {
++              psMemInfo->psKernelSyncInfo = IMG_NULL;
++      }
++      else
++      {
++              
++
++
++              psBMHeap = (BM_HEAP*)hDevMemHeap;
++              hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++              eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++                                                                         hDevMemContext,
++                                                                         &psMemInfo->psKernelSyncInfo);
++              if(eError != PVRSRV_OK)
++              {
++                      goto free_mainalloc;
++              }
++      }
++
++      
++      *ppsMemInfo = psMemInfo;
++
++      if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
++      {
++              psMemInfo->sMemBlk.hResItem = IMG_NULL;
++      }
++      else
++      {
++              
++              psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_ALLOCATION,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              FreeDeviceMemCallBack,
++                                                                                                                              0);
++              if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
++              {
++                      
++                      eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++                      goto free_mainalloc;
++              }
++      }               
++
++      
++      return (PVRSRV_OK);
++
++free_mainalloc:
++      FreeDeviceMem(psMemInfo);
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDissociateDeviceMemKM(IMG_HANDLE                              hDevCookie,
++                                                                                                        PVRSRV_KERNEL_MEM_INFO        *psMemInfo)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie);
++
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(psMemInfo->sMemBlk.hResItem)
++      {
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      psMemInfo->sMemBlk.hResItem = ResManRegisterRes(RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION | RESMAN_TYPE_USE_PROCESSID,
++                                                psMemInfo,
++                                                0,
++                                                FreeDeviceMemCallBack,
++                                                RESMAN_KERNEL_PROCESSID);
++
++      if (psMemInfo->sMemBlk.hResItem == IMG_NULL)                                      
++      {
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++      
++      return eError;
++}
++
++
++IMG_EXPORT                    
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetFreeDeviceMemKM(IMG_UINT32 ui32Flags,
++                                                                                                 IMG_UINT32 *pui32Total,
++                                                                                                 IMG_UINT32 *pui32Free,
++                                                                                                 IMG_UINT32 *pui32LargestBlock)
++{
++      
++
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      PVR_UNREFERENCED_PARAMETER(pui32Total);
++      PVR_UNREFERENCED_PARAMETER(pui32Free);
++      PVR_UNREFERENCED_PARAMETER(pui32LargestBlock);
++
++      return PVRSRV_OK;
++}
++
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnwrapExtMemoryKM (PVRSRV_KERNEL_MEM_INFO     *psMemInfo,
++                                                                                                      IMG_BOOL                                bResManCallback)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      if (psMemInfo->psKernelSyncInfo)
++      {
++              PVRSRVFreeSyncInfoKM(psMemInfo->psKernelSyncInfo);
++      }
++
++      if (eError == PVRSRV_OK)
++      {
++              eError = FreeDeviceMem(psMemInfo);
++      }
++
++      return eError;
++}
++
++
++static PVRSRV_ERROR UnwrapExtMemoryCallBack(IMG_UINT32 ui32ProcessID, 
++                                                                                      IMG_PVOID pvParam, 
++                                                                                      IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVUnwrapExtMemoryKM(psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE                            hDevCookie,
++                                                                                              IMG_UINT32                              ui32ByteSize,
++                                                                                              IMG_UINT32                              ui32PageOffset,
++                                                                                              IMG_BOOL                                bPhysContig,
++                                                                                              IMG_SYS_PHYADDR                 *psSysAddr,
++                                                                                              PVRSRV_KERNEL_MEM_INFO  **ppsMemInfo)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      IMG_HANDLE hDevMemHeap, hDevMemContext;
++      PVRSRV_DEVICE_NODE* psDeviceNode;
++      BM_HANDLE                       hBuffer;
++      PVRSRV_MEMBLK           *psMemBlock;
++      IMG_BOOL                        bBMError;
++      BM_HEAP                         *psBMHeap;
++      PVRSRV_ERROR            eError;
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
++      PVR_ASSERT(psDeviceNode != IMG_NULL)
++
++      
++      psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++      hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32MappingHeapID].hDevMemHeap;
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                      (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      bBMError = BM_Wrap(hDevMemHeap,
++                                         ui32ByteSize,
++                                         ui32PageOffset,
++                                         bPhysContig,
++                                         psSysAddr,
++                                         IMG_NULL,
++                                         &psMemInfo->ui32Flags,
++                                         &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++
++      
++      psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++      psMemInfo->ui32AllocSize = ui32ByteSize;
++
++      
++
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++              
++
++
++      psBMHeap = (BM_HEAP*)hDevMemHeap;
++      hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
++      eError = PVRSRVAllocSyncInfoKM(hDevCookie,
++                                                                      hDevMemContext,
++                                                                      &psMemInfo->psKernelSyncInfo);
++      if(eError != PVRSRV_OK)
++      {
++              goto free_mainwrap;
++      }
++
++
++      
++      psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_WRAP,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              UnwrapExtMemoryCallBack,
++                                                                                                                              0);
++
++      
++      *ppsMemInfo = psMemInfo;
++
++      return PVRSRV_OK;
++
++free_mainwrap:
++      FreeDeviceMem(psMemInfo);
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceMemoryKM (PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                              IMG_BOOL bResManCallback)
++{
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              PVRSRV_ERROR eError;
++              
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      return FreeDeviceMem(psMemInfo);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceMemoryCallBack(IMG_UINT32 ui32ProcessID, 
++                                                                                      IMG_PVOID pvParam, 
++                                                                                      IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVUnmapDeviceMemoryKM(psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
++                                                                                                IMG_HANDLE hDstDevMemHeap,
++                                                                                                PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
++{
++      PVRSRV_ERROR            eError;
++      IMG_UINT32                      i;
++      IMG_UINT32                      ui32PageCount, ui32PageOffset;
++      IMG_UINT32                      ui32HostPageSize = HOST_PAGESIZE();
++      IMG_SYS_PHYADDR         *psSysPAddr = IMG_NULL;
++      IMG_DEV_PHYADDR         sDevPAddr;
++      BM_BUF                          *psBuf;
++      IMG_DEV_VIRTADDR        sDevVAddr;
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
++      BM_HANDLE                       hBuffer;
++      PVRSRV_MEMBLK           *psMemBlock;
++      IMG_BOOL                        bBMError;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++
++      
++      if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      *ppsDstMemInfo = IMG_NULL;
++      
++      ui32PageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
++      ui32PageCount = HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + ui32PageOffset) / ui32HostPageSize;
++
++      
++
++
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      ui32PageCount*sizeof(IMG_SYS_PHYADDR),
++                                      (IMG_VOID **)&psSysPAddr, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      psBuf = psSrcMemInfo->sMemBlk.hBuffer;
++
++      
++      psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;
++
++      
++      sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - ui32PageOffset;
++      for(i=0; i<ui32PageCount; i++)
++      {
++              eError = BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to retrieve page list from device"));
++                      goto ErrorExit;
++              }
++
++              
++              psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);
++
++              
++              sDevVAddr.uiAddr += ui32HostPageSize;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                      (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
++              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto ErrorExit;
++      }
++
++      OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      bBMError = BM_Wrap(psBuf->pMapping->pBMHeap,
++                                         psSrcMemInfo->ui32AllocSize,
++                                         ui32PageOffset,
++                                         IMG_FALSE,
++                                         psSysPAddr,
++                                         IMG_NULL,
++                                         &psSrcMemInfo->ui32Flags,
++                                         &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
++              eError = PVRSRV_ERROR_BAD_MAPPING;
++              goto ErrorExit;         
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++      
++      psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;
++
++      
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++      psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
++      psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;
++
++      
++
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++      
++      psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICEMEM_MAPPING,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              UnmapDeviceMemoryCallBack,
++                                                                                                                              0);
++
++      *ppsDstMemInfo = psMemInfo;
++
++      return PVRSRV_OK;
++
++      
++      
++ErrorExit:
++
++      if(psSysPAddr)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psSysPAddr, IMG_NULL);
++      }
++
++      if(psMemInfo)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVUnmapDeviceClassMemoryKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                                                      IMG_BOOL bResManCallback)
++{
++      if (!psMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(!bResManCallback && psMemInfo->sMemBlk.hResItem)
++      {
++              PVRSRV_ERROR eError;
++              
++              eError = ResManFreeResByPtr(psMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      return FreeDeviceMem(psMemInfo);
++}
++
++
++static PVRSRV_ERROR UnmapDeviceClassMemoryCallBack(IMG_UINT32 ui32ProcessID, 
++                                                                                      IMG_PVOID pvParam, 
++                                                                                      IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVUnmapDeviceClassMemoryKM(psMemInfo, IMG_TRUE);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(IMG_HANDLE hDeviceClassBuffer,
++                                                                                                         PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
++                                                                                                         IMG_HANDLE *phOSMapInfo)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_KERNEL_MEM_INFO *psMemInfo;
++      PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
++      IMG_SYS_PHYADDR *psSysPAddr;
++      IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
++      IMG_BOOL bPhysContig;
++      BM_CONTEXT *psBMContext;
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      IMG_HANDLE hDevMemHeap;
++      IMG_UINT32 ui32ByteSize;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32PageSize = HOST_PAGESIZE();
++      BM_HANDLE               hBuffer;
++      PVRSRV_MEMBLK   *psMemBlock;
++      IMG_BOOL                bBMError;
++
++      if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
++              return PVRSRV_ERROR_INVALID_PARAMS;             
++      }
++      
++      psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;
++      
++      
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++      eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
++                                                                                                 psDeviceClassBuffer->hExtBuffer,
++                                                                                                 &psSysPAddr,
++                                                                                                 &ui32ByteSize,
++                                                                                                 &pvCPUVAddr,
++                                                                                                 phOSMapInfo,
++                                                                                                 &bPhysContig);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));  
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
++      psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
++      hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[psDevMemoryInfo->ui32MappingHeapID].hDevMemHeap;
++
++      
++      ui32Offset = ((IMG_UINT32)pvCPUVAddr) & (ui32PageSize - 1);
++      pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINT8 *)pvCPUVAddr - ui32Offset);
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_KERNEL_MEM_INFO ),
++                                      (IMG_VOID **)&psMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
++
++      psMemBlock = &(psMemInfo->sMemBlk);
++
++      bBMError = BM_Wrap(hDevMemHeap,
++                                         ui32ByteSize,
++                                         ui32Offset,
++                                         bPhysContig,
++                                         psSysPAddr,
++                                         pvPageAlignedCPUVAddr,
++                                         &psMemInfo->ui32Flags,
++                                         &hBuffer);
++
++      if (!bBMError)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psMemInfo, IMG_NULL);
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++
++      
++      psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
++      psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
++
++      
++      psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;
++
++      
++
++      psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
++      
++      
++      psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
++      psMemInfo->ui32AllocSize = ui32ByteSize;
++      psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;
++
++      
++
++      psMemInfo->pvSysBackupBuffer = IMG_NULL;
++
++      
++      psMemInfo->sMemBlk.hResItem = (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
++                                                                                                                              psMemInfo,
++                                                                                                                              0,
++                                                                                                                              UnmapDeviceClassMemoryCallBack,
++                                                                                                                              0);
++
++      
++      *ppsMemInfo = psMemInfo;
++
++      return PVRSRV_OK;       
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/handle.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/handle.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/handle.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/handle.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,973 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifdef        PVR_SECURE_HANDLES
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "handle.h"
++
++#ifdef        DEBUG
++#define       HANDLE_BLOCK_SIZE       1
++#else
++#define       HANDLE_BLOCK_SIZE       256
++#endif
++
++#define       HANDLE_HASH_TAB_INIT_SIZE       32
++
++#define       INDEX_IS_VALID(psBase, i) ((i) < (psBase)->ui32TotalHandCount)
++
++#define       INDEX_TO_HANDLE(psBase, idx) ((IMG_HANDLE)((idx) + 1))
++#define       HANDLE_TO_INDEX(psBase, hand) ((IMG_UINT32)(hand) - 1)
++
++#define INDEX_TO_HANDLE_PTR(psBase, i) (((psBase)->psHandleArray) + (i))
++#define       HANDLE_TO_HANDLE_PTR(psBase, h) (INDEX_TO_HANDLE_PTR(psBase, HANDLE_TO_INDEX(psBase, h)))
++
++#define       HANDLE_PTR_TO_INDEX(psBase, psHandle) ((psHandle) - ((psBase)->psHandleArray))
++#define       HANDLE_PTR_TO_HANDLE(psBase, psHandle) \
++      INDEX_TO_HANDLE(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle))
++
++enum eHandKey {
++      HAND_KEY_DATA = 0,
++      HAND_KEY_TYPE,
++      HAND_KEY_PARENT,
++      HAND_KEY_LEN                    
++};
++
++PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++typedef IMG_UINTPTR_T HAND_KEY[HAND_KEY_LEN];
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInit)
++#endif
++static INLINE
++IMG_VOID HandleListInit(IMG_UINT32 ui32Index, struct sHandleList *psList, IMG_HANDLE hParent)
++{
++      psList->ui32Next = ui32Index;
++      psList->ui32Prev = ui32Index;
++      psList->hParent = hParent;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitParentList)
++#endif
++static INLINE
++IMG_VOID InitParentList(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      IMG_UINT32 ui32Parent = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++
++      HandleListInit(ui32Parent, &psHandle->sChildren, INDEX_TO_HANDLE(psBase, ui32Parent));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitChildEntry)
++#endif
++static INLINE
++IMG_VOID InitChildEntry(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      HandleListInit(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, IMG_NULL);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIsEmpty)
++#endif
++static INLINE
++IMG_BOOL HandleListIsEmpty(IMG_UINT32 ui32Index, struct sHandleList *psList)
++{
++      IMG_BOOL bIsEmpty;
++
++      bIsEmpty = (psList->ui32Next == ui32Index);
++
++#ifdef        DEBUG
++      {
++              IMG_BOOL bIsEmpty2;
++
++              bIsEmpty2 = (psList->ui32Prev == ui32Index);
++              PVR_ASSERT(bIsEmpty == bIsEmpty2);
++      }
++#endif
++
++      return bIsEmpty;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoChildren)
++#endif
++static INLINE
++IMG_BOOL NoChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      PVR_ASSERT(psHandle->sChildren.hParent == HANDLE_PTR_TO_HANDLE(psBase, psHandle));
++
++      return HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sChildren);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(NoParent)
++#endif
++static INLINE
++IMG_BOOL NoParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      if (HandleListIsEmpty(HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings))
++      {
++              PVR_ASSERT(psHandle->sSiblings.hParent == IMG_NULL);
++
++              return IMG_TRUE;
++      }
++      else
++      {
++              PVR_ASSERT(psHandle->sSiblings.hParent != IMG_NULL);
++      }
++      return IMG_FALSE;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentHandle)
++#endif
++static INLINE
++IMG_HANDLE ParentHandle(struct sHandle *psHandle)
++{
++      return psHandle->sSiblings.hParent;
++}
++
++#define       LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, i, p, po, eo) \
++              ((struct sHandleList *)((char *)(INDEX_TO_HANDLE_PTR(psBase, i)) + (((i) == (p)) ? (po) : (eo))))
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListInsertBefore)
++#endif
++static INLINE
++IMG_VOID HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32InsIndex, struct sHandleList *psIns, IMG_SIZE_T uiParentOffset, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_UINT32 ui32ParentIndex)
++{
++      struct sHandleList *psPrevIns = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psIns->ui32Prev, ui32ParentIndex, uiParentOffset, uiEntryOffset);
++
++      PVR_ASSERT(psEntry->hParent == IMG_NULL);
++      PVR_ASSERT(ui32InsIndex == psPrevIns->ui32Next);
++      PVR_ASSERT(LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32ParentIndex, ui32ParentIndex, uiParentOffset, uiParentOffset)->hParent == INDEX_TO_HANDLE(psBase, ui32ParentIndex));
++
++      psEntry->ui32Prev = psIns->ui32Prev;
++      psIns->ui32Prev = ui32EntryIndex;
++      psEntry->ui32Next = ui32InsIndex;
++      psPrevIns->ui32Next = ui32EntryIndex;
++
++      psEntry->hParent = INDEX_TO_HANDLE(psBase, ui32ParentIndex);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(AdoptChild)
++#endif
++static INLINE
++IMG_VOID AdoptChild(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, struct sHandle *psChild)
++{
++      IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psParent->sChildren.hParent);
++
++      PVR_ASSERT(ui32Parent == (IMG_UINT32)HANDLE_PTR_TO_INDEX(psBase, psParent));
++
++      HandleListInsertBefore(psBase, ui32Parent, &psParent->sChildren, offsetof(struct sHandle, sChildren), HANDLE_PTR_TO_INDEX(psBase, psChild), &psChild->sSiblings, offsetof(struct sHandle, sSiblings), ui32Parent);
++
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListRemove)
++#endif
++static INLINE
++IMG_VOID HandleListRemove(PVRSRV_HANDLE_BASE *psBase, IMG_UINT32 ui32EntryIndex, struct sHandleList *psEntry, IMG_SIZE_T uiEntryOffset, IMG_SIZE_T uiParentOffset)
++{
++      if (!HandleListIsEmpty(ui32EntryIndex, psEntry))
++      {
++              struct sHandleList *psPrev = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Prev, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++              struct sHandleList *psNext = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, psEntry->ui32Next, HANDLE_TO_INDEX(psBase, psEntry->hParent), uiParentOffset, uiEntryOffset);
++
++              
++              PVR_ASSERT(psEntry->hParent != IMG_NULL);
++
++              psPrev->ui32Next = psEntry->ui32Next;
++              psNext->ui32Prev = psEntry->ui32Prev;
++
++              HandleListInit(ui32EntryIndex, psEntry, IMG_NULL);
++      }
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(UnlinkFromParent)
++#endif
++static INLINE
++IMG_VOID UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      HandleListRemove(psBase, HANDLE_PTR_TO_INDEX(psBase, psHandle), &psHandle->sSiblings, offsetof(struct sHandle, sSiblings), offsetof(struct sHandle, sChildren));
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIterate)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase, struct sHandleList *psHead, IMG_SIZE_T uiParentOffset, IMG_SIZE_T uiEntryOffset, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++      IMG_UINT32 ui32Index;
++      IMG_UINT32 ui32Parent = HANDLE_TO_INDEX(psBase, psHead->hParent);
++
++      PVR_ASSERT(psHead->hParent != IMG_NULL);
++
++      
++      for(ui32Index = psHead->ui32Next; ui32Index != ui32Parent; )
++      {
++              struct sHandle *psHandle = INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++              struct sHandleList *psEntry = LIST_PTR_FROM_INDEX_AND_OFFSET(psBase, ui32Index, ui32Parent, uiParentOffset, uiEntryOffset);
++              PVRSRV_ERROR eError;
++
++              PVR_ASSERT(psEntry->hParent == psHead->hParent);
++              
++              ui32Index = psEntry->ui32Next;
++
++              eError = (*pfnIterFunc)(psBase, psHandle);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(IterateOverChildren)
++#endif
++static INLINE
++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psParent, PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, struct sHandle *))
++{
++       return HandleListIterate(psBase, &psParent->sChildren, offsetof(struct sHandle, sChildren), offsetof(struct sHandle, sSiblings), pfnIterFunc);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(GetHandleStructure)
++#endif
++static INLINE
++PVRSRV_ERROR GetHandleStructure(PVRSRV_HANDLE_BASE *psBase, struct sHandle **ppsHandle, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      IMG_UINT32 ui32Index = HANDLE_TO_INDEX(psBase, hHandle);
++      struct sHandle *psHandle;
++
++      
++      if (!INDEX_IS_VALID(psBase, ui32Index))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle index out of range (%d >= %d)", ui32Index, psBase->ui32TotalHandCount));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psHandle =  INDEX_TO_HANDLE_PTR(psBase, ui32Index);
++      if (psHandle->eType == PVRSRV_HANDLE_TYPE_NONE)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle not allocated (index: %u)", ui32Index));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandle->eType)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "GetHandleStructure: Handle type mismatch (%d != %d)", eType, psHandle->eType));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_ASSERT(psBase->ui32PID == psHandle->ui32PID);
++
++      
++      *ppsHandle = psHandle;
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(ParentIfPrivate)
++#endif
++static INLINE
++IMG_HANDLE ParentIfPrivate(struct sHandle *psHandle)
++{
++      return (psHandle->eFlag & PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++                      ParentHandle(psHandle) : IMG_NULL;
++}
++                      
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(InitKey)
++#endif
++static INLINE
++IMG_VOID InitKey(HAND_KEY aKey, PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      aKey[HAND_KEY_DATA] = (IMG_UINTPTR_T)pvData;
++      aKey[HAND_KEY_TYPE] = (IMG_UINTPTR_T)eType;
++      aKey[HAND_KEY_PARENT] = (IMG_UINTPTR_T)hParent;
++}
++
++static PVRSRV_ERROR FreeHandleArray(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (psBase->psHandleArray != IMG_NULL)
++      {
++              eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                      psBase->ui32TotalHandCount * sizeof(struct sHandle),
++                      psBase->psHandleArray,
++                      psBase->hHandBlockAlloc);
++
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreeHandleArray: Error freeing memory (%d)", eError));
++              }
++              else
++              {
++                      psBase->psHandleArray = IMG_NULL;
++              }
++      }
++
++      return eError;
++}
++
++static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase, struct sHandle *psHandle)
++{
++      HAND_KEY aKey;
++      IMG_UINT32 ui32Index = HANDLE_PTR_TO_INDEX(psBase, psHandle);
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(psBase->ui32PID == psHandle->ui32PID);
++
++      InitKey(aKey, psBase, psHandle->pvData, psHandle->eType, ParentIfPrivate(psHandle));
++
++      if (!(psHandle->eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              IMG_HANDLE hHandle;
++              hHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, aKey);
++
++              PVR_ASSERT(hHandle != IMG_NULL);
++              PVR_ASSERT(hHandle == INDEX_TO_HANDLE(psBase, ui32Index));
++      }
++
++      
++      UnlinkFromParent(psBase, psHandle);
++
++      
++      eError = IterateOverChildren(psBase, psHandle, FreeHandle);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandle: Error whilst freeing subhandles (%d)", eError));
++              return eError;
++      }
++
++      if (psBase->ui32FreeHandCount == 0)
++      {
++              PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++              PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++
++              psBase->ui32FirstFreeIndex =  ui32Index;
++      }
++      else
++      {
++              
++              PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
++              INDEX_TO_HANDLE_PTR(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne =  ui32Index + 1;
++      }
++
++      PVR_ASSERT(psHandle->ui32NextIndexPlusOne == 0);
++
++      
++      psHandle->ui32NextIndexPlusOne = psBase->ui32LastFreeIndexPlusOne;
++
++      psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
++
++      psBase->ui32FreeHandCount++;
++
++      
++      psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeAllHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++      IMG_UINT32 i;
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++      {
++              return eError;
++      }
++
++      for (i = 0; i < psBase->ui32TotalHandCount; i++)
++      {
++              struct sHandle *psHandle;
++
++              psHandle = INDEX_TO_HANDLE_PTR(psBase, i);
++
++              if (psHandle->eType != PVRSRV_HANDLE_TYPE_NONE)
++              {
++                      eError = FreeHandle(psBase, psHandle);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "FreeAllHandles: FreeHandle failed (%d)", eError));
++                              break;
++                      }
++
++                      
++                      if (psBase->ui32FreeHandCount == psBase->ui32TotalHandCount)
++                      {
++                              break;
++                      }
++              }
++      }
++
++      return eError;
++}
++
++static PVRSRV_ERROR FreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVRSRV_ERROR eError;
++      
++      
++      eError = FreeAllHandles(psBase);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handles (%d)", eError));
++              return eError;
++      }
++
++      
++      eError = FreeHandleArray(psBase);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle array (%d)", eError));
++              return eError;
++      }
++
++      if (psBase->psHashTab != IMG_NULL)
++      {
++              
++              HASH_Delete(psBase->psHashTab);
++      }
++
++      eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psBase),
++              psBase,
++              psBase->hBaseBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeHandleBase: Couldn't free handle base (%d)", eError));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(FindHandle)
++#endif
++static INLINE
++IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hParent)
++{
++      HAND_KEY aKey;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      InitKey(aKey, psBase, pvData, eType, hParent);
++
++      return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
++}
++
++static PVRSRV_ERROR IncreaseHandleArraySize(PVRSRV_HANDLE_BASE *psBase)
++{
++      struct sHandle *psNewHandleArray;
++      IMG_HANDLE hNewHandBlockAlloc;
++      PVRSRV_ERROR eError;
++      struct sHandle *psHandle;
++
++      
++      eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              (psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE) * sizeof(struct sHandle),
++              (IMG_PVOID *)&psNewHandleArray,
++              &hNewHandBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "IncreaseHandleArraySize: Couldn't allocate new handle array (%d)", eError));
++              return eError;
++      }
++
++      
++      if (psBase->psHandleArray != IMG_NULL)
++              OSMemCopy(psNewHandleArray,
++                      psBase->psHandleArray,
++                      psBase->ui32TotalHandCount *  sizeof(struct sHandle));
++
++      
++      for(psHandle = psNewHandleArray + psBase->ui32TotalHandCount;
++              psHandle < psNewHandleArray + psBase->ui32TotalHandCount + HANDLE_BLOCK_SIZE;
++              psHandle++)
++      {
++              psHandle->eType = PVRSRV_HANDLE_TYPE_NONE;
++              psHandle->ui32NextIndexPlusOne  = 0;
++      }
++
++      
++      eError = FreeHandleArray(psBase);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psBase->psHandleArray = psNewHandleArray;
++      psBase->hHandBlockAlloc = hNewHandBlockAlloc;
++
++      
++      PVR_ASSERT(psBase->ui32FreeHandCount == 0);
++      psBase->ui32FreeHandCount = HANDLE_BLOCK_SIZE;
++
++      PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
++      psBase->ui32FirstFreeIndex = psBase->ui32TotalHandCount;
++
++      psBase->ui32TotalHandCount += HANDLE_BLOCK_SIZE;
++
++      PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);
++      psBase->ui32LastFreeIndexPlusOne = psBase->ui32TotalHandCount;
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++      IMG_UINT32 ui32NewIndex;
++      struct sHandle *psNewHandle;
++      IMG_HANDLE hHandle;
++      HAND_KEY aKey;
++
++      
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      PVR_ASSERT(psBase->psHashTab != NULL);
++
++      if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              
++              PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == IMG_NULL);
++      }
++
++      
++      if (psBase->ui32FreeHandCount == 0)
++      {
++              PVRSRV_ERROR eError = IncreaseHandleArraySize(psBase);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't increase handle array size (%d)", eError));
++                      return eError;
++              }
++      }
++      PVR_ASSERT(psBase->ui32FreeHandCount != 0)
++
++      
++      ui32NewIndex = psBase->ui32FirstFreeIndex;
++
++      
++      psNewHandle = INDEX_TO_HANDLE_PTR(psBase, ui32NewIndex);
++
++      
++      hHandle = INDEX_TO_HANDLE(psBase, ui32NewIndex);
++      
++      
++      if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              
++              InitKey(aKey, psBase, pvData, eType, hParent);
++
++              
++              if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (IMG_UINTPTR_T)hHandle))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
++
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      psBase->ui32FreeHandCount--;
++
++      
++      if (psBase->ui32FreeHandCount == 0)
++      {
++              PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
++              PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1));
++
++              psBase->ui32LastFreeIndexPlusOne = 0;
++              psBase->ui32FirstFreeIndex = 0;
++      }
++      else
++      {
++              
++              psBase->ui32FirstFreeIndex = (psNewHandle->ui32NextIndexPlusOne == 0) ?
++                      ui32NewIndex + 1 :
++                      psNewHandle->ui32NextIndexPlusOne - 1;
++      }
++
++      
++      psNewHandle->eType = eType;
++      psNewHandle->pvData = pvData;
++      psNewHandle->ui32NextIndexPlusOne = 0;
++      psNewHandle->eFlag = eFlag;
++      psNewHandle->ui32PID = psBase->ui32PID;
++      psNewHandle->ui32Index = ui32NewIndex;
++
++      InitParentList(psBase, psNewHandle);
++      PVR_ASSERT(NoChildren(psBase, psNewHandle));
++
++      InitChildEntry(psBase, psNewHandle);
++      PVR_ASSERT(NoParent(psBase, psNewHandle));
++
++      
++      *phHandle = hHandle;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++      IMG_HANDLE hHandle;
++
++      
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              
++              hHandle = FindHandle(psBase, pvData, eType, IMG_NULL);
++              if (hHandle != IMG_NULL)
++              {
++                      struct sHandle *psHandle;
++                      PVRSRV_ERROR eError;
++
++                      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Lookup of existing handle failed"));
++                              return eError;
++                      }
++              
++                      
++                      if ((psHandle->eFlag & eFlag & PVRSRV_HANDLE_ALLOC_FLAG_SHARED))
++                      {
++                              *phHandle = hHandle;
++                              return PVRSRV_OK;
++                      }
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      return AllocHandle(psBase, phHandle, pvData, eType, eFlag, IMG_NULL);
++}
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++      struct sHandle *psPHand;
++      struct sHandle *psCHand;
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hParentKey;
++      IMG_HANDLE hHandle;
++
++      
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      hParentKey = (eFlag & PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
++                      hParent : IMG_NULL;
++
++      
++      eError = GetHandleStructure(psBase, &psPHand, hParent, PVRSRV_HANDLE_TYPE_NONE);
++      if (eError != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (!(eFlag & PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++      {
++              
++              hHandle = FindHandle(psBase, pvData, eType, hParentKey);
++              if (hHandle != IMG_NULL)
++              {
++                      struct sHandle *psCHand;
++                      PVRSRV_ERROR eError;
++
++                      eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Lookup of existing handle failed"));
++                              return eError;
++                      }
++              
++                      PVR_ASSERT(hParentKey != IMG_NULL && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent);
++
++                      
++                      if ((psCHand->eFlag & eFlag & PVRSRV_HANDLE_ALLOC_FLAG_SHARED) && ParentHandle(HANDLE_TO_HANDLE_PTR(psBase, hHandle)) == hParent)
++                      {
++                              *phHandle = hHandle;
++                              return PVRSRV_OK;
++                      }
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psPHand = HANDLE_TO_HANDLE_PTR(psBase, hParent);
++
++      psCHand = HANDLE_TO_HANDLE_PTR(psBase, hHandle);
++
++      AdoptChild(psBase, psPHand, psCHand);
++
++      *phHandle = hHandle;
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++      IMG_HANDLE hHandle;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      
++      hHandle = (IMG_HANDLE) FindHandle(psBase, pvData, eType, IMG_NULL);
++      if (hHandle == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVFindHandle: couldn't find handle"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      *phHandle = hHandle;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandleAnyType: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      *ppvData = psHandle->pvData;
++      *peType = psHandle->eType;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      *ppvData = psHandle->pvData;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++      struct sHandle *psPHand;
++      struct sHandle *psCHand;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psCHand, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Error looking up subhandle (%d)", eError));
++              return eError;
++      }
++
++      
++      for (psPHand = psCHand; ParentHandle(psPHand) != hAncestor; )
++      {
++              eError = GetHandleStructure(psBase, &psPHand, ParentHandle(psPHand), PVRSRV_HANDLE_TYPE_NONE);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to given ancestor"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      *ppvData = psCHand->pvData;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Error looking up subhandle (%d)", eError));
++              return eError;
++      }
++
++      *phParent = ParentHandle(psHandle);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupAndReleaseHandle: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      *ppvData = psHandle->pvData;
++
++      eError = FreeHandle(psBase, psHandle);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      struct sHandle *psHandle;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++
++      eError = GetHandleStructure(psBase, &psHandle, hHandle, eType);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Error looking up handle (%d)", eError));
++              return eError;
++      }
++
++      eError = FreeHandle(psBase, psHandle);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, IMG_UINT32 ui32PID)
++{
++      PVRSRV_HANDLE_BASE *psBase;
++      IMG_HANDLE hBlockAlloc;
++      PVRSRV_ERROR eError;
++
++      eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psBase),
++              (IMG_PVOID *)&psBase,
++              &hBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base (%d)", eError));
++              return eError;
++      }
++      OSMemSet(psBase, 0, sizeof(*psBase));
++
++      
++      psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE, sizeof(HAND_KEY), HASH_Func_Default, HASH_Key_Comp_Default);
++      if (psBase->psHashTab == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table\n"));
++              goto failure;
++      }
++
++      psBase->hBaseBlockAlloc = hBlockAlloc;
++      psBase->ui32PID = ui32PID;
++
++      *ppsBase = psBase;
++
++      return PVRSRV_OK;
++failure:
++      (void)PVRSRVFreeHandleBase(psBase);
++      return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(psBase != gpsKernelHandleBase);
++
++      eError = FreeHandleBase(psBase);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(gpsKernelHandleBase == IMG_NULL);
++
++      eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase, KERNEL_ID);
++
++      return eError;
++}
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (gpsKernelHandleBase != IMG_NULL)
++      {
++              eError = FreeHandleBase(gpsKernelHandleBase);
++              if (eError == PVRSRV_OK)
++              {
++                      gpsKernelHandleBase = IMG_NULL;
++              }
++      }
++
++      return eError;
++}
++#else
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/hash.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/hash.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/hash.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/hash.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,406 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "pvr_debug.h"
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "hash.h"
++#include "osfunc.h"
++
++#define PRIVATE_MAX(a,b) ((a)>(b)?(a):(b))
++
++#define       KEY_TO_INDEX(pHash, key, uSize) \
++      ((pHash)->pfnHashFunc((pHash)->uKeySize, key, uSize) % uSize)
++
++#define       KEY_COMPARE(pHash, pKey1, pKey2) \
++      ((pHash)->pfnKeyComp((pHash)->uKeySize, pKey1, pKey2))
++
++struct _BUCKET_
++{
++      
++      struct _BUCKET_ *pNext;
++
++      
++      IMG_UINTPTR_T v;
++
++      
++      IMG_UINTPTR_T k[];
++};
++typedef struct _BUCKET_ BUCKET;
++
++struct _HASH_TABLE_ 
++{
++      
++      BUCKET **ppBucketTable;
++      
++      
++      IMG_UINT32 uSize;       
++
++      
++      IMG_UINT32 uCount;
++
++      
++      IMG_UINT32 uMinimumSize;
++
++      
++      IMG_UINT32 uKeySize;
++
++      
++      HASH_FUNC *pfnHashFunc;
++
++      
++      HASH_KEY_COMP *pfnKeyComp;
++};
++
++IMG_UINT32
++HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen)
++{ 
++      IMG_UINTPTR_T *p = (IMG_UINTPTR_T *)pKey;
++      IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++      IMG_UINT32 ui;
++      IMG_UINT32 uHashKey = 0;
++
++      PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++      PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++      for (ui = 0; ui < uKeyLen; ui++)
++      {
++              IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
++
++              uHashPart += (uHashPart << 12);
++              uHashPart ^= (uHashPart >> 22);
++              uHashPart += (uHashPart << 4);
++              uHashPart ^= (uHashPart >> 9);
++              uHashPart += (uHashPart << 10);
++              uHashPart ^= (uHashPart >> 2);
++              uHashPart += (uHashPart << 7);
++              uHashPart ^= (uHashPart >> 12);
++
++              uHashKey += uHashPart;
++      }
++
++      return uHashKey;
++}
++
++IMG_BOOL
++HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2)
++{ 
++      IMG_UINTPTR_T *p1 = (IMG_UINTPTR_T *)pKey1;
++      IMG_UINTPTR_T *p2 = (IMG_UINTPTR_T *)pKey2;
++      IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINTPTR_T);
++      IMG_UINT32 ui;
++
++      PVR_ASSERT((uKeySize % sizeof(IMG_UINTPTR_T)) == 0);
++
++      for (ui = 0; ui < uKeyLen; ui++)
++      {
++              if (*p1++ != *p2++)
++                      return IMG_FALSE;
++      }
++
++      return IMG_TRUE;
++}
++
++static void
++_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
++{
++      IMG_UINT32 uIndex;
++
++      PVR_ASSERT (pBucket != IMG_NULL);
++      PVR_ASSERT (ppBucketTable != IMG_NULL);
++      PVR_ASSERT (uSize != 0);
++
++      uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize);
++      pBucket->pNext = ppBucketTable[uIndex];
++      ppBucketTable[uIndex] = pBucket;
++}
++
++static void
++_Rehash (HASH_TABLE *pHash,
++       BUCKET **ppOldTable, IMG_UINT32 uOldSize,
++         BUCKET **ppNewTable, IMG_UINT32 uNewSize)
++{
++      IMG_UINT32 uIndex;
++      for (uIndex=0; uIndex< uOldSize; uIndex++)
++    {
++              BUCKET *pBucket;
++              pBucket = ppOldTable[uIndex];
++              while (pBucket != IMG_NULL)
++              {
++                      BUCKET *pNextBucket = pBucket->pNext;
++                      _ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
++                      pBucket = pNextBucket;
++              }
++    }
++}
++
++static IMG_BOOL
++_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
++{
++      if (uNewSize != pHash->uSize)
++    {
++              BUCKET **ppNewTable;
++        IMG_UINT32 uIndex;
++
++              PVR_DPF ((PVR_DBG_MESSAGE,
++                  "HASH_Resize: oldsize=0x%x  newsize=0x%x  count=0x%x",
++                              pHash->uSize, uNewSize, pHash->uCount));
++
++              OSAllocMem (PVRSRV_OS_PAGEABLE_HEAP, 
++                      sizeof (BUCKET *) * uNewSize, 
++                      (IMG_PVOID*)&ppNewTable, IMG_NULL);
++              if (ppNewTable == IMG_NULL)
++            return IMG_FALSE;
++        
++        for (uIndex=0; uIndex<uNewSize; uIndex++)
++            ppNewTable[uIndex] = IMG_NULL;
++        _Rehash (pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
++        OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pHash->ppBucketTable, IMG_NULL);
++        pHash->ppBucketTable = ppNewTable;
++        pHash->uSize = uNewSize;
++    }
++    return IMG_TRUE;
++}
++
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
++{
++      HASH_TABLE *pHash;
++      IMG_UINT32 uIndex;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(HASH_TABLE), 
++                                      (IMG_VOID **)&pHash, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pHash->uCount = 0;
++      pHash->uSize = uInitialLen;
++      pHash->uMinimumSize = uInitialLen;
++      pHash->uKeySize = uKeySize;
++      pHash->pfnHashFunc = pfnHashFunc;
++      pHash->pfnKeyComp = pfnKeyComp;
++
++      OSAllocMem (PVRSRV_OS_PAGEABLE_HEAP, 
++                  sizeof (BUCKET *) * pHash->uSize, 
++                  (IMG_PVOID*)&pHash->ppBucketTable, IMG_NULL);       
++
++      if (pHash->ppBucketTable == IMG_NULL)
++    {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(HASH_TABLE), pHash, IMG_NULL);
++              return IMG_NULL;
++    }
++
++      for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++              pHash->ppBucketTable[uIndex] = IMG_NULL;
++      return pHash;
++}
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
++{
++      return HASH_Create_Extended(uInitialLen, sizeof(IMG_UINTPTR_T),
++              &HASH_Func_Default, &HASH_Key_Comp_Default);
++}
++
++IMG_VOID
++HASH_Delete (HASH_TABLE *pHash)
++{
++      if (pHash != IMG_NULL)
++    {
++              PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
++              
++              PVR_ASSERT (pHash->uCount==0);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pHash->ppBucketTable, IMG_NULL);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(HASH_TABLE), pHash, IMG_NULL);
++    }
++}
++
++IMG_BOOL
++HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v)
++{
++      BUCKET *pBucket;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Insert_Extended: Hash=%08X, pKey=%08X, v=0x%x", pHash, pKey, v));
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(BUCKET) + pHash->uKeySize, 
++                                      (IMG_VOID **)&pBucket, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_FALSE;
++      }
++
++      pBucket->v = v;
++      OSMemCopy(pBucket->k, pKey, pHash->uKeySize);
++      _ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
++      pHash->uCount++;
++
++      
++      if (pHash->uCount << 1 > pHash->uSize)
++    {
++        
++
++        _Resize (pHash, pHash->uSize << 1);
++    }
++    
++      
++      return IMG_TRUE;
++}
++
++IMG_BOOL
++HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Insert: Hash=%08X, k=0x%x, v=0x%x", pHash, k, v));
++
++      return HASH_Insert_Extended(pHash, &k, v);
++}
++
++IMG_UINTPTR_T
++HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++      BUCKET **ppBucket;
++      IMG_UINT32 uIndex;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, pKey=%08X", pHash, pKey));
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      
++      uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++  
++      for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++      {
++              if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++              {
++                      BUCKET *pBucket = *ppBucket;
++                      IMG_UINTPTR_T v = pBucket->v;
++                      (*ppBucket) = pBucket->pNext;
++
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BUCKET) + pHash->uKeySize, pBucket, IMG_NULL);
++
++                      pHash->uCount--;
++
++                      
++                      if (pHash->uSize > (pHash->uCount << 2) &&
++                pHash->uSize > pHash->uMinimumSize)
++            {
++                
++
++                              _Resize (pHash,
++                         PRIVATE_MAX (pHash->uSize >> 1,
++                                      pHash->uMinimumSize));
++            }
++            
++                      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x%x",
++                      pHash, pKey, v));
++                      return v;
++              }
++      }
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Remove_Extended: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++      return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Remove: Hash=%08X, k=0x%x", pHash, k));
++
++      return HASH_Remove_Extended(pHash, &k);
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey)
++{
++      BUCKET **ppBucket;
++      IMG_UINT32 uIndex;
++
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, pKey=%08X", pHash,pKey));
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      
++      uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++  
++      for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != IMG_NULL; ppBucket = &((*ppBucket)->pNext))
++      {
++              if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++              {         
++                      BUCKET *pBucket = *ppBucket;
++                      IMG_UINTPTR_T v = pBucket->v;
++
++                      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x%x",
++                      pHash, pKey, v));
++                      return v;
++              }
++      }
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "HASH_Retrieve: Hash=%08X, pKey=%08X = 0x0 !!!!", pHash, pKey));
++      return 0;
++}
++
++IMG_UINTPTR_T
++HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k)
++{
++      PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Retrieve: Hash=%08X, k=0x%x", pHash,k));
++      return HASH_Retrieve_Extended(pHash, &k);
++}
++
++#ifdef HASH_TRACE
++void
++HASH_Dump (HASH_TABLE *pHash)
++{
++      IMG_UINT32 uIndex;
++      IMG_UINT32 uMaxLength=0;
++      IMG_UINT32 uEmptyCount=0;
++
++      PVR_ASSERT (pHash != IMG_NULL);
++      for (uIndex=0; uIndex<pHash->uSize; uIndex++)
++      {
++              BUCKET *pBucket;
++              IMG_UINT32 uLength = 0;
++              if (pHash->ppBucketTable[uIndex] == IMG_NULL)
++                      uEmptyCount++;
++              for (pBucket=pHash->ppBucketTable[uIndex];
++                      pBucket != IMG_NULL;
++                      pBucket = pBucket->pNext)
++                              uLength++;
++              uMaxLength = PRIVATE_MAX (uMaxLength, uLength);
++      }
++
++      PVR_TRACE(("hash table: uMinimumSize=%d  size=%d  count=%d",
++                      pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++      PVR_TRACE(("  empty=%d  max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/mem.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/mem.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/mem.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/mem.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "pvr_bridge_km.h"
++
++
++static PVRSRV_ERROR
++FreeSharedSysMemCallBack(IMG_UINT32 ui32ProcessID,
++                                               IMG_PVOID pvParam,
++                                               IMG_UINT32 ui32Param)
++{
++      PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = (PVRSRV_KERNEL_MEM_INFO *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return PVRSRVFreeSharedSysMemoryKM(psKernelMemInfo);
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVAllocSharedSysMemoryKM(IMG_UINT32 ui32Flags,
++                                                       IMG_UINT32 ui32Size,
++                                                       PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
++{
++                  PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_KERNEL_MEM_INFO),
++                                (IMG_VOID **)&psKernelMemInfo, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
++      ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
++      psKernelMemInfo->ui32Flags = ui32Flags;
++      psKernelMemInfo->ui32AllocSize = ui32Size;
++
++      if(OSAllocPages(psKernelMemInfo->ui32Flags,
++                                      psKernelMemInfo->ui32AllocSize,
++                                      &psKernelMemInfo->pvLinAddrKM,
++                                      &psKernelMemInfo->sMemBlk.hOSMemHandle)
++              != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_KERNEL_MEM_INFO),
++                                psKernelMemInfo,
++                                0);
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psKernelMemInfo->sMemBlk.hResItem =
++              (IMG_HANDLE)ResManRegisterRes(RESMAN_TYPE_SHARED_MEM_INFO,
++                                                                        psKernelMemInfo,
++                                                                        0,
++                                                                        FreeSharedSysMemCallBack,
++                                                                        0);
++
++      *ppsKernelMemInfo = psKernelMemInfo;
++
++      return PVRSRV_OK; 
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVFreeSharedSysMemoryKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++      PVRSRV_ERROR eError;
++
++      if(psKernelMemInfo->sMemBlk.hResItem)
++      {
++              eError =
++                      ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if(eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++      }
++
++      OSFreePages(psKernelMemInfo->ui32Flags,
++                              psKernelMemInfo->ui32AllocSize,
++                              psKernelMemInfo->pvLinAddrKM,
++                              psKernelMemInfo->sMemBlk.hOSMemHandle);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(PVRSRV_KERNEL_MEM_INFO),
++                        psKernelMemInfo,
++                        IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT PVRSRV_ERROR
++PVRSRVDissociateMemFromResmanKM(PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(!psKernelMemInfo)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      if(psKernelMemInfo->sMemBlk.hResItem)
++      {
++              eError =
++                      ResManFreeResByPtr(psKernelMemInfo->sMemBlk.hResItem, IMG_FALSE);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++              psKernelMemInfo->sMemBlk.hResItem = IMG_NULL;
++      }
++
++      return eError;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/metrics.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/metrics.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/metrics.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/metrics.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,153 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "metrics.h"
++
++#if defined(DEBUG) || defined(TIMING)
++
++static volatile IMG_UINT32 *pui32TimerRegister = 0;
++
++#define PVRSRV_TIMER_TOTAL_IN_TICKS(X)        asTimers[X].ui32Total
++#define PVRSRV_TIMER_TOTAL_IN_MS(X)           ((1000*asTimers[X].ui32Total)/ui32TicksPerMS)
++#define PVRSRV_TIMER_COUNT(X)                 asTimers[X].ui32Count
++
++
++Temporal_Data asTimers[PVRSRV_NUM_TIMERS]; 
++
++
++IMG_UINT32 PVRSRVTimeNow(IMG_VOID)
++{
++      if (!pui32TimerRegister)
++      {
++              static IMG_BOOL bFirstTime = IMG_TRUE;
++
++              if (bFirstTime)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVTimeNow: No timer register set up"));
++
++                      bFirstTime = IMG_FALSE;
++              }
++
++              return 0;
++      }
++
++#if defined(__sh__)
++
++      return (0xffffffff-*pui32TimerRegister);
++
++#else 
++
++      return 0;
++
++#endif 
++}
++
++
++static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
++{
++      IMG_UINT32 ui32Time1, ui32Time2;
++
++      ui32Time1 = PVRSRVTimeNow();
++
++      OSWaitus(1000000);
++
++      ui32Time2 = PVRSRVTimeNow();
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32Time2 - ui32Time1));
++
++      return (ui32Time2 - ui32Time1);
++}
++
++
++IMG_VOID PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo)
++{
++      IMG_UINT32 ui32Loop;
++
++      PVR_UNREFERENCED_PARAMETER(pvDevInfo);
++
++      for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++      {
++              asTimers[ui32Loop].ui32Total = 0;
++              asTimers[ui32Loop].ui32Count = 0;
++      }
++
++
++      #if defined(__sh__)
++
++              
++              
++              
++              
++              *TCR_2 = TIMER_DIVISOR;
++
++              
++              *TCOR_2 = *TCNT_2 = (unsigned int)0xffffffff;
++
++              
++              *TST_REG |= (unsigned char)0x04;
++
++              pui32TimerRegister = (IMG_UINT32 *)TCNT_2;
++
++      #else 
++
++              pui32TimerRegister = 0;
++
++      #endif 
++
++}
++
++
++IMG_VOID PVRSRVOutputMetricTotals(IMG_VOID)
++{
++      IMG_UINT32 ui32TicksPerMS, ui32Loop;
++
++      ui32TicksPerMS = PVRSRVGetCPUFreq();
++
++      if (!ui32TicksPerMS)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVOutputMetricTotals: Failed to get CPU Freq"));
++              return;
++      }
++
++      for(ui32Loop=0; ui32Loop < (PVRSRV_NUM_TIMERS); ui32Loop++)
++      {
++              if (asTimers[ui32Loop].ui32Count & 0x80000000L)
++              {
++                      PVR_DPF((PVR_DBG_WARNING,"PVRSRVOutputMetricTotals: Timer %u is still ON", ui32Loop));
++              }
++      }
++#if 0
++      
++      PVR_DPF((PVR_DBG_ERROR," Timer(%u): Total = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_TICKS(PVRSRV_TIMER_EXAMPLE_1)));
++      PVR_DPF((PVR_DBG_ERROR," Timer(%u): Time = %ums",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_TOTAL_IN_MS(PVRSRV_TIMER_EXAMPLE_1)));
++      PVR_DPF((PVR_DBG_ERROR," Timer(%u): Count = %u",PVRSRV_TIMER_EXAMPLE_1, PVRSRV_TIMER_COUNT(PVRSRV_TIMER_EXAMPLE_1)));
++#endif
++}
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/perproc.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/perproc.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/perproc.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/perproc.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,213 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++#include "handle.h"
++#include "perproc.h"
++
++#define       HASH_TAB_INIT_SIZE 32
++
++static HASH_TABLE *psHashTab = IMG_NULL;
++
++static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
++{
++      PVRSRV_ERROR eError;
++      IMG_UINTPTR_T uiPerProc;
++
++      PVR_ASSERT(psPerProc != IMG_NULL);
++
++      uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
++      if (uiPerProc == 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
++              
++              PVR_ASSERT(psPerProc->ui32PID == 0);
++      }
++      else
++      {
++              PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
++              PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
++      }
++
++      
++      if (psPerProc->psHandleBase != IMG_NULL)
++      {
++              eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
++                      return eError;
++              }
++      }
++
++      
++      if (psPerProc->hPerProcData != IMG_NULL)
++      {
++              eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
++                      return eError;
++              }
++      }
++
++      eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psPerProc),
++              psPerProc,
++              psPerProc->hBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
++              return eError;
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR ResMgrFreeProcessData(IMG_UINT32 ui32ProcessID,
++                                                                                IMG_PVOID pvParam,
++                                                                                IMG_UINT32 ui32Param)
++{
++      PVRSRV_PER_PROCESS_DATA *psPerProc = (PVRSRV_PER_PROCESS_DATA *)pvParam;
++      
++#ifdef        DEBUG
++      PVR_ASSERT(psPerProc->ui32PID == ui32ProcessID);
++#else
++      PVR_UNREFERENCED_PARAMETER (ui32ProcessID);
++#endif
++      PVR_UNREFERENCED_PARAMETER (ui32Param);
++
++      return FreePerProcessData(psPerProc);
++}
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID, IMG_BOOL bAlloc)
++{
++      PVRSRV_PER_PROCESS_DATA *psPerProc;
++      IMG_HANDLE hBlockAlloc;
++      PVRSRV_ERROR eError;
++
++      PVR_ASSERT(psHashTab != IMG_NULL);
++
++      
++      psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);
++
++      if (psPerProc != IMG_NULL || !bAlloc)
++      {
++              PVR_ASSERT(psPerProc == IMG_NULL || psPerProc->ui32PID == ui32PID);
++              return psPerProc;
++      }
++
++      
++      eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++              sizeof(*psPerProc),
++              (IMG_PVOID *)&psPerProc,
++              &hBlockAlloc);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't allocate per-process data (%d)", eError));
++              return IMG_NULL;
++      }
++      OSMemSet(psPerProc, 0, sizeof(*psPerProc));
++      psPerProc->hBlockAlloc = hBlockAlloc;
++
++      if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't insert per-process data into hash table"));
++              goto failure;
++      }
++
++      psPerProc->ui32PID = ui32PID;
++
++      
++      eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
++                                                         &psPerProc->hPerProcData,
++                                                         psPerProc,
++                                                         PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++                                                         PVRSRV_HANDLE_ALLOC_FLAG_NONE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't allocate handle for per-process data (%d)", eError));
++              goto failure;
++      }
++
++      
++      eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase, ui32PID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't allocate handle base for process (%d)", eError));
++              goto failure;
++      }
++
++      
++      psPerProc->psResManItem = ResManRegisterRes(RESMAN_TYPE_USE_PROCESSID | RESMAN_TYPE_RESOURCE_PERPROC_DATA,
++                      psPerProc,
++                      0,
++                      ResMgrFreeProcessData,
++                      ui32PID);
++
++      if (psPerProc->psResManItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessData: Couldn't register handles with the resource manager"));
++              goto failure;
++      }
++
++      return psPerProc;
++
++failure:
++      (void)FreePerProcessData(psPerProc);
++      return IMG_NULL;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID)
++{
++      PVR_ASSERT(psHashTab == IMG_NULL);
++
++      
++      psHashTab = HASH_Create(HASH_TAB_INIT_SIZE);
++      if (psHashTab == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataInit: Couldn't create per-process data hash table"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID)
++{
++      
++      if (psHashTab != IMG_NULL)
++      {
++              
++              HASH_Delete(psHashTab);
++              psHashTab = IMG_NULL;
++      }
++
++      return PVRSRV_OK;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/power.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/power.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/power.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/power.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,595 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++static IMG_BOOL _IsSystemStatePowered(PVR_POWER_STATE eSystemPowerState)
++{
++      return (IMG_BOOL)(eSystemPowerState < PVRSRV_POWER_STATE_D2);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32       ui32CallerID,
++                                                       IMG_BOOL       bSystemPowerEvent)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++      IMG_UINT32              ui32Timeout = 1000000;
++
++#if defined(SUPPORT_LMA)
++      
++      ui32Timeout *= 60;
++#endif 
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      do
++      {
++              eError = OSLockResource(&psSysData->sPowerStateChangeResource,
++                                                              ui32CallerID);
++              if (eError == PVRSRV_OK)
++              {
++                      break;
++              }
++              else if (ui32CallerID == ISR_ID)
++              {
++                      
++
++                      eError = PVRSRV_ERROR_RETRY;
++                      break;
++              }
++
++              OSWaitus(1);
++              ui32Timeout--;
++      } while (ui32Timeout > 0);
++
++      if ((eError == PVRSRV_OK) &&
++              !bSystemPowerEvent &&
++              !_IsSystemStatePowered(psSysData->eCurrentPowerState))
++      {
++              
++              PVRSRVPowerUnlock(ui32CallerID);
++              eError = PVRSRV_ERROR_RETRY;
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID)
++{
++      OSUnlockResource(&gpsSysData->sPowerStateChangeResource, ui32CallerID);
++}
++
++
++static
++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(IMG_BOOL                     bAllDevices,
++                                                                               IMG_UINT32                     ui32DeviceIndex,
++                                                                               PVR_POWER_STATE        eNewPowerState)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psPowerDevice = psSysData->psPowerDeviceList;
++      while (psPowerDevice)
++      {
++              if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++              {
++                      eNewDevicePowerState = (eNewPowerState == PVRSRV_POWER_Unspecified) ?
++                                                                      psPowerDevice->eDefaultPowerState : eNewPowerState;
++                      
++                      if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++                      {
++                              if (psPowerDevice->pfnPrePower != IMG_NULL)
++                              {
++                                      
++                                      eError = psPowerDevice->pfnPrePower(psPowerDevice->hDevCookie,
++                                                                                                              eNewDevicePowerState,
++                                                                                                              psPowerDevice->eCurrentPowerState);
++                                      if (eError != PVRSRV_OK)
++                                      {
++                                              return eError;
++                                      }
++                              }
++
++                              
++                              eError = SysDevicePrePowerState(psPowerDevice->ui32DeviceIndex,
++                                                                                              eNewDevicePowerState,
++                                                                                              psPowerDevice->eCurrentPowerState);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      return eError;
++                              }
++                      }
++              }
++
++              psPowerDevice = psPowerDevice->psNext;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++static
++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(IMG_BOOL                    bAllDevices,
++                                                                                IMG_UINT32            ui32DeviceIndex,
++                                                                                PVR_POWER_STATE       eNewPowerState)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psPowerDevice = psSysData->psPowerDeviceList;
++      while (psPowerDevice)
++      {
++              if (bAllDevices || (ui32DeviceIndex == psPowerDevice->ui32DeviceIndex))
++              {
++                      eNewDevicePowerState = (eNewPowerState == PVRSRV_POWER_Unspecified) ?
++                                                                      psPowerDevice->eDefaultPowerState : eNewPowerState;
++
++                      if (psPowerDevice->eCurrentPowerState != eNewDevicePowerState)
++                      {
++                              
++                              eError = SysDevicePostPowerState(psPowerDevice->ui32DeviceIndex,
++                                                                                               eNewDevicePowerState,
++                                                                                               psPowerDevice->eCurrentPowerState);
++                              if (eError != PVRSRV_OK)
++                              {
++                                      return eError;
++                              }
++
++                              if (psPowerDevice->pfnPostPower != IMG_NULL)
++                              {
++                                      
++                                      eError = psPowerDevice->pfnPostPower(psPowerDevice->hDevCookie,
++                                                                                                               eNewDevicePowerState,
++                                                                                                               psPowerDevice->eCurrentPowerState);
++                                      if (eError != PVRSRV_OK)
++                                      {
++                                              return eError;
++                                      }
++                              }
++
++                              psPowerDevice->eCurrentPowerState = eNewDevicePowerState;
++                      }
++              }
++
++              psPowerDevice = psPowerDevice->psNext;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32                   ui32DeviceIndex,
++                                                                               PVR_POWER_STATE        eNewPowerState,
++                                                                               IMG_UINT32                     ui32CallerID,
++                                                                               IMG_BOOL                       bRetainMutex)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVDevicePrePowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              goto Exit;
++      }
++
++      eError = PVRSRVDevicePostPowerStateKM(IMG_FALSE, ui32DeviceIndex, eNewPowerState);
++
++Exit:
++
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                              "PVRSRVSetDevicePowerStateKM : Transition to %d FAILED 0x%x", eNewPowerState, eError));
++      }
++
++      if (!bRetainMutex || (eError != PVRSRV_OK))
++      {
++              PVRSRVPowerUnlock(ui32CallerID);
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      SYS_DATA                        *psSysData;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      eError = PVRSRVPowerLock(KERNEL_ID, IMG_TRUE);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      if (_IsSystemStatePowered(eNewPowerState))
++      {
++              
++              eNewDevicePowerState = PVRSRV_POWER_Unspecified;
++      }
++      else
++      {
++              eNewDevicePowerState = PVRSRV_POWER_STATE_D3;
++      }
++
++      
++      eError = PVRSRVDevicePrePowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++      if (eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      if (eNewPowerState != psSysData->eCurrentPowerState)
++      {
++              
++              eError = SysSystemPrePowerState(eNewPowerState);
++              if (eError != PVRSRV_OK)
++              {
++                      goto ErrorExit;
++              }
++      }
++
++      return eError;
++
++ErrorExit:
++
++      PVR_DPF((PVR_DBG_ERROR,
++                      "PVRSRVSystemPrePowerStateKM: Transition from %d to %d FAILED 0x%x",
++                      psSysData->eCurrentPowerState, eNewPowerState, eError));
++
++      
++      psSysData->eFailedPowerState = eNewPowerState;
++
++      PVRSRVPowerUnlock(KERNEL_ID);
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      SYS_DATA                        *psSysData;
++      PVR_POWER_STATE         eNewDevicePowerState;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              goto Exit;
++      }
++
++      if (eNewPowerState != psSysData->eCurrentPowerState)
++      {
++              
++              eError = SysSystemPostPowerState(eNewPowerState);
++              if (eError != PVRSRV_OK)
++              {
++                      goto Exit;
++              }
++      }
++
++      if (_IsSystemStatePowered(eNewPowerState))
++      {
++              
++              eNewDevicePowerState = PVRSRV_POWER_Unspecified;
++      }
++      else
++      {
++              eNewDevicePowerState = PVRSRV_POWER_STATE_D3;
++      }
++
++      
++      eError = PVRSRVDevicePostPowerStateKM(IMG_TRUE, 0, eNewDevicePowerState);
++      if (eError != PVRSRV_OK)
++      {
++              goto Exit;
++      }
++
++      PVR_DPF((PVR_DBG_WARNING,
++                      "PVRSRVSystemPostPowerStateKM: System Power Transition from %d to %d OK",
++                      psSysData->eCurrentPowerState, eNewPowerState));
++
++      psSysData->eCurrentPowerState = eNewPowerState;
++
++Exit:
++
++      PVRSRVPowerUnlock(KERNEL_ID);
++
++      if (_IsSystemStatePowered(eNewPowerState))
++      {
++              
++
++
++              PVRSRVCommandCompleteCallbacks();
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = PVRSRVSystemPrePowerStateKM(eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      eError = PVRSRVSystemPostPowerStateKM(eNewPowerState);
++      if(eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      
++      psSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      PVR_DPF((PVR_DBG_ERROR,
++                      "PVRSRVSetPowerStateKM: Transition from %d to %d FAILED 0x%x",
++                      psSysData->eCurrentPowerState, eNewPowerState, eError));
++
++      
++      psSysData->eFailedPowerState = eNewPowerState;
++
++      return eError;
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32             ui32DeviceIndex,
++                                                                         PFN_PRE_POWER        pfnPrePower,
++                                                                         PFN_POST_POWER       pfnPostPower,
++                                                                         IMG_HANDLE           hDevCookie,
++                                                                         PVR_POWER_STATE      eCurrentPowerState,
++                                                                         PVR_POWER_STATE      eDefaultPowerState)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++
++      if (pfnPrePower == IMG_NULL &&
++              pfnPostPower == IMG_NULL)
++      {
++              return PVRSRVRemovePowerDevice(ui32DeviceIndex);
++      }
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP,
++                                               sizeof(PVRSRV_POWER_DEV),
++                                               (IMG_VOID **)&psPowerDevice, IMG_NULL);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterPowerDevice: Failed to alloc PVRSRV_POWER_DEV"));
++              return eError;
++      }
++
++      
++      psPowerDevice->pfnPrePower = pfnPrePower;
++      psPowerDevice->pfnPostPower = pfnPostPower;
++      psPowerDevice->hDevCookie = hDevCookie;
++      psPowerDevice->ui32DeviceIndex = ui32DeviceIndex;
++      psPowerDevice->eCurrentPowerState = eCurrentPowerState;
++      psPowerDevice->eDefaultPowerState = eDefaultPowerState;
++
++      
++      psPowerDevice->psNext = psSysData->psPowerDeviceList;
++      psSysData->psPowerDeviceList = psPowerDevice;
++
++      return (PVRSRV_OK);
++}
++
++
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psCurrent, *psPrevious;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psCurrent = psSysData->psPowerDeviceList;
++      psPrevious = IMG_NULL;
++
++      while (psCurrent)
++      {
++              if (psCurrent->ui32DeviceIndex == ui32DeviceIndex)
++              {
++                      
++                      if (psPrevious)
++                      {
++                              psPrevious->psNext = psCurrent->psNext;
++                      }
++                      else
++                      {
++                              
++                              psSysData->psPowerDeviceList = psCurrent->psNext;
++                      }
++
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCurrent, IMG_NULL);
++                      
++                      break;
++              }
++              else
++              {
++                      psPrevious = psCurrent;
++                      psCurrent = psCurrent->psNext;
++              }
++      }
++
++      return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVPowerControlKM(PVR_POWER_CONTROL ePowerControl, PVR_POWER_STATE *pePVRPowerState)
++{
++      PVRSRV_ERROR    eError;
++      SYS_DATA                *psSysData;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      switch(ePowerControl)
++      {
++              case PVRSRV_POWER_CONTROL_SET :
++              {
++                      eError = PVRSRVSetPowerStateKM(*pePVRPowerState);
++                      break;
++              }
++              case PVRSRV_POWER_CONTROL_RETRY :
++              {
++                      eError = PVRSRVSetPowerStateKM(psSysData->eFailedPowerState);
++#ifdef DEBUG
++                      if(eError == PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVPowerControlKM: Power Transition Re-Try success"));
++                      }
++#endif
++                      break;
++              }
++              case PVRSRV_POWER_CONTROL_QUERY :
++              {
++                      *pePVRPowerState = psSysData->eCurrentPowerState;
++                      break;
++              }
++              default :
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVPowerControlKM: Invalid Power control mode %d", ePowerControl));
++                      return PVRSRV_ERROR_GENERIC;
++      }
++
++      return eError;
++}
++
++
++IMG_EXPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_POWER_DEV        *psPowerDevice;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return IMG_FALSE;
++      }
++
++      if (OSIsResourceLocked(&psSysData->sPowerStateChangeResource, KERNEL_ID) ||
++              OSIsResourceLocked(&psSysData->sPowerStateChangeResource, ISR_ID))
++      {
++              return IMG_FALSE;
++      }
++
++      psPowerDevice = psSysData->psPowerDeviceList;
++      while (psPowerDevice)
++      {
++              if (psPowerDevice->ui32DeviceIndex == ui32DeviceIndex)
++              {
++                      return (IMG_BOOL)(psPowerDevice->eCurrentPowerState == PVRSRV_POWER_STATE_D0);
++              }
++
++              psPowerDevice = psPowerDevice->psNext;
++      }
++
++      
++      return IMG_FALSE;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/pvrsrv.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,948 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "handle.h"
++#include "perproc.h"
++
++
++#include "ra.h"
++
++PVRSRV_ERROR AllocateDeviceID(SYS_DATA *psSysData, IMG_UINT32 *pui32DevID)
++{
++      SYS_DEVICE_ID* psDeviceWalker;
++      SYS_DEVICE_ID* psDeviceEnd;
++      
++      psDeviceWalker = &psSysData->sDeviceID[0];
++      psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++      
++      while (psDeviceWalker < psDeviceEnd)
++      {
++              if (!psDeviceWalker->bInUse)
++              {
++                      psDeviceWalker->bInUse = IMG_TRUE;
++                      *pui32DevID = psDeviceWalker->uiID;
++                      return PVRSRV_OK;
++              }
++              psDeviceWalker++;
++      }
++      
++      PVR_DPF((PVR_DBG_ERROR,"AllocateDeviceID: No free and valid device IDs available!"));
++
++      
++      PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++
++PVRSRV_ERROR FreeDeviceID(SYS_DATA *psSysData, IMG_UINT32 ui32DevID)
++{
++      SYS_DEVICE_ID* psDeviceWalker;
++      SYS_DEVICE_ID* psDeviceEnd;
++
++      psDeviceWalker = &psSysData->sDeviceID[0];
++      psDeviceEnd = psDeviceWalker + psSysData->ui32NumDevices;
++
++      
++      while (psDeviceWalker < psDeviceEnd)
++      {
++              
++              if      (
++                              (psDeviceWalker->uiID == ui32DevID) &&
++                              (psDeviceWalker->bInUse)
++                      )
++              {
++                      psDeviceWalker->bInUse = IMG_FALSE;
++                      return PVRSRV_OK;
++              }
++              psDeviceWalker++;
++      }
++      
++      PVR_DPF((PVR_DBG_ERROR,"FreeDeviceID: no matching dev ID that is in use!"));
++
++      
++      PVR_ASSERT(psDeviceWalker < psDeviceEnd);
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++
++#ifndef ReadHWReg
++IMG_EXPORT
++IMG_UINT32 ReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++      return *(volatile IMG_UINT32*)((IMG_UINT32)pvLinRegBaseAddr+ui32Offset);
++}
++#endif
++
++
++#ifndef WriteHWReg
++IMG_EXPORT
++IMG_VOID WriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++      PVR_DPF((PVR_DBG_MESSAGE,"WriteHWReg Base:%x, Offset: %x, Value %x",pvLinRegBaseAddr,ui32Offset,ui32Value));
++
++      *(IMG_UINT32*)((IMG_UINT32)pvLinRegBaseAddr+ui32Offset) = ui32Value;
++}
++#endif
++
++
++#ifndef WriteHWRegs
++IMG_EXPORT
++IMG_VOID WriteHWRegs(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Count, PVRSRV_HWREG *psHWRegs)
++{
++      while (ui32Count--)
++      {
++              WriteHWReg (pvLinRegBaseAddr, psHWRegs->ui32RegAddr, psHWRegs->ui32RegVal);
++              psHWRegs++;
++      }
++}
++#endif
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVEnumerateDevicesKM(IMG_UINT32 *pui32NumDevices,
++                                                                                                 PVRSRV_DEVICE_IDENTIFIER *psDevIdList)
++{
++      PVRSRV_ERROR            eError;
++      SYS_DATA                        *psSysData;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      IMG_UINT32                      i;
++      
++      if (!pui32NumDevices || !psDevIdList)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Invalid params"));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVEnumerateDevicesKM: Failed to get SysData"));
++              return eError;
++      }
++
++      
++
++      for (i=0; i<PVRSRV_MAX_DEVICES; i++)
++      {
++              psDevIdList[i].eDeviceType = PVRSRV_DEVICE_TYPE_UNKNOWN;
++      }
++      
++      
++      *pui32NumDevices = 0;
++      
++      
++
++
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      for (i=0; psDeviceNode != IMG_NULL; i++)
++      {
++              
++              if(psDeviceNode->sDevId.eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
++              {
++                      
++                      *psDevIdList++ = psDeviceNode->sDevId;
++                      
++                      (*pui32NumDevices)++;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData)
++{
++      PVRSRV_ERROR    eError;
++
++      
++      eError = ResManInit();
++      if (eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      eError = PVRSRVPerProcessDataInit();
++      if(eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      
++      eError = PVRSRVHandleInit();
++      if(eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      
++      eError = OSCreateResource(&psSysData->sPowerStateChangeResource);
++      if (eError != PVRSRV_OK)
++      {
++              goto Error;
++      }
++
++      
++      gpsSysData->eCurrentPowerState = PVRSRV_POWER_STATE_D0;
++      gpsSysData->eFailedPowerState = PVRSRV_POWER_Unspecified;
++
++      return eError;
++      
++Error:
++      PVRSRVDeInit(psSysData);
++      return eError;
++}
++
++
++
++IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData)
++{
++      PVRSRV_ERROR    eError;
++      
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++
++      eError = PVRSRVHandleDeInit();
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVHandleDeInit failed"));
++      }
++
++      eError = PVRSRVPerProcessDataDeInit();
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeInit: PVRSRVPerProcessDataDeInit failed"));
++      }
++      
++      ResManDeInit();
++}
++
++
++PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,  
++                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                IMG_UINT32 ui32SOCInterruptBit,
++                                                                IMG_UINT32 *pui32DeviceIndex)
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      
++      
++      if(OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                       sizeof(PVRSRV_DEVICE_NODE), 
++                                       (IMG_VOID **)&psDeviceNode, IMG_NULL) != PVRSRV_OK)    
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to alloc memory for psDeviceNode"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++      OSMemSet (psDeviceNode, 0, sizeof(PVRSRV_DEVICE_NODE)); 
++      
++      eError = pfnRegisterDevice(psDeviceNode);
++      if (eError != PVRSRV_OK)        
++      {
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                      0, psDeviceNode, IMG_NULL);
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterDevice : Failed to register device"));
++              return (PVRSRV_ERROR_DEVICE_REGISTER_FAILED);
++      }
++
++      
++
++
++
++
++      psDeviceNode->ui32RefCount = 1;
++      psDeviceNode->psSysData = psSysData;
++      psDeviceNode->ui32SOCInterruptBit = ui32SOCInterruptBit;
++      
++      
++      AllocateDeviceID(psSysData, &psDeviceNode->sDevId.ui32DeviceIndex);
++              
++      
++      psDeviceNode->psNext = psSysData->psDeviceNodeList;
++      psSysData->psDeviceNodeList = psDeviceNode;
++
++      
++      *pui32DeviceIndex = psDeviceNode->sDevId.ui32DeviceIndex;
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInitialiseDevice (IMG_UINT32 ui32DevIndex)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInitialiseDevice"));
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed to get SysData"));
++              return(eError);
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++
++      while (psDeviceNode)
++      {
++              if (psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++              {
++                      goto FoundDevice;
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: requested device is not present"));
++      return PVRSRV_ERROR_INIT_FAILURE;
++      
++FoundDevice:
++
++      PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++      
++      
++      if(psDeviceNode->pfnInitDevice != IMG_NULL)
++      {
++              eError = psDeviceNode->pfnInitDevice(psDeviceNode);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVInitialiseDevice: Failed InitDevice call"));
++                      return eError;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVAcquireDeviceDataKM (IMG_UINT32                       ui32DevIndex,
++                                                                                                       PVRSRV_DEVICE_TYPE     eDeviceType,
++                                                                                                       IMG_HANDLE                     *phDevCookie)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVAcquireDeviceDataKM"));
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: Failed to get SysData"));
++              return(eError);
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++
++      if (eDeviceType != PVRSRV_DEVICE_TYPE_UNKNOWN)
++      {
++              while (psDeviceNode)
++              {
++                      if (psDeviceNode->sDevId.eDeviceType == eDeviceType)
++                      {
++                              goto FoundDevice;
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++      }
++      else
++      {
++              while (psDeviceNode)
++              {
++                      if (psDeviceNode->sDevId.ui32DeviceIndex == ui32DevIndex)
++                      {
++                              goto FoundDevice;
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++      }
++
++      
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVAcquireDeviceDataKM: requested device is not present"));
++      return PVRSRV_ERROR_INIT_FAILURE;               
++      
++FoundDevice:
++
++      PVR_ASSERT (psDeviceNode->ui32RefCount > 0);
++
++      
++      if (phDevCookie)
++      {
++              *phDevCookie = (IMG_HANDLE)psDeviceNode;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_DEVICE_NODE      **ppsDevNode;
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++
++      eError = SysAcquireData(&psSysData);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed to get SysData"));
++              return(eError);
++      }
++
++      ppsDevNode = &psSysData->psDeviceNodeList;
++      while(*ppsDevNode)
++      {
++              if((*ppsDevNode)->sDevId.ui32DeviceIndex == ui32DevIndex)
++              {
++                      psDeviceNode = *ppsDevNode;
++                      goto FoundDevice;
++              }
++              ppsDevNode = &((*ppsDevNode)->psNext);
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: requested device %d is not present", ui32DevIndex));
++      
++      return PVRSRV_ERROR_GENERIC;
++      
++FoundDevice:
++
++      
++
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++
++      eError = PVRSRVSetDevicePowerStateKM(ui32DevIndex,
++                                                                               PVRSRV_POWER_STATE_D3,
++                                                                               KERNEL_ID,
++                                                                               IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed PVRSRVSetDevicePowerStateKM call"));
++              return eError;
++      }
++#endif 
++
++      
++
++      if(psDeviceNode->pfnDeInitDevice != IMG_NULL)
++      {
++              eError = psDeviceNode->pfnDeInitDevice(psDeviceNode);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVDeinitialiseDevice: Failed DeInitDevice call"));
++                      return eError;
++              }
++      }
++
++      
++      *ppsDevNode = psDeviceNode->psNext;
++
++              
++      FreeDeviceID(psSysData, ui32DevIndex);  
++      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                              0, psDeviceNode, IMG_NULL);
++      
++      return (PVRSRV_OK);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
++                                                                        IMG_UINT32 ui32Value,
++                                                                        IMG_UINT32 ui32Mask,
++                                                                        IMG_UINT32 ui32Waitus,
++                                                                        IMG_UINT32 ui32Tries)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0, uiCurrent=0, uiMaxTime;
++
++      uiMaxTime = ui32Tries * ui32Waitus;
++
++      
++      do
++      {
++              if((*pui32LinMemAddr & ui32Mask) == ui32Value)
++              {
++                      return PVRSRV_OK;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++
++              OSWaitus(ui32Waitus);
++
++              uiCurrent = OSClockus();
++              if (uiCurrent < uiStart)
++              {
++                      
++                      uiStart = 0;
++              }
++
++      } while ((uiCurrent - uiStart) < uiMaxTime); 
++
++
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++
++#if defined (USING_ISR_INTERRUPTS)
++
++extern IMG_UINT32 gui32EventStatusServicesByISR;
++
++PVRSRV_ERROR PollForInterruptKM (IMG_UINT32 ui32Value,
++                                                               IMG_UINT32 ui32Mask,
++                                                               IMG_UINT32 ui32Waitus,
++                                                               IMG_UINT32 ui32Tries)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0, uiCurrent=0, uiMaxTime;
++
++      uiMaxTime = ui32Tries * ui32Waitus;
++
++      
++      do
++      {
++              if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value)
++              {
++                      gui32EventStatusServicesByISR = 0;
++                      return PVRSRV_OK;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++
++              OSWaitus(ui32Waitus);
++
++              uiCurrent = OSClockus();
++              if (uiCurrent < uiStart)
++              {
++                      
++                      uiStart = 0;
++              }
++
++      } while ((uiCurrent - uiStart) < uiMaxTime); 
++
++      return PVRSRV_ERROR_GENERIC;
++}
++#endif  
++
++
++
++
++IMG_EXPORT                    
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
++{
++      SYS_DATA *psSysData;
++      PVRSRV_ERROR eError;
++      
++      if(!psMiscInfo)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));             
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      
++      if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
++                                                                              |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
++                                                                              |PVRSRV_MISC_INFO_MEMSTATS_PRESENT))
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
++              return PVRSRV_ERROR_INVALID_PARAMS;                     
++      }
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: Failed to get SysData"));          
++              return eError;  
++      }
++      
++      psMiscInfo->ui32StatePresent = 0;
++
++      
++      if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT)
++      && psSysData->pvSOCTimerRegisterKM)
++      {
++              psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
++              psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
++      }
++
++      
++      if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT)
++      && psSysData->pvSOCClockGateRegsBase)
++      {
++              psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
++              psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
++              psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
++      }
++
++      
++      if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT)
++      && psMiscInfo->pszMemoryStr)
++      {
++              RA_ARENA                        **ppArena;
++              BM_HEAP                         *psBMHeap;
++      BM_CONTEXT                      *psBMContext;
++              PVRSRV_DEVICE_NODE      *psDeviceNode;
++              IMG_CHAR                        *pszStr;
++              IMG_UINT32                      ui32StrLen;
++              IMG_INT32                       i32Count;
++              
++              pszStr = psMiscInfo->pszMemoryStr;
++              ui32StrLen = psMiscInfo->ui32MemoryStrLen;
++  
++              psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;
++
++              
++              ppArena = &psSysData->apsLocalDevMemArena[0];
++              while(*ppArena)
++              {
++                      CHECK_SPACE(ui32StrLen);
++                      i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
++                      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++                      
++                      RA_GetStats(*ppArena,
++                                                      &pszStr, 
++                                                      &ui32StrLen);
++                      
++                      ppArena++;
++              }
++
++              
++              psDeviceNode = psSysData->psDeviceNodeList;
++              while(psDeviceNode)
++              {
++                      CHECK_SPACE(ui32StrLen);
++                      i32Count = OSSNPrintf(pszStr, 100, "\n\nDevice Type %d:\n", psDeviceNode->sDevId.eDeviceType);
++                      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++                      
++                      if(psDeviceNode->sDevMemoryInfo.pBMKernelContext)
++                      {
++                              CHECK_SPACE(ui32StrLen);
++                              i32Count = OSSNPrintf(pszStr, 100, "\nKernel Context:\n");
++                              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++                              
++                              psBMHeap = psDeviceNode->sDevMemoryInfo.pBMKernelContext->psBMHeap;
++                              while(psBMHeap)
++                              {               
++                                      if(psBMHeap->pImportArena)
++                                      {
++                                              RA_GetStats(psBMHeap->pImportArena,
++                                                                              &pszStr, 
++                                                                              &ui32StrLen);
++                                      }
++
++                                      if(psBMHeap->pVMArena)
++                                      {
++                                              RA_GetStats(psBMHeap->pVMArena,
++                                                                              &pszStr, 
++                                                                              &ui32StrLen);
++                                      }
++                                      psBMHeap = psBMHeap->psNext;
++                              }
++                      }
++              
++                      
++                      psBMContext = psDeviceNode->sDevMemoryInfo.pBMContext;
++                      while(psBMContext)
++                      {
++                              CHECK_SPACE(ui32StrLen);
++                              i32Count = OSSNPrintf(pszStr, 100, "\nApplication Context (hDevMemContext) 0x%08X:\n", (IMG_HANDLE)psBMContext);
++                              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++                              psBMHeap = psBMContext->psBMHeap;
++                              while(psBMHeap)
++                              {
++                                      if(psBMHeap->pImportArena)
++                                      {
++                                              RA_GetStats(psBMHeap->pImportArena,
++                                                                              &pszStr, 
++                                                                              &ui32StrLen);
++                                      }
++
++                                      if(psBMHeap->pVMArena)
++                                      {
++                                              RA_GetStats(psBMHeap->pVMArena,
++                                                                              &pszStr, 
++                                                                              &ui32StrLen);
++                                      }
++                                      psBMHeap = psBMHeap->psNext;
++                              }
++                              psBMContext = psBMContext->psNext;
++                      }
++                      psDeviceNode = psDeviceNode->psNext;
++              }
++
++              
++              i32Count = OSSNPrintf(pszStr, 100, "\n\0");
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PVRSRVGetFBStatsKM(IMG_UINT32            *pui32Total, 
++                                                              IMG_UINT32              *pui32Available)
++{
++      IMG_UINT32 ui32Total = 0, i = 0;
++      IMG_UINT32 ui32Available = 0;
++
++      *pui32Total             = 0;
++      *pui32Available = 0;
++
++      
++      while(BM_ContiguousStatistics(i, &ui32Total, &ui32Available) == IMG_TRUE)
++      {
++              *pui32Total             += ui32Total;
++              *pui32Available += ui32Available;
++
++              i++;
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      SYS_DATA                        *psSysData;
++      IMG_BOOL                        bStatus = IMG_FALSE;
++      IMG_UINT32                      ui32InterruptSource;
++
++      if(!psDeviceNode)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVDeviceLISR: Invalid params\n"));
++              goto out;
++      }
++      psSysData = psDeviceNode->psSysData;
++
++      
++      ui32InterruptSource = SysGetInterruptSource(psSysData, psDeviceNode);
++      if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++      {
++              if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++              {
++                      bStatus = (*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData);               
++              }
++
++              SysClearInterrupts(psSysData, psDeviceNode->ui32SOCInterruptBit);
++      }
++
++out:
++      return bStatus;
++}
++
++
++IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA                        *psSysData = pvSysData;
++      IMG_BOOL                        bStatus = IMG_FALSE;
++      IMG_UINT32                      ui32InterruptSource;
++      IMG_UINT32                      ui32ClearInterrupts = 0;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++
++      if(!psSysData)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVSystemLISR: Invalid params\n"));
++              goto out;
++      }
++
++      
++      ui32InterruptSource = SysGetInterruptSource(psSysData, IMG_NULL);
++      
++      
++      if(ui32InterruptSource == 0)
++      {
++              goto out;
++      }
++      
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if(psDeviceNode->pfnDeviceISR != IMG_NULL)
++              {
++                      if(ui32InterruptSource & psDeviceNode->ui32SOCInterruptBit)
++                      {
++                              if((*psDeviceNode->pfnDeviceISR)(psDeviceNode->pvISRData))
++                              {
++                                      
++                                      bStatus = IMG_TRUE;
++                              }
++                              
++                              ui32ClearInterrupts |= psDeviceNode->ui32SOCInterruptBit;
++                      }
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      SysClearInterrupts(psSysData, ui32ClearInterrupts);
++      
++out:
++      return bStatus;
++}
++
++
++IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA                        *psSysData = pvSysData;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++
++      if(!psSysData)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRSRVMISR: Invalid params\n"));
++              return;
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if(psDeviceNode->pfnDeviceMISR != IMG_NULL)
++              {
++                      (*psDeviceNode->pfnDeviceMISR)(psDeviceNode->pvISRData);
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      
++      if (PVRSRVProcessQueues(ISR_ID, IMG_FALSE) == PVRSRV_ERROR_PROCESSING_BLOCKED)
++      {
++              PVRSRVProcessQueues(ISR_ID, IMG_FALSE);
++      }
++}
++
++
++PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave)
++{
++      IMG_UINT32         uiBytesSaved = 0;
++      IMG_PVOID          pvLocalMemCPUVAddr;
++      RA_SEGMENT_DETAILS sSegDetails;
++
++      if (hArena == IMG_NULL)
++      {
++              return (PVRSRV_ERROR_INVALID_PARAMS);
++      }
++
++      sSegDetails.uiSize = 0;
++      sSegDetails.sCpuPhyAddr.uiAddr = 0;
++      sSegDetails.hSegment = 0;
++
++      
++      while (RA_GetNextLiveSegment(hArena, &sSegDetails))
++      {
++              if (pbyBuffer == IMG_NULL)
++              {
++                      
++                      uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++              }
++              else
++              {
++                      if ((uiBytesSaved + sizeof(sSegDetails.uiSize) + sSegDetails.uiSize) > *puiBufSize)
++                      {
++                              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++                      }
++
++                      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVSaveRestoreLiveSegments: Base %08x size %08x", sSegDetails.sCpuPhyAddr.uiAddr, sSegDetails.uiSize));
++
++                      
++                      pvLocalMemCPUVAddr = OSMapPhysToLin(sSegDetails.sCpuPhyAddr,
++                                                                      sSegDetails.uiSize,
++                                                                      PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                                                      IMG_NULL);
++                      if (pvLocalMemCPUVAddr == IMG_NULL)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Failed to map local memory to host"));
++                              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++                      }
++
++                      if (bSave)
++                      {
++                              
++                              OSMemCopy(pbyBuffer, &sSegDetails.uiSize, sizeof(sSegDetails.uiSize));
++                              pbyBuffer += sizeof(sSegDetails.uiSize);
++
++                              OSMemCopy(pbyBuffer, pvLocalMemCPUVAddr, sSegDetails.uiSize);
++                              pbyBuffer += sSegDetails.uiSize;
++                      }
++                      else
++                      {
++                              IMG_UINT32 uiSize;
++                              
++                              OSMemCopy(&uiSize, pbyBuffer, sizeof(sSegDetails.uiSize));
++
++                              if (uiSize != sSegDetails.uiSize)
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR, "PVRSRVSaveRestoreLiveSegments: Segment size error"));
++                              }
++                              else
++                              {
++                                      pbyBuffer += sizeof(sSegDetails.uiSize);
++
++                                      OSMemCopy(pvLocalMemCPUVAddr, pbyBuffer, sSegDetails.uiSize);
++                                      pbyBuffer += sSegDetails.uiSize;
++                              }
++                      }
++
++
++                      uiBytesSaved += sizeof(sSegDetails.uiSize) + sSegDetails.uiSize;
++
++                      OSUnMapPhysToLin(pvLocalMemCPUVAddr,
++                                   sSegDetails.uiSize,
++                                   PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                   IMG_NULL);
++              }
++      }
++
++      if (pbyBuffer == IMG_NULL)
++      {
++              *puiBufSize = uiBytesSaved;
++      }
++
++      return (PVRSRV_OK);
++}
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/queue.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/queue.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/queue.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/queue.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,966 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include "proc.h"
++
++static int
++QueuePrintCommands (PVRSRV_QUEUE_INFO * psQueue, char * buffer, size_t size)
++{
++      off_t off = 0;
++      int cmds = 0;
++      IMG_UINT32 ui32ReadOffset  = psQueue->ui32ReadOffset;
++      IMG_UINT32 ui32WriteOffset = psQueue->ui32WriteOffset;
++      PVRSRV_COMMAND * psCmd;
++
++      while (ui32ReadOffset != ui32WriteOffset)
++      {
++              psCmd= (PVRSRV_COMMAND *)((IMG_UINT32)psQueue->pvLinQueueKM + ui32ReadOffset);
++
++              off = printAppend(buffer, size, off, "%p %p  %5lu  %6lu  %3lu  %5lu   %2lu   %2lu    %3lu  \n",
++                                                      psQueue,
++                                                      psCmd,
++                                                      psCmd->ui32ProcessID,
++                                                      psCmd->CommandType,
++                                                      psCmd->ui32CmdSize,
++                                                      psCmd->ui32DevIndex,
++                                                      psCmd->ui32DstSyncCount,
++                                                      psCmd->ui32SrcSyncCount,
++                                                      psCmd->ui32DataSize);
++              
++              ui32ReadOffset += psCmd->ui32CmdSize;
++              ui32ReadOffset &= psQueue->ui32QueueSize - 1;
++              cmds++;
++      }
++      if (cmds == 0)
++              off = printAppend(buffer, size, off, "%p <empty>\n", psQueue);
++      return off;
++} 
++
++
++off_t
++QueuePrintQueues (char * buffer, size_t size, off_t off)
++{
++      SYS_DATA * psSysData;
++      PVRSRV_QUEUE_INFO * psQueue;
++      
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++              return END_OF_FILE;
++
++       if (!off)
++                return printAppend (buffer, size, 0,
++                                                              "Command Queues\n"
++                                                              "Queue    CmdPtr      Pid Command Size DevInd  DSC  SSC  #Data ...\n");
++
++      
++ 
++      for (psQueue = psSysData->psQueueList; --off && psQueue; psQueue = psQueue->psNextKM)
++              ;
++
++      return psQueue ? QueuePrintCommands (psQueue, buffer, size) : END_OF_FILE;
++} 
++#endif 
++
++#define GET_SPACE_IN_CMDQ(psQueue)                                                                            \
++      (((psQueue->ui32ReadOffset - psQueue->ui32WriteOffset)                          \
++      + (psQueue->ui32QueueSize - 1)) & (psQueue->ui32QueueSize - 1))
++
++#define UPDATE_QUEUE_WOFF(psQueue, ui32Size)                                                  \
++      psQueue->ui32WriteOffset = (psQueue->ui32WriteOffset + ui32Size)        \
++      & (psQueue->ui32QueueSize - 1);
++
++#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending)                                        \
++      (ui32OpsComplete >= ui32OpsPending)
++
++IMG_UINT32 NearestPower2(IMG_UINT32 ui32Value)
++{
++      IMG_UINT32 ui32Temp, ui32Result = 1;
++
++      if(!ui32Value)
++              return 0;
++
++      ui32Temp = ui32Value - 1;
++      while(ui32Temp)
++      {
++              ui32Result <<= 1;
++              ui32Temp >>= 1;
++      }
++
++      return ui32Result;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 ui32QueueSize,
++                                                                                                       PVRSRV_QUEUE_INFO **ppsQueueInfo)
++{
++      PVRSRV_QUEUE_INFO       *psQueueInfo;
++      IMG_UINT32                      ui32Power2QueueSize = NearestPower2(ui32QueueSize);
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++      IMG_HANDLE                      hMemBlock;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                       sizeof(PVRSRV_QUEUE_INFO),
++                                       (IMG_VOID **)&psQueueInfo, &hMemBlock) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
++              goto ErrorExit;
++      }
++      OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));
++
++      psQueueInfo->hMemBlock[0] = hMemBlock;
++      psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++
++      
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                       ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE, 
++                                       &psQueueInfo->pvLinQueueKM, &hMemBlock) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
++              goto ErrorExit;
++      }
++
++      psQueueInfo->hMemBlock[1] = hMemBlock;
++      psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;
++
++      
++      PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
++      PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);
++
++      psQueueInfo->ui32QueueSize = ui32Power2QueueSize;
++
++      
++      if (psSysData->psQueueList == IMG_NULL)
++      {
++              eError = OSCreateResource(&psSysData->sQProcessResource);
++              if (eError != PVRSRV_OK)
++              {
++                      goto ErrorExit;
++              }
++      }
++      
++      
++      if (OSLockResource(&psSysData->sQProcessResource, 
++                                                      KERNEL_ID) != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      psQueueInfo->psNextKM = psSysData->psQueueList;
++      psSysData->psQueueList = psQueueInfo;
++
++      if (OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID) != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      *ppsQueueInfo = psQueueInfo;
++
++      return PVRSRV_OK;
++      
++ErrorExit:
++
++      if(psQueueInfo)
++      {
++              if(psQueueInfo->pvLinQueueKM)
++              {
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                              psQueueInfo->ui32QueueSize,
++                                              psQueueInfo->pvLinQueueKM,
++                                              psQueueInfo->hMemBlock[1]);
++              }
++
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                      sizeof(PVRSRV_QUEUE_INFO), 
++                                      psQueueInfo, 
++                                      psQueueInfo->hMemBlock[0]);
++      }
++
++      return PVRSRV_ERROR_GENERIC;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
++{
++      PVRSRV_QUEUE_INFO       *psQueue;
++      SYS_DATA                        *psSysData;
++      PVRSRV_ERROR            eError;
++      IMG_BOOL                        bTimeout = IMG_TRUE;
++      IMG_BOOL                        bStart = IMG_FALSE;
++      IMG_UINT32                      uiStart = 0;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      psQueue = psSysData->psQueueList;
++
++      do
++      {
++              if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
++              {
++                      bTimeout = IMG_FALSE;
++                      break;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      if (bTimeout)
++      {
++              
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
++              eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
++      }
++
++      
++      eError = OSLockResource(&psSysData->sQProcessResource, 
++                                                              KERNEL_ID);
++      if (eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++      
++      if(psQueue == psQueueInfo)
++      {
++              psSysData->psQueueList = psQueueInfo->psNextKM;
++
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                      psQueueInfo->ui32QueueSize,
++                                      psQueueInfo->pvLinQueueKM,
++                                      psQueueInfo->hMemBlock[1]);
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                      sizeof(PVRSRV_QUEUE_INFO),
++                                      psQueueInfo,
++                                      psQueueInfo->hMemBlock[0]);
++      }
++      else
++      {
++              while(psQueue)
++              {
++                      if(psQueue->psNextKM == psQueueInfo)
++                      {
++                              psQueue->psNextKM = psQueueInfo->psNextKM;
++
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                      psQueueInfo->ui32QueueSize,
++                                                      psQueueInfo->pvLinQueueKM,
++                                                      psQueueInfo->hMemBlock[1]);
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                      sizeof(PVRSRV_QUEUE_INFO),
++                                                      psQueueInfo,
++                                                      psQueueInfo->hMemBlock[0]);
++                              break;
++                      }
++                      psQueue = psQueue->psNextKM;
++              }
++
++              if(!psQueue)
++              {
++                      eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++                      if (eError != PVRSRV_OK)
++                      {
++                              goto ErrorExit;
++                      }
++                      eError = PVRSRV_ERROR_INVALID_PARAMS;
++                      goto ErrorExit;
++              }
++      }
++
++      
++      eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
++      if (eError != PVRSRV_OK)
++      {
++              goto ErrorExit;
++      }
++
++      
++      if (psSysData->psQueueList == IMG_NULL)
++      {
++              eError = OSDestroyResource(&psSysData->sQProcessResource);
++              if (eError != PVRSRV_OK)
++              {
++                      goto ErrorExit;
++              }
++      }
++      
++ErrorExit:
++
++      return eError;  
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              IMG_UINT32 ui32ParamSize,
++                                                                                              IMG_VOID **ppvSpace)
++{
++      IMG_BOOL bTimeout = IMG_TRUE;
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0, uiCurrent = 0;
++
++      
++      ui32ParamSize =  (ui32ParamSize+3) & 0xFFFFFFFC;
++
++      if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
++      {
++              PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      do
++      {
++              if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
++              {
++                      bTimeout = IMG_FALSE;
++                      break;  
++              }
++              
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++              
++              uiCurrent = OSClockus();
++              if (uiCurrent < uiStart)
++              {
++                      
++                      uiStart = 0;
++              }
++      } while ((uiCurrent - uiStart) < MAX_HW_TIME_US);
++
++      if (bTimeout == IMG_TRUE)
++      {
++              *ppvSpace = IMG_NULL;
++
++              return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++      }
++      else
++      {
++              *ppvSpace = (IMG_VOID *)(psQueue->ui32WriteOffset + (IMG_UINT32)psQueue->pvLinQueueUM);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO     *psQueue,
++                                                                                              PVRSRV_COMMAND          **ppsCommand,
++                                                                                              IMG_UINT32                      ui32DevIndex,
++                                                                                              IMG_UINT16                      CommandType,
++                                                                                              IMG_UINT32                      ui32DstSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++                                                                                              IMG_UINT32                      ui32SrcSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                                                              IMG_UINT32                      ui32DataByteSize )
++{
++      PVRSRV_ERROR    eError;
++      PVRSRV_COMMAND  *psCommand;
++      IMG_UINT32              ui32CommandSize;
++      IMG_UINT32              i;
++
++      
++      ui32DataByteSize = (ui32DataByteSize + 3) & 0xFFFFFFFC;
++
++      
++      ui32CommandSize = sizeof(PVRSRV_COMMAND) 
++                                      + ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
++                                      + ui32DataByteSize;
++
++      
++      eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      psCommand->ui32ProcessID        = OSGetCurrentProcessIDKM();
++
++      
++      psCommand->ui32CmdSize          = ui32CommandSize; 
++      psCommand->ui32DevIndex         = ui32DevIndex;
++      psCommand->CommandType          = CommandType;
++      psCommand->ui32DstSyncCount     = ui32DstSyncCount;
++      psCommand->ui32SrcSyncCount     = ui32SrcSyncCount;
++      psCommand->psDstSync            = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psCommand) + sizeof(PVRSRV_COMMAND));     
++
++
++      psCommand->psSrcSync            = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psCommand->psDstSync) 
++                                                              + (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++      psCommand->pvData                       = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psCommand->psSrcSync) 
++                                                              + (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++      psCommand->ui32DataSize         = ui32DataByteSize;
++
++      
++      for (i=0; i<ui32DstSyncCount; i++)
++      {
++              psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
++              psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
++              psCommand->psDstSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);
++      }
++
++      
++      for (i=0; i<ui32SrcSyncCount; i++)
++      {
++              psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
++              psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
++              psCommand->psSrcSync[i].ui32ReadOpsPending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);  
++      }
++
++      
++      *ppsCommand = psCommand;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              PVRSRV_COMMAND *psCommand)
++{
++      
++      if (psCommand->ui32DstSyncCount > 0)
++      {
++              psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psQueue->pvLinQueueKM) 
++                                                                      + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND));
++      }
++
++      if (psCommand->ui32SrcSyncCount > 0)
++      {
++              psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psQueue->pvLinQueueKM) 
++                                                                      + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++                                                                      + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++      }
++
++      psCommand->pvData = (PVRSRV_SYNC_OBJECT*)(((IMG_UINT8 *)psQueue->pvLinQueueKM) 
++                                                                      + psQueue->ui32WriteOffset + sizeof(PVRSRV_COMMAND)
++                                                                      + (psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT))
++                                                                      + (psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));
++
++      
++      UPDATE_QUEUE_WOFF(psQueue, psCommand->ui32CmdSize);
++      
++      return PVRSRV_OK;
++}
++
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA                    *psSysData,
++                                                                PVRSRV_COMMAND        *psCommand,
++                                                                IMG_BOOL                      bFlush)
++{
++      PVRSRV_SYNC_OBJECT              *psWalkerObj;
++      PVRSRV_SYNC_OBJECT              *psEndObj;
++      IMG_UINT32                              i;
++      COMMAND_COMPLETE_DATA   *psCmdCompleteData;
++      PVRSRV_ERROR                    eError = PVRSRV_OK;
++      IMG_UINT32                              ui32WriteOpsComplete;
++      IMG_UINT32                              ui32ReadOpsComplete;
++
++      
++      psWalkerObj = psCommand->psDstSync;
++      psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
++      while (psWalkerObj < psEndObj)
++      {
++              PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++              ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++              ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++              
++              if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++              ||      (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++              {
++                      if (!bFlush ||
++                              !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++                              !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++                      {
++                              return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++                      }
++              }
++
++              psWalkerObj++;
++      }
++
++      
++      psWalkerObj = psCommand->psSrcSync;
++      psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
++      while (psWalkerObj < psEndObj)
++      {
++              PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;
++
++              ui32ReadOpsComplete = psSyncData->ui32ReadOpsComplete;
++              ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
++              
++              if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
++              || (ui32ReadOpsComplete != psWalkerObj->ui32ReadOpsPending))
++              {
++                      if (!bFlush &&
++                              SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
++                              SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++                      {
++                              PVR_DPF((PVR_DBG_WARNING,
++                                              "PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
++                                              psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
++                      }
++
++                      if (!bFlush ||
++                              !SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
++                              !SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOpsPending))
++                      {
++                              return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++                      }
++              }
++              psWalkerObj++;
++      }
++
++      
++      if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                                      "PVRSRVProcessCommand: invalid DeviceType 0x%x",
++                                      psCommand->ui32DevIndex));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      psCmdCompleteData = psSysData->ppsCmdCompleteData[psCommand->ui32DevIndex][psCommand->CommandType];
++      if (psCmdCompleteData->bInUse)
++      {
++              
++              return PVRSRV_ERROR_FAILED_DEPENDENCIES;
++      }
++
++      
++      psCmdCompleteData->bInUse = IMG_TRUE;
++
++      
++      psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
++      for (i=0; i<psCommand->ui32DstSyncCount; i++)
++      {
++              psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];
++      }
++
++      
++      psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
++      for (i=0; i<psCommand->ui32SrcSyncCount; i++)
++      {
++              psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];
++      }
++
++      
++
++
++
++
++
++
++
++
++
++      if (psSysData->ppfnCmdProcList[psCommand->ui32DevIndex][psCommand->CommandType]((IMG_HANDLE)psCmdCompleteData, 
++                                                                                                                                                              psCommand->ui32DataSize, 
++                                                                                                                                                              psCommand->pvData) == IMG_FALSE)
++      {
++              
++
++
++              psCmdCompleteData->bInUse = IMG_FALSE;
++              eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++      }
++      
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVProcessQueues(IMG_UINT32   ui32CallerID,
++                                                               IMG_BOOL       bFlush)
++{
++      PVRSRV_QUEUE_INFO       *psQueue;
++      SYS_DATA                        *psSysData;
++      PVRSRV_COMMAND          *psCommand;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_ERROR            eError;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++      
++      psSysData->bReProcessQueues = IMG_FALSE;
++
++      
++      eError = OSLockResource(&psSysData->sQProcessResource,
++                                                      ui32CallerID);
++      if(eError != PVRSRV_OK)
++      {
++              
++              psSysData->bReProcessQueues = IMG_TRUE;
++
++              
++              if(ui32CallerID == ISR_ID)
++              {
++                      if (bFlush)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Couldn't acquire queue processing lock for FLUSH"));
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE,"PVRSRVProcessQueues: Couldn't acquire queue processing lock"));                       
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVProcessQueues: Queue processing failed when called from Services - not expected behaviour!"));
++              }
++              
++              return PVRSRV_OK;
++      }
++
++      psQueue = psSysData->psQueueList;
++
++      if(!psQueue)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
++      }
++
++      if (bFlush)
++      {
++              PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
++      }
++
++      while (psQueue)
++      {
++              while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
++              {
++                      psCommand = (PVRSRV_COMMAND*)((IMG_UINT32)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);
++
++                      if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
++                      {
++                                      
++                              UPDATE_QUEUE_ROFF(psQueue, psCommand->ui32CmdSize)
++                              
++                              if (bFlush)
++                              {
++                                      continue;
++                              }
++                      }
++
++                      break;
++              }
++              psQueue = psQueue->psNextKM;
++      }
++
++      if (bFlush)
++      {
++              PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
++      }
++
++      
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if (psDeviceNode->bReProcessDeviceCommandComplete &&
++                      psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++              {
++                      (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      
++      OSUnlockResource(&psSysData->sQProcessResource, ui32CallerID);
++      
++      
++      if(psSysData->bReProcessQueues)
++      {
++              return PVRSRV_ERROR_PROCESSING_BLOCKED;
++      }
++      
++      return PVRSRV_OK;
++}
++
++
++IMG_EXPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR)
++{
++      IMG_UINT32                              i;
++      COMMAND_COMPLETE_DATA   *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
++      SYS_DATA                                *psSysData;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return;
++      }
++
++      
++      for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
++      {
++              psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;
++      }
++
++      
++      for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
++      {
++              psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOpsComplete++;
++      }
++      
++      
++      psCmdCompleteData->bInUse = IMG_FALSE;
++      
++      
++      PVRSRVCommandCompleteCallbacks();
++      
++#if defined(SYS_USING_INTERRUPTS)
++      if(bScheduleMISR)
++      {
++              OSScheduleMISR(psSysData);
++      }
++#else
++      PVR_UNREFERENCED_PARAMETER(bScheduleMISR);
++#endif 
++}
++
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID)
++{
++      SYS_DATA                                *psSysData;
++      PVRSRV_DEVICE_NODE              *psDeviceNode;
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVCommandCompleteCallbacks: SysAcquireData failed"));
++              return;
++      }
++
++      psDeviceNode = psSysData->psDeviceNodeList;
++      while(psDeviceNode != IMG_NULL)
++      {
++              if(psDeviceNode->pfnDeviceCommandComplete != IMG_NULL)
++              {
++                      
++                      (*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
++              }
++              psDeviceNode = psDeviceNode->psNext;
++      }
++}
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32           ui32DevIndex,
++                                                                               PFN_CMD_PROC   *ppfnCmdProcList,
++                                                                               IMG_UINT32             ui32MaxSyncsPerCmd[][2],
++                                                                               IMG_UINT32             ui32CmdCount)
++{
++      SYS_DATA                                *psSysData;
++      PVRSRV_ERROR                    eError;
++      IMG_UINT32                              i;
++      IMG_UINT32                              ui32AllocSize;
++      PFN_CMD_PROC                    *ppfnCmdProc;
++      COMMAND_COMPLETE_DATA   *psCmdCompleteData;
++
++      
++      if(ui32DevIndex >= SYS_DEVICE_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                                      "PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
++                                      ui32DevIndex));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: SysAcquireData failed"));
++              return eError;
++      }
++
++      
++      eError = OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       ui32CmdCount * sizeof(PFN_CMD_PROC), 
++                                       (IMG_VOID **)&psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc queue"));
++              return eError;
++      }
++
++      
++      ppfnCmdProc = psSysData->ppfnCmdProcList[ui32DevIndex];
++
++      
++      for (i=0; i<ui32CmdCount; i++)
++      {
++              ppfnCmdProc[i] = ppfnCmdProcList[i];
++      }
++
++      
++      ui32AllocSize = ui32CmdCount * sizeof(COMMAND_COMPLETE_DATA*);
++      eError = OSAllocMem( PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                       ui32AllocSize, 
++                                       (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
++              goto ErrorExit;
++      }
++
++      for (i=0; i<ui32CmdCount; i++)
++      {
++              
++
++              ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA) 
++                                        + ((ui32MaxSyncsPerCmd[i][0]
++                                        +     ui32MaxSyncsPerCmd[i][1])
++                                        * sizeof(PVRSRV_SYNC_OBJECT));         
++
++              eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                      ui32AllocSize,
++                                                      (IMG_VOID **)&psSysData->ppsCmdCompleteData[ui32DevIndex][i],
++                                                      IMG_NULL);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d",i));
++                      goto ErrorExit;
++              }
++
++              
++              OSMemSet(psSysData->ppsCmdCompleteData[ui32DevIndex][i], 0x00, ui32AllocSize);
++
++              psCmdCompleteData = psSysData->ppsCmdCompleteData[ui32DevIndex][i];
++
++              
++              psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
++                                                                              (((IMG_UINT32)psCmdCompleteData) 
++                                                                              + sizeof(COMMAND_COMPLETE_DATA));
++              psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
++                                                                              (((IMG_UINT32)psCmdCompleteData->psDstSync) 
++                                                                              + (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[i][0]));
++      }
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      
++
++      if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++      {
++              for (i=0; i<ui32CmdCount; i++)
++              {
++                      if (psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++                      {
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++                      }
++              }
++
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++      }
++
++      if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++      {
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++      }
++      
++      return eError;
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
++                                                                         IMG_UINT32 ui32CmdCount)
++{
++      SYS_DATA                *psSysData;
++      PVRSRV_ERROR    eError;
++      IMG_UINT32              i;
++
++      
++      if(ui32DevIndex >= SYS_DEVICE_COUNT)
++      {
++              PVR_DPF((PVR_DBG_ERROR,
++                                      "PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
++                                      ui32DevIndex));
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRSRVRemoveCmdProcListKM: SysAcquireData failed"));
++              return eError;
++      }
++
++      if(psSysData->ppsCmdCompleteData[ui32DevIndex] != IMG_NULL)
++      {
++              for(i=0; i<ui32CmdCount; i++)
++              {
++                      
++                      if(psSysData->ppsCmdCompleteData[ui32DevIndex][i] != IMG_NULL)
++                      {
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex][i], IMG_NULL);
++                      }
++              }
++
++              
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppsCmdCompleteData[ui32DevIndex], IMG_NULL);
++      }
++
++      
++      if(psSysData->ppfnCmdProcList[ui32DevIndex] != IMG_NULL)
++      {
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 0, psSysData->ppfnCmdProcList[ui32DevIndex], IMG_NULL);
++      }
++
++      return PVRSRV_OK;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/ra.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/ra.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/ra.c    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/ra.c      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1181 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "hash.h"
++#include "ra.h"
++#include "buffer_manager.h"
++#include "osfunc.h"
++
++#ifdef __linux__
++#include <linux/kernel.h>
++#include "proc.h"
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++#include <stdio.h>
++#endif
++
++#define MINIMUM_HASH_SIZE (64)
++
++struct _BT_
++{
++      enum bt_type
++      {
++              btt_span,                               
++              btt_free,                               
++              btt_live                                
++      } type;
++
++      
++      IMG_UINTPTR_T base;
++      IMG_SIZE_T uSize;
++
++      
++      struct _BT_ *pNextSegment;
++      struct _BT_ *pPrevSegment;
++      
++      struct _BT_ *pNextFree;
++      struct _BT_ *pPrevFree;
++      
++      BM_MAPPING *psMapping;
++};
++typedef struct _BT_ BT;
++
++
++struct _RA_ARENA_
++{
++      
++      char *name;
++
++      
++      IMG_UINT32 uQuantum;
++
++      
++      IMG_BOOL (*pImportAlloc)(void *,
++                                                       IMG_SIZE_T uSize,
++                                                       IMG_SIZE_T *pActualSize,
++                                                       BM_MAPPING **ppsMapping,
++                                                       IMG_UINT32 uFlags,
++                                                       IMG_UINTPTR_T *pBase);
++      void (*pImportFree) (void *,
++                                               IMG_UINTPTR_T,
++                                               BM_MAPPING *psMapping);
++      void (*pBackingStoreFree) (void *, IMG_UINT32, IMG_UINT32, IMG_HANDLE);
++
++      
++      void *pImportHandle;
++
++      
++#define FREE_TABLE_LIMIT 32
++
++      
++      BT *aHeadFree [FREE_TABLE_LIMIT];
++
++      
++      BT *pHeadSegment;
++      BT *pTailSegment;
++
++      
++      HASH_TABLE *pSegmentHash;
++
++#ifdef RA_STATS
++      RA_STATISTICS sStatistics;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++#define PROC_NAME_SIZE                32
++      char szProcInfoName[PROC_NAME_SIZE];
++      char szProcSegsName[PROC_NAME_SIZE];
++#endif
++};
++
++void
++RA_Dump (RA_ARENA *pArena);
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static int
++RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof, void *data);
++static int
++RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++void CheckBMFreespace(void);
++#endif
++
++static IMG_BOOL
++_RequestAllocFail (void *_h,
++                                IMG_SIZE_T _uSize,
++                                IMG_SIZE_T *_pActualSize, 
++                                BM_MAPPING **_ppsMapping,
++                                IMG_UINT32 _uFlags,
++                                IMG_UINTPTR_T *_pBase)
++{
++      PVR_UNREFERENCED_PARAMETER (_h);
++      PVR_UNREFERENCED_PARAMETER (_uSize);
++      PVR_UNREFERENCED_PARAMETER (_pActualSize);
++      PVR_UNREFERENCED_PARAMETER (_ppsMapping);
++      PVR_UNREFERENCED_PARAMETER (_uFlags);
++      PVR_UNREFERENCED_PARAMETER (_pBase);
++
++      return IMG_FALSE;
++}
++
++static IMG_UINT32
++pvr_log2 (IMG_SIZE_T n)
++{
++      IMG_UINT32 l = 0;
++      n>>=1;
++      while (n>0)
++      {
++              n>>=1;
++              l++;
++      }
++      return l;
++}
++
++static void
++_SegmentListInsertAfter (RA_ARENA *pArena,
++                                               BT *pInsertionPoint,
++                                               BT *pBT)
++{
++      PVR_ASSERT (pArena != IMG_NULL);
++      PVR_ASSERT (pInsertionPoint != IMG_NULL);
++
++      pBT->pNextSegment = pInsertionPoint->pNextSegment;
++      pBT->pPrevSegment = pInsertionPoint;
++      if (pInsertionPoint->pNextSegment == IMG_NULL)
++              pArena->pTailSegment = pBT;
++      else
++              pInsertionPoint->pNextSegment->pPrevSegment = pBT; 
++      pInsertionPoint->pNextSegment = pBT;
++}
++
++static void
++_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
++{
++      
++      if (pArena->pHeadSegment == IMG_NULL)
++      {
++              pArena->pHeadSegment = pArena->pTailSegment = pBT;
++              pBT->pNextSegment = pBT->pPrevSegment = IMG_NULL;
++      }
++      else
++      {
++              BT *pBTScan;
++              pBTScan = pArena->pHeadSegment;
++              while (pBTScan->pNextSegment != IMG_NULL 
++                         && pBT->base >= pBTScan->pNextSegment->base)
++                      pBTScan = pBTScan->pNextSegment;
++              _SegmentListInsertAfter (pArena, pBTScan, pBT);
++      }
++}
++
++static void
++_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
++{
++      if (pBT->pPrevSegment == IMG_NULL)
++              pArena->pHeadSegment = pBT->pNextSegment;
++      else
++              pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
++
++      if (pBT->pNextSegment == IMG_NULL)
++              pArena->pTailSegment = pBT->pPrevSegment;
++      else
++              pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
++}
++
++static BT *
++_SegmentSplit (RA_ARENA *pArena, BT *pBT, IMG_SIZE_T uSize)
++{
++      BT *pNeighbour;
++
++      PVR_ASSERT (pArena != IMG_NULL);
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(BT), 
++                                      (IMG_VOID **)&pNeighbour, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pNeighbour->pPrevSegment = pBT;
++      pNeighbour->pNextSegment = pBT->pNextSegment;
++      if (pBT->pNextSegment == IMG_NULL)
++              pArena->pTailSegment = pNeighbour;
++      else
++              pBT->pNextSegment->pPrevSegment = pNeighbour;
++      pBT->pNextSegment = pNeighbour;
++
++      pNeighbour->type = btt_free;
++      pNeighbour->uSize = pBT->uSize - uSize;
++      pNeighbour->base = pBT->base + uSize;
++      pNeighbour->psMapping = pBT->psMapping;
++      pBT->uSize = uSize;
++      return pNeighbour;
++}
++
++static void
++_FreeListInsert (RA_ARENA *pArena, BT *pBT)
++{
++      IMG_UINT32 uIndex;
++      uIndex = pvr_log2 (pBT->uSize);
++      pBT->type = btt_free;
++      pBT->pNextFree = pArena->aHeadFree [uIndex];
++      pBT->pPrevFree = IMG_NULL;
++      if (pArena->aHeadFree[uIndex] != IMG_NULL)
++              pArena->aHeadFree[uIndex]->pPrevFree = pBT;
++      pArena->aHeadFree [uIndex] = pBT;
++}
++
++static void
++_FreeListRemove (RA_ARENA *pArena, BT *pBT)
++{
++      IMG_UINT32 uIndex;
++      uIndex = pvr_log2 (pBT->uSize);
++      if (pBT->pNextFree != IMG_NULL)
++              pBT->pNextFree->pPrevFree = pBT->pPrevFree;
++      if (pBT->pPrevFree == IMG_NULL)
++              pArena->aHeadFree[uIndex] = pBT->pNextFree;
++      else
++              pBT->pPrevFree->pNextFree = pBT->pNextFree;
++}
++
++static BT *
++_BuildSpanMarker (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pBT;
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(BT),
++                                      (IMG_VOID **)&pBT, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pBT->type = btt_span;
++      pBT->base = base;
++      pBT->uSize = uSize;
++      pBT->psMapping = IMG_NULL;
++
++      return pBT;
++}
++
++static BT *
++_BuildBT (IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pBT;
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                      sizeof(BT), 
++                                      (IMG_VOID **)&pBT, IMG_NULL) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      pBT->type = btt_free;
++      pBT->base = base;
++      pBT->uSize = uSize;
++
++      return pBT;
++}
++
++static BT *
++_InsertResource (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pBT;
++      PVR_ASSERT (pArena!=IMG_NULL);
++      pBT = _BuildBT (base, uSize);
++      if (pBT != IMG_NULL)
++      {
++              _SegmentListInsert (pArena, pBT);
++              _FreeListInsert (pArena, pBT);
++#ifdef RA_STATS
++              pArena->sStatistics.uTotalResourceCount+=uSize;
++              pArena->sStatistics.uFreeResourceCount+=uSize;
++              pArena->sStatistics.uSpanCount++;
++#endif
++      }
++      return pBT;
++}
++
++static BT *
++_InsertResourceSpan (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      BT *pSpanStart;
++      BT *pSpanEnd;
++      BT *pBT;
++
++      PVR_ASSERT (pArena != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_InsertResourceSpan: arena='%s', base=0x%x, size=0x%x",
++                        pArena->name, base, uSize));
++
++      pSpanStart = _BuildSpanMarker (base, uSize);
++      if (pSpanStart == IMG_NULL)
++      {
++              goto fail_start;
++      }
++      pSpanEnd = _BuildSpanMarker (base + uSize, 0);
++      if (pSpanEnd == IMG_NULL)
++      {
++              goto fail_end;
++      }
++
++      pBT = _BuildBT (base, uSize);
++      if (pBT == IMG_NULL)
++      {
++              goto fail_bt;
++      }
++
++      _SegmentListInsert (pArena, pSpanStart);
++      _SegmentListInsertAfter (pArena, pSpanStart, pBT);
++      _FreeListInsert (pArena, pBT);
++      _SegmentListInsertAfter (pArena, pBT, pSpanEnd);
++#ifdef RA_STATS
++      pArena->sStatistics.uTotalResourceCount+=uSize;
++#endif
++      return pBT;
++
++  fail_bt:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanEnd, IMG_NULL);
++  fail_end:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pSpanStart, IMG_NULL);
++  fail_start:
++      return IMG_NULL;
++}
++
++static void
++_FreeBT (RA_ARENA *pArena, BT *pBT, IMG_BOOL bFreeBackingStore)
++{
++      BT *pNeighbour;
++      IMG_UINTPTR_T uOrigBase;
++      IMG_SIZE_T uOrigSize;
++
++      PVR_ASSERT (pArena!=IMG_NULL);
++      PVR_ASSERT (pBT!=IMG_NULL);
++
++#ifdef RA_STATS
++      pArena->sStatistics.uLiveSegmentCount--;
++      pArena->sStatistics.uFreeSegmentCount++;
++      pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++#endif
++
++      uOrigBase = pBT->base;
++      uOrigSize = pBT->uSize;
++
++      
++      pNeighbour = pBT->pPrevSegment;
++      if (pNeighbour!=IMG_NULL
++              && pNeighbour->type == btt_free
++              && pNeighbour->base + pNeighbour->uSize == pBT->base)
++      {
++              _FreeListRemove (pArena, pNeighbour);
++              _SegmentListRemove (pArena, pNeighbour);
++              pBT->base = pNeighbour->base;
++              pBT->uSize += pNeighbour->uSize;
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++#ifdef RA_STATS
++              pArena->sStatistics.uFreeSegmentCount--;
++#endif
++      }
++
++      
++      pNeighbour = pBT->pNextSegment;
++      if (pNeighbour!=IMG_NULL
++              && pNeighbour->type == btt_free
++              && pBT->base + pBT->uSize == pNeighbour->base)
++      {
++              _FreeListRemove (pArena, pNeighbour);
++              _SegmentListRemove (pArena, pNeighbour);
++              pBT->uSize += pNeighbour->uSize;
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pNeighbour, IMG_NULL);
++#ifdef RA_STATS
++              pArena->sStatistics.uFreeSegmentCount--;
++#endif
++      }
++
++      
++      if (pArena->pBackingStoreFree != IMG_NULL && bFreeBackingStore)
++      {
++              IMG_UINTPTR_T   uRoundedStart, uRoundedEnd;
++
++              
++              uRoundedStart = (uOrigBase / pArena->uQuantum) * pArena->uQuantum;
++              
++              if (uRoundedStart < pBT->base)
++              {
++                      uRoundedStart += pArena->uQuantum;
++              }
++
++              
++              uRoundedEnd = ((uOrigBase + uOrigSize + pArena->uQuantum - 1) / pArena->uQuantum) * pArena->uQuantum;
++              
++              if (uRoundedEnd > (pBT->base + pBT->uSize))
++              {
++                      uRoundedEnd -= pArena->uQuantum;
++              }
++              
++              if (uRoundedStart < uRoundedEnd)
++              {
++                      pArena->pBackingStoreFree(pArena->pImportHandle, uRoundedStart, uRoundedEnd, (IMG_HANDLE)0);
++              }
++      }
++
++      if (pBT->pNextSegment!=IMG_NULL && pBT->pNextSegment->type == btt_span
++              && pBT->pPrevSegment!=IMG_NULL && pBT->pPrevSegment->type == btt_span)
++      {
++              BT *next = pBT->pNextSegment;
++              BT *prev = pBT->pPrevSegment;
++              _SegmentListRemove (pArena, next);
++              _SegmentListRemove (pArena, prev);
++              _SegmentListRemove (pArena, pBT);
++              pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->psMapping);
++#ifdef RA_STATS
++              pArena->sStatistics.uSpanCount--;
++              pArena->sStatistics.uExportCount++;
++              pArena->sStatistics.uFreeSegmentCount--;
++              pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++              pArena->sStatistics.uTotalResourceCount-=pBT->uSize;
++#endif
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), next, IMG_NULL);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), prev, IMG_NULL);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++      }
++      else
++              _FreeListInsert (pArena, pBT);
++}
++
++
++static IMG_BOOL
++_AttemptAllocAligned (RA_ARENA *pArena,
++                                        IMG_SIZE_T uSize,
++                                        BM_MAPPING **ppsMapping,
++                                        IMG_UINT32 uFlags,
++                                        IMG_UINT32 uAlignment,
++                                        IMG_UINT32 uAlignmentOffset,
++                                        IMG_UINTPTR_T *base)
++{
++      IMG_UINT32 uIndex;
++      PVR_ASSERT (pArena!=IMG_NULL);
++
++      PVR_UNREFERENCED_PARAMETER (uFlags);
++
++      if (uAlignment>1)
++              uAlignmentOffset %= uAlignment;
++
++      
++
++      uIndex = pvr_log2 (uSize);
++
++#if 0
++      
++      if (1u<<uIndex < uSize)
++              uIndex++;
++#endif
++
++      while (uIndex < FREE_TABLE_LIMIT && pArena->aHeadFree[uIndex]==IMG_NULL)
++              uIndex++;
++
++      while (uIndex < FREE_TABLE_LIMIT)
++      {
++              if (pArena->aHeadFree[uIndex]!=IMG_NULL)
++              {
++                      
++                      BT *pBT;
++
++                      pBT = pArena->aHeadFree [uIndex];
++                      while (pBT!=IMG_NULL)
++                      {
++                              IMG_UINTPTR_T aligned_base;
++
++                              if (uAlignment>1)
++                                      aligned_base = (pBT->base + uAlignmentOffset + uAlignment - 1) / uAlignment * uAlignment - uAlignmentOffset;
++                              else
++                                      aligned_base = pBT->base;
++                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                "RA_AttemptAllocAligned: pBT-base=0x%x "
++                                                "pBT-size=0x%x alignedbase=0x%x size=0x%x",
++                                              pBT->base, pBT->uSize, aligned_base, uSize));
++
++                              if (pBT->base + pBT->uSize >= aligned_base + uSize)
++                              {
++                                      if(!pBT->psMapping || pBT->psMapping->ui32Flags == uFlags)
++                                      {
++                                              _FreeListRemove (pArena, pBT);
++
++                                              PVR_ASSERT (pBT->type == btt_free);
++
++#ifdef RA_STATS
++                                              pArena->sStatistics.uLiveSegmentCount++;
++                                              pArena->sStatistics.uFreeSegmentCount--;
++                                              pArena->sStatistics.uFreeResourceCount-=pBT->uSize;
++#endif
++
++                                              
++                                              if (aligned_base > pBT->base)
++                                              {
++                                                      BT *pNeighbour;
++
++                                                      pNeighbour = _SegmentSplit (pArena, pBT, aligned_base-pBT->base);
++                                                      
++                                                      if (pNeighbour==IMG_NULL)
++                                                      {
++                                                              PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Front split failed"));
++                                                              
++                                                              _FreeListInsert (pArena, pBT); 
++                                                              return IMG_FALSE;
++                                                      }
++
++                                                      _FreeListInsert (pArena, pBT);
++      #ifdef RA_STATS
++                                                      pArena->sStatistics.uFreeSegmentCount++;
++                                                      pArena->sStatistics.uFreeResourceCount+=pBT->uSize;
++      #endif
++                                                      pBT = pNeighbour;
++                                              }
++
++                                              
++                                              if (pBT->uSize > uSize)
++                                              {
++                                                      BT *pNeighbour;
++                                                      pNeighbour = _SegmentSplit (pArena, pBT, uSize);
++                                                      
++                                                      if (pNeighbour==IMG_NULL)
++                                                      {
++                                                              PVR_DPF ((PVR_DBG_ERROR,"_AttemptAllocAligned: Back split failed"));
++                                                              
++                                                              _FreeListInsert (pArena, pBT); 
++                                                              return IMG_FALSE;
++                                                      }
++
++                                                      _FreeListInsert (pArena, pNeighbour);
++      #ifdef RA_STATS
++                                                      pArena->sStatistics.uFreeSegmentCount++;
++                                                      pArena->sStatistics.uFreeResourceCount+=pNeighbour->uSize;
++      #endif
++                                              }
++
++                                              pBT->type = btt_live;
++
++                                              if (!HASH_Insert (pArena->pSegmentHash, pBT->base, (IMG_UINTPTR_T) pBT))
++                                              {
++                                                      _FreeBT (pArena, pBT, IMG_FALSE);
++                                                      return IMG_FALSE;
++                                              }
++
++                                              if (ppsMapping!=IMG_NULL)
++                                                      *ppsMapping = pBT->psMapping;
++
++                                              *base = pBT->base;
++                                              
++                                              return IMG_TRUE;
++                                      }
++                                      else
++                                      {
++                                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                              "AttemptAllocAligned: mismatch in flags. Import has %x, request was %x", pBT->psMapping->ui32Flags, uFlags));
++
++                                      }
++                              }
++                              pBT = pBT->pNextFree;
++                      }
++                      
++              }
++              uIndex++;
++      }
++
++      return IMG_FALSE;
++}
++
++
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++                 IMG_UINTPTR_T base, 
++                 IMG_SIZE_T uSize, 
++                 BM_MAPPING *psMapping,
++                 IMG_SIZE_T uQuantum,
++                 IMG_BOOL (*alloc)(IMG_VOID *, IMG_SIZE_T uSize, IMG_SIZE_T *pActualSize,
++                                   BM_MAPPING **ppsMapping, IMG_UINT32 _flags, IMG_UINTPTR_T *pBase),
++                 IMG_VOID (*free) (IMG_VOID *, IMG_UINTPTR_T, BM_MAPPING *psMapping),
++                 IMG_VOID (*backingstore_free) (IMG_VOID*, IMG_UINT32, IMG_UINT32, IMG_HANDLE),
++                 IMG_VOID *pImportHandle)
++{
++      RA_ARENA *pArena;
++      BT *pBT;
++      int i;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Create: name='%s', base=0x%x, uSize=0x%x, alloc=0x%x, free=0x%x",
++                        name, base, uSize, alloc, free));
++
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                       sizeof (*pArena),
++                                       (IMG_VOID **)&pArena, IMG_NULL) != PVRSRV_OK)
++      {
++              goto arena_fail;
++      }
++
++      pArena->name = name;
++      pArena->pImportAlloc = alloc!=IMG_NULL ? alloc : _RequestAllocFail;
++      pArena->pImportFree = free;
++      pArena->pBackingStoreFree = backingstore_free;
++      pArena->pImportHandle = pImportHandle;
++      for (i=0; i<FREE_TABLE_LIMIT; i++)
++              pArena->aHeadFree[i] = IMG_NULL;
++      pArena->pHeadSegment = IMG_NULL;
++      pArena->pTailSegment = IMG_NULL;
++      pArena->uQuantum = uQuantum;
++
++#ifdef RA_STATS
++      pArena->sStatistics.uSpanCount = 0;
++      pArena->sStatistics.uLiveSegmentCount = 0;
++      pArena->sStatistics.uFreeSegmentCount = 0;
++      pArena->sStatistics.uFreeResourceCount = 0;
++      pArena->sStatistics.uTotalResourceCount = 0;
++      pArena->sStatistics.uCumulativeAllocs = 0;
++      pArena->sStatistics.uCumulativeFrees = 0;
++      pArena->sStatistics.uImportCount = 0;
++      pArena->sStatistics.uExportCount = 0;
++#endif
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++      if(strcmp(pArena->name,"") != 0)
++      {
++              sprintf(pArena->szProcInfoName, "ra_info_%s", pArena->name);
++              CreateProcEntry(pArena->szProcInfoName, RA_DumpInfo, 0, pArena);
++              sprintf(pArena->szProcSegsName, "ra_segs_%s", pArena->name);
++              CreateProcEntry(pArena->szProcSegsName, RA_DumpSegs, 0, pArena);
++      }
++#endif
++
++      pArena->pSegmentHash = HASH_Create (MINIMUM_HASH_SIZE);
++      if (pArena->pSegmentHash==IMG_NULL)
++      {
++              goto hash_fail;
++      }
++      if (uSize>0)
++      {
++              uSize = (uSize + uQuantum - 1) / uQuantum * uQuantum;
++              pBT = _InsertResource (pArena, base, uSize);
++              if (pBT == IMG_NULL)
++              {
++                      goto insert_fail;
++              }
++              pBT->psMapping = psMapping;
++              
++      }
++      return pArena;
++
++  insert_fail:
++      HASH_Delete (pArena->pSegmentHash);
++  hash_fail:
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pArena, IMG_NULL);
++  arena_fail:
++      return IMG_NULL;
++}
++
++void
++RA_Delete (RA_ARENA *pArena)
++{
++      IMG_UINT32 uIndex;
++
++      PVR_ASSERT(pArena != IMG_NULL);
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Delete: name='%s'", pArena->name));
++
++      for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
++              pArena->aHeadFree[uIndex] = IMG_NULL;
++
++      while (pArena->pHeadSegment != IMG_NULL)
++      {
++              BT *pBT = pArena->pHeadSegment;
++              PVR_ASSERT (pBT->type == btt_free);
++              _SegmentListRemove (pArena, pBT);
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BT), pBT, IMG_NULL);
++#ifdef RA_STATS
++              pArena->sStatistics.uSpanCount--;
++#endif
++      }
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++      RemoveProcEntry(pArena->szProcInfoName);
++      RemoveProcEntry(pArena->szProcSegsName);
++#endif
++      HASH_Delete (pArena->pSegmentHash);
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, pArena, IMG_NULL);
++}
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize)
++{
++      PVR_ASSERT (pArena != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Add: name='%s', base=0x%x, size=0x%x", pArena->name, base, uSize));
++
++      uSize = (uSize + pArena->uQuantum - 1) / pArena->uQuantum * pArena->uQuantum;
++      return ((IMG_BOOL)(_InsertResource (pArena, base, uSize) != IMG_NULL));
++}
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena,
++                IMG_SIZE_T uRequestSize,
++                IMG_SIZE_T *pActualSize,
++                BM_MAPPING **ppsMapping,
++                IMG_UINT32 uFlags,
++                IMG_UINT32 uAlignment,
++                IMG_UINT32 uAlignmentOffset,
++                IMG_UINTPTR_T *base)
++{
++      IMG_BOOL bResult = IMG_FALSE;
++      IMG_SIZE_T uSize = uRequestSize;
++
++      PVR_ASSERT (pArena!=IMG_NULL);
++
++#ifdef USE_BM_FREESPACE_CHECK
++      CheckBMFreespace();
++#endif
++
++      if (pActualSize != IMG_NULL)
++              *pActualSize = uSize;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Alloc: arena='%s', size=0x%x(0x%x), alignment=0x%x, offset=0x%x", 
++                 pArena->name, uSize, uRequestSize, uAlignment, uAlignmentOffset));
++
++      
++
++      bResult = _AttemptAllocAligned (pArena, uSize, ppsMapping, uFlags,
++                                                                      uAlignment, uAlignmentOffset, base);
++      if (!bResult)
++      {
++              BM_MAPPING *psImportMapping;
++              IMG_UINTPTR_T import_base;
++              IMG_SIZE_T uImportSize = uSize;
++
++              
++
++
++              if (uAlignment > pArena->uQuantum)
++              {
++                      uImportSize += (uAlignment - 1);
++              }
++
++              
++              uImportSize = ((uImportSize + pArena->uQuantum - 1)/pArena->uQuantum)*pArena->uQuantum;
++              
++              bResult =
++                      pArena->pImportAlloc (pArena->pImportHandle, uImportSize, &uImportSize,
++                                                               &psImportMapping, uFlags, &import_base);
++              if (bResult)
++              {
++                      BT *pBT;
++                      pBT = _InsertResourceSpan (pArena, import_base, uImportSize);
++                      
++                      if (pBT == IMG_NULL)
++                      {
++                              
++                              pArena->pImportFree(pArena->pImportHandle, import_base,
++                                                                      psImportMapping);
++                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                "RA_Alloc: name='%s', size=0x%x failed!", 
++                                                pArena->name, uSize));
++                              
++                              return IMG_FALSE;
++                      }
++                      pBT->psMapping = psImportMapping;
++#ifdef RA_STATS
++                      pArena->sStatistics.uFreeSegmentCount++;
++                      pArena->sStatistics.uFreeResourceCount += uImportSize;
++                      pArena->sStatistics.uImportCount++;
++                      pArena->sStatistics.uSpanCount++;
++#endif
++                      bResult = _AttemptAllocAligned(pArena, uSize, ppsMapping, uFlags,
++                                                                                 uAlignment, uAlignmentOffset,
++                                                                                 base);
++                      if (!bResult)
++                      {
++                              PVR_DPF ((PVR_DBG_MESSAGE,
++                                                "RA_Alloc: name='%s' uAlignment failed!",
++                                                pArena->name));
++                      }
++              }
++      }
++#ifdef RA_STATS
++      if (bResult)
++              pArena->sStatistics.uCumulativeAllocs++;
++#endif
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Alloc: name='%s', size=0x%x, *base=0x%x = %d",
++                        pArena->name, uSize, *base, bResult));
++
++      
++
++      return bResult;
++}
++
++void
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore)
++{
++      BT *pBT;
++
++      PVR_ASSERT (pArena != IMG_NULL);
++
++#ifdef USE_BM_FREESPACE_CHECK
++      CheckBMFreespace();
++#endif
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                        "RA_Free: name='%s', base=0x%x", pArena->name, base));
++      
++      pBT = (BT *) HASH_Remove (pArena->pSegmentHash, base);
++      PVR_ASSERT (pBT != IMG_NULL);
++
++      if (pBT)
++      {
++              PVR_ASSERT (pBT->base == base);
++
++#ifdef RA_STATS
++              pArena->sStatistics.uCumulativeFrees++;
++#endif
++
++#ifdef USE_BM_FREESPACE_CHECK
++{
++unsigned char* p;
++unsigned char* endp;
++
++      p = (unsigned char*)pBT->base + SysGetDevicePhysOffset();
++      endp = (unsigned char*)((IMG_UINT32)(p + pBT->uSize));
++      while ((IMG_UINT32)p & 3)
++      {
++              *p++ = 0xAA;
++      }
++      while (p < (unsigned char*)((IMG_UINT32)endp & 0xfffffffc))
++      {
++              *(IMG_UINT32*)p = 0xAAAAAAAA;
++              p += sizeof(IMG_UINT32);
++      }
++      while (p < endp)
++      {
++              *p++ = 0xAA;
++      }
++      PVR_DPF((PVR_DBG_MESSAGE,"BM_FREESPACE_CHECK: RA_Free Cleared %08X to %08X (size=0x%x)",(unsigned char*)pBT->base + SysGetDevicePhysOffset(),endp-1,pBT->uSize));
++}
++#endif
++              _FreeBT (pArena, pBT, bFreeBackingStore);
++      }
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails)
++{
++      BT        *pBT;
++
++      if (psSegDetails->hSegment)
++      {
++              pBT = (BT *)psSegDetails->hSegment;
++      }
++      else
++      {
++              RA_ARENA *pArena = (RA_ARENA *)hArena;
++
++              pBT = pArena->pHeadSegment;
++      }
++      
++      while (pBT != IMG_NULL)
++      {
++              if (pBT->type == btt_live)
++              {
++                      psSegDetails->uiSize = pBT->uSize;      
++                      psSegDetails->sCpuPhyAddr.uiAddr = pBT->base;
++                      psSegDetails->hSegment = (IMG_HANDLE)pBT->pNextSegment;
++
++                      return IMG_TRUE;
++              }
++
++              pBT = pBT->pNextSegment;
++      }
++
++      psSegDetails->uiSize = 0;       
++      psSegDetails->sCpuPhyAddr.uiAddr = 0;
++      psSegDetails->hSegment = (IMG_HANDLE)-1;
++
++      return IMG_FALSE;
++}
++      
++
++#ifdef USE_BM_FREESPACE_CHECK
++RA_ARENA* pJFSavedArena = IMG_NULL;
++
++void CheckBMFreespace(void)
++{
++BT *pBT;
++unsigned char* p;
++unsigned char* endp;
++
++      if (pJFSavedArena != IMG_NULL)
++      {
++              for (pBT=pJFSavedArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++              {
++                      if (pBT->type == btt_free)
++                      {
++                              p = (unsigned char*)pBT->base + SysGetDevicePhysOffset();
++                              endp = (unsigned char*)((IMG_UINT32)(p + pBT->uSize) & 0xfffffffc);
++
++                              while ((IMG_UINT32)p & 3)
++                              {
++                                      if (*p++ != 0xAA)
++                                      {
++                                              fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(unsigned long*)p);
++                                              for (;;);
++                                              break;
++                                      }
++                              }
++                              while (p < endp)
++                              {
++                                      if (*(unsigned long*)p != 0xAAAAAAAA)
++                                      {
++                                              fprintf(stderr,"BM_FREESPACE_CHECK: Blank space at %08X has changed to 0x%x\n",p,*(unsigned long*)p);
++                                              for (;;);
++                                              break;
++                                      }
++                                      p += 4;
++                              }
++                      }
++              }
++      }
++}
++#endif
++
++
++#if (defined(CONFIG_PROC_FS) && defined(DEBUG)) || defined (RA_STATS)
++static char *
++_BTType (int eType)
++{
++      switch (eType)
++      {
++      case btt_span: return "span";
++      case btt_free: return "free";
++      case btt_live: return "live";
++      }
++      return "junk";
++}
++#endif 
++
++
++
++#if defined(CONFIG_PROC_FS) && defined(DEBUG)
++static int
++RA_DumpSegs(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      BT *pBT = 0;
++      int len = 0;
++      RA_ARENA *pArena = (RA_ARENA *)data;
++
++      if (count < 80)
++      {
++              *start = (char *)0;
++              return (0);
++      }
++      *eof = 0;
++      *start = (char *)1;
++      if (off == 0)
++      {
++              return printAppend(page, count, 0, "Arena \"%s\"\nBase         Size Type Ref\n", pArena->name);
++      }
++      for (pBT=pArena->pHeadSegment; --off && pBT; pBT=pBT->pNextSegment)
++              ;
++      if (pBT)
++      {
++              len = printAppend(page, count, 0, "%08x %8x %4s %08x\n", 
++                                                      (unsigned int)pBT->base, (unsigned int)pBT->uSize, _BTType (pBT->type),
++                                                      (unsigned int)pBT->psMapping);
++      }
++      else
++      {
++              *eof = 1;
++      }
++      return (len);
++}
++
++static int
++RA_DumpInfo(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      int len = 0;
++      RA_ARENA *pArena = (RA_ARENA *)data;
++
++      if (count < 80)
++      {
++              *start = (char *)0;
++              return (0);
++      }
++      *eof = 0;
++      switch (off)
++      {
++      case 0:
++              len = printAppend(page, count, 0, "quantum\t\t\t%lu\n", pArena->uQuantum);
++              break;
++      case 1:
++              len = printAppend(page, count, 0, "import_handle\t\t%08X\n", (unsigned int)pArena->pImportHandle);
++              break;
++#ifdef RA_STATS
++      case 2:
++              len = printAppend(page, count, 0, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++              break;
++      case 3:
++              len = printAppend(page, count, 0, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++              break;
++      case 4:
++              len = printAppend(page, count, 0, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++              break;
++      case 5:
++              len = printAppend(page, count, 0, "free resource count\t%lu (0x%x)\n",
++                                                      pArena->sStatistics.uFreeResourceCount,
++                                                      (unsigned int)pArena->sStatistics.uFreeResourceCount);
++              break;
++      case 6:
++              len = printAppend(page, count, 0, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++              break;
++      case 7:
++              len = printAppend(page, count, 0, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++              break;
++      case 8:
++              len = printAppend(page, count, 0, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++              break;
++      case 9:
++              len = printAppend(page, count, 0, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++              break;
++#endif
++
++      default:
++              *eof = 1;
++      }
++      *start = (char *)1;
++      return (len);
++}
++#endif
++
++
++#ifdef RA_STATS
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++                                                      IMG_CHAR **ppszStr, 
++                                                      IMG_UINT32 *pui32StrLen)
++{
++      IMG_CHAR        *pszStr = *ppszStr;
++      IMG_UINT32      ui32StrLen = *pui32StrLen;
++      IMG_INT32       i32Count;
++      BT                      *pBT;
++      
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "\nArena '%s':\n", pArena->name);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      
++              
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "  allocCB=%08X freeCB=%08X handle=%08X quantum=%d\n", 
++                                                       pArena->pImportAlloc, 
++                                                       pArena->pImportFree, 
++                                                       pArena->pImportHandle,
++                                                       pArena->uQuantum);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "span count\t\t%lu\n", pArena->sStatistics.uSpanCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "live segment count\t%lu\n", pArena->sStatistics.uLiveSegmentCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "free segment count\t%lu\n", pArena->sStatistics.uFreeSegmentCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "free resource count\t%lu (0x%x)\n",
++                                                      pArena->sStatistics.uFreeResourceCount,
++                                                      (unsigned int)pArena->sStatistics.uFreeResourceCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "total allocs\t\t%lu\n", pArena->sStatistics.uCumulativeAllocs);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "total frees\t\t%lu\n", pArena->sStatistics.uCumulativeFrees);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "import count\t\t%lu\n", pArena->sStatistics.uImportCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "export count\t\t%lu\n", pArena->sStatistics.uExportCount);
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++
++      CHECK_SPACE(ui32StrLen);
++      i32Count = OSSNPrintf(pszStr, 100, "  segment Chain:\n");
++      UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      
++      if (pArena->pHeadSegment != IMG_NULL &&
++          pArena->pHeadSegment->pPrevSegment != IMG_NULL)
++      {
++              CHECK_SPACE(ui32StrLen);
++              i32Count = OSSNPrintf(pszStr, 100, "  error: head boundary tag has invalid pPrevSegment\n");
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++      
++      if (pArena->pTailSegment != IMG_NULL &&
++          pArena->pTailSegment->pNextSegment != IMG_NULL)
++      {
++              CHECK_SPACE(ui32StrLen);
++              i32Count = OSSNPrintf(pszStr, 100, "  error: tail boundary tag has invalid pNextSegment\n");
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++      
++      for (pBT=pArena->pHeadSegment; pBT!=IMG_NULL; pBT=pBT->pNextSegment)
++      {
++              CHECK_SPACE(ui32StrLen);
++              i32Count = OSSNPrintf(pszStr, 100, "\tbase=0x%x size=0x%x type=%s ref=%08X\n", 
++                                                                                       (unsigned long) pBT->base,
++                                                                                       pBT->uSize,
++                                                                                       _BTType(pBT->type),
++                                                                                       pBT->psMapping);
++              UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
++      }
++
++      *ppszStr = pszStr;
++      *pui32StrLen = ui32StrLen;
++      
++      return PVRSRV_OK;
++}
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/common/resman.c git-nokia/drivers/gpu/pvr/services4/srvkm/common/resman.c
+--- git/drivers/gpu/pvr/services4/srvkm/common/resman.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/common/resman.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,958 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "resman.h"
++
++#ifdef __linux__
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/semaphore.h>
++#include <linux/version.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
++#include <linux/semaphore.h>
++#endif
++#include <linux/sched.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++#include <linux/hardirq.h>
++#else
++#include <asm/hardirq.h>
++#endif
++static DECLARE_MUTEX(lock);
++
++#define ACQUIRE_SYNC_OBJ  do {                                                        \
++              if (in_interrupt()) {                                                   \
++                      printk ("ISR cannot take RESMAN mutex\n");      \
++                      BUG();                                                                          \
++              }                                                                                               \
++              else down (&lock);                                                              \
++} while (0)
++#define RELEASE_SYNC_OBJ up (&lock)
++
++#else
++
++#define ACQUIRE_SYNC_OBJ
++#define RELEASE_SYNC_OBJ
++
++#endif
++
++#define RESMAN_SIGNATURE 0x12345678
++
++typedef struct _RESMAN_ITEM_
++{
++#ifdef DEBUG
++      IMG_UINT32                              ui32Signature;
++#endif
++      struct _RESMAN_ITEM_    **ppsThis;      
++      struct _RESMAN_ITEM_    *psNext;        
++
++      IMG_UINT32                              ui32Flags;      
++      IMG_UINT32                              ui32ResType;
++
++      IMG_PVOID                               pvParam;        
++      IMG_UINT32                              ui32Param;      
++
++      RESMAN_FREE_FN                  pfnFreeResource;
++
++      IMG_UINT32                              ui32ProcessID;
++
++} RESMAN_ITEM;
++
++
++typedef struct _RESMAN_PROCESS_
++{
++#ifdef DEBUG
++      IMG_UINT32                                      ui32Signature;
++#endif
++      struct  _RESMAN_PROCESS_        **ppsThis;
++      struct  _RESMAN_PROCESS_        *psNext;
++
++      IMG_UINT32                                      ui32ProcessID;
++      IMG_UINT32                                      ui32RefCount; 
++      RESMAN_ITEM                                     *psResItemList;
++
++} RESMAN_PROCESS, *PRESMAN_PROCESS;
++
++
++typedef struct
++{
++      RESMAN_PROCESS  *psProcessList;
++
++} RESMAN_LIST, *PRESMAN_LIST;
++
++
++PRESMAN_LIST  gpsResList=IMG_NULL;
++
++
++#define PRINT_RESLIST(x, y, z)
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback);
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_PROCESS    psProcess,
++                                                                                 IMG_UINT32           ui32SearchCriteria, 
++                                                                                 IMG_UINT32           ui32ResType, 
++                                                                                 IMG_PVOID            pvParam, 
++                                                                                 IMG_UINT32           ui32Param, 
++                                                                                 IMG_BOOL                     bExecuteCallback);
++
++static PRESMAN_PROCESS FindProcess(IMG_UINT32 ui32ProcessID);
++static IMG_VOID SaveRestoreBuffers(IMG_BOOL bSaveBuffers);
++
++#ifdef DEBUG
++      static IMG_VOID ValidateResList(PRESMAN_LIST psResList);
++      #define VALIDATERESLIST() ValidateResList(gpsResList)
++#else
++      #define VALIDATERESLIST()
++#endif
++
++#ifdef __linux__
++#include "proc.h"
++#endif
++
++#if defined(__linux__)
++
++static const char *
++resourceType (IMG_UINT32 type)
++{
++      static char buf[32];
++      switch (type)
++      {
++              
++              case RESMAN_TYPE_HW_RENDER_CONTEXT:
++                      return "HW Render Context Resource";
++              case RESMAN_TYPE_SHARED_PB_DESC:
++                      return "Shared Parameter Buffer Description Resource";
++              
++              
++              
++              
++              
++              case RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN:
++                      return "Display Class Swapchain Resource";
++              case RESMAN_TYPE_DISPLAYCLASS_DEVICE:
++                      return "Display Class Device Resource";
++              
++              
++              case RESMAN_TYPE_BUFFERCLASS_DEVICE:
++                      return "Buffer Class Device Resource";
++
++              
++              case RESMAN_TYPE_OS_USERMODE_MAPPING:
++                      return "OS specific User mode mappings";
++              
++              
++              case RESMAN_TYPE_DEVICEMEM_CONTEXT:
++                      return "Device Memory Context Resource";
++              case RESMAN_TYPE_DEVICECLASSMEM_MAPPING:
++                      return "Device Memory Mapping Resource";
++              case RESMAN_TYPE_DEVICEMEM_MAPPING:
++                      return "Device Memory Mapping Resource";
++              case RESMAN_TYPE_DEVICEMEM_WRAP:
++                      return "Device Memory Wrap Resource";
++              case RESMAN_TYPE_DEVICEMEM_ALLOCATION:
++                      return "Device Memory Allocation Resource";
++              default:                                        
++                      sprintf(buf, "Unknown (type %lu)", type);
++                      return buf;
++      }
++}
++#endif
++
++#ifdef __linux__
++static off_t
++ResManPrintProcessResources (char * buffer, size_t size,
++                                                       PRESMAN_PROCESS psProcess)
++{
++    off_t off = 0;
++    
++      
++      PRESMAN_ITEM psCurItem = psProcess->psResItemList;
++
++      off = printAppend (buffer, size, 0,
++                                         "  pid=%ld ref count=%ld\n"
++                                         "    Flags    pParam   Param    FreeFn   Type\n",
++                                         psProcess->ui32ProcessID,
++                                         psProcess->ui32RefCount);
++      while(psCurItem)
++      {
++              off  = printAppend (buffer, size, off,
++                                                      "    %8lx %8p %8lx %8p %s\n",
++                                                      psCurItem->ui32Flags,
++                                                      psCurItem->pvParam,
++                                                      psCurItem->ui32Param,
++                                                      psCurItem->pfnFreeResource,
++                                                      resourceType(psCurItem->ui32ResType));
++              psCurItem = psCurItem->psNext;
++      }
++      return off;
++} 
++
++static off_t
++ResManPrintAllProcessResources (char * buffer, size_t size, off_t off)
++{
++      PRESMAN_PROCESS psProcess;
++
++      VALIDATERESLIST();
++
++      if (size < 80)                          
++              return 0;
++      
++    if (!off)
++        return printAppend (buffer, size, 0, "Registered resources\n");
++    
++      if(gpsResList != IMG_NULL) 
++      {                               
++      
++      psProcess = gpsResList->psProcessList;
++      while (--off && psProcess)
++              psProcess = psProcess->psNext;
++      }
++
++    return ResManPrintProcessResources (buffer, size, psProcess);
++
++} 
++
++#endif 
++
++
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID)
++{
++      
++      if(gpsResList == IMG_NULL)
++      {       
++              if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(*gpsResList),
++                                              (IMG_VOID **)&gpsResList, IMG_NULL) != PVRSRV_OK)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              gpsResList->psProcessList = IMG_NULL;
++      
++              
++              VALIDATERESLIST();
++
++      }
++      return PVRSRV_OK;
++}
++
++
++IMG_VOID ResManDeInit(IMG_VOID)
++{
++      if (gpsResList != IMG_NULL)
++      {
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*gpsResList), gpsResList, IMG_NULL);
++      }
++}
++
++
++static PVRSRV_ERROR ResManProcessConnect(IMG_UINT32 ui32ProcID)
++{
++      PRESMAN_PROCESS         psProcess;
++      PVRSRV_ERROR            eError;
++
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      VALIDATERESLIST();
++
++      
++      psProcess = FindProcess(ui32ProcID);
++      if(psProcess == IMG_NULL)
++      {
++              
++              eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(RESMAN_PROCESS), (IMG_VOID **)&psProcess, IMG_NULL);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "ResManProcessConnect: ERROR allocating new RESMAN process struct"));
++                      
++                      VALIDATERESLIST();
++
++                      
++                      RELEASE_SYNC_OBJ;
++
++                      return eError;
++              }
++
++#ifdef DEBUG
++              psProcess->ui32Signature = RESMAN_SIGNATURE;
++#endif 
++              psProcess->ui32ProcessID        = ui32ProcID;
++              psProcess->ui32RefCount         = 0;
++              psProcess->psResItemList        = IMG_NULL;
++
++              
++              psProcess->psNext               = gpsResList->psProcessList;
++              psProcess->ppsThis              = &gpsResList->psProcessList;
++              gpsResList->psProcessList       = psProcess;
++              if (psProcess->psNext)
++              {
++                      psProcess->psNext->ppsThis = &(psProcess->psNext);                      
++              }
++      }
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "ResManProcessConnect: Process 0x%x has ref-count %d",
++                      psProcess->ui32ProcessID, psProcess->ui32RefCount));
++      psProcess->ui32RefCount++;
++
++      
++      VALIDATERESLIST();
++
++      
++      RELEASE_SYNC_OBJ;
++
++      return PVRSRV_OK;
++}
++static PVRSRV_ERROR ResManProcessDisconnect(IMG_UINT32 ui32ProcID)
++{     
++      PRESMAN_PROCESS         psProcess;
++      
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      VALIDATERESLIST();
++
++      
++      psProcess = FindProcess(ui32ProcID);
++      if(psProcess == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "ResManProcessDisconnect: "
++                               "ERROR finding process struct for 0x%x", ui32ProcID));
++              
++              
++              VALIDATERESLIST();
++
++              
++              PRINT_RESLIST(gpsResList, psProcess, IMG_FALSE);
++
++              
++              RELEASE_SYNC_OBJ;
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      if(--psProcess->ui32RefCount == 0)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE, "ResManProcessDisconnect: "
++                              "Last close from process 0x%x received", psProcess->ui32ProcessID));
++
++              
++              PRINT_RESLIST(gpsResList, psProcess, IMG_TRUE);
++
++              
++
++              
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_OS_USERMODE_MAPPING, 0, 0, IMG_TRUE);
++
++              
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_HW_RENDER_CONTEXT, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_TRANSFER_CONTEXT, 0, 0, IMG_TRUE);                       
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_SHARED_PB_DESC, 0, 0, IMG_TRUE);
++
++              
++              
++              
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DISPLAYCLASS_DEVICE, 0, 0, IMG_TRUE);
++              
++              
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_BUFFERCLASS_DEVICE, 0, 0, IMG_TRUE);
++
++              
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICECLASSMEM_MAPPING, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_WRAP, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_MAPPING, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_ALLOCATION, 0, 0, IMG_TRUE);
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_DEVICEMEM_CONTEXT, 0, 0, IMG_TRUE);
++
++              
++              FreeResourceByCriteria(psProcess, RESMAN_CRITERIA_RESTYPE, RESMAN_TYPE_RESOURCE_PERPROC_DATA, 0, 0, IMG_TRUE);
++
++              
++              *(psProcess->ppsThis) = psProcess->psNext;
++              if (psProcess->psNext)
++              {
++                      psProcess->psNext->ppsThis      = psProcess->ppsThis;
++              }
++
++              
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psProcess, IMG_NULL);
++
++      }
++
++      
++      VALIDATERESLIST();
++
++      
++      PRINT_RESLIST(gpsResList, psProcess, IMG_FALSE);
++
++      
++      if (gpsResList->psProcessList == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE, "ResManProcessDisconnect: Releasing Resource List"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, gpsResList, IMG_NULL);
++
++              gpsResList = IMG_NULL;
++      }
++
++      
++      RELEASE_SYNC_OBJ;
++
++      return PVRSRV_OK;
++}
++
++
++PRESMAN_ITEM ResManRegisterRes(IMG_UINT32             ui32ResType, 
++                                                         IMG_PVOID            pvParam, 
++                                                         IMG_UINT32           ui32Param, 
++                                                         RESMAN_FREE_FN       pfnFreeResource, 
++                                                         IMG_UINT32           ui32ProcessID)
++{
++      IMG_UINT32              ui32CurProcessID;
++      PRESMAN_ITEM    psNewResItem;
++      PRESMAN_PROCESS psProcess;
++
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      VALIDATERESLIST();
++
++      
++      if(ui32ResType & RESMAN_TYPE_USE_PROCESSID)
++      {
++              ui32CurProcessID = ui32ProcessID;
++              ui32ResType &= ~RESMAN_TYPE_USE_PROCESSID;
++      }
++      else
++      {
++              
++              ui32CurProcessID = OSGetCurrentProcessIDKM();
++      }
++
++      PVR_ASSERT(ui32ResType != 0);           
++
++      PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: register resource "
++                      "Proc 0x%x, ResType 0x%x, pvParam 0x%x, ui32Param 0x%x, "
++                      "FreeFunc %08X",
++                      ui32CurProcessID, ui32ResType, (IMG_UINT32)pvParam,
++                      ui32Param, pfnFreeResource));
++
++      
++      psProcess = FindProcess(ui32CurProcessID);
++
++      if(psProcess == IMG_NULL)
++      {
++              
++
++
++
++
++              
++              PVR_DPF((PVR_DBG_MESSAGE, "ResManRegisterRes: "
++                              "Could not find process info for process 0x%x",
++                              ui32CurProcessID));
++
++              
++              VALIDATERESLIST();
++
++              
++              RELEASE_SYNC_OBJ;
++
++              return((PRESMAN_ITEM)IMG_NULL);
++      }                                                       
++
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                      sizeof(RESMAN_ITEM), (IMG_VOID **)&psNewResItem,
++                                      IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "ResManRegisterRes: "
++                              "ERROR allocating new resource item"));
++
++              
++              RELEASE_SYNC_OBJ;
++
++              return((PRESMAN_ITEM)IMG_NULL);
++      }
++
++      
++#ifdef DEBUG
++      psNewResItem->ui32Signature     = RESMAN_SIGNATURE;
++#endif 
++      psNewResItem->ui32ResType               = ui32ResType;
++      psNewResItem->pvParam                   = pvParam;
++      psNewResItem->ui32Param                 = ui32Param;
++      psNewResItem->pfnFreeResource   = pfnFreeResource;
++      psNewResItem->ui32ProcessID             = ui32CurProcessID;
++      psNewResItem->ui32Flags             = 0;
++      
++      
++      psNewResItem->ppsThis   = &psProcess->psResItemList;
++      psNewResItem->psNext    = psProcess->psResItemList;
++      psProcess->psResItemList = psNewResItem;
++      if (psNewResItem->psNext)
++      {
++              psNewResItem->psNext->ppsThis = &psNewResItem->psNext;
++      }
++
++      
++      VALIDATERESLIST();
++
++      
++      RELEASE_SYNC_OBJ;
++
++      return(psNewResItem);
++}
++
++PVRSRV_ERROR ResManFreeResByPtr(RESMAN_ITEM   *psResItem,
++                                                              IMG_BOOL        bExecuteCallback)
++{
++      PVRSRV_ERROR eError;
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: NULL ptr - nothing to do"));
++              return PVRSRV_OK;
++      }
++
++      PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByPtr: freeing resource at %08X", psResItem));
++
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      VALIDATERESLIST();
++
++      
++      eError = FreeResourceByPtr(psResItem, bExecuteCallback);
++
++      
++      VALIDATERESLIST();
++
++      
++      RELEASE_SYNC_OBJ;
++
++      return(eError);
++}
++
++
++PVRSRV_ERROR ResManFreeResByCriteria(IMG_UINT32 ui32SearchCriteria, 
++                                                                       IMG_UINT32     ui32ResType, 
++                                                                       IMG_PVOID      pvParam, 
++                                                                       IMG_UINT32     ui32Param, 
++                                                                       IMG_BOOL       bExecuteCallback)
++{
++      IMG_UINT32              ui32CurProcessID;
++      PRESMAN_PROCESS psProcess;
++      PVRSRV_ERROR eError;
++
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      VALIDATERESLIST();
++
++      
++      ui32CurProcessID = OSGetCurrentProcessIDKM();
++
++      PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++                      "Proc 0x%x, Criteria 0x%x, Type 0x%x, Addr 0x%x, Param 0x%x",
++                      ui32CurProcessID, ui32SearchCriteria, ui32ResType,
++                      (IMG_UINT32)pvParam, ui32Param));
++
++      
++      psProcess = FindProcess(ui32CurProcessID);
++
++      if(psProcess == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE, "ResManFreeResByCriteria: "
++                              "ERROR finding process struct for 0x%x",
++                              ui32CurProcessID));
++
++              
++              VALIDATERESLIST();
++              
++              
++              RELEASE_SYNC_OBJ;
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      eError = FreeResourceByCriteria(psProcess, ui32SearchCriteria,
++                                                                      ui32ResType, pvParam, ui32Param,
++                                                                      bExecuteCallback);
++
++      
++      VALIDATERESLIST();
++
++      
++      RELEASE_SYNC_OBJ;
++
++      return eError;
++}
++
++
++PVRSRV_ERROR ResManPrePower(PVR_POWER_STATE eNewPowerState, 
++                                                      PVR_POWER_STATE eCurrentPowerState)
++{
++      if ((eNewPowerState != eCurrentPowerState) &&
++              (eNewPowerState==PVRSRV_POWER_STATE_D3))
++      {
++              SaveRestoreBuffers(IMG_TRUE);           
++      }
++      
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR ResManPostPower(PVR_POWER_STATE eNewPowerState, 
++                                                       PVR_POWER_STATE eCurrentPowerState)
++{
++      if ((eNewPowerState != eCurrentPowerState) 
++      &&      (eCurrentPowerState == PVRSRV_POWER_STATE_D3))
++      {
++              SaveRestoreBuffers(IMG_FALSE);
++      }
++      
++      return PVRSRV_OK;
++}
++
++
++IMG_INTERNAL PVRSRV_ERROR ResManFindResourceByPtr(RESMAN_ITEM *psItem)
++{
++      RESMAN_PROCESS *psProcess;
++      RESMAN_ITEM *psCurItem;
++      IMG_UINT32 ui32CurProcessID;
++
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      
++      ui32CurProcessID = OSGetCurrentProcessIDKM();
++
++      psProcess = FindProcess(ui32CurProcessID);
++      if(psProcess == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FindResourceByPtr: "
++                               "ERROR finding process struct for 0x%x", ui32CurProcessID));
++              
++              
++              VALIDATERESLIST();
++
++              
++              RELEASE_SYNC_OBJ;
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++
++      PVR_ASSERT(psItem != IMG_NULL);
++      PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "FindResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++                      psItem, psItem->psNext));
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "FindResourceByPtr: Resource Proc 0x%x, Type 0x%x, Addr 0x%x, "
++                      "Param 0x%x, FnCall %08X, Flags 0x%x",
++                      OSGetCurrentProcessIDKM(),
++                      psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++                      psItem->pfnFreeResource, psItem->ui32Flags));
++
++      
++      psCurItem       = psProcess->psResItemList;
++
++      while(psCurItem != IMG_NULL)
++      {
++              
++              if(psCurItem != psItem)
++              {
++                      
++                      psCurItem = psCurItem->psNext;
++              }
++              else
++              {
++                      
++                      RELEASE_SYNC_OBJ;
++                      return PVRSRV_OK;
++              }
++      }
++
++      
++      RELEASE_SYNC_OBJ;
++
++      return PVRSRV_ERROR_NOT_OWNER;
++}
++
++static PVRSRV_ERROR FreeResourceByPtr(RESMAN_ITEM *psItem, IMG_BOOL bExecuteCallback)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      PVR_ASSERT(psItem != IMG_NULL);
++      PVR_ASSERT(psItem->ui32Signature == RESMAN_SIGNATURE);
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "FreeResourceByPtr: psItem=%08X, psItem->psNext=%08X",
++                      psItem, psItem->psNext));
++
++      PVR_DPF((PVR_DBG_MESSAGE,
++                      "FreeResourceByPtr: Resource Proc 0x%x, Type 0x%x, Addr 0x%x, "
++                      "Param 0x%x, FnCall %08X, Flags 0x%x",
++                      OSGetCurrentProcessIDKM(),
++                      psItem->ui32ResType, (IMG_UINT32)psItem->pvParam, psItem->ui32Param,
++                      psItem->pfnFreeResource, psItem->ui32Flags));
++
++      
++      if (psItem->psNext)
++      {
++              psItem->psNext->ppsThis = psItem->ppsThis;
++      }
++      *psItem->ppsThis = psItem->psNext;
++
++      
++      RELEASE_SYNC_OBJ;
++
++      
++      if (bExecuteCallback)
++      {
++              eError = psItem->pfnFreeResource(psItem->ui32ProcessID,
++                                                                               psItem->pvParam, psItem->ui32Param);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR calling FreeResource function"));
++              }
++      }
++
++      
++      ACQUIRE_SYNC_OBJ;
++
++      
++      if(OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psItem, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FreeResourceByPtr: ERROR freeing resource list item memory"));
++              eError = PVRSRV_ERROR_GENERIC;
++      }
++
++      return(eError);
++}
++
++
++static PVRSRV_ERROR FreeResourceByCriteria(PRESMAN_PROCESS    psProcess,
++                                                                                 IMG_UINT32           ui32SearchCriteria, 
++                                                                                 IMG_UINT32           ui32ResType, 
++                                                                                 IMG_PVOID            pvParam, 
++                                                                                 IMG_UINT32           ui32Param, 
++                                                                                 IMG_BOOL                     bExecuteCallback)
++{
++      PRESMAN_ITEM    psCurItem;
++      IMG_BOOL                bMatch;
++      PVRSRV_ERROR    eError = PVRSRV_OK;
++
++      
++      psCurItem       = psProcess->psResItemList;
++
++      while(psCurItem != IMG_NULL)
++      {
++              
++              bMatch = IMG_TRUE;
++
++              
++              if((ui32SearchCriteria & RESMAN_CRITERIA_RESTYPE) &&
++                              psCurItem->ui32ResType != ui32ResType)
++              {
++                      bMatch = IMG_FALSE;
++              }
++                      
++              
++              else if((ui32SearchCriteria & RESMAN_CRITERIA_PVOID_PARAM) &&
++                              psCurItem->pvParam != pvParam)
++              {
++                      bMatch = IMG_FALSE;
++              }
++
++              
++              else if((ui32SearchCriteria & RESMAN_CRITERIA_UI32_PARAM) &&
++                              psCurItem->ui32Param != ui32Param)
++              {
++                      bMatch = IMG_FALSE;
++              }               
++              
++              if(!bMatch)
++              {
++                      
++                      psCurItem = psCurItem->psNext;
++              }
++              else
++              {
++                      
++                      eError = FreeResourceByPtr(psCurItem, bExecuteCallback);
++
++                      if(eError != PVRSRV_OK)
++                      {
++                              return eError;
++                      }
++
++                      
++
++
++                      psCurItem = psProcess->psResItemList;
++              }
++      }
++
++      return eError;
++}
++
++
++
++
++static PRESMAN_PROCESS FindProcess(IMG_UINT32 ui32ProcessID)
++{
++      PRESMAN_PROCESS psCurProcess;
++
++      
++      
++      if (gpsResList == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "FindProcess: resman not initialised yet"));
++              return ((PRESMAN_PROCESS)IMG_NULL);
++      }
++      
++      psCurProcess = gpsResList->psProcessList;
++
++      while(psCurProcess != IMG_NULL)
++      {
++              if(psCurProcess->ui32ProcessID == ui32ProcessID)
++              {
++                      return(psCurProcess);
++              }
++              psCurProcess = psCurProcess->psNext;
++      }
++
++      return((PRESMAN_PROCESS)IMG_NULL);
++}
++
++
++#ifdef DEBUG
++static IMG_VOID ValidateResList(PRESMAN_LIST psResList)
++{
++      PRESMAN_ITEM    psCurItem, *ppsThisItem;
++      PRESMAN_PROCESS psCurProcess, *ppsThisProcess;
++
++      
++      if (psResList == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_MESSAGE, "ValidateResList: resman not initialised yet"));
++              return;
++      }
++
++      psCurProcess = psResList->psProcessList;
++      ppsThisProcess = &psResList->psProcessList;
++
++      
++      while(psCurProcess != IMG_NULL)
++      {
++              
++              PVR_ASSERT(psCurProcess->ui32Signature == RESMAN_SIGNATURE);
++              if (psCurProcess->ppsThis != ppsThisProcess)
++              {
++                      PVR_DPF((PVR_DBG_WARNING,
++                                      "psCP=%08X pid=0x%x psCP->ppsThis=%08X psCP->psNext=%08X ppsTP=%08X",
++                                      psCurProcess, psCurProcess->ui32ProcessID, psCurProcess->ppsThis,
++                                      psCurProcess->psNext, ppsThisProcess));
++                      PVR_ASSERT(psCurProcess->ppsThis == ppsThisProcess);
++              }
++      
++              
++              psCurItem = psCurProcess->psResItemList;
++              ppsThisItem = &psCurProcess->psResItemList;
++              while(psCurItem != IMG_NULL)
++              {
++                      
++                      PVR_ASSERT(psCurItem->ui32Signature == RESMAN_SIGNATURE);
++                      if (psCurItem->ppsThis != ppsThisItem)
++                      {
++                              PVR_DPF((PVR_DBG_WARNING,
++                                              "psCurItem=%08X psCurItem->ppsThis=%08X psCurItem->psNext=%08X ppsThisItem=%08X",
++                                              psCurItem, psCurItem->ppsThis, psCurItem->psNext, ppsThisItem));
++                              PVR_ASSERT(psCurItem->ppsThis == ppsThisItem);
++                      }
++
++                      
++                      ppsThisItem = &psCurItem->psNext;
++                      psCurItem = psCurItem->psNext;
++              }
++
++              
++              ppsThisProcess = &psCurProcess->psNext;
++              psCurProcess = psCurProcess->psNext;
++      }
++}
++#endif 
++
++
++
++IMG_INTERNAL
++PVRSRV_ERROR PVRSRVResManConnect(IMG_UINT32   ui32ProcID,
++                                                               IMG_BOOL       bConnect)
++{
++      if (ui32ProcID == RESMAN_PROCESSID_FIND)
++      {
++              ui32ProcID = OSGetCurrentProcessIDKM();
++      }
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVResManConnect(%s): ProcID:%lu",
++                       bConnect ? "T" : "F", ui32ProcID));
++
++      if(bConnect)
++      {
++              return ResManProcessConnect(ui32ProcID);
++      }
++      else
++      {
++              return ResManProcessDisconnect(ui32ProcID);
++      }
++}
++
++
++static IMG_VOID SaveRestoreBuffers(IMG_BOOL bSaveBuffers)
++{
++      PVR_UNREFERENCED_PARAMETER(bSaveBuffers);
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,2020 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "hash.h"
++#include "ra.h"
++#include "pdump_km.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "mmu.h"
++
++
++typedef struct _MMU_PT_INFO_
++{
++      
++      IMG_VOID *hPTPageOSMemHandle;
++      IMG_CPU_VIRTADDR PTPageCpuVAddr;
++      IMG_UINT32 ui32ValidPTECount;
++} MMU_PT_INFO;
++
++struct _MMU_CONTEXT_
++{
++      
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      
++      IMG_CPU_VIRTADDR pvPDCpuVAddr;
++      IMG_DEV_PHYADDR sPDDevPAddr;
++
++      IMG_VOID *hPDOSMemHandle;
++
++      
++      MMU_PT_INFO *apsPTInfoList[1024];
++
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++
++      struct _MMU_CONTEXT_ *psNext;
++};
++
++struct _MMU_HEAP_
++{
++      MMU_CONTEXT *psMMUContext;
++
++      IMG_UINT32 ui32PTBaseIndex;
++      IMG_UINT32 ui32PTPageCount;
++      IMG_UINT32 ui32PTEntryCount;
++
++      
++      RA_ARENA *psVMArena;
++
++      DEV_ARENA_DESCRIPTOR *psDevArena;
++};
++
++#if defined (SUPPORT_SGX_MMU_DUMMY_PAGE)
++#define DUMMY_DATA_PAGE_SIGNATURE     0xDEADBEEF
++#endif
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables   (MMU_HEAP *pMMUHeap,
++                                       IMG_DEV_VIRTADDR DevVAddr,
++                                       IMG_SIZE_T uSize,
++                                       IMG_BOOL bForUnmap,
++                                       IMG_HANDLE hUniqueTag);
++#endif 
++
++#define PAGE_TEST                                     0
++#if PAGE_TEST
++static void PageTest(void* pMem, IMG_DEV_PHYADDR sDevPAddr);
++#endif
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++      IMG_UINT32 ui32RegVal;
++      IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++      
++
++
++      ui32RegVal = OSReadHWReg(pvRegsBaseKM, EUR_CR_BIF_CTRL);
++
++      OSWriteHWReg(pvRegsBaseKM,
++                              EUR_CR_BIF_CTRL,
++                              ui32RegVal | EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++      
++      PDUMPREG(EUR_CR_BIF_CTRL, EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++}
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext)
++{
++      IMG_UINT32 ui32RegVal;
++      IMG_VOID *pvRegsBaseKM = psMMUContext->psDevInfo->pvRegsBaseKM;
++
++      
++
++
++
++
++      OSWriteHWReg(pvRegsBaseKM,
++                              EUR_CR_BIF_CTRL,
++                              ui32RegVal & ~EUR_CR_BIF_CTRL_MMU_BYPASS_HOST_MASK);
++      
++      PDUMPREG(EUR_CR_BIF_CTRL, 0);
++}
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PDCACHE;      
++}
++
++
++IMG_VOID MMU_InvalidatePageTableCache(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->ui32CacheControl |= SGX_BIF_INVALIDATE_PTCACHE;      
++}
++
++
++/* _AllocPageTables
++ *
++ * Compute the page-table bookkeeping for a heap: total PT entry count,
++ * base PT index for the arena's start address, and the number of
++ * page-table pages spanned.  No memory is allocated here; the PT pages
++ * themselves are created on demand by _DeferredAllocPagetables().
++ *
++ * Returns IMG_TRUE always (kept for interface symmetry with callers
++ * that check the result).
++ */
++static IMG_BOOL
++_AllocPageTables (MMU_HEAP *pMMUHeap)
++{
++	PVR_DPF ((PVR_DBG_MESSAGE, "_AllocPageTables()"));
++
++	PVR_ASSERT (pMMUHeap!=IMG_NULL);
++	PVR_ASSERT (HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);
++
++	/* One PTE per device page in the arena. */
++	pMMUHeap->ui32PTEntryCount = pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;
++
++	/* Index of the first PTE covering the arena base device virtual
++	 * address (PD and PT bits of the address, in page units). */
++	pMMUHeap->ui32PTBaseIndex = (pMMUHeap->psDevArena->BaseDevVAddr.uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >> SGX_MMU_PAGE_SHIFT;
++
++	/* Round the entry count up to whole page-table pages. */
++	pMMUHeap->ui32PTPageCount = (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >> SGX_MMU_PT_SHIFT;
++
++	return IMG_TRUE;
++}
++
++/* _DeferredFreePageTable
++ *
++ * Tear down one page table of a heap: unhook it from the page
++ * directory of every affected MMU context (all contexts for shared
++ * heaps, just the owning context for per-context/kernel heaps), zero
++ * and free the PT page memory, and release the MMU_PT_INFO tracking
++ * structure.  Entry-count accounting on the heap is updated as pages
++ * are released.
++ *
++ * pMMUHeap    - heap whose page table is being freed.
++ * ui32PTIndex - index of the page table within the heap's PD range.
++ */
++static IMG_VOID
++_DeferredFreePageTable (MMU_HEAP *pMMUHeap, IMG_UINT32 ui32PTIndex)
++{
++	IMG_UINT32 *pui32PDEntry;
++	IMG_UINT32 i;
++	IMG_UINT32 ui32PDIndex;
++	SYS_DATA *psSysData;
++	MMU_PT_INFO **ppsPTInfoList;
++
++	if (SysAcquireData(&psSysData) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePageTables: ERROR call to SysAcquireData failed"));
++		return;
++	}
++
++	/* PD index of the heap's base address; ui32PTIndex is relative to this. */
++	ui32PDIndex = pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++	/* PT tracking entries for this heap's PD range. */
++	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++	{
++		/* The page table must be absent or contain no live PTEs. */
++		PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == IMG_NULL || ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
++	}
++
++	PDUMPCOMMENT("Free page table (page count == %08X)", pMMUHeap->ui32PTPageCount);
++	if(ppsPTInfoList[ui32PTIndex] && ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
++	{
++		PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++	}
++
++	switch(pMMUHeap->psDevArena->DevMemHeapType)
++	{
++		case DEVICE_MEMORY_HEAP_SHARED :
++		case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++		{
++			/* Shared heap: the PDE appears in every MMU context, so
++			 * walk the device's whole context list. */
++			MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++			while(psMMUContext)
++			{
++				pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++				pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++				/* Point the PDE at the dummy PT so the hardware never
++				 * walks a freed page. */
++				pui32PDEntry[ui32PTIndex] = psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++#else
++				/* Invalidate the PDE. */
++				pui32PDEntry[ui32PTIndex] = 0;
++#endif
++
++				PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++				psMMUContext = psMMUContext->psNext;
++			}
++			break;
++		}
++		case DEVICE_MEMORY_HEAP_PERCONTEXT :
++		case DEVICE_MEMORY_HEAP_KERNEL :
++		{
++			/* Only the owning context's page directory references this PT. */
++			pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++			pui32PDEntry += ui32PDIndex;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++			pui32PDEntry[ui32PTIndex] = pMMUHeap->psMMUContext->psDevInfo->sDummyPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++#else
++			pui32PDEntry[ui32PTIndex] = 0;
++#endif
++
++			PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[ui32PTIndex], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++			break;
++		}
++		default:
++		{
++			PVR_DPF((PVR_DBG_ERROR, "_DeferredFreePagetable: ERROR invalid heap type"));
++			return;
++		}
++	}
++
++	/* Release the PT page memory and its tracking structure. */
++	if(ppsPTInfoList[ui32PTIndex] != IMG_NULL)
++	{
++		if(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != IMG_NULL)
++		{
++			IMG_PUINT32 pui32Tmp;
++
++			pui32Tmp = (IMG_UINT32*)ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;
++
++			/* Clear the PTEs before freeing.  NOTE(review): the 1024
++			 * bound is presumably SGX_MMU_PT_SIZE (entries per PT
++			 * page) — confirm and consider using the macro. */
++			for(i=0; (i<pMMUHeap->ui32PTEntryCount) && (i<1024); i++)
++			{
++				pui32Tmp[i] = 0;
++			}
++
++			/* No local device memory arena: the PT page came from
++			 * OSAllocPages; otherwise it was carved from the local
++			 * arena and mapped, so unmap and return it to the RA. */
++			if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++			{
++				OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++						  SGX_MMU_PAGE_SIZE,
++						  ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
++						  ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle);
++			}
++			else
++			{
++				IMG_SYS_PHYADDR sSysPAddr;
++				IMG_CPU_PHYADDR sCpuPAddr;
++
++				/* Recover the system physical address for RA_Free. */
++				sCpuPAddr = OSMapLinToCPUPhys(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
++				sSysPAddr = SysCpuPAddrToSysPAddr (sCpuPAddr);
++
++				OSUnMapPhysToLin(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
++                   SGX_MMU_PAGE_SIZE,
++                   PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                   ppsPTInfoList[ui32PTIndex]->hPTPageOSMemHandle);
++
++				RA_Free (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++			}
++
++			/* Account for the entries actually cleared above. */
++			pMMUHeap->ui32PTEntryCount -= i;
++		}
++		else
++		{
++			/* No CPU mapping: assume a full page table's worth of entries. */
++			pMMUHeap->ui32PTEntryCount -= 1024;
++		}
++
++		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++					sizeof(MMU_PT_INFO),
++					ppsPTInfoList[ui32PTIndex],
++					IMG_NULL);
++		ppsPTInfoList[ui32PTIndex] = IMG_NULL;
++	}
++	else
++	{
++		/* Never allocated: still account for a full page table. */
++		pMMUHeap->ui32PTEntryCount -= 1024;
++	}
++
++	PDUMPCOMMENT("Finished free page table (page count == %08X)", pMMUHeap->ui32PTPageCount);
++}
++
++/* _DeferredFreePageTables
++ *
++ * Free every page table belonging to the heap, then flag the BIF
++ * page-directory cache for invalidation so stale PDEs are not walked.
++ */
++static IMG_VOID
++_DeferredFreePageTables (MMU_HEAP *pMMUHeap)
++{
++	IMG_UINT32 i;
++
++	for(i=0; i<pMMUHeap->ui32PTPageCount; i++)
++	{
++		_DeferredFreePageTable(pMMUHeap, i);
++	}
++	MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++}
++
++
++/* _DeferredAllocPagetables
++ *
++ * Ensure page tables exist for the device virtual range
++ * [DevVAddr, DevVAddr + ui32Size): for each PD slot the range touches,
++ * allocate a MMU_PT_INFO tracker and a PT page (from OS pages or the
++ * local device memory arena), initialise its PTEs (dummy-page mappings
++ * or zero), and write the PDE into the owning context's page directory
++ * — or into every context's directory for shared heaps.
++ *
++ * Returns IMG_TRUE on success, IMG_FALSE on any allocation/mapping
++ * failure.  NOTE(review): on the failure paths below, a MMU_PT_INFO
++ * allocated earlier in the same call is not freed — confirm whether
++ * cleanup is handled by the caller via _DeferredFreePageTables().
++ */
++static IMG_BOOL
++_DeferredAllocPagetables(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++	IMG_UINT32 ui32PTPageCount;
++	IMG_UINT32 ui32PDIndex;
++	IMG_UINT32 i;
++	IMG_UINT32 *pui32PDEntry;
++	MMU_PT_INFO **ppsPTInfoList;
++	SYS_DATA *psSysData;
++
++	/* The requested address must lie inside the device address space. */
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE < 32
++	PVR_ASSERT(DevVAddr.uiAddr < (1<<SGX_FEATURE_ADDRESS_SPACE_SIZE));
++#endif
++
++	if (SysAcquireData(&psSysData) != PVRSRV_OK)
++	{
++		return IMG_FALSE;
++	}
++
++	/* First PD slot touched by the range. */
++	ui32PDIndex = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++	/* Number of PD slots (page tables) spanned, rounded up. */
++	ui32PTPageCount = (DevVAddr.uiAddr + ui32Size + (1<<(SGX_MMU_PAGE_SHIFT+SGX_MMU_PT_SHIFT)) - 1)
++						>> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++	ui32PTPageCount -= ui32PDIndex;
++
++	/* PD entries and PT trackers for the owning context, offset to the
++	 * first touched slot. */
++	pui32PDEntry = (IMG_UINT32*)pMMUHeap->psMMUContext->pvPDCpuVAddr;
++	pui32PDEntry += ui32PDIndex;
++
++	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++	PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
++	PDUMPCOMMENT("Page directory mods (page count == %08X)", ui32PTPageCount);
++
++	for(i=0; i<ui32PTPageCount; i++)
++	{
++		if(ppsPTInfoList[i] == IMG_NULL)
++		{
++			OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++						 sizeof (MMU_PT_INFO),
++						 (IMG_VOID **)&ppsPTInfoList[i], IMG_NULL);
++			if (ppsPTInfoList[i] == IMG_NULL)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocMem failed"));
++				return IMG_FALSE;
++			}
++			OSMemSet (ppsPTInfoList[i], 0, sizeof(MMU_PT_INFO));
++		}
++
++		/* Only allocate a PT page if this slot has none yet. */
++		if(ppsPTInfoList[i]->hPTPageOSMemHandle == IMG_NULL
++		&& ppsPTInfoList[i]->PTPageCpuVAddr == IMG_NULL)
++		{
++			IMG_CPU_PHYADDR sCpuPAddr;
++			IMG_DEV_PHYADDR sDevPAddr;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++			IMG_UINT32 *pui32Tmp;
++			IMG_UINT32 j;
++#else
++			/* Without dummy-page support an unpopulated slot must have
++			 * an invalid (zero) PDE. */
++			PVR_ASSERT(pui32PDEntry[i] == 0);
++#endif
++		
++			/* No local device memory arena: take OS pages; otherwise
++			 * carve a page from the local arena and map it. */
++			if(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena == IMG_NULL)
++			{
++				if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++								   SGX_MMU_PAGE_SIZE,
++								   (IMG_VOID **)&ppsPTInfoList[i]->PTPageCpuVAddr,
++								   &ppsPTInfoList[i]->hPTPageOSMemHandle) != PVRSRV_OK)
++				{
++					PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to OSAllocPages failed"));	
++					return IMG_FALSE;
++				}
++
++				/* Derive the device physical address of the PT page. */
++				if(ppsPTInfoList[i]->PTPageCpuVAddr)
++				{
++					sCpuPAddr = OSMapLinToCPUPhys(ppsPTInfoList[i]->PTPageCpuVAddr);
++				}
++				else
++				{
++					/* No linear address: resolve via the mem handle. */
++					sCpuPAddr = OSMemHandleToCpuPAddr(ppsPTInfoList[i]->hPTPageOSMemHandle, 0);
++				}
++				sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++			}
++			else
++			{
++				IMG_SYS_PHYADDR sSysPAddr;
++
++				if(RA_Alloc(pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->psLocalDevMemArena,
++							SGX_MMU_PAGE_SIZE,
++							IMG_NULL,
++							IMG_NULL,
++							0,
++							SGX_MMU_PAGE_SIZE, 
++							0, 
++							&(sSysPAddr.uiAddr))!= IMG_TRUE)
++				{
++					PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR call to RA_Alloc failed"));
++					return IMG_FALSE;
++				}
++
++				/* Map the arena page into kernel linear address space. */
++				sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++				ppsPTInfoList[i]->PTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++																	SGX_MMU_PAGE_SIZE,
++																	PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++																	&ppsPTInfoList[i]->hPTPageOSMemHandle);
++				if(!ppsPTInfoList[i]->PTPageCpuVAddr)
++				{
++					PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR failed to map page tables"));
++					return IMG_FALSE;
++				}
++
++				sDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++				#if PAGE_TEST
++				PageTest(ppsPTInfoList[i]->PTPageCpuVAddr, sDevPAddr);
++				#endif
++			}
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++			pui32Tmp = (IMG_UINT32*)ppsPTInfoList[i]->PTPageCpuVAddr;
++			/* Point every PTE at the dummy data page. */
++			for(j=0; j<SGX_MMU_PT_SIZE; j++)
++			{
++				pui32Tmp[j] = pMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++			}
++#else
++			/* All PTEs start invalid. */
++			OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0, SGX_MMU_PAGE_SIZE);
++#endif
++			
++			PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[i]->PTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++			
++			PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, ppsPTInfoList[i]->PTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++			switch(pMMUHeap->psDevArena->DevMemHeapType)
++			{
++				case DEVICE_MEMORY_HEAP_SHARED :
++				case DEVICE_MEMORY_HEAP_SHARED_EXPORTED :
++				{
++					/* Shared heap: publish the new PDE in every context. */
++					MMU_CONTEXT *psMMUContext = (MMU_CONTEXT*)pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;
++
++					while(psMMUContext)
++					{
++						pui32PDEntry = (IMG_UINT32*)psMMUContext->pvPDCpuVAddr;
++						pui32PDEntry += ui32PDIndex;
++
++						pui32PDEntry[i] = sDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++
++						PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++						psMMUContext = psMMUContext->psNext;
++					}
++					break;
++				}
++				case DEVICE_MEMORY_HEAP_PERCONTEXT :
++				case DEVICE_MEMORY_HEAP_KERNEL :
++				{
++					/* Only the owning context's directory gets the PDE. */
++					pui32PDEntry[i] = sDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++
++					PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID*)&pui32PDEntry[i], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++					break;
++				}
++				default:
++				{
++					PVR_DPF((PVR_DBG_ERROR, "_DeferredAllocPagetables: ERROR invalid heap type"));
++					return IMG_FALSE;
++				}
++			}
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++			/* Directory contents changed: flag the PD cache for
++			 * invalidation before the hardware next walks it. */
++			MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->psDevInfo);
++#endif
++		}
++		else
++		{
++			/* Slot already populated: its PDE must be valid. */
++			PVR_ASSERT(pui32PDEntry[i] != 0);
++		}
++	}
++
++	return IMG_TRUE;
++}
++
++
++/* MMU_Initialise
++ *
++ * Create a new MMU context for the device: allocate and zero/dummy-fill
++ * its page directory (from OS pages or the local device memory arena),
++ * on first use optionally create the shared dummy PT and dummy data
++ * pages (SUPPORT_SGX_MMU_DUMMY_PAGE), link the context into the
++ * device's context list, and return the context plus the PD device
++ * physical address.
++ *
++ * ppsMMUContext - out: the newly created context.
++ * psPDDevPAddr  - out: device physical address of the page directory.
++ *
++ * Returns PVRSRV_OK on success, PVRSRV_ERROR_GENERIC on failure.
++ *
++ * NOTE(review): every error return after the OSAllocMem below leaks
++ * psMMUContext, and later failures additionally leak the PD page and
++ * any dummy pages already allocated — confirm and add a cleanup path.
++ */
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++	IMG_UINT32 *pui32Tmp;
++	IMG_UINT32 i;
++	IMG_CPU_VIRTADDR pvPDCpuVAddr;
++	IMG_DEV_PHYADDR sPDDevPAddr;
++	IMG_CPU_PHYADDR sCpuPAddr;
++	MMU_CONTEXT *psMMUContext;
++	IMG_HANDLE hPDOSMemHandle;
++	SYS_DATA *psSysData;
++	PVRSRV_SGXDEV_INFO *psDevInfo;
++
++	PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Initialise"));
++
++	if (SysAcquireData(&psSysData) != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to SysAcquireData failed"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++
++	OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++				 sizeof (MMU_CONTEXT),
++				 (IMG_VOID **)&psMMUContext, IMG_NULL);
++	if (psMMUContext == IMG_NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocMem failed"));
++		return PVRSRV_ERROR_GENERIC;
++	}
++	OSMemSet (psMMUContext, 0, sizeof(MMU_CONTEXT));
++
++	psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++	psMMUContext->psDevInfo = psDevInfo;
++
++	psMMUContext->psDeviceNode = psDeviceNode;
++
++	/* No local device memory arena: the page directory comes from OS
++	 * pages; otherwise it is carved from the local arena and mapped. */
++	if(psDeviceNode->psLocalDevMemArena == IMG_NULL)
++	{
++		if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++							SGX_MMU_PAGE_SIZE,
++							&pvPDCpuVAddr,
++							&hPDOSMemHandle) != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++			return PVRSRV_ERROR_GENERIC;
++		}
++
++		/* Derive the PD's device physical address. */
++		if(pvPDCpuVAddr)
++		{
++			sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
++		}
++		else
++		{
++			/* No linear address: resolve via the mem handle. */
++			sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
++		}
++		sPDDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++		#if PAGE_TEST
++		PageTest(pvPDCpuVAddr, sPDDevPAddr);
++		#endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++		/* First context for this device: create the shared dummy PT
++		 * page and dummy data page. */
++		if(!psDevInfo->pvMMUContextList)
++		{
++			if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++								SGX_MMU_PAGE_SIZE, 
++								&psDevInfo->pvDummyPTPageCpuVAddr, 
++								&psDevInfo->hDummyPTPageOSMemHandle) != PVRSRV_OK)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++				return PVRSRV_ERROR_GENERIC;
++			}
++
++			if(psDevInfo->pvDummyPTPageCpuVAddr)
++			{
++				sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++			}
++			else
++			{
++				sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyPTPageOSMemHandle, 0);
++			}
++			psDevInfo->sDummyPTDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++
++			if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY, 
++								SGX_MMU_PAGE_SIZE, 
++								&psDevInfo->pvDummyDataPageCpuVAddr, 
++								&psDevInfo->hDummyDataPageOSMemHandle) != PVRSRV_OK)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to OSAllocPages failed"));
++				return PVRSRV_ERROR_GENERIC;
++			}
++
++			if(psDevInfo->pvDummyDataPageCpuVAddr)
++			{
++				sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++			}
++			else
++			{
++				sCpuPAddr = OSMemHandleToCpuPAddr(psDevInfo->hDummyDataPageOSMemHandle, 0);
++			}
++			psDevInfo->sDummyDataDevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
++		}
++#endif 
++	}
++	else
++	{
++		IMG_SYS_PHYADDR sSysPAddr;
++
++		/* Carve the page directory from the local device memory arena. */
++		if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++					SGX_MMU_PAGE_SIZE,
++					IMG_NULL,
++					IMG_NULL,
++					0,
++					SGX_MMU_PAGE_SIZE,
++					0,
++					&(sSysPAddr.uiAddr))!= IMG_TRUE)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++			return PVRSRV_ERROR_GENERIC;
++		}
++
++		/* Map the PD page into kernel linear address space. */
++		sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++		sPDDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++		pvPDCpuVAddr = OSMapPhysToLin(sCpuPAddr, 
++										SGX_MMU_PAGE_SIZE, 
++										PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++										&hPDOSMemHandle);
++		if(!pvPDCpuVAddr)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++			return PVRSRV_ERROR_GENERIC;
++		}
++
++		#if PAGE_TEST
++		PageTest(pvPDCpuVAddr, sPDDevPAddr);
++		#endif
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++		/* First context for this device: create the shared dummy pages
++		 * from the local arena as well. */
++		if(!psDevInfo->pvMMUContextList)
++		{
++			if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++						SGX_MMU_PAGE_SIZE,
++						IMG_NULL,
++						IMG_NULL,
++						0,
++						SGX_MMU_PAGE_SIZE,
++						0,
++						&(sSysPAddr.uiAddr))!= IMG_TRUE)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++				return PVRSRV_ERROR_GENERIC;
++			}
++
++			sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++			psDevInfo->sDummyPTDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++			psDevInfo->pvDummyPTPageCpuVAddr = OSMapPhysToLin(sCpuPAddr,
++																SGX_MMU_PAGE_SIZE,
++																PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++																&psDevInfo->hDummyPTPageOSMemHandle);
++			if(!psDevInfo->pvDummyPTPageCpuVAddr)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++				return PVRSRV_ERROR_GENERIC;
++			}
++
++			if(RA_Alloc(psDeviceNode->psLocalDevMemArena,
++						SGX_MMU_PAGE_SIZE,
++						IMG_NULL,
++						IMG_NULL,
++						0,
++						SGX_MMU_PAGE_SIZE,
++						0,
++						&(sSysPAddr.uiAddr))!= IMG_TRUE)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR call to RA_Alloc failed"));
++				return PVRSRV_ERROR_GENERIC;
++			}
++
++			sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
++			psDevInfo->sDummyDataDevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
++			psDevInfo->pvDummyDataPageCpuVAddr = OSMapPhysToLin(sCpuPAddr, 
++																SGX_MMU_PAGE_SIZE, 
++																PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++																&psDevInfo->hDummyDataPageOSMemHandle);
++			if(!psDevInfo->pvDummyDataPageCpuVAddr)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "MMU_Initialise: ERROR failed to map page tables"));
++				return PVRSRV_ERROR_GENERIC;
++			}
++		}
++#endif 
++	}
++
++	PDUMPCOMMENT("Alloc page directory");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++	EnableHostAccess(psMMUContext);
++#endif
++
++	PDUMPMALLOCPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PD_UNIQUETAG);
++
++	pui32Tmp = (IMG_UINT32 *)pvPDCpuVAddr;
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++	/* Every PDE starts off pointing at the dummy page table. */
++	for(i=0; i<SGX_MMU_PD_SIZE; i++)
++	{
++		pui32Tmp[i] = psDevInfo->sDummyPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++	}
++
++	if(!psDevInfo->pvMMUContextList)
++	{
++		/* First context: fill the dummy PT with PTEs pointing at the
++		 * dummy data page, and stamp the data page with a signature so
++		 * stray reads are recognisable. */
++		pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyPTPageCpuVAddr;
++		for(i=0; i<SGX_MMU_PT_SIZE; i++)
++		{
++			pui32Tmp[i] = psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++		}
++		
++		PDUMPCOMMENT("Dummy Page table contents");
++		PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++		pui32Tmp = (IMG_UINT32 *)psDevInfo->pvDummyDataPageCpuVAddr;
++		for(i=0; i<(SGX_MMU_PAGE_SIZE/4); i++)
++		{
++			pui32Tmp[i] = DUMMY_DATA_PAGE_SIGNATURE;
++		}
++		
++		PDUMPCOMMENT("Dummy Data Page contents");
++		PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++	}
++#else 
++	/* Without dummy-page support every PDE starts invalid. */
++	for(i=0; i<SGX_MMU_PD_SIZE; i++)
++	{
++		
++		pui32Tmp[i] = 0;
++	}
++#endif 
++
++	PDUMPCOMMENT("Page directory contents");
++	PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, 0, IMG_TRUE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++	/* Record the PD addresses/handle in the context. */
++	psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
++	psMMUContext->sPDDevPAddr = sPDDevPAddr;
++	psMMUContext->hPDOSMemHandle = hPDOSMemHandle;
++
++	*ppsMMUContext = psMMUContext;
++
++	*psPDDevPAddr = sPDDevPAddr;
++
++	/* Push the new context onto the device's context list (the list
++	 * head doubles as the "first context" flag used above). */
++	psMMUContext->psNext = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++	psDevInfo->pvMMUContextList = (IMG_VOID*)psMMUContext;
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++	DisableHostAccess(psMMUContext);
++#endif
++
++	return PVRSRV_OK;
++}
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext)
++{
++      IMG_UINT32 *pui32Tmp, i;
++      SYS_DATA *psSysData;
++      MMU_CONTEXT **ppsMMUContext;
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psMMUContext->psDevInfo;
++      MMU_CONTEXT *psMMUContextList = (MMU_CONTEXT*)psDevInfo->pvMMUContextList;
++#endif
++      
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Finalise: ERROR call to SysAcquireData failed"));
++              return;
++      }
++
++      
++      PDUMPCOMMENT("Free page directory");
++      PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psMMUContext->pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyPTPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++      PDUMPFREEPAGETABLE(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->pvDummyDataPageCpuVAddr, SGX_MMU_PAGE_SIZE, PDUMP_PT_UNIQUETAG);
++#endif
++
++      pui32Tmp = (IMG_UINT32 *)psMMUContext->pvPDCpuVAddr;
++
++      
++
++
++      for(i=0; i<SGX_MMU_PD_SIZE; i++)
++      {
++              
++              pui32Tmp[i] = 0;
++      }
++
++      
++
++
++
++      if(psMMUContext->psDeviceNode->psLocalDevMemArena == IMG_NULL)
++      {
++              OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                              SGX_MMU_PAGE_SIZE,
++                                              psMMUContext->pvPDCpuVAddr,
++                                              psMMUContext->hPDOSMemHandle);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              if(!psMMUContextList->psNext)
++              {
++                      OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                      SGX_MMU_PAGE_SIZE,
++                                                      psDevInfo->pvDummyPTPageCpuVAddr, 
++                                                      psDevInfo->hDummyPTPageOSMemHandle);
++                      OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                      SGX_MMU_PAGE_SIZE,
++                                                      psDevInfo->pvDummyDataPageCpuVAddr,
++                                                      psDevInfo->hDummyDataPageOSMemHandle);
++              }
++#endif
++      }
++      else
++      {
++              IMG_SYS_PHYADDR sSysPAddr;
++              IMG_CPU_PHYADDR sCpuPAddr;
++
++              
++              sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
++              sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++
++              
++              OSUnMapPhysToLin(psMMUContext->pvPDCpuVAddr, 
++                                                      SGX_MMU_PAGE_SIZE,
++                            PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                      psMMUContext->hPDOSMemHandle);
++              
++              RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              if(!psMMUContextList->psNext)
++              {
++                      
++                      sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyPTPageCpuVAddr);
++                      sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++      
++                      
++                      OSUnMapPhysToLin(psDevInfo->pvDummyPTPageCpuVAddr, 
++                                                              SGX_MMU_PAGE_SIZE,
++                                PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                              psDevInfo->hDummyPTPageOSMemHandle);
++                      
++                      RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
++
++                      
++                      sCpuPAddr = OSMapLinToCPUPhys(psDevInfo->pvDummyDataPageCpuVAddr);
++                      sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);
++      
++                      
++                      OSUnMapPhysToLin(psDevInfo->pvDummyDataPageCpuVAddr, 
++                                                              SGX_MMU_PAGE_SIZE,
++                                PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                              psDevInfo->hDummyDataPageOSMemHandle);
++                      
++                      RA_Free (psMMUContext->psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);                  
++              }
++#endif
++      }
++      
++      PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Finalise"));
++
++      
++      ppsMMUContext = (MMU_CONTEXT**)&psMMUContext->psDevInfo->pvMMUContextList;
++      while(*ppsMMUContext)
++      {
++              if(*ppsMMUContext == psMMUContext)
++              {
++                      
++                      *ppsMMUContext = psMMUContext->psNext;
++                      break;
++              }
++              
++              
++              ppsMMUContext = &((*ppsMMUContext)->psNext);
++      }
++
++      
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(MMU_CONTEXT), psMMUContext, IMG_NULL);
++}
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap)
++{
++      IMG_UINT32 *pui32PDCpuVAddr = (IMG_UINT32 *) psMMUContext->pvPDCpuVAddr;
++      IMG_UINT32 *pui32KernelPDCpuVAddr = (IMG_UINT32 *) psMMUHeap->psMMUContext->pvPDCpuVAddr;
++      IMG_UINT32 ui32PDEntry;
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;
++#endif
++
++      
++      pui32PDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++      pui32KernelPDCpuVAddr += psMMUHeap->psDevArena->BaseDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++
++
++      PDUMPCOMMENT("Page directory shared heap range copy");
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(psMMUContext);
++#endif
++
++      for (ui32PDEntry = 0; ui32PDEntry < psMMUHeap->ui32PTPageCount; ui32PDEntry++)
++      {
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              PVR_ASSERT(pui32PDCpuVAddr[ui32PDEntry] == 0);
++#endif
++
++              
++              pui32PDCpuVAddr[ui32PDEntry] = pui32KernelPDCpuVAddr[ui32PDEntry];
++              if (pui32PDCpuVAddr[ui32PDEntry])
++              {
++                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PDCpuVAddr[ui32PDEntry], sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++                      bInvalidateDirectoryCache = IMG_TRUE;
++#endif
++              }
++      }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(psMMUContext);
++#endif
++
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      if (bInvalidateDirectoryCache)
++      {
++              
++
++
++
++              MMU_InvalidateDirectoryCache(psMMUContext->psDevInfo);
++      }
++#endif
++}
++
++
++static IMG_VOID
++MMU_UnmapPagesAndFreePTs (MMU_HEAP *psMMUHeap,
++                                                IMG_DEV_VIRTADDR sDevVAddr,
++                                                IMG_UINT32 ui32PageCount,
++                                                IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32                      uPageSize = HOST_PAGESIZE();
++      IMG_DEV_VIRTADDR        sTmpDevVAddr;
++      IMG_UINT32                      i;
++      IMG_UINT32                      ui32PDIndex;
++      IMG_UINT32                      ui32PTIndex;
++      IMG_UINT32                      *pui32Tmp;
++      IMG_BOOL                        bInvalidateDirectoryCache = IMG_FALSE;
++
++#if !defined (PDUMP)
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++      
++      sTmpDevVAddr = sDevVAddr;
++
++      for(i=0; i<ui32PageCount; i++)
++      {
++              MMU_PT_INFO **ppsPTInfoList;
++
++              
++              ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++              
++              ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++              {
++                      
++                      ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++      
++                      
++                      if (!ppsPTInfoList[0])
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++      
++                              
++                              sTmpDevVAddr.uiAddr += uPageSize;
++      
++                              
++                              continue;
++                      }
++      
++                      
++                      pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++                      
++                      if (!pui32Tmp)
++                      {
++                              continue;
++                      }
++      
++                      
++                      if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++                      {
++                              ppsPTInfoList[0]->ui32ValidPTECount--;
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_MESSAGE, "MMU_UnmapPagesAndFreePTs: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++                      }
++      
++                      
++                      PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++                      
++                      pui32Tmp[ui32PTIndex] = psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++#else
++                      
++                      pui32Tmp[ui32PTIndex] = 0;
++#endif
++              }
++
++              
++
++              if (ppsPTInfoList[0] && ppsPTInfoList[0]->ui32ValidPTECount == 0)
++              {
++                      _DeferredFreePageTable(psMMUHeap, ui32PDIndex - (psMMUHeap->ui32PTBaseIndex >> SGX_MMU_PT_SHIFT));
++                      bInvalidateDirectoryCache = IMG_TRUE;
++              }
++
++              
++              sTmpDevVAddr.uiAddr += uPageSize;
++      }
++
++      if(bInvalidateDirectoryCache)
++      {
++              MMU_InvalidateDirectoryCache(psMMUHeap->psMMUContext->psDevInfo);
++      }
++      else
++      {
++              MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif 
++}
++
++
++IMG_VOID MMU_FreePageTables(IMG_PVOID pvMMUHeap,
++                            IMG_UINT32 ui32Start,
++                            IMG_UINT32 ui32End,
++                            IMG_HANDLE hUniqueTag)
++{
++      MMU_HEAP *pMMUHeap = (MMU_HEAP*)pvMMUHeap;
++      IMG_DEV_VIRTADDR Start;
++
++      Start.uiAddr = ui32Start;
++
++      MMU_UnmapPagesAndFreePTs(pMMUHeap, Start, (ui32End - ui32Start) / SGX_MMU_PAGE_SIZE, hUniqueTag);
++}
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++                      DEV_ARENA_DESCRIPTOR *psDevArena,
++                      RA_ARENA **ppsVMArena)
++{
++      MMU_HEAP *pMMUHeap;
++      IMG_BOOL bRes;
++
++      PVR_ASSERT (psDevArena != IMG_NULL);
++
++      OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                               sizeof (MMU_HEAP),
++                               (IMG_VOID **)&pMMUHeap, IMG_NULL);
++      if (pMMUHeap == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to OSAllocMem failed"));
++              return IMG_NULL;
++      }
++
++      pMMUHeap->psMMUContext = psMMUContext;
++      pMMUHeap->psDevArena = psDevArena;
++
++      bRes = _AllocPageTables (pMMUHeap);
++      if (!bRes)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to _AllocPageTables failed"));
++              OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pMMUHeap, IMG_NULL);
++              return IMG_NULL;
++      }
++
++      
++      pMMUHeap->psVMArena = RA_Create(psDevArena->pszName,
++                                                                      psDevArena->BaseDevVAddr.uiAddr,
++                                                                      psDevArena->ui32Size,
++                                                                      IMG_NULL,
++                                                                      SGX_MMU_PAGE_SIZE,
++                                                                      IMG_NULL,
++                                                                      IMG_NULL,
++                                                                      MMU_FreePageTables,
++                                                                      pMMUHeap);
++
++      if (pMMUHeap->psVMArena == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_Create: ERROR call to RA_Create failed"));
++              _DeferredFreePageTables (pMMUHeap);
++              OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pMMUHeap, IMG_NULL);
++              return IMG_NULL;
++      }
++
++#if 0 
++      
++      if(psDevArena->ui32HeapID == SGX_TILED_HEAP_ID)
++      {
++              IMG_UINT32 ui32RegVal;
++              IMG_UINT32 ui32XTileStride;
++
++              
++
++
++
++
++              ui32XTileStride = 2;
++
++              ui32RegVal = (EUR_CR_BIF_TILE0_MIN_ADDRESS_MASK
++                                              & ((psDevArena->BaseDevVAddr.uiAddr>>20)
++                                              << EUR_CR_BIF_TILE0_MIN_ADDRESS_SHIFT))
++                                      |(EUR_CR_BIF_TILE0_MAX_ADDRESS_MASK
++                                              & (((psDevArena->BaseDevVAddr.uiAddr+psDevArena->ui32Size)>>20)
++                                              << EUR_CR_BIF_TILE0_MAX_ADDRESS_SHIFT))
++                                      |(EUR_CR_BIF_TILE0_CFG_MASK
++                                              & (((ui32XTileStride<<1)|8) << EUR_CR_BIF_TILE0_CFG_SHIFT));
++              PDUMPREG(EUR_CR_BIF_TILE0, ui32RegVal);
++      }
++#endif
++
++      
++
++      *ppsVMArena = pMMUHeap->psVMArena;
++
++      return pMMUHeap;
++}
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMUHeap)
++{
++      if (pMMUHeap != IMG_NULL)
++      {
++              PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Delete"));
++
++              if(pMMUHeap->psVMArena)
++              {
++                      RA_Delete (pMMUHeap->psVMArena);
++              }
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++              EnableHostAccess(pMMUHeap->psMMUContext);
++#endif
++              _DeferredFreePageTables (pMMUHeap);
++#ifdef SUPPORT_SGX_MMU_BYPASS
++              DisableHostAccess(pMMUHeap->psMMUContext);
++#endif
++
++              OSFreeMem (PVRSRV_OS_PAGEABLE_HEAP, 0, pMMUHeap, IMG_NULL);
++      }
++}
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMUHeap,
++                 IMG_SIZE_T uSize,
++                 IMG_SIZE_T *pActualSize,
++                 IMG_UINT32 uFlags,
++                 IMG_UINT32 uDevVAddrAlignment,
++                 IMG_DEV_VIRTADDR *psDevVAddr)
++{
++      IMG_BOOL bStatus;
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "MMU_Alloc: uSize=0x%x, flags=0x%x, align=0x%x",
++              uSize, uFlags, uDevVAddrAlignment));
++
++      
++
++      if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++      {
++              bStatus = RA_Alloc (pMMUHeap->psVMArena,
++                                                      uSize,
++                                                      pActualSize,
++                                                      IMG_NULL,
++                                                      0,
++                                                      uDevVAddrAlignment,
++                                                      0,
++                                                      &(psDevVAddr->uiAddr));
++              if(!bStatus)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: RA_Alloc of VMArena failed"));
++                      return bStatus;
++              }
++      }
++
++      #ifdef SUPPORT_SGX_MMU_BYPASS
++      EnableHostAccess(pMMUHeap->psMMUContext);
++      #endif
++
++      
++      bStatus = _DeferredAllocPagetables(pMMUHeap, *psDevVAddr, uSize);
++      
++      #ifdef SUPPORT_SGX_MMU_BYPASS
++      DisableHostAccess(pMMUHeap->psMMUContext);
++      #endif
++
++      if (!bStatus)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _DeferredAllocPagetables failed"));
++              if((uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) == 0)
++              {
++                      
++                      RA_Free (pMMUHeap->psVMArena, psDevVAddr->uiAddr, IMG_FALSE);
++              }
++      }
++
++      return bStatus;
++}
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR DevVAddr, IMG_UINT32 ui32Size)
++{
++      PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++              "MMU_Free: mmu=%08X, dev_vaddr=%08X", pMMUHeap, DevVAddr.uiAddr));
++
++      if((DevVAddr.uiAddr >= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr) && 
++              (DevVAddr.uiAddr + ui32Size <= pMMUHeap->psDevArena->BaseDevVAddr.uiAddr + pMMUHeap->psDevArena->ui32Size))
++      {
++              RA_Free (pMMUHeap->psVMArena, DevVAddr.uiAddr, IMG_TRUE);
++              return;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR,"MMU_Free: Couldn't find DevVAddr %08X in a DevArena",DevVAddr.uiAddr));
++}
++
++IMG_VOID
++MMU_Enable (MMU_HEAP *pMMUHeap)
++{
++      PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++      
++}
++
++IMG_VOID
++MMU_Disable (MMU_HEAP *pMMUHeap)
++{
++      PVR_UNREFERENCED_PARAMETER(pMMUHeap);
++              
++}
++
++#if defined(PDUMP)
++static IMG_VOID
++MMU_PDumpPageTables   (MMU_HEAP *pMMUHeap,
++                                       IMG_DEV_VIRTADDR DevVAddr,
++                                       IMG_SIZE_T uSize,
++                                       IMG_BOOL bForUnmap,
++                                       IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32      ui32NumPTEntries;
++      IMG_UINT32      ui32PTIndex;
++      IMG_UINT32      *pui32PTEntry;
++
++      MMU_PT_INFO **ppsPTInfoList;
++      IMG_UINT32 ui32PDIndex;
++      IMG_UINT32 ui32PTDumpCount;
++
++      
++      ui32NumPTEntries = (uSize + SGX_MMU_PAGE_SIZE - 1) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      ui32PDIndex = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++      
++      ui32PTIndex = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      PDUMPCOMMENT("Page table mods (num entries == %08X) %s", ui32NumPTEntries, bForUnmap ? "(for unmap)" : "");
++
++      
++      while(ui32NumPTEntries > 0)
++      {
++              MMU_PT_INFO* psPTInfo = *ppsPTInfoList++;
++
++              if(ui32NumPTEntries <= 1024 - ui32PTIndex)
++              {
++                      ui32PTDumpCount = ui32NumPTEntries;
++              }
++              else
++              {
++                      ui32PTDumpCount = 1024 - ui32PTIndex;
++              }
++
++              if (psPTInfo)
++              {
++                      pui32PTEntry = (IMG_UINT32*)psPTInfo->PTPageCpuVAddr; 
++                      PDUMPMEM2(PVRSRV_DEVICE_TYPE_SGX, (IMG_VOID *) &pui32PTEntry[ui32PTIndex], ui32PTDumpCount * sizeof(IMG_UINT32), 0, IMG_FALSE, PDUMP_PT_UNIQUETAG, hUniqueTag);
++              }
++
++              
++              ui32NumPTEntries -= ui32PTDumpCount;
++
++              
++              ui32PTIndex = 0;
++      }
++
++      PDUMPCOMMENT("Finished page table mods %s", bForUnmap ? "(for unmap)" : "");
++}
++#endif 
++
++
++static IMG_VOID
++MMU_MapPage (MMU_HEAP *pMMUHeap,
++                       IMG_DEV_VIRTADDR DevVAddr,
++                       IMG_DEV_PHYADDR DevPAddr,
++                       IMG_UINT32 ui32MemFlags)
++{
++      IMG_UINT32 ui32Index;
++      IMG_UINT32 *pui32Tmp;
++      IMG_UINT32 ui32MMUFlags = 0;
++      MMU_PT_INFO **ppsPTInfoList;
++
++      
++
++      if(((PVRSRV_MEM_READ|PVRSRV_MEM_WRITE) & ui32MemFlags) == (PVRSRV_MEM_READ|PVRSRV_MEM_WRITE))
++      {
++              
++              ui32MMUFlags = 0;
++      }
++      else if(PVRSRV_MEM_READ & ui32MemFlags)
++      {
++              
++              ui32MMUFlags |= SGX_MMU_PTE_READONLY;
++      }
++      else if(PVRSRV_MEM_WRITE & ui32MemFlags)
++      {
++              
++              ui32MMUFlags |= SGX_MMU_PTE_WRITEONLY;
++      }
++      
++      
++      if(PVRSRV_MEM_CACHE_CONSISTENT & ui32MemFlags)
++      {
++              ui32MMUFlags |= SGX_MMU_PTE_CACHECONSISTENT;
++      }
++
++      
++      if(PVRSRV_MEM_EDM_PROTECT & ui32MemFlags)
++      {
++              ui32MMUFlags |= SGX_MMU_PTE_EDMPROTECT;
++      }
++      
++      
++
++
++      
++      ui32Index = DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++
++      
++      ui32Index = (DevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++#if !defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++      
++      if (pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_MapPage: Page is already valid for alloc at VAddr:0x%08lX PDIdx:%u PTIdx:%u",DevVAddr.uiAddr, DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT), ui32Index ));
++      }
++
++      PVR_ASSERT((pui32Tmp[ui32Index] & SGX_MMU_PTE_VALID) == 0);
++#endif
++
++      
++      ppsPTInfoList[0]->ui32ValidPTECount++;
++      
++      
++      pui32Tmp[ui32Index] = (DevPAddr.uiAddr & SGX_MMU_PTE_ADDR_MASK)
++                                              | SGX_MMU_PTE_VALID
++                                              | ui32MMUFlags;
++
++}
++
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMUHeap,
++                              IMG_DEV_VIRTADDR DevVAddr,
++                              IMG_SYS_PHYADDR *psSysAddr,
++                              IMG_SIZE_T uSize,
++                              IMG_UINT32 ui32MemFlags,
++                              IMG_HANDLE hUniqueTag)
++{
++#if defined(PDUMP)
++      IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif 
++      IMG_UINT32 uCount, i;
++      IMG_DEV_PHYADDR DevPAddr;
++
++      PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++#if defined(PDUMP)
++      MapBaseDevVAddr = DevVAddr;
++#else
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif 
++
++      for (i=0, uCount=0; uCount<uSize; i++, uCount+=SGX_MMU_PAGE_SIZE)
++      {
++              IMG_SYS_PHYADDR sSysAddr;
++
++              sSysAddr = psSysAddr[i];
++
++
++              DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysAddr);
++
++              MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++              DevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
++
++              PVR_DPF ((PVR_DBG_MESSAGE, 
++                               "MMU_MapScatter: devVAddr=%08X, SysAddr=%08X, size=0x%x/0x%x",
++                                DevVAddr.uiAddr, sSysAddr.uiAddr, uCount, uSize));
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif 
++}
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMUHeap,
++                        IMG_DEV_VIRTADDR DevVAddr,
++                        IMG_SYS_PHYADDR SysPAddr,
++                        IMG_SIZE_T uSize,
++                        IMG_UINT32 ui32MemFlags,
++                        IMG_HANDLE hUniqueTag)
++{
++      IMG_DEV_PHYADDR DevPAddr;
++#if defined(PDUMP)
++      IMG_DEV_VIRTADDR MapBaseDevVAddr;
++#endif 
++      IMG_UINT32 uCount;
++      IMG_UINT32 ui32VAdvance = SGX_MMU_PAGE_SIZE;
++      IMG_UINT32 ui32PAdvance = SGX_MMU_PAGE_SIZE;
++
++      PVR_ASSERT (pMMUHeap != IMG_NULL);
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                "MMU_MapPages: mmu=%08X, devVAddr=%08X, SysPAddr=%08X, size=0x%x",
++                pMMUHeap, DevVAddr.uiAddr, SysPAddr.uiAddr, uSize));
++
++#if defined(PDUMP)
++      MapBaseDevVAddr = DevVAddr;
++#else
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif 
++
++      DevPAddr = SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, SysPAddr);
++
++#if defined(FIX_HW_BRN_23281)
++      if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++      {
++              ui32VAdvance *= 2;
++      }
++#endif
++
++      
++
++
++      if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++      {
++              ui32PAdvance = 0;
++      }
++
++      for (uCount=0; uCount<uSize; uCount+=ui32VAdvance)
++      {
++              MMU_MapPage (pMMUHeap, DevVAddr, DevPAddr, ui32MemFlags);
++              DevVAddr.uiAddr += ui32VAdvance;
++              DevPAddr.uiAddr += ui32PAdvance;
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uSize, IMG_FALSE, hUniqueTag);
++#endif 
++}
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP          *pMMUHeap,
++                         IMG_DEV_VIRTADDR   MapBaseDevVAddr,
++                         IMG_SIZE_T         uByteSize,
++                         IMG_CPU_VIRTADDR   CpuVAddr,
++                         IMG_HANDLE         hOSMemHandle,
++                         IMG_DEV_VIRTADDR  *pDevVAddr,
++                         IMG_UINT32         ui32MemFlags,
++                         IMG_HANDLE         hUniqueTag)
++{
++      IMG_UINT32                      i;
++      IMG_UINT32                      uOffset = 0;
++      IMG_DEV_VIRTADDR        MapDevVAddr;
++      IMG_UINT32                      ui32VAdvance = SGX_MMU_PAGE_SIZE;
++      IMG_UINT32                      ui32PAdvance = SGX_MMU_PAGE_SIZE;
++
++#if !defined (PDUMP)
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++      PVR_DPF ((PVR_DBG_MESSAGE,
++                      "MMU_MapShadow: %08X, 0x%x, %08X",
++                      MapBaseDevVAddr.uiAddr,
++                      uByteSize,
++                      CpuVAddr));
++
++      PVR_ASSERT(((IMG_UINT32)CpuVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32)uByteSize & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      pDevVAddr->uiAddr = MapBaseDevVAddr.uiAddr;
++
++#if defined(FIX_HW_BRN_23281)
++      if(ui32MemFlags & PVRSRV_MEM_INTERLEAVED)
++      {
++              ui32VAdvance *= 2;
++      }
++#endif
++
++      
++
++
++      if(ui32MemFlags & PVRSRV_MEM_DUMMY)
++      {
++              ui32PAdvance = 0;
++      }
++
++      
++      MapDevVAddr = MapBaseDevVAddr;
++      for (i=0; i<uByteSize; i+=ui32VAdvance)
++      {
++              IMG_CPU_PHYADDR CpuPAddr;
++              IMG_DEV_PHYADDR DevPAddr;
++
++              if(CpuVAddr)
++              {
++                      CpuPAddr = OSMapLinToCPUPhys ((IMG_VOID *)((IMG_UINT32)CpuVAddr + uOffset));
++              }
++              else
++              {
++                      CpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, uOffset);
++              }
++              DevPAddr = SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE_SGX, CpuPAddr);
++
++              PVR_DPF ((PVR_DBG_MESSAGE,
++                              "0x%x: CpuVAddr=%08X, CpuPAddr=%08X, DevVAddr=%08X, DevPAddr=%08X",
++                              uOffset, 
++                              (IMG_UINTPTR_T)CpuVAddr + uOffset, 
++                              CpuPAddr.uiAddr, 
++                              MapDevVAddr.uiAddr, 
++                              DevPAddr.uiAddr));
++
++              MMU_MapPage (pMMUHeap, MapDevVAddr, DevPAddr, ui32MemFlags);
++
++              
++              MapDevVAddr.uiAddr += ui32VAdvance;
++              uOffset += ui32PAdvance;
++      }
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (pMMUHeap, MapBaseDevVAddr, uByteSize, IMG_FALSE, hUniqueTag);
++#endif 
++}
++
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *psMMUHeap,
++                              IMG_DEV_VIRTADDR sDevVAddr,
++                              IMG_UINT32 ui32PageCount,
++                              IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32                      uPageSize = HOST_PAGESIZE();
++      IMG_DEV_VIRTADDR        sTmpDevVAddr;
++      IMG_UINT32                      i;
++      IMG_UINT32                      ui32PDIndex;
++      IMG_UINT32                      ui32PTIndex;
++      IMG_UINT32                      *pui32Tmp;
++
++#if !defined (PDUMP)
++      PVR_UNREFERENCED_PARAMETER(hUniqueTag);
++#endif
++
++      
++      sTmpDevVAddr = sDevVAddr;
++
++      for(i=0; i<ui32PageCount; i++)
++      {
++              MMU_PT_INFO **ppsPTInfoList;
++
++              
++              ui32PDIndex = sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++              
++              ppsPTInfoList = &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];
++
++              
++              ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++              
++              if (!ppsPTInfoList[0])
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: ERROR Invalid PT for alloc at VAddr:0x%08lX (VaddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++
++                      
++                      sTmpDevVAddr.uiAddr += uPageSize;
++
++                      
++                      continue;
++              }
++
++              
++              pui32Tmp = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++              
++              if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID)
++              {
++                      ppsPTInfoList[0]->ui32ValidPTECount--;
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Page is already invalid for alloc at VAddr:0x%08lX (VAddrIni:0x%08lX AllocPage:%u) PDIdx:%u PTIdx:%u",sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,i, ui32PDIndex, ui32PTIndex ));
++              }
++
++              
++              PVR_ASSERT((IMG_INT32)ppsPTInfoList[0]->ui32ValidPTECount >= 0);
++
++#if defined(SUPPORT_SGX_MMU_DUMMY_PAGE)
++              
++              pui32Tmp[ui32PTIndex] = psMMUHeap->psMMUContext->psDevInfo->sDummyDataDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++#else
++              
++              pui32Tmp[ui32PTIndex] = 0;
++#endif
++
++              
++              sTmpDevVAddr.uiAddr += uPageSize;
++      }
++
++      MMU_InvalidatePageTableCache(psMMUHeap->psMMUContext->psDevInfo);
++
++#if defined(PDUMP)
++      MMU_PDumpPageTables (psMMUHeap, sDevVAddr, uPageSize*ui32PageCount, IMG_TRUE, hUniqueTag);
++#endif 
++}
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr)
++{
++      IMG_UINT32 *pui32PageTable;
++      IMG_UINT32 ui32Index;
++      IMG_DEV_PHYADDR sDevPAddr;
++      MMU_PT_INFO **ppsPTInfoList;
++
++      
++      ui32Index = sDevVPageAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++
++      
++      ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32Index];
++      if (!ppsPTInfoList[0])
++      {
++              PVR_DPF((PVR_DBG_ERROR,"MMU_GetPhysPageAddr: Not mapped in at 0x%08x", sDevVPageAddr.uiAddr));
++              sDevPAddr.uiAddr = 0;
++              return sDevPAddr;
++      }
++
++      
++      ui32Index = (sDevVPageAddr.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++      
++      pui32PageTable = (IMG_UINT32*)ppsPTInfoList[0]->PTPageCpuVAddr;
++
++      
++      sDevPAddr.uiAddr = pui32PageTable[ui32Index];
++
++      
++      sDevPAddr.uiAddr &= SGX_MMU_PTE_ADDR_MASK;
++
++      return sDevPAddr;
++}
++
++
++IMG_DEV_PHYADDR MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext)
++{
++      return (pMMUContext->sPDDevPAddr);
++}
++
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetPhysPageAddrKM (IMG_HANDLE hDevMemHeap,
++                                                                 IMG_DEV_VIRTADDR sDevVAddr,
++                                                                 IMG_DEV_PHYADDR *pDevPAddr,
++                                                                 IMG_CPU_PHYADDR *pCpuPAddr)
++{
++      MMU_HEAP *pMMUHeap;
++      IMG_DEV_PHYADDR DevPAddr;
++
++      
++
++      pMMUHeap = (MMU_HEAP*)BM_GetMMUHeap(hDevMemHeap);
++
++      DevPAddr = MMU_GetPhysPageAddr(pMMUHeap, sDevVAddr);
++      pCpuPAddr->uiAddr = DevPAddr.uiAddr; 
++      pDevPAddr->uiAddr = DevPAddr.uiAddr;
++
++      return (pDevPAddr->uiAddr != 0) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++
++PVRSRV_ERROR SGXGetMMUPDAddrKM(IMG_HANDLE             hDevCookie,
++                                                              IMG_HANDLE              hDevMemContext,
++                                                              IMG_DEV_PHYADDR *psPDDevPAddr)
++{
++      if (!hDevCookie || !hDevMemContext || !psPDDevPAddr)
++      {
++              return PVRSRV_ERROR_INVALID_PARAMS;
++      }
++
++      PVR_UNREFERENCED_PARAMETER(hDevCookie); 
++
++      
++      *psPDDevPAddr = ((BM_CONTEXT*)hDevMemContext)->psMMUContext->sPDDevPAddr;
++      
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++      SYS_DATA *psSysData;
++      RA_ARENA *psLocalDevMemArena;
++      IMG_HANDLE hOSMemHandle = IMG_NULL;
++      IMG_BYTE *pui8MemBlock = IMG_NULL;
++      IMG_SYS_PHYADDR sMemBlockSysPAddr;
++      IMG_CPU_PHYADDR sMemBlockCpuPAddr;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to SysAcquireData failed"));
++              return eError;
++      }
++
++      psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++      
++      if(psLocalDevMemArena == IMG_NULL)
++      {
++              
++              eError = OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                                    3 * SGX_MMU_PAGE_SIZE,
++                                                    (IMG_VOID **)&pui8MemBlock,
++                                                    &hOSMemHandle);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to OSAllocPages failed"));     
++                      return eError;
++              }
++
++              
++              if(pui8MemBlock)
++              {
++                      sMemBlockCpuPAddr = OSMapLinToCPUPhys(pui8MemBlock);
++              }
++              else
++              {
++                      
++                      sMemBlockCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, 0);
++              }
++      }
++      else
++      {
++              
++
++              if(RA_Alloc(psLocalDevMemArena,
++                                      3 * SGX_MMU_PAGE_SIZE,
++                                      IMG_NULL,
++                                      IMG_NULL,
++                                      0,
++                                      SGX_MMU_PAGE_SIZE,
++                                      0,
++                                      &(sMemBlockSysPAddr.uiAddr)) != IMG_TRUE)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR call to RA_Alloc failed"));
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++
++              
++              sMemBlockCpuPAddr = SysSysPAddrToCpuPAddr(sMemBlockSysPAddr);
++              pui8MemBlock = OSMapPhysToLin(sMemBlockCpuPAddr,
++                                                                        SGX_MMU_PAGE_SIZE * 3,
++                                                                        PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                                                                        &hOSMemHandle);
++              if(!pui8MemBlock)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDAlloc: ERROR failed to map page tables"));
++                      return PVRSRV_ERROR_BAD_MAPPING;
++              }
++      }
++
++      psDevInfo->hBIFResetPDOSMemHandle = hOSMemHandle;
++      psDevInfo->sBIFResetPDDevPAddr = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sMemBlockCpuPAddr);
++      psDevInfo->sBIFResetPTDevPAddr.uiAddr = psDevInfo->sBIFResetPDDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++      psDevInfo->sBIFResetPageDevPAddr.uiAddr = psDevInfo->sBIFResetPTDevPAddr.uiAddr + SGX_MMU_PAGE_SIZE;
++      psDevInfo->pui32BIFResetPD = (IMG_UINT32 *)pui8MemBlock;
++      psDevInfo->pui32BIFResetPT = (IMG_UINT32 *)(pui8MemBlock + SGX_MMU_PAGE_SIZE);
++      
++      
++      OSMemSet(psDevInfo->pui32BIFResetPD, 0, SGX_MMU_PAGE_SIZE);
++      OSMemSet(psDevInfo->pui32BIFResetPT, 0, SGX_MMU_PAGE_SIZE);
++      
++      OSMemSet(pui8MemBlock + (2 * SGX_MMU_PAGE_SIZE), 0xDB, SGX_MMU_PAGE_SIZE);
++
++      return PVRSRV_OK;
++}
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++      SYS_DATA *psSysData;
++      RA_ARENA *psLocalDevMemArena;
++      IMG_SYS_PHYADDR sPDSysPAddr;
++
++      eError = SysAcquireData(&psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "MMU_BIFResetPDFree: ERROR call to SysAcquireData failed"));
++              return;
++      }
++
++      psLocalDevMemArena = psSysData->apsLocalDevMemArena[0];
++
++      
++      if(psLocalDevMemArena == IMG_NULL)
++      {
++              OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
++                                      3 * SGX_MMU_PAGE_SIZE,
++                                      psDevInfo->pui32BIFResetPD,
++                                      psDevInfo->hBIFResetPDOSMemHandle);
++      }
++      else
++      {
++              OSUnMapPhysToLin(psDevInfo->pui32BIFResetPD,
++                         3 * SGX_MMU_PAGE_SIZE,
++                         PVRSRV_HAP_WRITECOMBINE|PVRSRV_HAP_KERNEL_ONLY,
++                         psDevInfo->hBIFResetPDOSMemHandle);
++                                               
++              sPDSysPAddr = SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE_SGX, psDevInfo->sBIFResetPDDevPAddr);
++              RA_Free(psLocalDevMemArena, sPDSysPAddr.uiAddr, IMG_FALSE);
++      }
++}
++
++
++#if PAGE_TEST
++static void PageTest(void* pMem, IMG_DEV_PHYADDR sDevPAddr)
++{
++      volatile IMG_UINT32 ui32WriteData;
++      volatile IMG_UINT32 ui32ReadData;
++      volatile IMG_UINT32 *pMem32 = (volatile IMG_UINT32 *)pMem;
++      int n;
++      IMG_BOOL bOK=IMG_TRUE;
++
++      ui32WriteData = 0xffffffff;
++
++      for (n=0; n<1024; n++)
++      {
++              pMem32[n] = ui32WriteData;
++              ui32ReadData = pMem32[n];
++
++              if (ui32WriteData != ui32ReadData)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++                      PVR_DBG_BREAK;
++                      bOK = IMG_FALSE;
++              }
++      }
++
++      ui32WriteData = 0;
++
++      for (n=0; n<1024; n++)
++      {
++              pMem32[n] = ui32WriteData;
++              ui32ReadData = pMem32[n];
++
++              if (ui32WriteData != ui32ReadData)
++              {
++                      
++                      PVR_DPF ((PVR_DBG_ERROR, "Error - memory page test failed at device phys address 0x%08X", sDevPAddr.uiAddr + (n<<2) ));
++                      PVR_DBG_BREAK;
++                      bOK = IMG_FALSE;
++              }
++      }
++
++      if (bOK)
++      {
++              PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X is OK", sDevPAddr.uiAddr));
++      }
++      else
++      {
++              PVR_DPF ((PVR_DBG_VERBOSE, "MMU Page 0x%08X *** FAILED ***", sDevPAddr.uiAddr));
++      }
++}
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/mmu.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,123 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _MMU_H_
++#define _MMU_H_
++
++PVRSRV_ERROR
++MMU_Initialise (PVRSRV_DEVICE_NODE *psDeviceNode, MMU_CONTEXT **ppsMMUContext, IMG_DEV_PHYADDR *psPDDevPAddr);
++
++IMG_VOID
++MMU_Finalise (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++MMU_InsertHeap(MMU_CONTEXT *psMMUContext, MMU_HEAP *psMMUHeap);
++
++MMU_HEAP *
++MMU_Create (MMU_CONTEXT *psMMUContext,
++                      DEV_ARENA_DESCRIPTOR *psDevArena,
++                      RA_ARENA **ppsVMArena);
++
++IMG_VOID
++MMU_Delete (MMU_HEAP *pMMU);
++
++IMG_BOOL
++MMU_Alloc (MMU_HEAP *pMMU,
++           IMG_SIZE_T uSize,
++           IMG_SIZE_T *pActualSize,
++           IMG_UINT32 uFlags,
++                 IMG_UINT32 uDevVAddrAlignment,
++           IMG_DEV_VIRTADDR *pDevVAddr);
++
++IMG_VOID
++MMU_Free (MMU_HEAP *pMMU,
++          IMG_DEV_VIRTADDR DevVAddr,
++                IMG_UINT32 ui32Size);
++
++IMG_VOID 
++MMU_Enable (MMU_HEAP *pMMU);
++
++IMG_VOID 
++MMU_Disable (MMU_HEAP *pMMU);
++
++IMG_VOID
++MMU_MapPages (MMU_HEAP *pMMU,
++                        IMG_DEV_VIRTADDR devVAddr,
++                        IMG_SYS_PHYADDR SysPAddr,
++                        IMG_SIZE_T uSize,
++                        IMG_UINT32 ui32MemFlags,
++                        IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapShadow (MMU_HEAP          * pMMU,
++               IMG_DEV_VIRTADDR    MapBaseDevVAddr,
++               IMG_SIZE_T          uSize, 
++               IMG_CPU_VIRTADDR    CpuVAddr,
++               IMG_HANDLE          hOSMemHandle,
++               IMG_DEV_VIRTADDR  * pDevVAddr,
++               IMG_UINT32          ui32MemFlags,
++               IMG_HANDLE          hUniqueTag);
++
++IMG_VOID
++MMU_UnmapPages (MMU_HEAP *pMMU,
++             IMG_DEV_VIRTADDR dev_vaddr,
++             IMG_UINT32 ui32PageCount,
++             IMG_HANDLE hUniqueTag);
++
++IMG_VOID
++MMU_MapScatter (MMU_HEAP *pMMU,
++                              IMG_DEV_VIRTADDR DevVAddr,
++                              IMG_SYS_PHYADDR *psSysAddr,
++                              IMG_SIZE_T uSize,
++                              IMG_UINT32 ui32MemFlags,
++                              IMG_HANDLE hUniqueTag);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPhysPageAddr(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++
++
++IMG_DEV_PHYADDR
++MMU_GetPDDevPAddr(MMU_CONTEXT *pMMUContext);
++
++
++#ifdef SUPPORT_SGX_MMU_BYPASS
++IMG_VOID
++EnableHostAccess (MMU_CONTEXT *psMMUContext);
++
++
++IMG_VOID
++DisableHostAccess (MMU_CONTEXT *psMMUContext);
++#endif
++
++IMG_VOID MMU_InvalidateDirectoryCache(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++PVRSRV_ERROR MMU_BIFResetPDAlloc(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++IMG_VOID MMU_BIFResetPDFree(PVRSRV_SGXDEV_INFO *psDevInfo);
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/pb.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,408 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "services_headers.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "pvr_bridge_km.h"
++#include "pdump_km.h"
++
++#ifndef __linux__
++#pragma message("TODO: Review use of OS_PAGEABLE vs OS_NON_PAGEABLE")
++#endif
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param);
++
++IMG_EXPORT PVRSRV_ERROR
++SGXFindSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                        IMG_UINT32 ui32TotalPBSize,
++                                        IMG_HANDLE *phSharedPBDesc,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsHWPBDescKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO **ppsBlockKernelMemInfo,
++                                        PVRSRV_KERNEL_MEM_INFO ***pppsSharedPBDescSubKernelMemInfos,
++                                        IMG_UINT32 *ui32SharedPBDescSubKernelMemInfosCount)
++{
++      PVRSRV_STUB_PBDESC *psStubPBDesc;
++      PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos=IMG_NULL;
++      PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++      PVRSRV_ERROR eError = PVRSRV_ERROR_GENERIC;
++
++      psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      for(psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++              psStubPBDesc != IMG_NULL;
++              psStubPBDesc = psStubPBDesc->psNext)
++      {
++              if(psStubPBDesc->ui32TotalPBSize == ui32TotalPBSize)
++              {
++                      IMG_UINT32 i;
++                      PRESMAN_ITEM psResItem;
++
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                                      * psStubPBDesc->ui32SubKernelMemInfosCount,
++                                                (IMG_VOID **)&ppsSharedPBDescSubKernelMemInfos,
++                                                IMG_NULL) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: OSAllocMem failed"));
++
++                              eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++                              goto ExitNotFound;
++                      }
++                      psResItem = ResManRegisterRes(RESMAN_TYPE_SHARED_PB_DESC,
++                                      (IMG_VOID *)psStubPBDesc,
++                                      0,
++                                      &SGXCleanupSharedPBDescCallback,
++                                      0);
++
++                      if (psResItem == IMG_NULL)
++                      {
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                                      * psStubPBDesc->ui32SubKernelMemInfosCount,
++                                                ppsSharedPBDescSubKernelMemInfos,
++                                                0);
++
++                              PVR_DPF((PVR_DBG_ERROR, "SGXFindSharedPBDescKM: ResManRegisterRes failed"));
++
++                              eError = PVRSRV_ERROR_GENERIC;
++                              goto ExitNotFound;
++                      }
++
++                      *ppsSharedPBDescKernelMemInfo = psStubPBDesc->psSharedPBDescKernelMemInfo;
++                      *ppsHWPBDescKernelMemInfo = psStubPBDesc->psHWPBDescKernelMemInfo;
++                      *ppsBlockKernelMemInfo = psStubPBDesc->psBlockKernelMemInfo;
++
++                      *ui32SharedPBDescSubKernelMemInfosCount =
++                              psStubPBDesc->ui32SubKernelMemInfosCount;
++
++                      *pppsSharedPBDescSubKernelMemInfos = ppsSharedPBDescSubKernelMemInfos;
++
++                      for(i=0; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++                      {
++                              ppsSharedPBDescSubKernelMemInfos[i] =
++                                      psStubPBDesc->ppsSubKernelMemInfos[i];
++                      }
++
++                      psStubPBDesc->ui32RefCount++;
++                      *phSharedPBDesc = (IMG_HANDLE)psResItem;
++                      return PVRSRV_OK;
++              }
++      }
++
++      
++      eError = PVRSRV_OK;
++ExitNotFound:
++      *phSharedPBDesc = IMG_NULL;
++
++      return eError;
++}
++
++IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO* psSGXDevInfo) 
++{
++      PVRSRV_STUB_PBDESC **ppsStubPBDesc;
++      
++      for(ppsStubPBDesc = (PVRSRV_STUB_PBDESC **)&psSGXDevInfo->psStubPBDescListKM;
++              *ppsStubPBDesc != IMG_NULL;
++              ppsStubPBDesc = &(*ppsStubPBDesc)->psNext)
++      {
++              PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
++              IMG_UINT32* pui32Flags = (IMG_UINT32*)psStubPBDesc->psHWPBDescKernelMemInfo->pvLinAddrKM;
++              *pui32Flags |= 1;
++      }
++}
++
++
++static PVRSRV_ERROR
++SGXCleanupSharedPBDescKM(PVRSRV_STUB_PBDESC *psStubPBDescIn)
++{
++      PVRSRV_STUB_PBDESC **ppsStubPBDesc;
++      IMG_UINT32 i;
++      PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++
++      psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)psStubPBDescIn->hDevCookie)->pvDevice;
++
++      for(ppsStubPBDesc = (PVRSRV_STUB_PBDESC **)&psSGXDevInfo->psStubPBDescListKM;
++              *ppsStubPBDesc != IMG_NULL;
++              ppsStubPBDesc = &(*ppsStubPBDesc)->psNext)
++      {
++              PVRSRV_STUB_PBDESC *psStubPBDesc = *ppsStubPBDesc;
++
++              if(psStubPBDesc == psStubPBDescIn)
++              {
++                      psStubPBDesc->ui32RefCount--;
++                      PVR_ASSERT((IMG_INT32)psStubPBDesc->ui32RefCount >= 0);
++
++                      if(psStubPBDesc->ui32RefCount == 0)
++                      {
++                              PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psSGXDevInfo->psSGXHostCtl;
++#if defined (PDUMP)
++                              IMG_HANDLE hUniqueTag = MAKEUNIQUETAG(psSGXDevInfo->psKernelSGXHostCtlMemInfo);
++#endif
++
++                              
++                              
++                              psSGXHostCtl->sTAHWPBDesc.uiAddr = 0;
++                              psSGXHostCtl->s3DHWPBDesc.uiAddr = 0;
++
++                              
++                              PDUMPCOMMENT("TA/3D CCB Control - Reset HW PBDesc records");
++                              PDUMPMEM(IMG_NULL, psSGXDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, sTAHWPBDesc), sizeof(IMG_DEV_VIRTADDR), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++                              PDUMPMEM(IMG_NULL, psSGXDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, s3DHWPBDesc), sizeof(IMG_DEV_VIRTADDR), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++
++                              *ppsStubPBDesc = psStubPBDesc->psNext;
++
++                              for(i=0 ; i<psStubPBDesc->ui32SubKernelMemInfosCount; i++)
++                              {
++                                      
++                                      PVRSRVFreeDeviceMemKM(psStubPBDesc->hDevCookie,
++                                                                                psStubPBDesc->ppsSubKernelMemInfos[i],
++                                                                                IMG_FALSE);
++                              }
++
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                                * psStubPBDesc->ui32SubKernelMemInfosCount,
++                                                psStubPBDesc->ppsSubKernelMemInfos,
++                                                0);
++
++                              PVRSRVFreeSharedSysMemoryKM(psStubPBDesc->psBlockKernelMemInfo);
++
++                              PVRSRVFreeDeviceMemKM(psStubPBDesc->hDevCookie, psStubPBDesc->psHWPBDescKernelMemInfo, IMG_FALSE);
++
++                              PVRSRVFreeSharedSysMemoryKM(psStubPBDesc->psSharedPBDescKernelMemInfo);
++              
++                              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                                sizeof(PVRSRV_STUB_PBDESC),
++                                                psStubPBDesc,
++                                                0);
++
++                      }
++                      return PVRSRV_OK;
++              }
++      }
++
++      return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++static PVRSRV_ERROR SGXCleanupSharedPBDescCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      PVRSRV_STUB_PBDESC *psStubPBDesc = (PVRSRV_STUB_PBDESC *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      return SGXCleanupSharedPBDescKM(psStubPBDesc);
++}
++
++IMG_EXPORT PVRSRV_ERROR
++SGXUnrefSharedPBDescKM(IMG_HANDLE hSharedPBDesc)
++{
++      PVR_ASSERT(hSharedPBDesc != IMG_NULL);
++
++      return ResManFreeResByPtr((PRESMAN_ITEM)hSharedPBDesc, IMG_TRUE);
++}
++
++IMG_EXPORT PVRSRV_ERROR
++SGXAddSharedPBDescKM(IMG_HANDLE hDevCookie,
++                                       PVRSRV_KERNEL_MEM_INFO *psSharedPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psHWPBDescKernelMemInfo,
++                                       PVRSRV_KERNEL_MEM_INFO *psBlockKernelMemInfo,
++                                       IMG_UINT32 ui32TotalPBSize,
++                                       IMG_HANDLE *phSharedPBDesc,
++                                       PVRSRV_KERNEL_MEM_INFO **ppsSharedPBDescSubKernelMemInfos,
++                                       IMG_UINT32 ui32SharedPBDescSubKernelMemInfosCount)
++{
++      PVRSRV_STUB_PBDESC *psStubPBDesc=IMG_NULL;
++      PVRSRV_ERROR eRet = PVRSRV_ERROR_GENERIC;
++      IMG_UINT32 i;
++      PVRSRV_SGXDEV_INFO *psSGXDevInfo;
++      PRESMAN_ITEM psResItem;
++
++      psSGXDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      for(psStubPBDesc = psSGXDevInfo->psStubPBDescListKM;
++              psStubPBDesc != IMG_NULL;
++              psStubPBDesc = psStubPBDesc->psNext)
++      {
++              
++              if(psStubPBDesc->ui32TotalPBSize == ui32TotalPBSize)
++              {
++                      
++                      psResItem = ResManRegisterRes(
++                                      RESMAN_TYPE_SHARED_PB_DESC,
++                                      (IMG_VOID *)psStubPBDesc,
++                                      0,
++                                      &SGXCleanupSharedPBDescCallback,
++                                      0);
++                      if (psResItem == IMG_NULL)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,
++                                      "SGXAddSharedPBDescKM: "
++                                      "Failed to register exisitng shared "
++                                      "PBDesc with the resource manager"));
++                              goto NoAddKeepPB;
++                      }
++
++                      
++                      psStubPBDesc->ui32RefCount++;
++
++                      *phSharedPBDesc = (IMG_HANDLE)psResItem;
++                      eRet = PVRSRV_OK;
++                      goto NoAddKeepPB;
++              }
++              if(psStubPBDesc->psSharedPBDescKernelMemInfo
++                 == psSharedPBDescKernelMemInfo)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                                       "Tried to add an already managed "
++                                       "meminfo"));
++                      eRet = PVRSRV_ERROR_INVALID_PARAMS;
++                      goto NoAddKeepPB;
++              }
++      }
++
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_STUB_PBDESC),
++                                (IMG_VOID **)&psStubPBDesc,
++                                0) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: Failed to alloc "
++                                      "StubPBDesc"));
++              eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto NoAdd;
++      }
++
++
++      psStubPBDesc->ppsSubKernelMemInfos=IMG_NULL;
++
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                * ui32SharedPBDescSubKernelMemInfosCount,
++                                (IMG_VOID **)&psStubPBDesc->ppsSubKernelMemInfos,
++                                0) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                               "Failed to alloc "
++                               "StubPBDesc->ppsSubKernelMemInfos"));
++              eRet = PVRSRV_ERROR_OUT_OF_MEMORY;
++              goto NoAdd;
++      }
++
++      if(PVRSRVDissociateMemFromResmanKM(psSharedPBDescKernelMemInfo)
++         != PVRSRV_OK)
++      {
++              goto NoAdd;
++      }
++
++      if(PVRSRVDissociateMemFromResmanKM(psHWPBDescKernelMemInfo)
++         != PVRSRV_OK)
++      {
++              goto NoAdd;
++      }
++
++      if(PVRSRVDissociateMemFromResmanKM(psBlockKernelMemInfo)
++         != PVRSRV_OK)
++      {
++              goto NoAdd;
++      }
++      
++      psStubPBDesc->ui32RefCount = 1;
++      psStubPBDesc->ui32TotalPBSize = ui32TotalPBSize;
++      psStubPBDesc->psSharedPBDescKernelMemInfo = psSharedPBDescKernelMemInfo;
++      psStubPBDesc->psHWPBDescKernelMemInfo = psHWPBDescKernelMemInfo;
++      psStubPBDesc->psBlockKernelMemInfo = psBlockKernelMemInfo;
++
++      psStubPBDesc->ui32SubKernelMemInfosCount =
++              ui32SharedPBDescSubKernelMemInfosCount;
++      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++      {
++              psStubPBDesc->ppsSubKernelMemInfos[i] = ppsSharedPBDescSubKernelMemInfos[i];
++              if(PVRSRVDissociateMemFromResmanKM(ppsSharedPBDescSubKernelMemInfos[i])
++                 != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                                       "Failed to dissociate shared PBDesc "
++                                       "from process"));
++                      goto NoAdd;
++              }
++      }
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_SHARED_PB_DESC,
++                                      (IMG_VOID *)psStubPBDesc,
++                                      0,
++                                      &SGXCleanupSharedPBDescCallback,
++                                      0);
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXAddSharedPBDescKM: "
++                                       "Failed to register shared PBDesc "
++                                       " with the resource manager"));
++              goto NoAdd;
++      }
++      psStubPBDesc->hDevCookie = hDevCookie;
++
++      
++      psStubPBDesc->psNext = psSGXDevInfo->psStubPBDescListKM;
++      psSGXDevInfo->psStubPBDescListKM = psStubPBDesc;
++
++      *phSharedPBDesc = (IMG_HANDLE)psResItem;
++
++      return PVRSRV_OK;
++
++NoAdd:
++      if(psStubPBDesc)
++      {
++              if(psStubPBDesc->ppsSubKernelMemInfos)
++              {
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                        sizeof(PVRSRV_KERNEL_MEM_INFO *)
++                                        * ui32SharedPBDescSubKernelMemInfosCount,
++                                        psStubPBDesc->ppsSubKernelMemInfos,
++                                        0);
++              }
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
++                                sizeof(PVRSRV_STUB_PBDESC),
++                                psStubPBDesc,
++                                0);
++      }
++
++NoAddKeepPB:
++      for(i=0; i<ui32SharedPBDescSubKernelMemInfosCount; i++)
++              PVRSRVFreeDeviceMemKM(hDevCookie, ppsSharedPBDescSubKernelMemInfos[i], IMG_FALSE);
++
++      PVRSRVFreeSharedSysMemoryKM(psSharedPBDescKernelMemInfo);
++      PVRSRVFreeDeviceMemKM(hDevCookie, psStubPBDesc->psHWPBDescKernelMemInfo, IMG_FALSE);
++
++      PVRSRVFreeSharedSysMemoryKM(psBlockKernelMemInfo);
++
++      return eRet;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgx2dcore.c  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,877 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sgxinfo.h"
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++
++#include "sgx2dcore.h"
++
++#define SGX2D_FLUSH_BH                                                        (0xF0000000) 
++#define SGX2D_QUEUED_BLIT_PAD 4
++
++#define SGX2D_COMMAND_QUEUE_SIZE 1024
++
++#define SGX2D_2D_NOT_IDLE(psDevInfo)  ((psDevInfo)->ui322DFifoSize > SGX2DFifoFreeSpace(psDevInfo) || SGX2DIsBusy(psDevInfo))
++
++static IMG_VOID SGX2DHardwareKick(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS, EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK | EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK);
++}
++
++IMG_VOID SGX2DHWRecoveryStart(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->b2DHWRecoveryInProgress = IMG_TRUE;
++      psDevInfo->b2DHWRecoveryEndPending = IMG_FALSE;
++}
++
++IMG_VOID SGX2DHWRecoveryEnd(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      psDevInfo->b2DHWRecoveryEndPending = IMG_TRUE;
++      psDevInfo->b2DHWRecoveryInProgress = IMG_FALSE;
++      SGX2DHardwareKick(psDevInfo);
++}
++
++#if !defined(NO_HARDWARE)
++static IMG_VOID SGX2DKick(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_BOOL bStart = IMG_FALSE;
++      IMG_UINT32 uiStart = 0;
++
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      
++      do
++      {
++              if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
++              {
++                      break;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++}
++#endif 
++
++IMG_BOOL SGX2DIsBusy(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_UINT32 ui32BlitStatus;
++
++      ui32BlitStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM,
++              EUR_CR_2D_BLIT_STATUS);
++
++      return (ui32BlitStatus & EUR_CR_2D_BLIT_STATUS_BUSY_MASK) != 0;
++}
++
++IMG_UINT32 SGX2DCompletedBlits(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_UINT32 ui32BlitStatus;
++
++      ui32BlitStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM,
++              EUR_CR_2D_BLIT_STATUS);
++
++      return (ui32BlitStatus & EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK) >>
++                                      EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DAcquireSlavePort)
++#endif
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DAcquireSlavePort)
++#endif
++static INLINE
++PVRSRV_ERROR SGX2DAcquireSlavePort(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                                 IMG_BOOL                       bBlock)
++{
++#if defined(SGX2D_DIRECT_BLITS)
++      PVR_UNREFERENCED_PARAMETER(bBlock);
++      return OSLockResource(&psDevInfo->s2DSlaveportResource, ISR_ID);
++#else
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++      PVR_UNREFERENCED_PARAMETER(bBlock);
++
++      return PVRSRV_OK;
++#endif
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DReleaseSlavePort)
++#endif
++static INLINE
++PVRSRV_ERROR SGX2DReleaseSlavePort(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++#if defined(SGX2D_DIRECT_BLITS)
++      return OSUnlockResource(&psDevInfo->s2DSlaveportResource, ISR_ID);
++#else
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      return PVRSRV_OK;
++#endif
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DAcquireFifoSpace)
++#endif
++static INLINE
++PVRSRV_ERROR SGX2DAcquireFifoSpace(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                                 IMG_UINT32                   ui32MinBytesRequired,
++                                                                 IMG_UINT32                   *pui32BytesObtained)
++{
++      PVRSRV_ERROR    eError = PVRSRV_ERROR_FIFO_SPACE;
++      IMG_UINT32              ui32FifoBytes;
++
++#if defined(DEBUG) && defined(SGX2D_DIRECT_BLITS)
++      
++      if (OSIsResourceLocked(&psDevInfo->s2DSlaveportResource, ISR_ID) == IMG_FALSE)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGX2DAcquireFifoSpace: 2D slaveport is not locked"));
++              return PVRSRV_ERROR_PROCESSING_BLOCKED;
++      }
++#endif 
++
++      
++      ui32FifoBytes = SGX2DFifoFreeSpace(psDevInfo);
++
++      
++      if (ui32FifoBytes >= ui32MinBytesRequired)
++      {
++              if (pui32BytesObtained)
++                      *pui32BytesObtained = ui32FifoBytes;
++              
++              eError = PVRSRV_OK;
++      }
++
++      return eError;
++}
++
++#if defined(DEBUG) && defined (SGX2D_TRACE_BLIT)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DTraceBlt)
++#endif
++FORCE_INLINE
++IMG_VOID SGX2DTraceBlt(IMG_UINT32 *pui32BltData, IMG_UINT32 ui32Count)
++{
++      IMG_UINT32 i;
++
++      PVR_TRACE(("----SGX 2D BLIT----"));
++
++      for (i = 0; i < ui32Count; i++)
++      {
++              PVR_TRACE(("word[%02d]: 0x%08x", i, pui32BltData[i]));
++      }
++}
++#else
++#define SGX2DTraceBlt(pui32BltData, ui32Count)
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DWriteSlavePort)
++#endif
++FORCE_INLINE
++IMG_VOID SGX2DWriteSlavePort(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                       IMG_UINT32 ui32Value)
++{
++      SGX_SLAVE_PORT          *psSlavePort= &psDevInfo->s2DSlavePortKM;
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      IMG_UINT32 *pui32Offset = psSlavePort->pui32Offset;
++
++      
++      if(*pui32Offset > (psSlavePort->ui32DataRange >> 1))
++      {
++              
++              *pui32Offset = 0;
++      }
++#endif
++
++      SGX2DTraceBlt(&ui32Value, 1);
++
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      *((IMG_UINT32*)((IMG_UINT32)psSlavePort->pvData + *pui32Offset)) = ui32Value;
++#else
++      *((IMG_UINT32*)psSlavePort->pvData) = ui32Value;
++#endif
++
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      *pui32Offset += 4;
++#endif
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DWriteSlavePortBatch)
++#endif
++FORCE_INLINE
++PVRSRV_ERROR SGX2DWriteSlavePortBatch(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                                        IMG_UINT32                    *pui32LinDataAddr,
++                                                                        IMG_UINT32                    ui32Bytes)
++{
++      IMG_INT32       i;
++      SGX_SLAVE_PORT  *psSlavePort= &psDevInfo->s2DSlavePortKM;
++      IMG_UINT32      *pui32LinPortAddrBase = (IMG_UINT32*) psSlavePort->pvData;
++      IMG_UINT32      ui32DWORDs = ui32Bytes >> 2;
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      IMG_UINT32      *pui32Offset = psSlavePort->pui32Offset;
++      IMG_UINT32      *pui32LinPortAddr;
++
++      
++      if (ui32Bytes > (psSlavePort->ui32DataRange >> 1))
++      {
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      
++      if(*pui32Offset > (psSlavePort->ui32DataRange >> 1))
++      {
++              
++              *pui32Offset = 0;
++      }
++
++      
++      pui32LinPortAddr = (IMG_UINT32*)((IMG_UINT32)pui32LinPortAddrBase + *pui32Offset);
++#endif
++      
++      SGX2DTraceBlt(pui32LinDataAddr, ui32DWORDs);
++
++      
++      for (i = ui32DWORDs; i != 0 ; i -= ui32DWORDs)
++      {
++              ui32DWORDs = (i < 32) ? i : 32;
++
++              switch(ui32DWORDs)
++              {
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++                      case 32:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 31:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 30:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 29:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 28:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 27:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 26:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 25:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 24:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 23:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 22:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 21:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 20:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 19:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 18:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 17:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 16:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 15:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 14:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 13:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 12:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 11:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 10:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 9:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 8:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 7:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 6:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 5:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 4:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 3:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 2:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++                      case 1:
++                      *pui32LinPortAddr++ = *pui32LinDataAddr++;
++#else
++                      case 32:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 31:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 30:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 29:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 28:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 27:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 26:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 25:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 24:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 23:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 22:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 21:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 20:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 19:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 18:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 17:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 16:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 15:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 14:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 13:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 12:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 11:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 10:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 9:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 8:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 7:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 6:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 5:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 4:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 3:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 2:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++                      case 1:
++                      *pui32LinPortAddrBase = *pui32LinDataAddr++;
++#endif
++              }
++      }
++
++#if defined(SGX2D_INCREMENTING_SP_WRITES)
++      
++      *pui32Offset += ui32Bytes;
++#endif
++
++      return PVRSRV_OK;
++}
++
++IMG_BOOL SGX2DProcessBlit(IMG_HANDLE          hCmdCookie,
++                                                      IMG_UINT32              ui32DataSize,
++                                                      IMG_VOID                *pvData)
++{
++      PVRSRV_BLT_CMD_INFO             *psBltCmd;
++      PVRSRV_SGXDEV_INFO              *psDevInfo;
++      IMG_UINT32                      ui32BytesRequired;
++      IMG_UINT32                      ui32BytesObtained = 0;
++      IMG_BOOL                        bError = IMG_TRUE;
++      PVRSRV_ERROR                    eError;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DProcessBlit: Start"));
++
++      psBltCmd = (PVRSRV_BLT_CMD_INFO*)pvData;
++
++      
++      if (psBltCmd == IMG_NULL || psBltCmd->ui32CmdSize != ui32DataSize)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"ProcessBlit: Data packet size is incorrect"));
++              return IMG_FALSE;
++      }
++
++      
++      psDevInfo = psBltCmd->psDevInfo;
++
++      if (psDevInfo->h2DCmdCookie != IMG_NULL)
++      {
++              return IMG_FALSE;
++      }
++
++      
++      if (psDevInfo->b2DHWRecoveryInProgress)
++      {
++              psDevInfo->h2DCmdCookie = hCmdCookie;
++              SGX2DHardwareKick(psDevInfo);
++              return IMG_TRUE;
++      }
++
++      
++      if (SGX2DAcquireSlavePort(psDevInfo, IMG_FALSE) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "ProcessBlit: Couldn't acquire slaveport"));
++              return IMG_FALSE;
++      }
++
++#ifdef        FIXME
++      
++
++#endif
++
++      
++      if (psDevInfo->b2DHWRecoveryEndPending && SGX2D_2D_NOT_IDLE(psDevInfo))
++      {
++                              psDevInfo->h2DCmdCookie = hCmdCookie;
++                              SGX2DHardwareKick(psDevInfo);
++                              PVR_ASSERT(bError);
++                              goto ErrorExit;
++      }
++      psDevInfo->b2DHWRecoveryEndPending = IMG_FALSE;
++
++      ui32BytesRequired = psBltCmd->ui32DataByteSize + SGX2D_QUEUED_BLIT_PAD;
++
++      
++      eError = SGX2DAcquireFifoSpace(psDevInfo, ui32BytesRequired,    &ui32BytesObtained);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "ProcessBlit: Get Fifo Space failed"));
++              bError = IMG_FALSE;
++              goto ErrorExit;
++      }
++
++      
++      SGX2DWriteSlavePortBatch(psDevInfo,
++                                                       psBltCmd->aui32BltData,
++                                                       psBltCmd->ui32DataByteSize);
++
++      
++      psDevInfo->h2DCmdCookie = hCmdCookie;
++
++      
++      SGX2DWriteSlavePort(psDevInfo, SGX2D_FLUSH_BH);
++
++      PVR_ASSERT(bError);
++ErrorExit:
++
++      
++      if(SGX2DReleaseSlavePort(psDevInfo) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGX2DReleaseSlavePort: failed to release slaveport"));
++      }
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DProcessBlit: Exit.  Error %d", (int)bError));
++
++      return bError;
++}
++
++IMG_VOID SGX2DHandle2DComplete(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      IMG_HANDLE hCmdCookie;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DHandle2DComplete: Start"));
++
++      hCmdCookie = psDevInfo->h2DCmdCookie;
++      psDevInfo->h2DCmdCookie = IMG_NULL;
++
++      
++      if (hCmdCookie != IMG_NULL)
++      {
++              PVRSRVCommandCompleteKM(hCmdCookie, IMG_FALSE);
++      }
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DHandle2DComplete: Exit"));
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueueBlitKM(PVRSRV_SGXDEV_INFO              *psDevInfo,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *psDstSync,
++                                                        IMG_UINT32                            ui32NumSrcSyncs,
++                                                        PVRSRV_KERNEL_SYNC_INFO       *apsSrcSync[],
++                                                        IMG_UINT32                            ui32DataByteSize,
++                                                        IMG_UINT32                            *pui32BltData)
++{
++#if defined(NO_HARDWARE)
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++      PVR_UNREFERENCED_PARAMETER(psDstSync);
++      PVR_UNREFERENCED_PARAMETER(ui32NumSrcSyncs);
++      PVR_UNREFERENCED_PARAMETER(apsSrcSync);
++      PVR_UNREFERENCED_PARAMETER(ui32DataByteSize);
++      PVR_UNREFERENCED_PARAMETER(pui32BltData);
++
++      return PVRSRV_OK;
++#else
++      PVRSRV_COMMAND          *psCommand;
++      PVRSRV_BLT_CMD_INFO     *psBltCmd;
++      IMG_UINT32              ui32CmdByteSize;
++      IMG_UINT32              i;
++      PVRSRV_ERROR            eError;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueueBlitKM: Start"));
++
++      
++      if (psDevInfo->b2DHWRecoveryInProgress == IMG_TRUE)
++      {
++              return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
++      }
++
++      
++      if ((ui32DataByteSize + SGX2D_QUEUED_BLIT_PAD) > psDevInfo->ui322DFifoSize)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: Blit too big for FIFO. Blit size: %d (+ padding %d), FIFO size: %d", ui32DataByteSize, SGX2D_QUEUED_BLIT_PAD, psDevInfo->ui322DFifoSize));
++
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      ui32CmdByteSize = sizeof(PVRSRV_BLT_CMD_INFO)
++                              + ui32DataByteSize
++                              - sizeof(IMG_UINT32);
++
++      eError = PVRSRVInsertCommandKM((PVRSRV_QUEUE_INFO *)psDevInfo->h2DQueue,
++                                      &psCommand,
++                                      SYS_DEVICE_SGX, 
++                                      SGX_2D_BLT_COMMAND,
++                                      (psDstSync == IMG_NULL) ? 0 : 1,
++                                      &psDstSync,
++                                      ui32NumSrcSyncs,
++                                      apsSrcSync,
++                                      ui32CmdByteSize );
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: PVRSRVInsertCommandKM failed. Error %d", eError));
++#ifdef DEBUG
++              if (eError == PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
++              {
++                      if (!SGX2DIsBusy(psDevInfo))
++                      {
++                              
++                              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: 2D core not busy, command queue full - lockup suspected"));
++                      }
++              }
++#endif
++              return eError;
++      }
++
++      
++      psBltCmd                = (PVRSRV_BLT_CMD_INFO*) psCommand->pvData;
++      psBltCmd->ui32CmdSize   = ui32CmdByteSize;
++      psBltCmd->psDevInfo     = psDevInfo;
++
++      
++      psBltCmd->psDstSync = psDstSync;
++
++      psBltCmd->ui32NumSrcSyncInfos = ui32NumSrcSyncs;
++      for(i = 0; i < ui32NumSrcSyncs; i++)
++      {
++              
++              psBltCmd->apsSrcSync[i] = apsSrcSync[i];
++      }
++
++      if (pui32BltData != IMG_NULL)
++      {
++              for(i = 0; i < (ui32DataByteSize>>2); i++)
++              {
++                      psBltCmd->aui32BltData[i] = pui32BltData[i];
++              }
++      }
++
++      psBltCmd->ui32DataByteSize = ui32DataByteSize;
++
++      
++      eError = PVRSRVSubmitCommandKM((PVRSRV_QUEUE_INFO *)psDevInfo->h2DQueue, psCommand);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DQueueBlitKM: PVRSRVSubmitCommandKM failed. Error %d", eError));
++      }
++
++      SGX2DKick(psDevInfo);
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueueBlitKM: Exit. Error: %d", eError));
++
++      return eError;
++#endif        
++}
++
++#if defined(SGX2D_DIRECT_BLITS)
++IMG_EXPORT
++PVRSRV_ERROR SGX2DDirectBlitKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                         IMG_UINT32                   ui32DataByteSize,
++                                                         IMG_UINT32                   *pui32BltData)
++{
++#if defined(NO_HARDWARE)
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++      PVR_UNREFERENCED_PARAMETER(ui32DataByteSize);
++      PVR_UNREFERENCED_PARAMETER(pui32BltData);
++
++      return PVRSRV_OK;
++#else
++      PVRSRV_ERROR    eError;
++      PVRSRV_ERROR    eSrvErr;
++      
++      IMG_UINT32              ui32CmdByteSize = ui32DataByteSize + 4;
++      IMG_BOOL                bStart = IMG_FALSE;
++      IMG_UINT32              uiStart = 0;
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DDirectBlitKM: Start"));
++
++      
++      if (psDevInfo->b2DHWRecoveryInProgress == IMG_TRUE)
++      {
++              return PVRSRV_ERROR_FIFO_SPACE;
++      }
++
++      
++      if ( ui32CmdByteSize > psDevInfo->ui322DFifoSize)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Blit too big for FIFO. Blit size: %d (+ padding %d), FIFO size: %d", ui32DataByteSize, 4, psDevInfo->ui322DFifoSize));
++
++              return PVRSRV_ERROR_CMD_TOO_BIG;
++      }
++
++      eSrvErr = SGX2DAcquireSlavePort (psDevInfo, IMG_TRUE);
++      if (eSrvErr != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Cannot acquire slaveport. Error %d", eSrvErr));
++              return eSrvErr;
++      }
++
++#ifdef        FIXME
++      
++
++#endif
++      do
++      {
++              eSrvErr = SGX2DAcquireFifoSpace(psDevInfo,
++                                        ui32CmdByteSize,
++                                        IMG_NULL);
++              if (eSrvErr == PVRSRV_OK)
++              {
++                      break;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      if (eSrvErr != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Cannot acquire FIFO space. Error %d", eSrvErr));
++              
++              eError = eSrvErr;
++      }
++      else
++      {
++              
++              if (psDevInfo->b2DHWRecoveryEndPending && SGX2D_2D_NOT_IDLE(psDevInfo))
++              {
++                      eError = PVRSRV_ERROR_FIFO_SPACE;
++              }
++              else
++              {
++                      eError = PVRSRV_OK;
++
++                      psDevInfo->b2DHWRecoveryEndPending = IMG_FALSE;
++
++                      SGX2DWriteSlavePortBatch(psDevInfo, pui32BltData, ui32DataByteSize);
++
++                      SGX2DWriteSlavePort(psDevInfo, EURASIA2D_FENCE_BH);
++              }
++      }
++
++      eSrvErr = SGX2DReleaseSlavePort(psDevInfo);
++      if (eSrvErr != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DDirectBlitKM: Cannot release slave port.  Error %d", eSrvErr));
++
++              if (eError != PVRSRV_OK)
++              {
++                      eError = eSrvErr;
++              }
++      }
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DDirectBlitKM: Exit.  Error: %d", eError));
++
++      
++      SGX2DKick(psDevInfo);
++
++      return eError;
++#endif        
++}
++#endif 
++
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE) || defined(PVR2D_ALT_2DHW)
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGX2DQuerySyncOpsComplete)
++#endif
++static INLINE
++IMG_BOOL SGX2DQuerySyncOpsComplete(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo)
++{
++      PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
++
++      return (IMG_BOOL)(
++                                        (psSyncData->ui32ReadOpsComplete == psSyncData->ui32ReadOpsPending) &&
++                                        (psSyncData->ui32WriteOpsComplete == psSyncData->ui32WriteOpsPending)
++                                       );
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGX2DQueryBlitsCompleteKM(PVRSRV_SGXDEV_INFO     *psDevInfo,
++                                                                         PVRSRV_KERNEL_SYNC_INFO *psSyncInfo,
++                                                                         IMG_BOOL bWaitForComplete)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0;
++
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Start"));
++
++      if(SGX2DQuerySyncOpsComplete(psSyncInfo))
++      {
++              
++              PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Blits complete."));
++              return PVRSRV_OK;
++      }
++
++      
++      if (!bWaitForComplete)
++      {
++              
++              PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: No wait. Ops pending."));
++              return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++      }
++
++       
++      PVR_DPF((PVR_DBG_MESSAGE, "SGX2DQueryBlitsCompleteKM: Ops pending. Start polling."));
++      do
++      {
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++
++              if(SGX2DQuerySyncOpsComplete(psSyncInfo))
++              {
++                      
++                      PVR_DPF((PVR_DBG_CALLTRACE, "SGX2DQueryBlitsCompleteKM: Wait over.  Blits complete."));
++                      return PVRSRV_OK;
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      uiStart = OSClockus();
++                      bStart = IMG_TRUE;
++              }
++
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      
++      PVR_DPF((PVR_DBG_ERROR,"SGX2DQueryBlitsCompleteKM: Timed out. Ops pending."));
++
++      return PVRSRV_ERROR_TIMEOUT;
++}
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++PVRSRV_ERROR SGX2DInit(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++
++      
++      PVR_ASSERT(psDevInfo->ui322DFifoSize == 0);
++      psDevInfo->ui322DFifoSize =  SGX2DFifoFreeSpace(psDevInfo);
++
++      PVR_TRACE(("SGX2DInit: 2D FIFO size: %d", psDevInfo->ui322DFifoSize));
++
++      
++      PVR_ASSERT(psDevInfo->s2DSlavePortKM.pui32Offset == 0);
++      PVR_ASSERT(psDevInfo->ui322DFifoOffset == 0);
++      psDevInfo->s2DSlavePortKM.pui32Offset = &psDevInfo->ui322DFifoOffset;
++
++      PVR_ASSERT(psDevInfo->h2DQueue == IMG_NULL);
++      eError = PVRSRVCreateCommandQueueKM(SGX2D_COMMAND_QUEUE_SIZE,
++                                              (PVRSRV_QUEUE_INFO **)&psDevInfo->h2DQueue);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX2DInit: PVRSRVCreateCommandQueueKM failed (%d)", eError));
++
++              return eError;
++      }
++
++      PVR_ASSERT(psDevInfo->h2DCmdCookie == IMG_NULL);
++      PVR_ASSERT(!psDevInfo->b2DHWRecoveryInProgress);
++      PVR_ASSERT(!psDevInfo->b2DHWRecoveryEndPending);
++      PVR_ASSERT(psDevInfo->ui322DCompletedBlits == 0);
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGX2DDeInit(PVRSRV_SGXDEV_INFO      *psDevInfo)
++{
++      PVRSRV_ERROR eError;
++
++      if (psDevInfo->h2DQueue != IMG_NULL)
++      {
++              eError = PVRSRVDestroyCommandQueueKM((PVRSRV_QUEUE_INFO *)psDevInfo->h2DQueue);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGX2DDeInit: PVRSRVDestroyCommandQueueKM failed (%d)", eError));
++
++                      return eError;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxconfig.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,131 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXCONFIG_H__
++#define __SGXCONFIG_H__
++
++#define DEV_DEVICE_TYPE                       PVRSRV_DEVICE_TYPE_SGX
++#define DEV_DEVICE_CLASS              PVRSRV_DEVICE_CLASS_3D
++
++#define DEV_MAJOR_VERSION             1
++#define DEV_MINOR_VERSION             0
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 32
++      #define SGX_ADDRESS_SPACE_SIZE                          32
++
++      #define SGX_GENERAL_HEAP_BASE                           0x00400000
++      #define SGX_GENERAL_HEAP_SIZE                           (0x78000000-0x00800000)
++
++      #define SGX_TADATA_HEAP_BASE                            0x78000000
++      #define SGX_TADATA_HEAP_SIZE                            (0x08000000-0x00400000)
++
++      #define SGX_KERNEL_CODE_HEAP_BASE                       0x80000000
++      #define SGX_KERNEL_CODE_HEAP_SIZE                       0x00080000
++
++      #define SGX_VIDEO_CODE_HEAP_BASE                        0x81000000
++      #define SGX_VIDEO_CODE_HEAP_SIZE                        0x00080000
++
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_BASE         0x82000000
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_SIZE         0x05000000
++
++      #define SGX_PIXELSHADER_HEAP_BASE                       0x88000000
++      #define SGX_PIXELSHADER_HEAP_SIZE                       0x00500000
++      
++      #define SGX_VERTEXSHADER_HEAP_BASE                      0x89000000
++      #define SGX_VERTEXSHADER_HEAP_SIZE                      0x00200000
++
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE         0x8A000000
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE         0x02000000
++
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE        0x8C000000
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE        0x02000000
++
++      #define SGX_SYNCINFO_HEAP_BASE                          0xA0000000
++      #define SGX_SYNCINFO_HEAP_SIZE                          0x01000000
++
++      #define SGX_3DPARAMETERS_HEAP_BASE                      0xC0000000
++      #define SGX_3DPARAMETERS_HEAP_SIZE                      (0x10000000-0x00400000)
++
++      #define SGX_2D_HEAP_BASE                                        0xD0000000
++      #define SGX_2D_HEAP_SIZE                                        (0x08000000-0x00400000)
++
++      #define SGX_GENERAL_MAPPING_HEAP_BASE           0xD8000000
++      #define SGX_GENERAL_MAPPING_HEAP_SIZE           (0x08000000-0x00400000)
++
++      
++      #define SGX_CORE_IDENTIFIED
++#endif
++
++#if SGX_FEATURE_ADDRESS_SPACE_SIZE == 28
++      #define SGX_ADDRESS_SPACE_SIZE                          28
++
++      #define SGX_GENERAL_HEAP_BASE                           0x00400000
++      #define SGX_GENERAL_HEAP_SIZE                           (0x07000000-0x00401000)
++
++      
++      #define SGX_TADATA_HEAP_BASE                            0x07000000
++      #define SGX_TADATA_HEAP_SIZE                            (0x01000000-0x00001000)
++
++      #define SGX_3DPARAMETERS_HEAP_BASE                      0x08000000
++      #define SGX_3DPARAMETERS_HEAP_SIZE                      (0x04000000-0x00001000)
++
++      #define SGX_GENERAL_MAPPING_HEAP_BASE           0x0C000000
++      #define SGX_GENERAL_MAPPING_HEAP_SIZE           (0x01000000-0x00001000)
++
++      #define SGX_PIXELSHADER_HEAP_BASE                       0x0D000000
++      #define SGX_PIXELSHADER_HEAP_SIZE                       0x00500000
++
++      #define SGX_VERTEXSHADER_HEAP_BASE                      0x0D800000
++      #define SGX_VERTEXSHADER_HEAP_SIZE                      0x00200000
++
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_BASE         0x0E000000
++      #define SGX_PDSPIXEL_CODEDATA_HEAP_SIZE         (0x00800000-0x00001000)
++
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_BASE        0x0E800000
++      #define SGX_PDSVERTEX_CODEDATA_HEAP_SIZE        (0x00800000-0x00001000)
++
++      #define SGX_KERNEL_CODE_HEAP_BASE                       0x0F000000
++      #define SGX_KERNEL_CODE_HEAP_SIZE                       0x00080000
++
++      #define SGX_VIDEO_CODE_HEAP_BASE                        0x0F400000
++      #define SGX_VIDEO_CODE_HEAP_SIZE                        0x00080000
++
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_BASE         0x0F800000
++      #define SGX_KERNEL_VIDEO_DATA_HEAP_SIZE         (0x00400000-0x00001000)
++              
++      #define SGX_SYNCINFO_HEAP_BASE                          0x0FC00000
++      #define SGX_SYNCINFO_HEAP_SIZE                          (0x00400000-0x00001000)
++
++      
++      #define SGX_CORE_IDENTIFIED
++#endif
++
++#if !defined(SGX_CORE_IDENTIFIED)
++      #error "sgxconfig.h: ERROR: unspecified SGX Core version"
++#endif        
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinfokm.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,147 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SGXINFOKM_H__
++#define __SGXINFOKM_H__
++
++#include "sgxdefs.h"
++#include "device.h"
++#include "sysconfig.h"
++#include "sgxscript.h"
++#include "sgxinfo.h"
++
++#define               SGX_HOSTPORT_PRESENT                    0x00000001
++
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST               (1 << 0)        
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE      (1 << 1)        
++#define PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE     (1 << 2)        
++#define PVRSRV_USSE_EDM_POWMAN_NO_WORK                                                (1 << 3)        
++
++#define PVRSRV_USSE_EDM_INTERRUPT_HWR                 (1 << 0)        
++#define PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER        (1 << 1)        
++
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST     0x01    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST     0x02    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE       0x04    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD                0x10    
++#define PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT                0x20    
++
++
++
++
++
++
++typedef struct _SGX_TIMING_INFORMATION_
++{
++      IMG_UINT32                      ui32CoreClockSpeed;
++      IMG_UINT32                      ui32HWRecoveryFreq;
++      IMG_UINT32                      ui32ActivePowManLatencyms;
++      IMG_UINT32                      ui32uKernelFreq;
++} SGX_TIMING_INFORMATION;
++
++typedef struct _SGX_DEVICE_MAP_
++{     
++      IMG_UINT32                      ui32Flags;
++
++      
++      IMG_SYS_PHYADDR         sRegsSysPBase;
++      IMG_DEV_PHYADDR         sRegsDevPBase;
++      IMG_CPU_PHYADDR         sRegsCpuPBase;
++      IMG_UINT32                      ui32RegsSize;
++      
++      
++      IMG_SYS_PHYADDR         sSPSysPBase;
++      IMG_DEV_PHYADDR         sSPDevPBase;
++      IMG_CPU_PHYADDR         sSPCpuPBase;
++      IMG_UINT32                      ui32SPSize;
++
++
++      
++      IMG_SYS_PHYADDR         sLocalMemSysPBase;
++      IMG_DEV_PHYADDR         sLocalMemDevPBase;
++      IMG_CPU_PHYADDR         sLocalMemCpuPBase;
++      IMG_UINT32                      ui32LocalMemSize;
++
++      
++      IMG_UINT32                      ui32IRQ;
++
++      
++      SGX_TIMING_INFORMATION sTimingInfo;
++} SGX_DEVICE_MAP;
++
++
++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
++struct _PVRSRV_STUB_PBDESC_
++{
++      IMG_UINT32              ui32RefCount;
++      IMG_UINT32              ui32TotalPBSize;
++      PVRSRV_KERNEL_MEM_INFO  *psSharedPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psHWPBDescKernelMemInfo;
++      PVRSRV_KERNEL_MEM_INFO  **ppsSubKernelMemInfos;
++      IMG_UINT32              ui32SubKernelMemInfosCount;
++      IMG_HANDLE              hDevCookie;
++      PVRSRV_KERNEL_MEM_INFO  *psBlockKernelMemInfo;
++      PVRSRV_STUB_PBDESC      *psNext;
++};
++
++typedef struct _PVRSRV_SGX_CCB_INFO_
++{
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo;                  
++      PVRSRV_KERNEL_MEM_INFO  *psCCBCtlMemInfo;               
++      PVRSRV_SGX_COMMAND              *psCommands;                    
++      IMG_UINT32                              *pui32WriteOffset;              
++      volatile IMG_UINT32             *pui32ReadOffset;               
++#if defined(PDUMP)
++      IMG_UINT32                              ui32CCBDumpWOff;                
++#endif
++} PVRSRV_SGX_CCB_INFO;
++
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++IMG_VOID SGXOSTimer(IMG_VOID *pvData);
++
++IMG_VOID ResetPBs(PVRSRV_SGXDEV_INFO  *psDevInfo);
++#if defined(NO_HARDWARE)
++static INLINE IMG_VOID NoHardwareGenerateEvent(PVRSRV_SGXDEV_INFO             *psDevInfo,
++                                                                                              IMG_UINT32 ui32StatusRegister,
++                                                                                              IMG_UINT32 ui32StatusValue,
++                                                                                              IMG_UINT32 ui32StatusMask)
++{
++      IMG_UINT32 ui32RegVal;
++
++      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister);
++
++      ui32RegVal &= ~ui32StatusMask;
++      ui32RegVal |= (ui32StatusValue & ui32StatusMask);
++
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32StatusRegister, ui32RegVal);
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxinit.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1809 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "sgxmmu.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sgxconfig.h"
++#include "sysconfig.h"
++#include "pvr_bridge_km.h"
++
++#include "pdump_km.h"
++#include "ra.h"
++#include "mmu.h"
++#include "handle.h"
++#include "perproc.h"
++
++#ifdef        SGX_FEATURE_2D_HARDWARE
++#include "sgx2dcore.h"
++#endif
++
++#include "sgxutils.h"
++
++#if defined (SGX_FEATURE_2D_HARDWARE)
++#define       SGX_USING_CMD_PROC_LIST
++#endif
++
++IMG_BOOL SGX_ISRHandler(IMG_VOID *pvData);
++IMG_VOID SGXScheduleProcessQueues(IMG_VOID *pvData);
++
++IMG_UINT32 gui32EventStatusServicesByISR = 0;
++
++static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
++                                               IMG_UINT32                      ui32PDUMPFlags);
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                 IMG_BOOL                             bHardwareRecovery);
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie);
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++#define SGX_BIF_DIR_LIST_INDEX_EDM    15
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE15
++#else
++#define SGX_BIF_DIR_LIST_REG_EDM      EUR_CR_BIF_DIR_LIST_BASE0
++#endif
++
++static IMG_VOID SGXCommandComplete(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      SGXScheduleProcessQueues(psDeviceNode);
++}
++
++static IMG_UINT32 DeinitDevInfo(PVRSRV_SGXDEV_INFO *psDevInfo)
++{
++      if (psDevInfo->psKernelCCBInfo != IMG_NULL)
++      {
++              
++
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_SGX_CCB_INFO), psDevInfo->psKernelCCBInfo, IMG_NULL);
++      }
++
++      return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR InitDevInfo(PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                                              PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                              SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++      PVRSRV_ERROR            eError;
++
++      PVRSRV_SGX_CCB_INFO     *psKernelCCBInfo = IMG_NULL;
++
++      PVR_UNREFERENCED_PARAMETER(psPerProc);
++      psDevInfo->sScripts = psInitInfo->sScripts;
++
++      psDevInfo->psKernelCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBMemInfo;
++      psDevInfo->psKernelCCB = (PVRSRV_SGX_KERNEL_CCB *) psDevInfo->psKernelCCBMemInfo->pvLinAddrKM;
++
++      psDevInfo->psKernelCCBCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBCtlMemInfo;
++      psDevInfo->psKernelCCBCtl = (PVRSRV_SGX_CCB_CTL *) psDevInfo->psKernelCCBCtlMemInfo->pvLinAddrKM;
++
++      psDevInfo->psKernelCCBEventKickerMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelCCBEventKickerMemInfo;
++      psDevInfo->pui32KernelCCBEventKicker = (IMG_UINT32 *)psDevInfo->psKernelCCBEventKickerMemInfo->pvLinAddrKM;
++
++      psDevInfo->psKernelSGXHostCtlMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelSGXHostCtlMemInfo;
++      psDevInfo->psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++
++#if defined(SGX_SUPPORT_HWPROFILING)
++      psDevInfo->psKernelHWProfilingMemInfo = (PVRSRV_KERNEL_MEM_INFO *)psInitInfo->hKernelHWProfilingMemInfo;
++#endif
++
++      
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, 
++                                              sizeof(PVRSRV_SGX_CCB_INFO),
++                                              (IMG_VOID **)&psKernelCCBInfo, 0);
++      if (eError != PVRSRV_OK)        
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to alloc memory"));
++              goto failed_allockernelccb;
++      }
++
++
++      OSMemSet(psKernelCCBInfo, 0, sizeof(PVRSRV_SGX_CCB_INFO));
++      psKernelCCBInfo->psCCBMemInfo           = psDevInfo->psKernelCCBMemInfo;
++      psKernelCCBInfo->psCCBCtlMemInfo        = psDevInfo->psKernelCCBCtlMemInfo;
++      psKernelCCBInfo->psCommands                     = psDevInfo->psKernelCCB->asCommands;
++      psKernelCCBInfo->pui32WriteOffset       = &psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++      psKernelCCBInfo->pui32ReadOffset        = &psDevInfo->psKernelCCBCtl->ui32ReadOffset;
++      psDevInfo->psKernelCCBInfo = psKernelCCBInfo;
++
++      
++
++      psDevInfo->ui32TAKickAddress = psInitInfo->ui32TAKickAddress;
++
++      
++
++      psDevInfo->ui32VideoHandlerAddress = psInitInfo->ui32VideoHandlerAddress;
++
++      psDevInfo->bForcePTOff = IMG_FALSE;
++      psDevInfo->ui32RegFlags = 0;
++      psDevInfo->ui32CacheControl = psInitInfo->ui32CacheControl;
++
++      psDevInfo->ui32EDMTaskReg0 = psInitInfo->ui32EDMTaskReg0;
++      psDevInfo->ui32EDMTaskReg1 = psInitInfo->ui32EDMTaskReg1;
++      psDevInfo->ui32ClockGateMask = psInitInfo->ui32ClockGateMask;   
++
++
++      
++      OSMemCopy(&psDevInfo->asSGXDevData,  &psInitInfo->asInitDevData, sizeof(psDevInfo->asSGXDevData));
++
++      return PVRSRV_OK;
++
++failed_allockernelccb:
++      DeinitDevInfo(psDevInfo);
++
++      return eError;
++}
++
++
++
++
++PVRSRV_ERROR SGXPrePowerState (IMG_HANDLE             hDevHandle, 
++                                                         PVR_POWER_STATE      eNewPowerState, 
++                                                         PVR_POWER_STATE      eCurrentPowerState)
++{
++      if (eNewPowerState != eCurrentPowerState)
++      {
++              PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++              PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++
++              
++
++
++              if (eNewPowerState == PVRSRV_POWER_STATE_D3)
++              {
++                      PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++                      #if defined (SGX_FEATURE_AUTOCLOCKGATING) && (!defined(NO_HARDWARE) || defined(PDUMP))
++                      IMG_UINT32 ui32ClockMask = psDevInfo->ui32ClockGateMask;
++                      #endif
++
++                      
++                      psSGXHostCtl->ui32PowManFlags |= PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST;
++
++                      
++                      PDUMPCOMMENT("TA/3D CCB Control - Trigger power down event on uKernel...");
++                      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++
++                      
++                      #if !defined(NO_HARDWARE)
++                      if (PollForValueKM((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags),
++                                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                                              MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                              WAIT_TRY_COUNT) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip power off failed."));
++                      }
++                      #endif
++
++                      #ifdef PDUMP
++                      
++                      PDUMPCOMMENT("TA/3D CCB Control - Wait for power down event on uKernel...");
++                      PDUMPMEMPOL(psDevInfo->psKernelSGXHostCtlMemInfo,
++                                              offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags),
++                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                              PVRSRV_USSE_EDM_POWMAN_POWEROFF_COMPLETE,
++                                              PDUMP_POLL_OPERATOR_EQUAL,
++                                              IMG_FALSE, IMG_FALSE,
++                                              MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++                      #endif
++
++                      SGXDeinitialise(psDevInfo);
++
++                      #if defined(SGX_FEATURE_AUTOCLOCKGATING)
++                      
++                      #if !defined(NO_HARDWARE)
++                      if (PollForValueKM((volatile IMG_UINT32 *)((IMG_UINT8*)psDevInfo->pvRegsBaseKM + EUR_CR_CLKGATESTATUS),
++                                                              0,
++                                                              ui32ClockMask,
++                                                              MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                              WAIT_TRY_COUNT) != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"Wait for chip idle failed."));
++                      }
++                      #endif
++                      PDUMPREGPOL(EUR_CR_CLKGATESTATUS, 0, ui32ClockMask);
++                      #endif
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXPostPowerState (IMG_HANDLE            hDevHandle, 
++                                                              PVR_POWER_STATE eNewPowerState, 
++                                                              PVR_POWER_STATE eCurrentPowerState)
++{
++      if (eNewPowerState != eCurrentPowerState)
++      {
++              PVRSRV_ERROR            eError;
++              PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++              PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++              SYS_DATA                        *psSysData;
++
++              
++
++              eError = SysAcquireData(&psSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      return eError;
++              }
++              
++              
++
++              if(eCurrentPowerState == PVRSRV_POWER_STATE_D3)
++              {
++                      PVRSRV_SGX_HOST_CTL *psSGXHostCtl = psDevInfo->psSGXHostCtl;
++                      
++                      psSGXHostCtl->ui32PowManFlags = 0;
++
++                      
++                      PDUMPCOMMENT("TA/3D CCB Control - Reset Power Manager flags");
++                      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32PowManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++
++                      eError = SGXInitialise(psDevInfo, IMG_FALSE);
++
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXPostPowerState: SGXInitialise failed"));
++                              return eError;
++                      }
++              }
++
++              PVR_DPF((PVR_DBG_WARNING,
++                              "SGXPostPowerState : SGX Power Transition from %d to %d OK",
++                              eCurrentPowerState, eNewPowerState));
++      }
++
++      return PVRSRV_OK;
++}
++
++#define       SCRIPT_DATA(pData, offset, type) (*((type *)(((char *)pData) + offset)))
++#define       SCRIPT_DATA_UI32(pData, offset) SCRIPT_DATA(pData, offset, IMG_UINT32)
++
++static PVRSRV_ERROR SGXRunScript(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_INIT_COMMAND *psScript, IMG_UINT32 ui32NumInitCommands)
++{
++      IMG_UINT32 ui32PC;
++      SGX_INIT_COMMAND *psComm;
++
++      for (ui32PC = 0, psComm = psScript;
++              ui32PC < ui32NumInitCommands;
++              ui32PC++, psComm++)
++      {
++              switch (psComm->eOp)
++              {
++                      case SGX_INIT_OP_WRITE_HW_REG:
++                      {
++                              OSWriteHWReg(psDevInfo->pvRegsBaseKM, psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++                              PDUMPREG(psComm->sWriteHWReg.ui32Offset, psComm->sWriteHWReg.ui32Value);
++                              break;
++                      }
++#if defined(PDUMP)
++                      case SGX_INIT_OP_PDUMP_HW_REG:
++                      {
++                              PDUMPREG(psComm->sPDumpHWReg.ui32Offset, psComm->sPDumpHWReg.ui32Value);
++                              break;
++                      }
++#endif
++                      case SGX_INIT_OP_HALT:
++                      {
++                              return PVRSRV_OK;
++                      }
++                      case SGX_INIT_OP_ILLEGAL:
++                      
++                      default:
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SGXRunScript: PC %d: Illegal command: %d", ui32PC, psComm->eOp));
++                              return PVRSRV_ERROR_GENERIC;
++                      }
++              }
++
++      }
++
++      return PVRSRV_ERROR_GENERIC;;
++}
++
++PVRSRV_ERROR SGXInitialise(PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                 IMG_BOOL                             bHardwareRecovery)
++{
++      PVRSRV_ERROR            eError;
++      IMG_UINT32                      ui32ReadOffset, ui32WriteOffset;
++
++      
++      ResetSGX(psDevInfo, PDUMP_FLAGS_CONTINUOUS);
++
++      
++      *psDevInfo->pui32KernelCCBEventKicker = 0;
++#if defined(PDUMP)
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0,
++                       sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS,
++                       MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++#endif 
++
++      
++
++
++      psDevInfo->psSGXHostCtl->sTAHWPBDesc.uiAddr = 0;
++      psDevInfo->psSGXHostCtl->s3DHWPBDesc.uiAddr = 0;
++#if defined(PDUMP)
++      PDUMPCOMMENT(" CCB Control - Reset HW PBDesc records");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++                       offsetof(PVRSRV_SGX_HOST_CTL, sTAHWPBDesc), sizeof(IMG_DEV_VIRTADDR),
++                       PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelSGXHostCtlMemInfo,
++                       offsetof(PVRSRV_SGX_HOST_CTL, s3DHWPBDesc), sizeof(IMG_DEV_VIRTADDR),
++                       PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelSGXHostCtlMemInfo));
++#endif 
++
++      eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asInitCommands, SGX_MAX_INIT_COMMANDS);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXInitialise: SGXRunScript failed (%d)", eError));
++              return (PVRSRV_ERROR_GENERIC);
++      }
++
++      if (bHardwareRecovery)
++      {
++              PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++              
++              if (PollForValueKM((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32InterruptClearFlags),
++                                                 0,
++                                                 PVRSRV_USSE_EDM_INTERRUPT_HWR,
++                                                 MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                                 WAIT_TRY_COUNT) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGXEDM: Wait for uKernel HW Recovery failed"));
++              }
++      }
++
++      
++
++
++      for (ui32ReadOffset = psDevInfo->psKernelCCBCtl->ui32ReadOffset,
++                       ui32WriteOffset = psDevInfo->psKernelCCBCtl->ui32WriteOffset;
++               ui32ReadOffset != ui32WriteOffset;
++               ui32ReadOffset = (ui32ReadOffset + 1) & 0xFF)
++      {
++              *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK);
++      }
++
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR SGXDeinitialise(IMG_HANDLE hDevCookie)
++
++{
++      PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO *) hDevCookie;
++      PVRSRV_ERROR            eError;
++
++      
++      if (psDevInfo->pvRegsBaseKM == IMG_NULL)
++      {
++              return PVRSRV_OK;
++      }
++
++      eError = SGXRunScript(psDevInfo, psDevInfo->sScripts.asDeinitCommands, SGX_MAX_DEINIT_COMMANDS);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXDeinitialise: SGXRunScript failed (%d)", eError));
++              return (PVRSRV_ERROR_GENERIC);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++static IMG_VOID ResetSGXSleep(PVRSRV_SGXDEV_INFO      *psDevInfo,
++                                                        IMG_UINT32                    ui32PDUMPFlags,
++                                                        IMG_BOOL                              bPDump)
++{
++#if !defined(PDUMP)
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      
++      OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);
++      if (bPDump)
++      {
++              PDUMPIDLWITHFLAGS(1000, ui32PDUMPFlags);
++      }
++}
++
++
++static IMG_VOID ResetSGX(PVRSRV_SGXDEV_INFO   *psDevInfo,
++                                               IMG_UINT32                      ui32PDUMPFlags)
++{
++      IMG_UINT32 ui32RegVal;
++
++      const IMG_UINT32 ui32SoftResetRegVal =
++                                      #ifdef EUR_CR_SOFT_RESET_TWOD_RESET_MASK
++                                      EUR_CR_SOFT_RESET_TWOD_RESET_MASK       |
++                                      #endif
++                                      EUR_CR_SOFT_RESET_DPM_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TA_RESET_MASK         |
++                                      EUR_CR_SOFT_RESET_USE_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_ISP_RESET_MASK        |
++                                      EUR_CR_SOFT_RESET_TSP_RESET_MASK;
++
++      const IMG_UINT32 ui32BifInvalDCVal = EUR_CR_BIF_CTRL_INVALDC_MASK;
++
++      const IMG_UINT32 ui32BifFaultMask =
++                                              EUR_CR_BIF_INT_STAT_FAULT_MASK;
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      IMG_UINT32                      ui32BIFCtrl;
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      IMG_UINT32                      ui32BIFMemArb;
++#endif 
++#endif 
++
++#ifndef PDUMP
++      PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
++#endif 
++
++      psDevInfo->ui32NumResets++;
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");
++
++#if defined(FIX_HW_BRN_23944)
++      
++      ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++      if (ui32RegVal & ui32BifFaultMask)
++      {
++              
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++              ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      }
++#endif 
++
++      
++      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++      
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
++
++#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
++      
++
++      ui32BIFMemArb   = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
++                                        (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
++                                        (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32BIFMemArb, ui32PDUMPFlags);
++#endif 
++#endif 
++
++
++      
++
++
++
++
++      ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++      
++      ui32RegVal = ui32SoftResetRegVal;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++
++      
++
++      for (;;)
++      {
++              IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
++              IMG_DEV_VIRTADDR sBifFault;
++              IMG_UINT32 ui32PDIndex, ui32PTIndex;
++
++              if ((ui32BifIntStat & ui32BifFaultMask) == 0)
++              {
++                      break;
++              }
++              
++              
++
++
++              
++
++
++              sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
++              PVR_DPF((PVR_DBG_WARNING, "ResetSGX: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
++              ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
++              ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;
++
++              
++              ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;
++
++              
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
++              ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);
++
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              ui32RegVal = ui32SoftResetRegVal;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++              ui32RegVal = 0;
++              OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++              ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);
++
++              
++              psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
++              psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
++      }
++
++
++      
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      ui32BIFCtrl = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
++#endif
++#if defined(FIX_HW_BRN_23410)
++      
++      ui32BIFCtrl |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
++#endif
++
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32BIFCtrl);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32BIFCtrl, ui32PDUMPFlags);
++#endif 
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr);
++      PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM, psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
++#endif
++      
++#if !defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      
++      ui32RegVal = ui32SoftResetRegVal | EUR_CR_SOFT_RESET_BIF_RESET_MASK;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      ui32RegVal = ui32SoftResetRegVal;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++#endif 
++
++      
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32BifInvalDCVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32BifInvalDCVal, ui32PDUMPFlags);
++
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
++      
++      PVR_DPF((PVR_DBG_WARNING,"Soft Reset of SGX"));
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      
++      ui32RegVal = 0;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
++      PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
++
++      
++      ResetSGXSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
++
++      PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
++}
++
++static PVRSRV_ERROR DevInitSGXPart1 (IMG_VOID *pvDeviceNode)
++{
++      PVRSRV_SGXDEV_INFO      *psDevInfo;     
++      IMG_HANDLE              hKernelDevMemContext;
++      IMG_DEV_PHYADDR         sPDDevPAddr;
++      IMG_UINT32              i;
++      PVRSRV_DEVICE_NODE  *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeap;
++      IMG_HANDLE          hDevInfoOSMemHandle = (IMG_HANDLE)IMG_NULL;
++      PVRSRV_ERROR            eError;
++
++      PDUMPCOMMENT("SGX Initialisation Part 1");
++
++      
++      PDUMPCOMMENT("SGX Core Version Information: %s", SGX_CORE_FRIENDLY_NAME);
++#ifdef SGX_CORE_REV
++      PDUMPCOMMENT("SGX Core Revision Information: %d", SGX_CORE_REV);
++#else
++      PDUMPCOMMENT("SGX Core Revision Information: head rtl");
++#endif        
++
++      
++      if(OSAllocPages(PVRSRV_OS_PAGEABLE_HEAP|PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_CACHED,
++                                      sizeof(PVRSRV_SGXDEV_INFO),
++                                      (IMG_VOID **)&psDevInfo,
++                                      &hDevInfoOSMemHandle) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for DevInfo"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++      OSMemSet (psDevInfo, 0, sizeof(PVRSRV_SGXDEV_INFO));
++
++      
++      psDevInfo->eDeviceType          = DEV_DEVICE_TYPE;
++      psDevInfo->eDeviceClass         = DEV_DEVICE_CLASS;
++
++      
++      psDeviceNode->pvDevice = (IMG_PVOID)psDevInfo;
++      psDeviceNode->hDeviceOSMemHandle = hDevInfoOSMemHandle;
++      
++      
++      psDevInfo->pvDeviceMemoryHeap = (IMG_VOID*)psDeviceMemoryHeap;
++
++      
++      hKernelDevMemContext = BM_CreateContext(psDeviceNode,
++                                                                                      &sPDDevPAddr,
++                                                                                      IMG_TRUE,
++                                                                                      IMG_NULL);
++
++      psDevInfo->sKernelPDDevPAddr = sPDDevPAddr;
++
++      
++      for(i=0; i<psDeviceNode->sDevMemoryInfo.ui32HeapCount; i++)
++      {
++              IMG_HANDLE hDevMemHeap;
++
++              switch(psDeviceMemoryHeap[i].DevMemHeapType)
++              {
++                      case DEVICE_MEMORY_HEAP_KERNEL:
++                      case DEVICE_MEMORY_HEAP_SHARED:
++                      case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                      {
++                              hDevMemHeap = BM_CreateHeap (hKernelDevMemContext,
++                                                                                              &psDeviceMemoryHeap[i]);
++                              
++
++
++                              psDeviceMemoryHeap[i].hDevMemHeap = hDevMemHeap;
++                              break;
++                      }
++              }
++      }
++      
++      eError = MMU_BIFResetPDAlloc(psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGX : Failed to alloc memory for BIF reset"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       sizeof(PVRSRV_EVENTOBJECT) , 
++                                       (IMG_VOID **)&psDevInfo->psSGXEventObject, 0) != PVRSRV_OK)    
++      {
++              
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to alloc memory for event object"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++
++      if(OSEventObjectCreate("PVRSRV_EVENTOBJECT_SGX", psDevInfo->psSGXEventObject) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart1 : Failed to create event object"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      
++      }
++#endif 
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInfoForSrvinitKM(IMG_HANDLE hDevHandle, SGX_BRIDGE_INFO_FOR_SRVINIT *psInitInfo)
++{
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++      PVRSRV_ERROR            eError;
++      SGX_DEVICE_MAP          *psSGXDeviceMap;
++      SGX_TIMING_INFORMATION* psSGXTimingInfo;
++
++      PDUMPCOMMENT("SGXGetInfoForSrvinit");
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++      psInitInfo->sPDDevPAddr = psDevInfo->sKernelPDDevPAddr;
++
++      eError = PVRSRVGetDeviceMemHeapsKM(hDevHandle, &psInitInfo->asHeapInfo[0]);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: PVRSRVGetDeviceMemHeapsKM failed (%d)", eError));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX, 
++                                                                      (IMG_VOID**)&psSGXDeviceMap);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXGetInfoForSrvinit: Failed to get device memory map!"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      psSGXTimingInfo = & psSGXDeviceMap->sTimingInfo;
++      
++      
++      psDevInfo->ui32CoreClockSpeed = psSGXTimingInfo->ui32CoreClockSpeed;
++      
++      
++      psInitInfo->ui32uKernelTimerClock = psSGXTimingInfo->ui32CoreClockSpeed / psSGXTimingInfo->ui32uKernelFreq;
++#if defined(SUPPORT_HW_RECOVERY)
++      psInitInfo->ui32HWRecoverySampleRate = psSGXTimingInfo->ui32uKernelFreq / psSGXTimingInfo->ui32HWRecoveryFreq;
++#endif 
++
++      psInitInfo->ui32ActivePowManSampleRate =
++              psSGXTimingInfo->ui32uKernelFreq * psSGXTimingInfo->ui32ActivePowManLatencyms / 1000;
++      
++
++
++
++
++
++
++
++      
++      psInitInfo->ui32ActivePowManSampleRate += 2;
++      
++      return eError;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR DevInitSGXPart2KM (PVRSRV_PER_PROCESS_DATA *psPerProc,
++                                IMG_HANDLE hDevHandle,
++                                SGX_BRIDGE_INIT_INFO *psInitInfo)
++{
++#if defined(SGX_USING_CMD_PROC_LIST)
++      PFN_CMD_PROC            pfnCmdProcList[SGX_COMMAND_COUNT];
++      IMG_UINT32              ui32SyncCountList[SGX_COMMAND_COUNT][2];
++#endif
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      PVRSRV_SGXDEV_INFO      *psDevInfo;
++      PVRSRV_ERROR            eError;
++      SGX_DEVICE_MAP          *psSGXDeviceMap;
++      PVR_POWER_STATE         eDefaultPowerState;
++
++      PDUMPCOMMENT("SGX Initialisation Part 2");
++
++      psDeviceNode = (PVRSRV_DEVICE_NODE *)hDevHandle;
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++      
++
++      eError = InitDevInfo(psPerProc, psDeviceNode, psInitInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to load EDM program"));
++              goto failed_init_dev_info;
++      }
++
++      
++#ifdef SGX_FEATURE_2D_HARDWARE
++      eError = OSCreateResource(&psDevInfo->s2DSlaveportResource);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to create resource !"));
++              return PVRSRV_ERROR_INIT_FAILURE;
++      }
++#endif
++
++      eError = SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE_SGX,
++                                                                      (IMG_VOID**)&psSGXDeviceMap);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to get device memory map!"));
++              return PVRSRV_ERROR_INIT_FAILURE;
++      }
++
++      
++      psDevInfo->pvRegsBaseKM = OSMapPhysToLin(psSGXDeviceMap->sRegsCpuPBase,
++                                                                                 psSGXDeviceMap->ui32RegsSize,
++                                                                                 PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                                                                 IMG_NULL);
++      if (!psDevInfo->pvRegsBaseKM)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map in regs\n"));
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++      psDevInfo->ui32RegSize = psSGXDeviceMap->ui32RegsSize;
++      psDevInfo->sRegsPhysBase = psSGXDeviceMap->sRegsSysPBase;
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      psDevInfo->s2DSlavePortKM.pvData = OSMapPhysToLin (psSGXDeviceMap->sSPCpuPBase,
++                                                                                                              psSGXDeviceMap->ui32SPSize,
++                                                                                                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                                                                                              IMG_NULL);
++
++              
++      if (!psDevInfo->s2DSlavePortKM.pvData)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed to map 2D Slave port region\n"));
++              return PVRSRV_ERROR_BAD_MAPPING;
++      }
++      psDevInfo->s2DSlavePortKM.ui32DataRange = psSGXDeviceMap->ui32SPSize;
++      psDevInfo->s2DSlavePortKM.sPhysBase = psSGXDeviceMap->sSPSysPBase;
++#endif
++
++
++#if defined (SYS_USING_INTERRUPTS)
++
++      
++      psDeviceNode->pvISRData = psDeviceNode;
++      
++      PVR_ASSERT(psDeviceNode->pfnDeviceISR == SGX_ISRHandler);
++
++#endif 
++
++      
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      psDevInfo->psSGXHostCtl->ui32PowManFlags |= PVRSRV_USSE_EDM_POWMAN_NO_WORK;
++      eDefaultPowerState = PVRSRV_POWER_STATE_D3;
++#else 
++      eDefaultPowerState = PVRSRV_POWER_STATE_D0;
++#endif 
++      eError = PVRSRVRegisterPowerDevice (psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                              SGXPrePowerState,
++                                                                              SGXPostPowerState,
++                                                                              (IMG_HANDLE)psDeviceNode,
++                                                                              PVRSRV_POWER_STATE_D3,
++                                                                              eDefaultPowerState);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: failed to register device with power manager"));
++              return eError;
++      }
++
++#if defined(SGX_USING_CMD_PROC_LIST)
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      pfnCmdProcList[SGX_2D_BLT_COMMAND] = SGX2DProcessBlit;
++      ui32SyncCountList[SGX_2D_BLT_COMMAND][0] = 1;   
++      ui32SyncCountList[SGX_2D_BLT_COMMAND][1] = PVRSRV_MAX_BLT_SRC_SYNCS;
++#endif
++      eError = PVRSRVRegisterCmdProcListKM(psDeviceNode->sDevId.ui32DeviceIndex, &pfnCmdProcList[0], ui32SyncCountList, SGX_COMMAND_COUNT);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: PVRSRVRegisterCmdProcList failed"));
++              return eError;
++      }
++#endif 
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      eError = SGX2DInit(psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: SGX2DInit failed"));
++              return eError;
++      }
++#endif
++
++
++
++      
++
++      OSMemSet(psDevInfo->psKernelCCB, 0, sizeof(PVRSRV_SGX_KERNEL_CCB));
++      OSMemSet(psDevInfo->psKernelCCBCtl, 0, sizeof(PVRSRV_SGX_CCB_CTL));
++      OSMemSet(psDevInfo->pui32KernelCCBEventKicker, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker));
++      PDUMPCOMMENT("Kernel CCB");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBMemInfo, 0, sizeof(PVRSRV_SGX_KERNEL_CCB), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBMemInfo));
++      PDUMPCOMMENT("Kernel CCB Control");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBCtlMemInfo, 0, sizeof(PVRSRV_SGX_CCB_CTL), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBCtlMemInfo));
++      PDUMPCOMMENT("Kernel CCB Event Kicker");
++      PDUMPMEM(IMG_NULL, psDevInfo->psKernelCCBEventKickerMemInfo, 0, sizeof(*psDevInfo->pui32KernelCCBEventKicker), PDUMP_FLAGS_CONTINUOUS, MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++
++
++      
++      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                               PVRSRV_POWER_Unspecified,
++                                                                               KERNEL_ID, IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevInitSGXPart2KM: Failed PVRSRVSetDevicePowerStateKM call"));
++              return eError;
++      }
++
++#if defined(SUPPORT_HW_RECOVERY)
++      {
++              SGX_TIMING_INFORMATION* psSGXTimingInfo = & psSGXDeviceMap->sTimingInfo;
++              
++              psDevInfo->hTimer = OSAddTimer(SGXOSTimer, psDeviceNode, 1000 * 50 / psSGXTimingInfo->ui32uKernelFreq);
++              if(psDevInfo->hTimer == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"OSAddTimer : Failed to register timer callback function"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++#endif
++
++      return PVRSRV_OK;
++
++failed_init_dev_info:
++      return eError;
++}
++
++static PVRSRV_ERROR DevDeInitSGX (IMG_VOID *pvDeviceNode)
++{
++      PVRSRV_DEVICE_NODE                      *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvDeviceNode;
++      PVRSRV_SGXDEV_INFO                      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      IMG_HANDLE                                      hDevInfoOSMemHandle = psDeviceNode->hDeviceOSMemHandle;
++      PVRSRV_ERROR                            eError = PVRSRV_ERROR_INVALID_PARAMS;
++      IMG_UINT32                                      ui32Heap;
++      DEVICE_MEMORY_HEAP_INFO         *psDeviceMemoryHeap;
++
++      if (!psDevInfo)
++      {
++              
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Null DevInfo"));
++              return PVRSRV_OK;
++      }
++
++#if defined(SUPPORT_HW_RECOVERY)
++      
++      if(psDevInfo->hTimer)
++      {
++              eError = OSRemoveTimer (psDevInfo->hTimer);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: Failed to remove timer"));
++                      return  eError;
++              }
++      }
++#endif
++
++      MMU_BIFResetPDFree(psDevInfo);
++
++      
++
++
++
++
++
++
++#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      eError = SGXDeinitialise((IMG_HANDLE)psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: SGXDeinitialise failed"));
++              return eError;
++      }
++#endif 
++
++
++
++      
++
++      DeinitDevInfo(psDevInfo);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      eError = SGX2DDeInit(psDevInfo);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: SGX2DDeInit failed"));
++              return eError;
++      }
++#endif
++
++#if defined(SGX_USING_CMD_PROC_LIST)
++      eError = PVRSRVRemoveCmdProcListKM(psDeviceNode->sDevId.ui32DeviceIndex, SGX_COMMAND_COUNT);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX: PVRSRVRemoveCmdProcList failed"));
++              return eError;
++      }
++#endif
++      
++      psDeviceMemoryHeap = (DEVICE_MEMORY_HEAP_INFO *)psDevInfo->pvDeviceMemoryHeap;
++      for(ui32Heap=0; ui32Heap<psDeviceNode->sDevMemoryInfo.ui32HeapCount; ui32Heap++)
++      {
++              switch(psDeviceMemoryHeap[ui32Heap].DevMemHeapType)
++              {
++                      case DEVICE_MEMORY_HEAP_KERNEL:
++                      case DEVICE_MEMORY_HEAP_SHARED:
++                      case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
++                      {
++                              if (psDeviceMemoryHeap[ui32Heap].hDevMemHeap != IMG_NULL)
++                              {
++                                      BM_DestroyHeap(psDeviceMemoryHeap[ui32Heap].hDevMemHeap);
++                              }
++                              break;
++                      }
++              }
++      }
++
++      
++      eError = BM_DestroyContext(psDeviceNode->sDevMemoryInfo.pBMKernelContext, IMG_TRUE, IMG_FALSE, IMG_NULL);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"DevDeInitSGX : Failed to destroy kernel context"));
++              return eError;
++      }
++
++      
++      eError = PVRSRVRemovePowerDevice (((PVRSRV_DEVICE_NODE*)pvDeviceNode)->sDevId.ui32DeviceIndex);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++
++#ifdef SGX_FEATURE_2D_HARDWARE
++      eError = OSDestroyResource(&psDevInfo->s2DSlaveportResource);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif
++
++      
++      if (psDevInfo->pvRegsBaseKM != IMG_NULL)
++      {
++              OSUnMapPhysToLin(psDevInfo->pvRegsBaseKM,
++                                               psDevInfo->ui32RegSize,
++                                               PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                               IMG_NULL);
++      }
++#ifdef SGX_FEATURE_2D_HARDWARE
++      
++      if (psDevInfo->s2DSlavePortKM.pvData != IMG_NULL)
++      {
++              OSUnMapPhysToLin(psDevInfo->s2DSlavePortKM.pvData, 
++                                         psDevInfo->s2DSlavePortKM.ui32DataRange,
++                                         PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                                         IMG_NULL);
++      }
++#endif 
++
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      
++      if(psDevInfo->psSGXEventObject)
++      {
++              OSEventObjectDestroy(psDevInfo->psSGXEventObject);
++              OSFreeMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                               sizeof(PVRSRV_EVENTOBJECT) , 
++                                               psDevInfo->psSGXEventObject, 0);
++      }
++#endif 
++      
++      
++      OSFreePages(PVRSRV_OS_PAGEABLE_HEAP|PVRSRV_HAP_MULTI_PROCESS,
++                              sizeof(PVRSRV_SGXDEV_INFO),
++                              psDevInfo,
++                              hDevInfoOSMemHandle);
++
++      if (psDeviceMemoryHeap != IMG_NULL)
++      {
++      
++              OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                              sizeof(DEVICE_MEMORY_HEAP_INFO) * psDeviceNode->sDevMemoryInfo.ui32HeapCount, 
++                              psDeviceMemoryHeap, 
++                              0);
++      }
++
++      return PVRSRV_OK;
++}
++
++
++
++
++IMG_VOID HWRecoveryResetSGX (PVRSRV_SGXDEV_INFO *psDevInfo,
++                                                       IMG_UINT32             ui32Component,
++                                                       IMG_UINT32                     ui32CallerID)
++{
++      PVRSRV_ERROR eError;
++
++      PVR_UNREFERENCED_PARAMETER(ui32Component);
++      PVR_UNREFERENCED_PARAMETER(ui32CallerID);
++      
++      
++      PVR_DPF((PVR_DBG_ERROR, "HWRecoveryResetSGX: SGX Hardware Recovery triggered"));
++      
++      
++      PDUMPSUSPEND();
++
++      
++      ResetPBs(psDevInfo);
++
++      
++      eError = SGXInitialise(psDevInfo, IMG_TRUE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"HWRecoveryResetSGX: SGXInitialise failed (%d)", eError));
++      }
++
++      
++      PDUMPRESUME();
++}
++
++
++IMG_VOID HWRecoveryResetSGXEDM (PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                                      IMG_UINT32                      ui32Component,
++                                                                      IMG_UINT32                      ui32CallerID)
++{
++      PVRSRV_ERROR            eError;
++      PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      SGX2DHWRecoveryStart(psDevInfo);
++#endif
++      
++
++      eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++      if(eError != PVRSRV_OK)
++      {
++              
++
++
++              PVR_DPF((PVR_DBG_WARNING,"HWRecoveryResetSGXEDM: Power transition in progress"));
++              return;
++      }
++
++      psSGXHostCtl->ui32InterruptClearFlags |= PVRSRV_USSE_EDM_INTERRUPT_HWR;
++
++      
++      HWRecoveryResetSGX(psDevInfo, ui32Component, ui32CallerID);
++
++      PVRSRVPowerUnlock(ui32CallerID);
++      
++      
++      SGXScheduleProcessQueues(psDeviceNode);
++      
++      
++      
++      PVRSRVProcessQueues(ui32CallerID, IMG_TRUE);
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      SGX2DHWRecoveryEnd(psDevInfo);
++#endif
++}
++
++#if defined(SUPPORT_HW_RECOVERY)
++IMG_VOID SGXOSTimer(IMG_VOID *pvData)
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++      PVRSRV_SGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++      static IMG_UINT32       ui32EDMTasks = 0;
++      static IMG_UINT32       ui32LockupCounter = 0; 
++      static IMG_UINT32       ui32NumResets = 0;
++      IMG_UINT32              ui32CurrentEDMTasks;
++      IMG_BOOL                bLockup = IMG_FALSE;
++      IMG_BOOL                bPoweredDown;
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      IMG_UINT32              ui322DCompletedBlits = 0;
++      IMG_BOOL                b2DCoreIsBusy;
++#endif
++
++      
++      psDevInfo->ui32TimeStamp++;
++
++      bPoweredDown = (IMG_BOOL)!SGXIsDevicePowered(psDeviceNode);
++
++      
++      
++      if (bPoweredDown)
++      {
++              ui32LockupCounter = 0;
++      }
++      else
++      {
++              
++              ui32CurrentEDMTasks = OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg0);
++              if (psDevInfo->ui32EDMTaskReg1 != 0)
++              {
++                      ui32CurrentEDMTasks ^= OSReadHWReg(psDevInfo->pvRegsBaseKM, psDevInfo->ui32EDMTaskReg1);
++              }
++              if ((ui32CurrentEDMTasks == ui32EDMTasks) &&
++                      (psDevInfo->ui32NumResets == ui32NumResets))
++              {
++                      ui32LockupCounter++;
++                      if (ui32LockupCounter == 3)
++                      {
++                              ui32LockupCounter = 0;
++                              PVR_DPF((PVR_DBG_ERROR, "SGXOSTimer() detected SGX lockup (0x%x tasks)", ui32EDMTasks));
++
++                              bLockup = IMG_TRUE;
++                      }
++              }
++              else
++              {
++                      ui32LockupCounter = 0;
++                      ui32EDMTasks = ui32CurrentEDMTasks;
++                      ui32NumResets = psDevInfo->ui32NumResets;
++              }
++      }
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      if (!bPoweredDown)
++      {
++              ui322DCompletedBlits = psDevInfo->ui322DCompletedBlits;
++              psDevInfo->ui322DCompletedBlits = SGX2DCompletedBlits(psDevInfo);
++      }
++
++      if (!bLockup && !bPoweredDown)
++      {
++              b2DCoreIsBusy = SGX2DIsBusy(psDevInfo);
++
++              if (b2DCoreIsBusy && ui322DCompletedBlits == psDevInfo->ui322DCompletedBlits)
++              {
++                      if (psDevInfo->b2DLockupSuspected)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXTimer() detects 2D lockup (%d blits completed)", psDevInfo->ui322DCompletedBlits));
++                              bLockup = IMG_TRUE;
++                              psDevInfo->b2DLockupSuspected = IMG_FALSE;
++                      }
++                      else
++                      {
++                              
++                              psDevInfo->b2DLockupSuspected = IMG_TRUE;
++                      }
++              }
++              else
++              {
++                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
++              }
++      }
++      else
++      {
++                      psDevInfo->b2DLockupSuspected = IMG_FALSE;
++      }
++#endif 
++
++      if (bLockup)
++      {
++              PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++              
++              
++              psSGXHostCtl->ui32HostDetectedLockups ++;
++
++              
++              HWRecoveryResetSGXEDM(psDeviceNode, 0, KERNEL_ID);
++      }
++}
++#endif 
++
++
++#if defined(SYS_USING_INTERRUPTS)
++
++
++IMG_BOOL SGX_ISRHandler (IMG_VOID *pvData)
++{
++      IMG_BOOL bInterruptProcessed = IMG_FALSE;
++
++      
++      {
++              IMG_UINT32 ui32EventStatus, ui32EventEnable;
++              IMG_UINT32 ui32EventClear = 0;
++              PVRSRV_DEVICE_NODE *psDeviceNode;
++              PVRSRV_SGXDEV_INFO *psDevInfo;
++
++              
++              if(pvData == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGX_ISRHandler: Invalid params\n"));                   
++                      return bInterruptProcessed;
++              }
++
++              psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++              psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++
++              ui32EventStatus = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
++              ui32EventEnable = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_ENABLE);
++
++              
++
++              gui32EventStatusServicesByISR = ui32EventStatus;
++
++              
++              ui32EventStatus &= ui32EventEnable;
++
++              if (ui32EventStatus & EUR_CR_EVENT_STATUS_SW_EVENT_MASK)
++              {
++                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK;
++              }
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++              if (ui32EventStatus & EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK)
++              {
++                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK;
++                      SGX2DHandle2DComplete(psDevInfo);
++              }
++#endif
++
++              if (ui32EventClear)
++              {
++                      bInterruptProcessed = IMG_TRUE;
++
++                      
++                      ui32EventClear |= EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK;
++
++                      
++                      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32EventClear);
++              }
++      }
++
++              return bInterruptProcessed;
++}
++
++
++IMG_VOID SGX_MISRHandler (IMG_VOID *pvData)
++{
++      PVRSRV_ERROR            eError = PVRSRV_OK;
++      PVRSRV_DEVICE_NODE      *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
++      PVRSRV_SGXDEV_INFO      *psDevInfo = (PVRSRV_SGXDEV_INFO*)psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL     *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psDevInfo->psSGXHostCtl;
++      
++      if ((psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR) &&
++              !(psSGXHostCtl->ui32InterruptClearFlags & PVRSRV_USSE_EDM_INTERRUPT_HWR))
++      {
++              HWRecoveryResetSGXEDM(psDeviceNode, 0, ISR_ID);
++      }
++
++      if ((eError == PVRSRV_OK) &&
++              (psSGXHostCtl->ui32InterruptFlags & PVRSRV_USSE_EDM_INTERRUPT_ACTIVE_POWER) &&
++              !(psSGXHostCtl->ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_POWEROFF_REQUEST))
++      {
++              
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++              {
++
++                      
++                      PDUMPSUSPEND();
++              
++                      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                                               PVRSRV_POWER_STATE_D3,
++                                                                                               ISR_ID, IMG_FALSE);
++                      if (eError == PVRSRV_OK)
++                      {
++                              if ((*(volatile IMG_UINT32 *)(&psSGXHostCtl->ui32PowManFlags)
++                                      & PVRSRV_USSE_EDM_POWMAN_POWEROFF_RESTART_IMMEDIATE) != 0)
++                              {
++                                      
++
++
++                                      psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++                              }
++                      }
++                      else if (eError == PVRSRV_ERROR_RETRY)
++                      {
++                              
++
++                              eError = PVRSRV_OK;
++                      }
++                      
++                      
++                      PDUMPRESUME();
++              }
++#endif 
++      }
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      if (psDevInfo->psSGXEventObject)
++      {
++              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
++              if(psEventObject->hOSEventKM)
++              {
++                      OSEventObjectSignal(psEventObject->hOSEventKM);
++              }
++      }
++
++#endif
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGX_MISRHandler error:%lu", eError));
++      }
++}
++#endif 
++
++
++PVRSRV_ERROR SGXRegisterDevice (PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      DEVICE_MEMORY_INFO *psDevMemoryInfo;
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++      IMG_BOOL bSharedPB = IMG_TRUE;
++
++      
++      psDeviceNode->sDevId.eDeviceType        = DEV_DEVICE_TYPE;
++      psDeviceNode->sDevId.eDeviceClass       = DEV_DEVICE_CLASS;
++
++      psDeviceNode->pfnInitDevice             = DevInitSGXPart1;
++      psDeviceNode->pfnDeInitDevice           = DevDeInitSGX;
++
++      
++
++      psDeviceNode->pfnMMUInitialise = MMU_Initialise;
++      psDeviceNode->pfnMMUFinalise = MMU_Finalise;
++      psDeviceNode->pfnMMUInsertHeap = MMU_InsertHeap;
++      psDeviceNode->pfnMMUCreate = MMU_Create;
++      psDeviceNode->pfnMMUDelete = MMU_Delete;
++      psDeviceNode->pfnMMUAlloc = MMU_Alloc;
++      psDeviceNode->pfnMMUFree = MMU_Free;
++      psDeviceNode->pfnMMUMapPages = MMU_MapPages;
++      psDeviceNode->pfnMMUMapShadow = MMU_MapShadow;
++      psDeviceNode->pfnMMUUnmapPages = MMU_UnmapPages;
++      psDeviceNode->pfnMMUMapScatter = MMU_MapScatter;
++      psDeviceNode->pfnMMUGetPhysPageAddr = MMU_GetPhysPageAddr;
++      psDeviceNode->pfnMMUGetPDDevPAddr = MMU_GetPDDevPAddr;
++
++#if defined (SYS_USING_INTERRUPTS)
++      
++
++      psDeviceNode->pfnDeviceISR = SGX_ISRHandler;
++      psDeviceNode->pfnDeviceMISR = SGX_MISRHandler;
++#endif
++
++      
++
++      psDeviceNode->pfnDeviceCommandComplete = SGXCommandComplete;
++      
++      
++
++      psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++      
++      psDevMemoryInfo->ui32AddressSpaceSizeLog2 = SGX_ADDRESS_SPACE_SIZE;
++
++      
++      psDevMemoryInfo->ui32Flags = 0;
++
++      
++      psDevMemoryInfo->ui32HeapCount = SGX_MAX_HEAP_ID;
++
++      
++      psDevMemoryInfo->ui32SyncHeapID = SGX_SYNCINFO_HEAP_ID;
++
++      
++      psDevMemoryInfo->ui32MappingHeapID = SGX_GENERAL_MAPPING_HEAP_ID;
++
++      
++      if(OSAllocMem( PVRSRV_OS_PAGEABLE_HEAP, 
++                                       sizeof(DEVICE_MEMORY_HEAP_INFO) * psDevMemoryInfo->ui32HeapCount, 
++                                       (IMG_VOID **)&psDevMemoryInfo->psDeviceMemoryHeap, 0) != PVRSRV_OK)    
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXRegisterDevice : Failed to alloc memory for DEVICE_MEMORY_HEAP_INFO"));
++              return (PVRSRV_ERROR_OUT_OF_MEMORY);
++      }
++      OSMemSet(psDevMemoryInfo->psDeviceMemoryHeap, 0, sizeof(DEVICE_MEMORY_HEAP_INFO) * psDevMemoryInfo->ui32HeapCount);
++      
++      psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++      
++
++
++      
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_GENERAL_HEAP_ID);
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].sDevVAddrBase.uiAddr = SGX_GENERAL_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32HeapSize = SGX_GENERAL_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszName = "General";
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].pszBSName = "General BS";
++      psDeviceMemoryHeap[SGX_GENERAL_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_TADATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_TADATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32HeapSize = SGX_TADATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++#if 0
++                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
++#endif
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszName = "TA Data";
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].pszBSName = "TA Data BS";
++      psDeviceMemoryHeap[SGX_TADATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_KERNEL_CODE_HEAP_ID);
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].sDevVAddrBase.uiAddr = SGX_KERNEL_CODE_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32HeapSize = SGX_KERNEL_CODE_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                      | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++#if 0
++                                                                                                                      | PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                      | PVRSRV_HAP_MULTI_PROCESS;
++#endif
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszName = "Kernel";
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].pszBSName = "Kernel BS";
++      psDeviceMemoryHeap[SGX_KERNEL_CODE_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++      
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_VIDEO_CODE_HEAP_ID);
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].sDevVAddrBase.uiAddr = SGX_VIDEO_CODE_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32HeapSize = SGX_VIDEO_CODE_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].pszName = "Video";
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].pszBSName = "Video BS";
++      psDeviceMemoryHeap[SGX_VIDEO_CODE_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED;
++
++      
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_KERNEL_VIDEO_DATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_KERNEL_VIDEO_DATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32HeapSize = SGX_KERNEL_VIDEO_DATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION |
++#if 0
++                                                                                                                              PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                              PVRSRV_HAP_MULTI_PROCESS;
++#endif
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].pszName = "KernelVideoData";
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].pszBSName = "KernelVideoData BS";
++      psDeviceMemoryHeap[SGX_KERNEL_VIDEO_DATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++      
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_PIXELSHADER_HEAP_ID);
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].sDevVAddrBase.uiAddr = SGX_PIXELSHADER_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32HeapSize = SGX_PIXELSHADER_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszName = "PixelShaderUSSE";
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].pszBSName = "PixelShaderUSSE BS";
++      psDeviceMemoryHeap[SGX_PIXELSHADER_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_VERTEXSHADER_HEAP_ID);
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].sDevVAddrBase.uiAddr = SGX_VERTEXSHADER_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32HeapSize = SGX_VERTEXSHADER_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszName = "VertexShaderUSSE";
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].pszBSName = "VertexShaderUSSE BS";
++      psDeviceMemoryHeap[SGX_VERTEXSHADER_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_PDSPIXEL_CODEDATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_PDSPIXEL_CODEDATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32HeapSize = SGX_PDSPIXEL_CODEDATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE 
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszName = "PDSPixelCodeData";
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].pszBSName = "PDSPixelCodeData BS";
++      psDeviceMemoryHeap[SGX_PDSPIXEL_CODEDATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_PDSVERTEX_CODEDATA_HEAP_ID);
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].sDevVAddrBase.uiAddr = SGX_PDSVERTEX_CODEDATA_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32HeapSize = SGX_PDSVERTEX_CODEDATA_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszName = "PDSVertexCodeData";
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].pszBSName = "PDSVertexCodeData BS";
++      psDeviceMemoryHeap[SGX_PDSVERTEX_CODEDATA_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_SYNCINFO_HEAP_ID);
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].sDevVAddrBase.uiAddr = SGX_SYNCINFO_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32HeapSize = SGX_SYNCINFO_HEAP_SIZE;
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszName = "CacheCoherent";
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].pszBSName = "CacheCoherent BS";
++#if defined(SGX535)
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#else
++      
++      psDeviceMemoryHeap[SGX_SYNCINFO_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif
++
++      
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapID = HEAP_ID(PVRSRV_DEVICE_TYPE_SGX, SGX_3DPARAMETERS_HEAP_ID);
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].sDevVAddrBase.uiAddr = SGX_3DPARAMETERS_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32HeapSize = SGX_3DPARAMETERS_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszName = "3DParameters";
++      psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].pszBSName = "3DParameters BS";
++
++
++      if(bSharedPB)
++      {
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++#if 0
++                                                                                                              | PVRSRV_HAP_KERNEL_ONLY;
++#else
++                                                                                                              | PVRSRV_HAP_MULTI_PROCESS;
++#endif
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++      }
++      else
++      {
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++              psDeviceMemoryHeap[SGX_3DPARAMETERS_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_PERCONTEXT;
++      }
++
++      
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX , SGX_GENERAL_MAPPING_HEAP_ID);
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].sDevVAddrBase.uiAddr = SGX_GENERAL_MAPPING_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32HeapSize = SGX_GENERAL_MAPPING_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszName = "GeneralMapping";
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].pszBSName = "GeneralMapping BS";
++
++      psDeviceMemoryHeap[SGX_GENERAL_MAPPING_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      
++
++      
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].ui32HeapID = HEAP_ID( PVRSRV_DEVICE_TYPE_SGX ,SGX_2D_HEAP_ID);
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].sDevVAddrBase.uiAddr = SGX_2D_HEAP_BASE;
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].ui32HeapSize = SGX_2D_HEAP_SIZE;
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].ui32Attribs = PVRSRV_HAP_WRITECOMBINE
++                                                                                                              | PVRSRV_MEM_RAM_BACKED_ALLOCATION
++                                                                                                              | PVRSRV_HAP_SINGLE_PROCESS;
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].pszName = "2D";
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].pszBSName = "2D BS";
++      
++      psDeviceMemoryHeap[SGX_2D_HEAP_ID].DevMemHeapType = DEVICE_MEMORY_HEAP_SHARED_EXPORTED;
++#endif 
++
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetClientInfoKM(IMG_HANDLE                                    hDevCookie,
++                                                              PVR3DIF4_CLIENT_INFO*           psClientInfo)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      
++
++      psDevInfo->ui32ClientRefCount++;
++#ifdef PDUMP
++      if(psDevInfo->ui32ClientRefCount == 1)
++      {
++              psDevInfo->psKernelCCBInfo->ui32CCBDumpWOff = 0;
++      }
++#endif
++      
++
++      psClientInfo->ui32ProcessID = OSGetCurrentProcessIDKM();
++#if defined(SGX_FEATURE_2D_HARDWARE)
++      psClientInfo->s2DSlavePort = psDevInfo->s2DSlavePortKM;
++#endif
++      psClientInfo->pvRegsBase = psDevInfo->pvRegsBaseKM;
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      if (psDevInfo->psSGXEventObject)
++      {
++              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
++              psClientInfo->hOSEventKM = psEventObject->hOSEventKM;
++      }
++      else
++      {
++              psClientInfo->hOSEventKM = IMG_NULL;
++      }
++#endif
++      
++      
++
++      OSMemCopy(&psClientInfo->asDevData, &psDevInfo->asSGXDevData, sizeof(psClientInfo->asDevData));
++
++      
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetMiscInfoKM(PVRSRV_SGXDEV_INFO *psDevInfo, SGX_MISC_INFO *psMiscInfo)
++{
++      PVR_UNREFERENCED_PARAMETER(psDevInfo);
++
++      switch(psMiscInfo->eRequest)
++      {
++              default:
++              {
++                      
++                      return PVRSRV_ERROR_INVALID_PARAMS;
++              }
++      }
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxkick.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,201 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#if defined (PDUMP)
++#include "sgxapi_km.h"
++#endif
++#include "sgx_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++
++#define       CCB_DATA_FROM_OFFSET(type, psCCBMemInfo, psCCBKick, offset) \
++      ((type *)(((char *)(psCCBMemInfo)->pvLinAddrKM) + \
++              (psCCBKick)->offset))
++
++#define       CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, offset) \
++              ((psCCBKick)->offset < (psCCBMemInfo)->ui32AllocSize)
++
++IMG_EXPORT
++PVRSRV_ERROR SGXDoKickKM(IMG_HANDLE hDevHandle, PVR3DIF4_CCB_KICK *psCCBKick)
++{
++      PVRSRV_ERROR eError;
++      PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++      PVRSRV_KERNEL_MEM_INFO  *psCCBMemInfo = (PVRSRV_KERNEL_MEM_INFO *) psCCBKick->hCCBKernelMemInfo;
++      IMG_UINT32 *pui32DstReadOpsPendingVal;
++      IMG_UINT32 *pui32DstWriteOpsPendingVal;
++      IMG_UINT32 i;
++
++
++#if defined(NO_HARDWARE)
++      pui32DstReadOpsPendingVal = IMG_NULL;
++      pui32DstWriteOpsPendingVal = IMG_NULL;
++#endif
++
++      if (psCCBKick->hDstKernelSyncInfo != IMG_NULL)
++      {
++              
++              if (!CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset) || !CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: ui32DstReadOpsPendingOffset or ui32DstWriteOpsPendingOffset out of range"));
++              }
++              else
++              {
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
++                              pui32DstReadOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstReadOpsPendingOffset);
++                              pui32DstWriteOpsPendingVal = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, ui32DstWriteOpsPendingOffset);
++
++                              *pui32DstReadOpsPendingVal = psSyncInfo->psSyncData->ui32ReadOpsPending;
++                              *pui32DstWriteOpsPendingVal = psSyncInfo->psSyncData->ui32WriteOpsPending++;
++              }
++
++      }
++
++      if (psCCBKick->ui32NumTAStatusVals != 0)
++      {
++              
++              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++              {
++                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
++                      {
++                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++
++                              *pui32TAStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui32TAStatusValueOffset[%d] out of range", i));
++                      }
++              }
++      }
++
++      if (psCCBKick->ui32Num3DStatusVals != 0)
++      {
++              
++              for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++              {
++                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
++                      {
++                              IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                              *pui323DStatusValue = psSyncInfo->psSyncData->ui32ReadOpsPending;
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: aui323DStatusValueOffset[%d] out of range", i));
++                      }
++              }
++      }
++
++      eError = SGXScheduleCCBCommandKM(hDevHandle, psCCBKick->eCommand, &psCCBKick->sCommand, KERNEL_ID);
++      if (eError == PVRSRV_ERROR_RETRY)
++      {
++              
++              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *) psCCBKick->hDstKernelSyncInfo;
++              psSyncInfo->psSyncData->ui32WriteOpsPending--;
++              return eError;
++      }
++      else if (PVRSRV_OK != eError)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXDoKickKM: SGXScheduleCCBCommandKM failed.")); 
++              return eError;
++      }
++
++
++#if defined(NO_HARDWARE)
++      if (psCCBKick->ui32NumTAStatusVals != 0)
++      {
++              
++              for (i = 0; i < psCCBKick->ui32NumTAStatusVals; i++)
++              {
++                      if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]))
++                      {
++                              IMG_UINT32 *pui32TAStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui32TAStatusValueOffset[i]);
++                              psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ahTAStatusSyncInfo[i];
++
++                              psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui32TAStatusValue;
++                      }
++              }
++      }
++      
++      if (psCCBKick->bTerminate)
++      {
++              if (psCCBKick->hUpdateDstKernelSyncInfo != IMG_NULL)
++              {
++                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->hUpdateDstKernelSyncInfo;
++                      psSyncInfo->psSyncData->ui32WriteOpsComplete = ((pui32DstWriteOpsPendingVal != IMG_NULL) ? *pui32DstWriteOpsPendingVal : psCCBKick->ui32WriteOpsPendingVal) + 1;
++              }
++
++              if (psCCBKick->ui32Num3DStatusVals != 0)
++              {
++                      
++                      for (i = 0; i < psCCBKick->ui32Num3DStatusVals; i++)
++                      {
++                              if (CCB_OFFSET_IS_VALID(psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]))
++                              {
++                                      IMG_UINT32 *pui323DStatusValue = CCB_DATA_FROM_OFFSET(IMG_UINT32, psCCBMemInfo, psCCBKick, aui323DStatusValueOffset[i]);
++                                      psSyncInfo = (PVRSRV_KERNEL_SYNC_INFO *)psCCBKick->ah3DStatusSyncInfo[i];
++
++                                      psSyncInfo->psSyncData->ui32ReadOpsComplete = *pui323DStatusValue;
++                              }
++                      }
++              }
++      }
++#endif
++
++      return eError;
++}
++
++
++IMG_VOID SGXScheduleProcessQueues(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVRSRV_ERROR                    eError;
++      PVRSRV_SGXDEV_INFO              *psDevInfo = psDeviceNode->pvDevice;
++      PVRSRV_SGX_HOST_CTL             *psHostCtl = psDevInfo->psKernelSGXHostCtlMemInfo->pvLinAddrKM;
++      IMG_UINT32                              ui32PowManFlags;
++      PVRSRV_SGX_COMMAND              sCommand = {0};
++
++      ui32PowManFlags = psHostCtl->ui32PowManFlags;
++      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++      {
++              
++              return;
++      }
++
++      sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_PROCESS_QUEUESCMD;
++      eError = SGXScheduleCCBCommandKM(psDeviceNode, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, ISR_ID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SGXScheduleProcessQueues failed to schedule CCB command: %lu", eError));
++      }
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxtransfer.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,58 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined(TRANSFER_QUEUE)
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxinfo.h"
++#include "sysconfig.h"
++#include "regpaths.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge.h"
++#include "sgx_bridge_km.h"
++#include "sgxinfokm.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++IMG_EXPORT PVRSRV_ERROR SGXSubmitTransferKM(IMG_HANDLE hDevHandle,
++                                                                                      IMG_DEV_VIRTADDR sHWRenderContextDevVAddr)
++                                          
++{
++      PVRSRV_SGX_COMMAND sCommand = {0};
++
++    sCommand.ui32Data[0] = PVRSRV_CCBFLAGS_TRANSFERCMD;
++    sCommand.ui32Data[1] = sHWRenderContextDevVAddr.uiAddr;
++      
++      return SGXScheduleCCBCommandKM(hDevHandle, PVRSRV_SGX_COMMAND_EDM_KICK, &sCommand, KERNEL_ID);  
++}
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,706 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <stddef.h>
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "buffer_manager.h"
++#include "sgxapi_km.h"
++#include "sgxinfo.h"
++#include "sgxinfokm.h"
++#include "sysconfig.h"
++#include "pdump_km.h"
++#include "mmu.h"
++#include "pvr_bridge_km.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "sgxutils.h"
++
++#ifdef __linux__
++#include <linux/tty.h>                        
++#else
++#include <stdio.h>
++#endif
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SGXAcquireKernelCCBSlot)
++#endif
++static INLINE PVRSRV_SGX_COMMAND * SGXAcquireKernelCCBSlot(PVRSRV_SGX_CCB_INFO *psCCB)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0;
++
++      
++      do
++      {
++              if(((*psCCB->pui32WriteOffset + 1) & 255) != *psCCB->pui32ReadOffset)
++              {
++                      return &psCCB->psCommands[*psCCB->pui32WriteOffset];
++              }
++              
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      
++      return IMG_NULL;
++}
++
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                       *psDeviceNode,
++                                                                       PVRSRV_SGX_COMMAND_TYPE        eCommandType,
++                                                                       PVRSRV_SGX_COMMAND                     *psCommandData,
++                                                                       IMG_UINT32                                     ui32CallerID)
++{
++      PVRSRV_SGX_CCB_INFO *psKernelCCB;
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      PVRSRV_SGX_COMMAND *psSGXCommand;
++#if defined(PDUMP)
++      IMG_VOID *pvDumpCommand;
++#endif
++
++      psDevInfo = (PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
++      psKernelCCB = psDevInfo->psKernelCCBInfo;
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (ui32CallerID == ISR_ID)
++      {
++              PDUMPSUSPEND();
++      }
++
++      
++      eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
++                                                                               PVRSRV_POWER_STATE_D0,
++                                                                               ui32CallerID,
++                                                                               IMG_TRUE);
++                                                                               
++      if (ui32CallerID == ISR_ID)
++      {
++              PDUMPRESUME();
++      }
++#else
++      
++      eError = PVRSRVPowerLock(ui32CallerID, IMG_FALSE);
++#endif 
++       
++      if (eError == PVRSRV_OK)
++      {
++              psDeviceNode->bReProcessDeviceCommandComplete = IMG_FALSE;
++      }
++      else
++      {
++              if (eError == PVRSRV_ERROR_RETRY)
++              {
++                      if (ui32CallerID == ISR_ID)
++                      {
++                              
++
++
++                              psDeviceNode->bReProcessDeviceCommandComplete = IMG_TRUE;
++                              eError = PVRSRV_OK;
++                      }
++                      else
++                      {
++                              
++
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM failed to acquire lock - "
++                                       "ui32CallerID:%lu eError:%lu", ui32CallerID, eError));
++              }
++
++              return eError;
++      }
++      
++      psSGXCommand = SGXAcquireKernelCCBSlot(psKernelCCB);
++
++      
++      if(!psSGXCommand)
++      {
++              eError = PVRSRV_ERROR_TIMEOUT;
++              goto Exit;
++      }
++      
++      
++      psCommandData->ui32Data[2] = psDevInfo->ui32CacheControl;
++      
++#if defined(PDUMP)
++      
++      psDevInfo->sPDContext.ui32CacheControl |= psDevInfo->ui32CacheControl;
++#endif
++
++      
++      psDevInfo->ui32CacheControl = 0;
++      
++      
++      *psSGXCommand = *psCommandData;
++      
++      switch(eCommandType)
++      {
++              case PVRSRV_SGX_COMMAND_EDM_KICK:
++                      psSGXCommand->ui32ServiceAddress = psDevInfo->ui32TAKickAddress;
++                      break;
++              case PVRSRV_SGX_COMMAND_VIDEO_KICK:
++                      psSGXCommand->ui32ServiceAddress = psDevInfo->ui32VideoHandlerAddress;
++                      break;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR,"SGXScheduleCCBCommandKM: Unknown command type: %d", eCommandType)) ;
++                      eError = PVRSRV_ERROR_GENERIC;
++                      goto Exit;
++      }
++
++#if defined(PDUMP)
++      if (ui32CallerID != ISR_ID)
++      {
++              
++              PDUMPCOMMENTWITHFLAGS(0, "Poll for space in the Kernel CCB\r\n");
++              PDUMPMEMPOL(psKernelCCB->psCCBCtlMemInfo, offsetof(PVRSRV_SGX_CCB_CTL, ui32ReadOffset), (psKernelCCB->ui32CCBDumpWOff + 1) & 0xff, 0xff, PDUMP_POLL_OPERATOR_NOTEQUAL, IMG_FALSE, IMG_FALSE, MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++
++              PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB command\r\n");
++              pvDumpCommand = (IMG_VOID *)((IMG_UINT8 *)psKernelCCB->psCCBMemInfo->pvLinAddrKM + (*psKernelCCB->pui32WriteOffset * sizeof(PVRSRV_SGX_COMMAND)));
++
++              PDUMPMEM(pvDumpCommand,
++                                      psKernelCCB->psCCBMemInfo,
++                                      psKernelCCB->ui32CCBDumpWOff * sizeof(PVRSRV_SGX_COMMAND),
++                                      sizeof(PVRSRV_SGX_COMMAND),
++                                      0,
++                                      MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++              
++              PDUMPMEM(&psDevInfo->sPDContext.ui32CacheControl,
++                                      psKernelCCB->psCCBMemInfo,
++                                      psKernelCCB->ui32CCBDumpWOff * sizeof(PVRSRV_SGX_COMMAND) +
++                                      offsetof(PVRSRV_SGX_COMMAND, ui32Data[2]),
++                                      sizeof(IMG_UINT32),
++                                      0,
++                                      MAKEUNIQUETAG(psKernelCCB->psCCBMemInfo));
++
++              if(PDumpIsCaptureFrameKM())
++              {
++                      
++                      psDevInfo->sPDContext.ui32CacheControl = 0;
++              }
++      }
++#endif
++
++      
++
++      *psKernelCCB->pui32WriteOffset = (*psKernelCCB->pui32WriteOffset + 1) & 255;
++      
++#if defined(PDUMP)
++      if (ui32CallerID != ISR_ID)
++      {
++              if(PDumpIsCaptureFrameKM())
++              {
++                      psKernelCCB->ui32CCBDumpWOff = (psKernelCCB->ui32CCBDumpWOff + 1) & 0xFF;
++              }
++
++              PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB write offset\r\n");
++              PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++                               psKernelCCB->psCCBCtlMemInfo,
++                               offsetof(PVRSRV_SGX_CCB_CTL, ui32WriteOffset),
++                               sizeof(IMG_UINT32),
++                               0,
++                               MAKEUNIQUETAG(psKernelCCB->psCCBCtlMemInfo));
++              PDUMPCOMMENTWITHFLAGS(0, "Kernel CCB event kicker\r\n");
++              PDUMPMEM(&psKernelCCB->ui32CCBDumpWOff,
++                               psDevInfo->psKernelCCBEventKickerMemInfo,
++                               0,
++                               sizeof(IMG_UINT32),
++                               0,
++                               MAKEUNIQUETAG(psDevInfo->psKernelCCBEventKickerMemInfo));
++              PDUMPCOMMENTWITHFLAGS(0, "Event kick\r\n");
++              PDUMPREGWITHFLAGS(EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK, 0);
++      }
++#endif
++
++      *psDevInfo->pui32KernelCCBEventKicker = (*psDevInfo->pui32KernelCCBEventKicker + 1) & 0xFF;
++      OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_KICK, EUR_CR_EVENT_KICK_NOW_MASK);
++
++#if defined(NO_HARDWARE)
++      
++      *psKernelCCB->pui32ReadOffset = (*psKernelCCB->pui32ReadOffset + 1) & 255;
++#endif
++
++Exit:
++      PVRSRVPowerUnlock(ui32CallerID);
++
++      return eError;
++}
++
++
++#if 0 
++PVRSRV_ERROR CreateCCB(PVRSRV_SGXDEV_INFO     *psSGXDevInfo,
++                                         IMG_UINT32                   ui32CCBSize,
++                                         IMG_UINT32                   ui32AllocGran,
++                                         IMG_UINT32                   ui32OverrunSize,
++                                         IMG_HANDLE                   hDevMemHeap,
++                                         PVRSRV_SGX_CCB               **ppsCCB)
++{
++      PVRSRV_SGX_CCB  *psCCB;
++
++      PVR_UNREFERENCED_PARAMETER(psSGXDevInfo);
++
++      psCCB = IMG_NULL;
++
++      if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                 sizeof(PVRSRV_SGX_CCB),
++                                 (IMG_VOID **)&psCCB,
++                                 IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: psCCB alloc failed"));
++
++              return PVRSRV_ERROR_OUT_OF_MEMORY;
++      }
++
++      
++      psCCB->psCCBMemInfo = IMG_NULL;
++      psCCB->psCCBCtlMemInfo = IMG_NULL;
++      psCCB->pui32CCBLinAddr = IMG_NULL;
++      psCCB->pui32WriteOffset = IMG_NULL;
++      psCCB->pui32ReadOffset = IMG_NULL;
++
++      #ifdef PDUMP
++      psCCB->ui32CCBDumpWOff = 0;
++      #endif
++
++      
++      if ( ui32CCBSize < 0x1000 )
++      {
++              IMG_UINT32      i, ui32PowOfTwo;
++
++              ui32PowOfTwo = 0x1000;
++
++              for (i = 12; i > 0; i--)
++              {
++                      if (ui32CCBSize & ui32PowOfTwo)
++                      {
++                              break;
++                      }
++      
++                      ui32PowOfTwo >>= 1;
++              }
++      
++              if (ui32CCBSize & (ui32PowOfTwo - 1))
++              {
++                      ui32PowOfTwo <<= 1;
++              }
++
++              ui32AllocGran = ui32PowOfTwo;
++      }
++      else
++      {
++              ui32AllocGran = 0x1000;
++      }
++
++      
++      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
++                                                         hDevMemHeap,
++                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
++                                                         ui32CCBSize + ui32OverrunSize,
++                                                         ui32AllocGran,
++                                                         &psCCB->psCCBMemInfo) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBMemInfo alloc failed"));
++
++              goto ErrorExit;
++      }
++
++      psCCB->pui32CCBLinAddr = psCCB->psCCBMemInfo->pvLinAddrKM;
++      psCCB->sCCBDevAddr = psCCB->psCCBMemInfo->sDevVAddr;
++      psCCB->ui32Size = ui32CCBSize;
++      psCCB->ui32AllocGran = ui32AllocGran;
++
++      
++      if (PVRSRVAllocDeviceMemKM(IMG_NULL,
++                                                         hDevMemHeap,
++                                                         PVRSRV_MEM_READ | PVRSRV_MEM_WRITE | PVRSRV_MEM_EDM_PROTECT | PVRSRV_MEM_NO_SYNCOBJ,
++                                                         sizeof(PVRSRV_SGX_CCB_CTL),
++                                                         32,
++                                                         &psCCB->psCCBCtlMemInfo) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"CreateCCB: CCBCtlMemInfo alloc failed"));
++
++              goto ErrorExit;
++      }
++
++      
++      psCCB->pui32WriteOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32WriteOffset;
++      psCCB->pui32ReadOffset = &((PVRSRV_SGX_CCB_CTL *)psCCB->psCCBCtlMemInfo->pvLinAddrKM)->ui32ReadOffset;
++
++      
++      *psCCB->pui32WriteOffset = 0;
++      *psCCB->pui32ReadOffset = 0;
++
++      
++      *ppsCCB = psCCB;
++
++      return PVRSRV_OK;
++
++ErrorExit:
++
++      
++      if (psCCB->psCCBMemInfo)
++      {
++              PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
++      }
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
++
++      return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++}
++
++IMG_VOID DestroyCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32PFlags)
++{
++      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBMemInfo, IMG_FALSE);
++
++      PVRSRVFreeDeviceMemKM(IMG_NULL, psCCB->psCCBCtlMemInfo, IMG_FALSE);
++
++      if (!(ui32PFlags & PFLAGS_POWERDOWN))
++      {
++              if (psCCB)
++              {
++                      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0, psCCB, IMG_NULL);
++              }
++      }
++}
++#endif 
++#if defined (PDUMP)
++IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray,
++                                               IMG_UINT32                                             ui32BufferArrayLength,
++                                               IMG_BOOL                                               bDumpPolls)
++{
++      IMG_UINT32      i;
++
++      for (i=0; i<ui32BufferArrayLength; i++)
++      {
++              PPVR3DIF4_KICKTA_DUMP_BUFFER    psBuffer;
++              PVRSRV_KERNEL_SYNC_INFO *psSyncInfo;
++              IMG_CHAR * pszName;
++              IMG_HANDLE hUniqueTag;
++              
++              psBuffer = &psBufferArray[i];
++              pszName = psBuffer->pszName;
++              if (!pszName)
++              {
++                      pszName = "Nameless buffer";
++              }
++
++              hUniqueTag = MAKEUNIQUETAG((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo);
++              psSyncInfo = ((PVRSRV_KERNEL_MEM_INFO *)psBuffer->hKernelMemInfo)->psKernelSyncInfo;
++
++              if (psBuffer->ui32Start <= psBuffer->ui32End)
++              {
++                      if (bDumpPolls)
++                      {
++                              PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++                              PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM,
++                                               offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                               psBuffer->ui32Start,
++                                               psBuffer->ui32SpaceUsed,
++                                               psBuffer->ui32BufferSize,
++                                               0,
++                                               MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++                      }
++
++                      PDUMPCOMMENTWITHFLAGS(0, "%s\r\n", pszName);
++                      PDUMPMEM(NULL,
++                                       (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++                                       psBuffer->ui32Start,
++                                       psBuffer->ui32End - psBuffer->ui32Start,
++                                       0,
++                                       hUniqueTag);
++              }
++              else
++              {
++                      
++
++                      if (bDumpPolls)
++                      {
++                              PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++                              PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM,
++                                               offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                               psBuffer->ui32Start,
++                                               psBuffer->ui32BackEndLength,
++                                               psBuffer->ui32BufferSize,
++                                               0,
++                                               MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++                      }
++                      PDUMPCOMMENTWITHFLAGS(0, "%s (part 1)\r\n", pszName);
++                      PDUMPMEM(NULL,
++                                       (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++                                       psBuffer->ui32Start,
++                                       psBuffer->ui32BackEndLength,
++                                       0,
++                                       hUniqueTag);
++
++                      if (bDumpPolls)
++                      {
++                              PDUMPMEMPOL(psSyncInfo->psSyncDataMemInfoKM,
++                                                      offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                                      0,
++                                                      0xFFFFFFFF,
++                                                      PDUMP_POLL_OPERATOR_NOTEQUAL,
++                                                      IMG_FALSE,
++                                                      IMG_FALSE,
++                                                      MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++
++                              PDUMPCOMMENTWITHFLAGS(0, "Wait for %s space\r\n", pszName);
++                              PDUMPCBP(psSyncInfo->psSyncDataMemInfoKM,
++                                               offsetof(PVRSRV_SYNC_DATA, ui32ReadOpsComplete),
++                                               0,
++                                               psBuffer->ui32End,
++                                               psBuffer->ui32BufferSize,
++                                               0,
++                                               MAKEUNIQUETAG(psSyncInfo->psSyncDataMemInfoKM));
++                      }
++                      PDUMPCOMMENTWITHFLAGS(0, "%s (part 2)\r\n", pszName);
++                      PDUMPMEM(NULL,
++                                       (PVRSRV_KERNEL_MEM_INFO*)psBuffer->hKernelMemInfo,
++                                       0,
++                                       psBuffer->ui32End,
++                                       0,
++                                       hUniqueTag);
++              }
++      }
++}
++#endif 
++
++
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      return PVRSRVIsDevicePowered(psDeviceNode->sDevId.ui32DeviceIndex);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXGetInternalDevInfoKM(IMG_HANDLE hDevCookie, 
++                                                                      PVR3DIF4_INTERNAL_DEVINFO *psSGXInternalDevInfo)
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo = (PVRSRV_SGXDEV_INFO *)((PVRSRV_DEVICE_NODE *)hDevCookie)->pvDevice;
++
++      psSGXInternalDevInfo->ui32Flags = psDevInfo->ui32Flags;
++      psSGXInternalDevInfo->bForcePTOff = (IMG_BOOL)psDevInfo->bForcePTOff;
++      psSGXInternalDevInfo->ui32RegFlags = psDevInfo->ui32RegFlags;
++
++#if defined(SUPPORT_SGX_EVENT_OBJECT)
++      if (psDevInfo->psSGXEventObject)
++      {
++              PVRSRV_EVENTOBJECT *psEventObject = psDevInfo->psSGXEventObject;
++              psSGXInternalDevInfo->hOSEvent = psEventObject->hOSEventKM;
++      }
++      else
++      {
++              psSGXInternalDevInfo->hOSEvent = IMG_NULL;
++      }
++#endif
++
++      
++      psSGXInternalDevInfo->hCtlKernelMemInfoHandle =
++              (IMG_HANDLE)psDevInfo->psKernelSGXHostCtlMemInfo;
++
++      return PVRSRV_OK;
++}
++
++static IMG_VOID SGXCleanupRequest(PVRSRV_SGXDEV_INFO  *psSGXDevInfo,
++                                                                IMG_DEV_VIRTADDR              *psHWDataDevVAddr,
++                                                                IMG_BOOL                              bContextCleanup)
++{
++      IMG_UINT32                              ui32ResManRequestFlag = 0;
++      PVRSRV_KERNEL_MEM_INFO  *psSGXHostCtlMemInfo = psSGXDevInfo->psKernelSGXHostCtlMemInfo;
++      PVRSRV_SGX_HOST_CTL             *psSGXHostCtl = (PVRSRV_SGX_HOST_CTL *)psSGXHostCtlMemInfo->pvLinAddrKM;
++      IMG_UINT32                              ui32PowManFlags;
++#if defined (PDUMP)
++      IMG_HANDLE hUniqueTag = MAKEUNIQUETAG(psSGXHostCtlMemInfo);
++#endif
++
++      ui32PowManFlags = psSGXHostCtl->ui32PowManFlags;
++      if ((ui32PowManFlags & PVRSRV_USSE_EDM_POWMAN_NO_WORK) != 0)
++      {
++              
++      }
++      else
++      {
++              
++              if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PDCACHE)
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPD;
++                      psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PDCACHE;
++              }
++              if (psSGXDevInfo->ui32CacheControl & SGX_BIF_INVALIDATE_PTCACHE)
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_INVALPT;
++                      psSGXDevInfo->ui32CacheControl ^= SGX_BIF_INVALIDATE_PTCACHE;
++              }
++              if (bContextCleanup)
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RC_REQUEST;
++              }
++              else
++              {
++                      ui32ResManRequestFlag |= PVRSRV_USSE_EDM_RESMAN_CLEANUP_RT_REQUEST;
++              }
++              
++              
++              psSGXHostCtl->sResManCleanupData.uiAddr = psHWDataDevVAddr->uiAddr;
++              psSGXHostCtl->ui32ResManFlags |= ui32ResManRequestFlag;
++
++              
++              PDUMPCOMMENT("TA/3D CCB Control - Request clean-up event on uKernel...");
++              PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, sResManCleanupData.uiAddr), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++              PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++
++              
++              #if !defined(NO_HARDWARE)
++              if(PollForValueKM ((volatile IMG_UINT32 *)(&psSGXHostCtl->ui32ResManFlags),
++                                      PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                      PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                      MAX_HW_TIME_US/WAIT_TRY_COUNT,
++                                      WAIT_TRY_COUNT) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SGXCleanupRequest: Wait for uKernel to clean up render context failed"));
++              }
++              #endif
++
++              #ifdef PDUMP
++              
++              PDUMPCOMMENT("TA/3D CCB Control - Wait for clean-up request to complete...");
++              PDUMPMEMPOL(psSGXHostCtlMemInfo,
++                                         offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags),
++                                         PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                         PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE,
++                                         PDUMP_POLL_OPERATOR_EQUAL,
++                                         IMG_FALSE, IMG_FALSE,
++                                         hUniqueTag);
++              #endif
++
++              psSGXHostCtl->ui32ResManFlags &= ~(ui32ResManRequestFlag);
++              psSGXHostCtl->ui32ResManFlags &= ~(PVRSRV_USSE_EDM_RESMAN_CLEANUP_COMPLETE);
++              PDUMPMEM(IMG_NULL, psSGXHostCtlMemInfo, offsetof(PVRSRV_SGX_HOST_CTL, ui32ResManFlags), sizeof(IMG_UINT32), PDUMP_FLAGS_CONTINUOUS, hUniqueTag);
++      }
++}
++
++typedef struct _SGX_HW_RENDER_CONTEXT_CLEANUP_
++{
++      PVRSRV_SGXDEV_INFO *psDevInfo;
++      IMG_DEV_VIRTADDR sHWDataDevVAddr;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResItem;
++} SGX_HW_RENDER_CONTEXT_CLEANUP;
++
++static PVRSRV_ERROR SGXCleanupHWRenderContextCallback(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param)
++{
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)pvParam;
++
++      PVR_UNREFERENCED_PARAMETER(ui32ProcessID);
++      PVR_UNREFERENCED_PARAMETER(ui32Param);
++
++      SGXCleanupRequest(psCleanup->psDevInfo,
++                                                      &psCleanup->sHWDataDevVAddr, IMG_TRUE);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                        sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++                        psCleanup,
++                        psCleanup->hBlockAlloc);
++
++      return PVRSRV_OK;
++}
++
++IMG_EXPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr)
++{
++      PVRSRV_ERROR eError;
++      IMG_HANDLE hBlockAlloc;
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++      PRESMAN_ITEM psResItem;
++
++      eError = OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                              sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++                                              (IMG_VOID **)&psCleanup,
++                                              &hBlockAlloc);
++
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: Couldn't allocate memory for SGX_HW_RENDER_CONTEXT_CLEANUP structure"));
++              return IMG_NULL;
++      }
++
++      psCleanup->hBlockAlloc = hBlockAlloc;
++      psCleanup->psDevInfo = psSGXDevInfo;
++      psCleanup->sHWDataDevVAddr = *psHWRenderContextDevVAddr;
++
++      psResItem = ResManRegisterRes(RESMAN_TYPE_HW_RENDER_CONTEXT,
++                                                                (IMG_VOID *)psCleanup,
++                                                                0,
++                                                                &SGXCleanupHWRenderContextCallback,
++                                                                0);
++
++      if (psResItem == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SGXRegisterHWRenderContextKM: ResManRegisterRes failed"));
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
++                                sizeof(SGX_HW_RENDER_CONTEXT_CLEANUP),
++                                psCleanup,
++                                psCleanup->hBlockAlloc);
++
++              return IMG_NULL;
++      }
++
++      psCleanup->psResItem = psResItem;
++
++      return (IMG_HANDLE)psCleanup;
++}
++
++IMG_EXPORT
++IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR sHWRTDataSetDevVAddr)
++{
++      PVR_ASSERT(sHWRTDataSetDevVAddr.uiAddr != 0);
++
++      SGXCleanupRequest(psDevInfo, &sHWRTDataSetDevVAddr, IMG_FALSE);
++}
++
++IMG_EXPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext)
++{
++      PVRSRV_ERROR eError;
++      SGX_HW_RENDER_CONTEXT_CLEANUP *psCleanup;
++
++      PVR_ASSERT(hHWRenderContext != IMG_NULL);
++
++      psCleanup = (SGX_HW_RENDER_CONTEXT_CLEANUP *)hHWRenderContext;
++
++      eError = ResManFreeResByPtr(psCleanup->psResItem, IMG_TRUE);
++
++      return eError;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h
+--- git/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/devices/sgx/sgxutils.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,93 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
++      (((ROff - WOff) + (CCBSize - 1)) & (CCBSize - 1))
++
++#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
++      Off = ((Off + PacketSize) & (CCBSize - 1))
++
++static INLINE IMG_UINT32 SGXCalcContextCCBParamSize(IMG_UINT32 ui32ParamSize, IMG_UINT32 ui32AllocGran)
++{
++      return (ui32ParamSize + (ui32AllocGran - 1)) & ~(ui32AllocGran - 1);
++}
++
++static INLINE IMG_PVOID SGXAcquireCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32CmdSize)
++{
++      IMG_BOOL        bStart = IMG_FALSE;
++      IMG_UINT32      uiStart = 0;
++
++      do
++      {
++              if(GET_CCB_SPACE(*psCCB->pui32WriteOffset, *psCCB->pui32ReadOffset, psCCB->ui32Size) > ui32CmdSize)
++              {
++                      return (IMG_PVOID)((IMG_UINT32)psCCB->psCCBMemInfo->pvLinAddrKM + *psCCB->pui32WriteOffset);
++              }
++
++              if (bStart == IMG_FALSE)
++              {
++                      bStart = IMG_TRUE;
++                      uiStart = OSClockus();
++              }
++              OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++      } while ((OSClockus() - uiStart) < MAX_HW_TIME_US);
++
++      
++      return IMG_NULL;
++}
++
++PVRSRV_ERROR CreateCCB(PVRSRV_SGXDEV_INFO     *psSGXDevInfo,
++                                         IMG_UINT32                   ui32CCBSize,
++                                         IMG_UINT32                   ui32AllocGran,
++                                         IMG_UINT32                   ui32OverrunSize,
++                                         IMG_HANDLE                   hDevMemHeap,
++                                         PVRSRV_SGX_CCB               **ppsCCB);
++IMG_VOID DestroyCCB(PVRSRV_SGX_CCB *psCCB, IMG_UINT32 ui32PFlags);
++
++#if defined (PDUMP)
++IMG_VOID DumpBufferArray(PPVR3DIF4_KICKTA_DUMP_BUFFER psBufferArray,
++                                               IMG_UINT32                                             ui32BufferArrayLength,
++                                               IMG_BOOL                                               bDumpPolls);
++#endif
++
++IMG_IMPORT
++PVRSRV_ERROR SGXScheduleCCBCommandKM(PVRSRV_DEVICE_NODE                       *psDeviceNode,
++                                                                       PVRSRV_SGX_COMMAND_TYPE        eCommandType,
++                                                                       PVRSRV_SGX_COMMAND                     *psCommandData,
++                                                                       IMG_UINT32                                     ui32CallerID);
++
++IMG_IMPORT
++IMG_BOOL SGXIsDevicePowered(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_IMPORT
++IMG_HANDLE SGXRegisterHWRenderContextKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR *psHWRenderContextDevVAddr);
++
++IMG_IMPORT
++IMG_VOID SGXFlushHWRenderTargetKM(PVRSRV_SGXDEV_INFO *psSGXDevInfo, IMG_DEV_VIRTADDR psHWRTDataSetDevVAddr);
++
++IMG_IMPORT
++PVRSRV_ERROR SGXUnregisterHWRenderContextKM(IMG_HANDLE hHWRenderContext);
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/env_data.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,50 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _ENV_DATA_
++#define _ENV_DATA_
++
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#define PVRSRV_MAX_BRIDGE_IN_SIZE     0x1000
++#define PVRSRV_MAX_BRIDGE_OUT_SIZE    0x1000
++
++
++typedef struct _ENV_DATA_TAG
++{
++      IMG_VOID                *pvBridgeData;
++      struct pm_dev           *psPowerDevice;
++      IMG_BOOL                bLISRInstalled;
++      IMG_BOOL                bMISRInstalled;
++      IMG_UINT32              ui32IRQ;
++      IMG_VOID                *pvISRCookie;
++      struct tasklet_struct   sMISRTasklet;
++      struct pci_dev          *psPCIDev;
++      IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE];
++} ENV_DATA;
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,841 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/shmparam.h>
++#include <asm/pgtable.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <linux/sched.h>
++#include <asm/current.h>
++#endif
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "mm.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "proc.h"
++#include "mutex.h"
++
++
++
++static PKV_OFFSET_STRUCT FindOffsetStructFromLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++static IMG_UINT32 GetFirstFreePageAlignedNumber(void);
++static PKV_OFFSET_STRUCT FindOffsetStructByKVIndexAddress(IMG_VOID *pvVirtAddress,
++                                                  IMG_UINT32 ui32ByteSize);
++static void DeterminUsersSizeAndByteOffset(IMG_VOID *pvKVIndexAddress,
++                                            LinuxMemArea *psLinuxMemArea,
++                                            IMG_UINT32 *pui32RealByteSize,
++                                            IMG_UINT32 *pui32ByteOffset);
++static PKV_OFFSET_STRUCT FindOffsetStructByMMapOffset(IMG_UINT32 ui32Offset);
++static IMG_BOOL DoMapToUser(LinuxMemArea *psLinuxMemArea,
++                            struct vm_area_struct* ps_vma,
++                            IMG_UINT32 ui32ByteOffset,
++                            IMG_UINT32 ui32Size);
++static IMG_UINT32 MapPageToVMA(struct vm_area_struct *psVma,
++                               unsigned long ulFromCpuVAddr,
++                               struct page *pPage);
++static IMG_UINT32 MapIORangeToVMA(struct vm_area_struct *psVma,
++                                  unsigned long ulFromCpuVAddr,
++                                  unsigned long ulCpuPAddr,
++                                  unsigned long ulBytes);
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static off_t PrintMMapRegistrations(char * buffer, size_t size, off_t off);
++#endif
++
++
++static void MMapVOpen(struct vm_area_struct* ps_vma);
++static void MMapVClose(struct vm_area_struct* ps_vma);
++
++static struct vm_operations_struct MMapIOOps =
++{
++      open:           MMapVOpen,
++      close:          MMapVClose
++};
++
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++static PKV_OFFSET_STRUCT g_psKVOffsetTable = 0;
++static LinuxKMemCache *g_psMemmapCache = 0;
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static IMG_UINT32 g_ui32RegisteredAreas = 0;
++static IMG_UINT32 g_ui32TotalByteSize = 0;
++#endif
++
++
++
++IMG_VOID
++PVRMMapInit(IMG_VOID)
++{
++    g_psKVOffsetTable = 0;
++
++    g_psMemmapCache = KMemCacheCreateWrapper("img-mmap", sizeof(KV_OFFSET_STRUCT), 0, 0);
++    if (g_psMemmapCache)
++    {
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++        CreateProcReadEntry("mmap", PrintMMapRegistrations);
++#endif
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++    }
++}
++
++
++IMG_VOID
++PVRMMapCleanup(void)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++
++    if(!g_psMemmapCache)
++        return;
++    
++    if(g_psKVOffsetTable)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: BUG! g_psMemmapCache isn't empty!",
++                __FUNCTION__));
++        
++        for(psOffsetStruct=g_psKVOffsetTable; psOffsetStruct; psOffsetStruct = psOffsetStruct->psNext)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Un-registering mmapable area: psLinuxMemArea=0x%p, CpuPAddr=0x%08lx\n",
++                    __FUNCTION__,
++                    psOffsetStruct->psLinuxMemArea,
++                    LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0).uiAddr
++                    ));
++                      PVRMMapRemoveRegisteredArea(psOffsetStruct->psLinuxMemArea);
++        }
++    }
++    
++    RemoveProcEntry("mmap");
++    KMemCacheDestroyWrapper(g_psMemmapCache);
++    g_psMemmapCache = NULL;
++    PVR_DPF((PVR_DBG_MESSAGE,"PVRMMapCleanup: KVOffsetTable deallocated"));
++}
++
++
++PVRSRV_ERROR
++PVRMMapRegisterArea(const IMG_CHAR *pszName,
++                    LinuxMemArea *psLinuxMemArea,
++                    IMG_UINT32 ui32AllocFlags)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s(%s, psLinuxMemArea=%p, ui32AllocFlags=0x%8lx)",
++             __FUNCTION__, pszName, psLinuxMemArea, ui32AllocFlags));
++
++    
++    psOffsetStruct = FindOffsetStructFromLinuxMemArea(psLinuxMemArea);
++    if(psOffsetStruct)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "PVRMMapRegisterArea: psLinuxMemArea=%p is already registered",
++                psOffsetStruct->psLinuxMemArea));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    psOffsetStruct = KMemCacheAllocWrapper(g_psMemmapCache, GFP_KERNEL);
++    if(!psOffsetStruct)
++    {
++        PVR_DPF((PVR_DBG_ERROR,"PVRMMapRegisterArea: Couldn't alloc another mapping record from cache"));
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++    
++    
++    psOffsetStruct->ui32MMapOffset = GetFirstFreePageAlignedNumber();
++    psOffsetStruct->psLinuxMemArea = psLinuxMemArea;
++    
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        psOffsetStruct->ui32AllocFlags = ui32AllocFlags;
++    }
++    else
++    {
++        PKV_OFFSET_STRUCT psParentOffsetStruct;
++        psParentOffsetStruct = 
++            FindOffsetStructFromLinuxMemArea(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++        PVR_ASSERT(psParentOffsetStruct);
++        psOffsetStruct->ui32AllocFlags = psParentOffsetStruct->ui32AllocFlags;
++    }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    
++    psOffsetStruct->pszName                                   = pszName;
++    psOffsetStruct->pid                                               = current->pid;
++    psOffsetStruct->ui16Mapped                                = 0;
++    psOffsetStruct->ui16Faults                                = 0;
++
++    g_ui32RegisteredAreas++;
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        g_ui32TotalByteSize+=psLinuxMemArea->ui32ByteSize;
++    }
++#endif
++    
++      
++    psOffsetStruct->psNext                                    = g_psKVOffsetTable;
++    
++    g_psKVOffsetTable                         = psOffsetStruct;
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea)
++{
++    PKV_OFFSET_STRUCT *ppsOffsetStruct, psOffsetStruct;
++    
++    for(ppsOffsetStruct=&g_psKVOffsetTable;
++        (psOffsetStruct = *ppsOffsetStruct);
++        ppsOffsetStruct=&(*ppsOffsetStruct)->psNext)
++    {
++        if(psOffsetStruct->psLinuxMemArea == psLinuxMemArea)
++        {
++            break;
++        }
++    }
++
++    if(!psOffsetStruct)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: Registration for psLinuxMemArea = 0x%p not found",
++                __FUNCTION__,
++                psLinuxMemArea));
++        return PVRSRV_ERROR_BAD_MAPPING;
++    }
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    
++    if(psOffsetStruct->ui16Mapped)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: Unregistering still-mapped area! (psLinuxMemArea=0x%p)\n",
++                __FUNCTION__, psOffsetStruct->psLinuxMemArea));
++        return PVRSRV_ERROR_BAD_MAPPING;
++    }
++
++    g_ui32RegisteredAreas--;
++
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        g_ui32TotalByteSize -= psOffsetStruct->psLinuxMemArea->ui32ByteSize;
++    }
++#endif
++    
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: Table entry: "
++             "psLinuxMemArea=0x%08lX, CpuPAddr=0x%08lX", __FUNCTION__,
++             psOffsetStruct->psLinuxMemArea,
++             LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea, 0)));
++    
++    *ppsOffsetStruct = psOffsetStruct->psNext;
++ 
++    KMemCacheFreeWrapper(g_psMemmapCache, psOffsetStruct);
++
++    return PVRSRV_OK;
++}
++
++
++
++static PKV_OFFSET_STRUCT
++FindOffsetStructFromLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct = NULL;
++    
++    for(psOffsetStruct=g_psKVOffsetTable; psOffsetStruct; psOffsetStruct=psOffsetStruct->psNext)
++    {
++        if(psOffsetStruct->psLinuxMemArea == psLinuxMemArea)
++        {
++            return psOffsetStruct;
++        }
++    }
++    return NULL;
++}
++
++
++
++static IMG_UINT32
++GetFirstFreePageAlignedNumber(void)
++{
++    PKV_OFFSET_STRUCT psCurrentRec;
++    IMG_UINT32 ui32CurrentPageOffset;
++    
++    if(!g_psKVOffsetTable)
++    {
++        return 0;
++    }
++
++    psCurrentRec = g_psKVOffsetTable;
++    ui32CurrentPageOffset = (g_psKVOffsetTable->ui32MMapOffset);
++
++    while(psCurrentRec)
++    {
++        if(ui32CurrentPageOffset != (psCurrentRec->ui32MMapOffset))
++        {
++            return ui32CurrentPageOffset;
++        }
++        psCurrentRec = psCurrentRec->psNext;
++        ui32CurrentPageOffset+=PAGE_SIZE;
++    }
++    
++    return g_psKVOffsetTable->ui32MMapOffset + PAGE_SIZE;
++}
++
++
++
++PVRSRV_ERROR
++PVRMMapKVIndexAddressToMMapData(IMG_VOID *pvKVIndexAddress,
++                                IMG_UINT32 ui32Size,
++                                IMG_UINT32 *pui32MMapOffset,
++                                IMG_UINT32 *pui32ByteOffset,
++                                IMG_UINT32 *pui32RealByteSize)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    
++    psOffsetStruct = FindOffsetStructByKVIndexAddress(pvKVIndexAddress, ui32Size);
++    if (!psOffsetStruct)
++    {
++        return PVRSRV_ERROR_BAD_MAPPING;
++    }
++
++    *pui32MMapOffset = psOffsetStruct->ui32MMapOffset;
++
++    DeterminUsersSizeAndByteOffset(pvKVIndexAddress,
++                                   psOffsetStruct->psLinuxMemArea,
++                                   pui32RealByteSize,
++                                   pui32ByteOffset);
++
++    return PVRSRV_OK;
++}
++
++
++static PKV_OFFSET_STRUCT
++FindOffsetStructByKVIndexAddress(IMG_VOID *pvKVIndexAddress,
++                                 IMG_UINT32 ui32ByteSize)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    IMG_UINT8 *pui8CpuVAddr;
++    IMG_UINT8 *pui8IndexCpuVAddr = (IMG_UINT8 *)pvKVIndexAddress;
++
++    for(psOffsetStruct=g_psKVOffsetTable; psOffsetStruct; psOffsetStruct=psOffsetStruct->psNext)
++    {
++        LinuxMemArea *psLinuxMemArea = psOffsetStruct->psLinuxMemArea;
++        
++              switch(psLinuxMemArea->eAreaType)
++              {
++                      case LINUX_MEM_AREA_IOREMAP:
++                              pui8CpuVAddr = psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++                              break;
++                      case LINUX_MEM_AREA_VMALLOC:
++                              pui8CpuVAddr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++                              break;
++                      case LINUX_MEM_AREA_EXTERNAL_KV:
++                              pui8CpuVAddr = psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++                              break;
++                      default:
++                              pui8CpuVAddr = IMG_NULL;
++                              break;
++              }
++        
++        
++        if(pui8CpuVAddr)
++        {
++            if(pui8IndexCpuVAddr >= pui8CpuVAddr
++               && (pui8IndexCpuVAddr + ui32ByteSize) <= (pui8CpuVAddr + psLinuxMemArea->ui32ByteSize))
++            {
++                return psOffsetStruct;
++            }
++            else
++            {
++                pui8CpuVAddr = NULL;
++            }
++        }
++        
++        if(pvKVIndexAddress == psOffsetStruct->psLinuxMemArea)
++        {
++            if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++            {
++                PVR_ASSERT(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea->eAreaType
++                              != LINUX_MEM_AREA_SUB_ALLOC);
++            }
++            return psOffsetStruct;
++        }
++    }
++    printk(KERN_ERR "%s: Failed to find offset struct (KVAddress=%p)\n", __FUNCTION__, pvKVIndexAddress);
++    return NULL;
++}
++
++
++static void
++DeterminUsersSizeAndByteOffset(IMG_VOID *pvKVIndexAddress,
++                               LinuxMemArea *psLinuxMemArea,
++                               IMG_UINT32 *pui32RealByteSize,
++                               IMG_UINT32 *pui32ByteOffset)
++{
++    IMG_UINT8 *pui8StartVAddr = NULL;
++    IMG_UINT8 *pui8IndexCpuVAddr = (IMG_UINT8 *)pvKVIndexAddress;
++    IMG_UINT32 ui32PageAlignmentOffset=0;
++    IMG_CPU_PHYADDR CpuPAddr;
++    
++    CpuPAddr=LinuxMemAreaToCpuPAddr(psLinuxMemArea, 0);
++    ui32PageAlignmentOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++    
++    if(pvKVIndexAddress != psLinuxMemArea &&
++       (psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP
++       || psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC))
++    {
++        pui8StartVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++        *pui32ByteOffset = (pui8IndexCpuVAddr - pui8StartVAddr) + ui32PageAlignmentOffset;
++    }
++    else
++    {
++        *pui32ByteOffset = ui32PageAlignmentOffset;
++    }
++
++    *pui32RealByteSize = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize + ui32PageAlignmentOffset);
++}
++
++
++int
++PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma)
++{
++      unsigned long ulBytes;
++      PKV_OFFSET_STRUCT psCurrentRec = NULL;
++    int iRetVal=0;
++
++    LinuxLockMutex(&gPVRSRVLock);
++    
++      ulBytes = ps_vma->vm_end - ps_vma->vm_start;
++    
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: Recieved mmap(2) request with a ui32MMapOffset=0x%08lx,"
++                              " and ui32ByteSize=%ld(0x%08lx)\n",
++            __FUNCTION__,
++            (ps_vma->vm_pgoff<<PAGE_SHIFT),
++            ulBytes, ulBytes));
++   
++      
++    if(
++       (ps_vma->vm_flags & VM_WRITE) &&
++       !(ps_vma->vm_flags & VM_SHARED)
++      )
++    {
++        PVR_DPF((PVR_DBG_ERROR,"PVRMMap: Error - Cannot mmap non-shareable writable areas."));
++        iRetVal = -EINVAL;
++        goto unlock_and_return;
++    }
++   
++    psCurrentRec=FindOffsetStructByMMapOffset((ps_vma->vm_pgoff<<PAGE_SHIFT));
++    if (!psCurrentRec)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "PVRMMap: Error - Attempted to mmap unregistered area at vm_pgoff=%ld",
++                 ps_vma->vm_pgoff));
++        iRetVal = -EINVAL;
++        goto unlock_and_return;
++    }
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: > psCurrentRec->psLinuxMemArea=%p\n",
++             __FUNCTION__, psCurrentRec->psLinuxMemArea));
++    
++    ps_vma->vm_flags |= VM_RESERVED;
++    ps_vma->vm_flags |= VM_IO;
++    
++    ps_vma->vm_flags |= VM_DONTEXPAND;
++    
++    ps_vma->vm_private_data = (void *)psCurrentRec;
++    
++    
++    
++    switch(psCurrentRec->ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++    {
++        case PVRSRV_HAP_CACHED:
++            
++            break;
++        case PVRSRV_HAP_WRITECOMBINE:
++#if defined(__arm__) || defined(__sh__)
++            ps_vma->vm_page_prot = pgprot_writecombine(ps_vma->vm_page_prot);
++#else
++#if defined(__i386__)
++            ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
++
++#if defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++            
++            if(psCurrentRec->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP
++               || psCurrentRec->psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO)
++            {
++                ps_vma->vm_page_prot = __pgprot(pgprot_val(ps_vma->vm_page_prot) &= ~_PAGE_PWT);
++            }
++#endif
++
++#else
++            ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
++    #error  Unsupported architecture!
++#endif
++#endif
++            break;
++        case PVRSRV_HAP_UNCACHED:
++            ps_vma->vm_page_prot = pgprot_noncached(ps_vma->vm_page_prot);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type", __FUNCTION__));
++    }
++    
++    
++    
++    ps_vma->vm_ops = &MMapIOOps;
++    
++    if(!DoMapToUser(psCurrentRec->psLinuxMemArea, ps_vma, 0, ulBytes))
++    {
++        iRetVal = -EAGAIN;
++        goto unlock_and_return;
++    }
++    
++    
++    MMapVOpen(ps_vma);
++    
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapped area at offset 0x%08lx\n",
++             __FUNCTION__, ps_vma->vm_pgoff));
++    
++unlock_and_return:
++    
++    LinuxUnLockMutex(&gPVRSRVLock);
++    
++    return iRetVal;
++}
++
++
++static PKV_OFFSET_STRUCT
++FindOffsetStructByMMapOffset(IMG_UINT32 ui32MMapOffset)
++{
++    PKV_OFFSET_STRUCT psOffsetStruct;
++    
++    for(psOffsetStruct = g_psKVOffsetTable; psOffsetStruct; psOffsetStruct = psOffsetStruct->psNext)
++    {
++        if(psOffsetStruct->ui32MMapOffset == ui32MMapOffset)
++        {
++            return psOffsetStruct;
++        }
++    }
++    return NULL;
++}
++
++
++static IMG_BOOL
++DoMapToUser(LinuxMemArea *psLinuxMemArea,
++            struct vm_area_struct* ps_vma,
++            IMG_UINT32 ui32ByteOffset,
++            IMG_UINT32 ui32ByteSize)
++{
++    IMG_INT32 ui32Status=0;
++    LINUX_MEM_AREA_TYPE eAreaType = psLinuxMemArea->eAreaType;
++
++    PVR_ASSERT((ui32ByteSize & (PAGE_SIZE-1))==0);
++
++      if(eAreaType == LINUX_MEM_AREA_EXTERNAL_KV)
++      {
++              unsigned long phys_addr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr;
++      
++              
++              if (phys_addr > virt_to_phys(high_memory - 1)) 
++              {
++                      eAreaType = LINUX_MEM_AREA_IOREMAP;
++              }
++      }
++
++    switch(eAreaType)
++    {
++        case LINUX_MEM_AREA_IO: 
++        case LINUX_MEM_AREA_IOREMAP: 
++        {
++            unsigned long ulAddr;
++            
++            
++            ulAddr = LinuxMemAreaToCpuPAddr(psLinuxMemArea, ui32ByteOffset).uiAddr;
++            
++            
++            ulAddr &= ~(PAGE_SIZE-1);
++            
++            ui32Status = MapIORangeToVMA(ps_vma, ps_vma->vm_start, ulAddr, ui32ByteSize);
++            if(ui32Status != 0)
++            {
++                PVR_DPF((PVR_DBG_ERROR, "%s: Error - Failed to map memory.\n", __FUNCTION__));
++                return IMG_FALSE;
++            }
++            break;
++        }
++        case LINUX_MEM_AREA_VMALLOC: 
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++        {
++            IMG_CHAR *pAddr, *pCurrentAddr;
++            unsigned long ulVMAPos;
++
++            
++                      pAddr = (IMG_CHAR *)LinuxMemAreaToCpuVAddr(psLinuxMemArea) + ui32ByteOffset;
++
++                      pAddr = (IMG_CHAR *)((unsigned long)pAddr & ~(PAGE_SIZE-1));
++            pCurrentAddr = pAddr;
++            
++            ulVMAPos=ps_vma->vm_start;
++            
++            while(pCurrentAddr < (pAddr + ui32ByteSize))
++            {
++                struct page *current_page;
++
++                current_page = vmalloc_to_page(pCurrentAddr);
++                ui32Status = MapPageToVMA(ps_vma, ulVMAPos, current_page);
++                if(ui32Status != 0)
++                {
++                    PVR_DPF((PVR_DBG_ERROR,"%s: Error - Failed to map memory.\n", __FUNCTION__));
++                    return IMG_FALSE;
++                }
++                pCurrentAddr += PAGE_SIZE;
++                ulVMAPos += PAGE_SIZE;
++            }
++            break;
++        }
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++        {
++            struct page **pvPageList;
++            IMG_UINT32 ui32PageIndex, ui32PageCount, i;
++            unsigned long ulVMAPos;
++
++            pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++            ui32PageIndex = ui32ByteOffset>>PAGE_SHIFT;
++            ui32PageCount = ui32ByteSize>>PAGE_SHIFT;
++
++            
++            ulVMAPos=ps_vma->vm_start;
++            
++            for(i=ui32PageIndex; i<(ui32PageIndex+ui32PageCount); i++)
++            {
++                ui32Status = MapPageToVMA(ps_vma, ulVMAPos, pvPageList[i]);
++                if(ui32Status != 0)
++                {
++                    PVR_DPF((PVR_DBG_ERROR,"%s: Error - Failed to map memory.\n", __FUNCTION__));
++                    return IMG_FALSE;
++                }
++                ulVMAPos += PAGE_SIZE;
++            }
++            break;
++        }
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            if(!DoMapToUser(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++                            ps_vma,
++                            psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset + ui32ByteOffset,
++                            ui32ByteSize))
++            {
++                return IMG_FALSE;
++            }
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR,"pvr_mmap: Unknown mapping type requested! (0x%X)\n",
++                     psLinuxMemArea->eAreaType));
++    }
++
++    return IMG_TRUE;
++}
++
++
++static IMG_UINT32
++MapPageToVMA(struct vm_area_struct *psVma,
++             unsigned long ulFromCpuVAddr,
++             struct page *pPage)
++{
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++    return vm_insert_page(psVma, ulFromCpuVAddr, pPage);
++#else 
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10))
++    return remap_pfn_range(psVma, ulFromCpuVAddr, page_to_pfn(pPage), PAGE_SIZE, psVma->vm_page_prot);
++#else 
++    return remap_page_range(ulFromCpuVAddr, page_to_phys(pPage), PAGE_SIZE, psVma->vm_page_prot);
++#endif
++#endif
++}
++
++
++static IMG_UINT32
++MapIORangeToVMA(struct vm_area_struct *psVma,
++                unsigned long ulFromCpuVAddr,
++                unsigned long ulCpuPAddr,
++                unsigned long ulBytes)
++{
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12))
++    return io_remap_pfn_range(psVma, ulFromCpuVAddr, ulCpuPAddr>>PAGE_SHIFT, ulBytes, psVma->vm_page_prot);
++#else 
++    return io_remap_page_range(ulFromCpuVAddr, ulCpuPAddr, ulBytes, psVma->vm_page_prot);
++#endif
++}
++
++
++static void
++MMapVOpen(struct vm_area_struct* ps_vma)
++{
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++    PVR_ASSERT(psOffsetStruct != IMG_NULL)
++    psOffsetStruct->ui16Mapped++;
++
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s: psLinuxMemArea=%p, KVAddress=%p MMapOffset=%ld, ui16Mapped=%d",
++             __FUNCTION__,
++             psOffsetStruct->psLinuxMemArea,
++             LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++             psOffsetStruct->ui32MMapOffset,
++             psOffsetStruct->ui16Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++    MOD_INC_USE_COUNT;
++#endif
++}
++
++
++static void
++MMapVClose(struct vm_area_struct* ps_vma)
++{
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    PKV_OFFSET_STRUCT psOffsetStruct = (PKV_OFFSET_STRUCT)ps_vma->vm_private_data;
++    PVR_ASSERT(psOffsetStruct != IMG_NULL)
++    psOffsetStruct->ui16Mapped--;
++
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s: psLinuxMemArea=%p, CpuVAddr=%p ui32MMapOffset=%ld, ui16Mapped=%d",
++             __FUNCTION__,
++             psOffsetStruct->psLinuxMemArea,
++             LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++             psOffsetStruct->ui32MMapOffset,
++             psOffsetStruct->ui16Mapped));
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++    MOD_DEC_USE_COUNT;
++#endif
++}
++
++
++
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++static off_t
++PrintMMapRegistrations(char * buffer, size_t size, off_t off)
++{
++      PKV_OFFSET_STRUCT psOffsetStruct;
++    off_t Ret;
++      
++    LinuxLockMutex(&gPVRSRVLock);
++
++      if(!off)
++    {
++              Ret = printAppend(buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                                                "Allocations registered for mmap: %lu\n"
++                          "In total these areas correspond to %lu bytes (excluding SUB areas)\n"
++                          "psLinuxMemArea "
++                                                "CpuVAddr "
++                                                "CpuPAddr "
++                          "MMapOffset "
++                          "ByteLength "
++                          "LinuxMemType             "
++                                                "Pid   Name     Mapped Flags\n",
++#else
++                          "<mmap_header>\n"
++                          "\t<count>%lu</count>\n"
++                          "\t<bytes>%lu</bytes>\n" 
++                          "</mmap_header>\n",
++#endif
++                                                g_ui32RegisteredAreas,
++                          g_ui32TotalByteSize
++                          );
++
++        goto unlock_and_return;
++    }
++
++      if (size < 135) 
++    {
++              Ret = 0;
++        goto unlock_and_return;
++    }
++      
++      for(psOffsetStruct=g_psKVOffsetTable; --off && psOffsetStruct; psOffsetStruct=psOffsetStruct->psNext)
++              ;
++      if(!psOffsetStruct)
++    {
++              Ret = END_OF_FILE;
++        goto unlock_and_return;
++    }
++
++      Ret =  printAppend (buffer, size, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                                              "%-8p       %-8p %08lx %08lx   %-8ld   %-24s %-5d %-8s %-5u  %08lx(%s)\n",
++#else
++                        "<mmap_record>\n"
++                                              "\t<pointer>%-8p</pointer>\n"
++                        "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++                        "\t<cpu_physical>%08lx</cpu_physical>\n"
++                        "\t<mmap_offset>%08lx</mmap_offset>\n"
++                        "\t<bytes>%-8ld</bytes>\n"
++                        "\t<linux_mem_area_type>%-24s</linux_mem_area_type>\n"
++                        "\t<pid>%-5d</pid>\n"
++                        "\t<name>%-8s</name>\n"
++                        "\t<mapping_count>%-5u</mapping_count>\n"
++                        "\t<flags>%08lx</flags>\n"
++                        "\t<flags_string>%s</flags_string>\n"
++                        "</mmap_record>\n",
++#endif
++                        psOffsetStruct->psLinuxMemArea,
++                                              LinuxMemAreaToCpuVAddr(psOffsetStruct->psLinuxMemArea),
++                        LinuxMemAreaToCpuPAddr(psOffsetStruct->psLinuxMemArea,0).uiAddr,
++                                              psOffsetStruct->ui32MMapOffset,
++                                              psOffsetStruct->psLinuxMemArea->ui32ByteSize,
++                        LinuxMemAreaTypeToString(psOffsetStruct->psLinuxMemArea->eAreaType),
++                                              psOffsetStruct->pid,
++                                              psOffsetStruct->pszName,
++                                              psOffsetStruct->ui16Mapped,
++                                              psOffsetStruct->ui32AllocFlags,
++                        HAPFlagsToString(psOffsetStruct->ui32AllocFlags));
++
++unlock_and_return:
++
++    LinuxUnLockMutex(&gPVRSRVLock);
++    return Ret;
++}
++#endif
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mmap.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,84 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__MMAP_H__)
++#define __MMAP_H__
++
++#include <linux/mm.h>
++
++#include "mm.h"
++
++typedef struct KV_OFFSET_STRUCT_TAG
++{
++    
++    IMG_UINT32                  ui32MMapOffset;
++    
++    
++    LinuxMemArea                *psLinuxMemArea;
++    
++    
++    IMG_UINT32                  ui32AllocFlags;
++    
++    
++#if defined(DEBUG_LINUX_MMAP_AREAS)
++    pid_t                                             pid;
++    const IMG_CHAR                            *pszName;
++    IMG_UINT16                                        ui16Mapped;
++    IMG_UINT16                                        ui16Faults;
++#endif
++    
++    
++    struct KV_OFFSET_STRUCT_TAG       *psNext;
++}KV_OFFSET_STRUCT, *PKV_OFFSET_STRUCT;
++
++
++
++IMG_VOID PVRMMapInit(void);
++
++
++IMG_VOID PVRMMapCleanup(void);
++
++
++PVRSRV_ERROR PVRMMapRegisterArea(const IMG_CHAR *pszName,
++                                 LinuxMemArea *psLinuxMemArea,
++                                 IMG_UINT32 ui32AllocFlags);
++
++
++PVRSRV_ERROR PVRMMapRemoveRegisteredArea(LinuxMemArea *psLinuxMemArea);
++
++
++PVRSRV_ERROR PVRMMapKVIndexAddressToMMapData(IMG_VOID *pvKVIndexAddress,
++                                             IMG_UINT32 ui32Size,
++                                             IMG_UINT32 *pui32MMapOffset,
++                                             IMG_UINT32 *pui32ByteOffset,
++                                             IMG_UINT32 *pui32RealByteSize);
++
++
++int PVRMMap(struct file* pFile, struct vm_area_struct* ps_vma);
++
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.c   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1870 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++#include <linux/wrapper.h>
++#endif
++#include <linux/slab.h>
++#include <linux/highmem.h>
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "proc.h"
++#include "mutex.h"
++
++#if defined(CONFIG_ARCH_OMAP)
++#define       PVR_FLUSH_CACHE_BEFORE_KMAP
++#endif
++
++#if defined(PVR_FLUSH_CACHE_BEFORE_KMAP)
++#include <asm/cacheflush.h>
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++typedef enum {
++    DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++    DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++    DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++    DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++    DEBUG_MEM_ALLOC_TYPE_IO,
++    DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++    DEBUG_MEM_ALLOC_TYPE_KMAP,
++    DEBUG_MEM_ALLOC_TYPE_COUNT
++}DEBUG_MEM_ALLOC_TYPE;
++
++typedef struct _DEBUG_MEM_ALLOC_REC
++{
++    DEBUG_MEM_ALLOC_TYPE    eAllocType;
++      IMG_VOID                                *pvKey; 
++    IMG_VOID                *pvCpuVAddr;
++    unsigned long           ulCpuPAddr;
++    IMG_VOID                *pvPrivateData;
++      IMG_UINT32                              ui32Bytes;
++      pid_t                                   pid;
++    IMG_CHAR                *pszFileName;
++    IMG_UINT32              ui32Line;
++    
++    struct _DEBUG_MEM_ALLOC_REC   *psNext;
++}DEBUG_MEM_ALLOC_REC;
++
++static DEBUG_MEM_ALLOC_REC *g_MemoryRecords;
++
++static IMG_UINT32 g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++static IMG_UINT32 g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_COUNT];
++
++static IMG_UINT32 g_SysRAMWaterMark;
++static IMG_UINT32 g_SysRAMHighWaterMark;
++
++static IMG_UINT32 g_IOMemWaterMark;
++static IMG_UINT32 g_IOMemHighWaterMark;
++
++static IMG_VOID DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++                                       IMG_VOID *pvKey,
++                                       IMG_VOID *pvCpuVAddr,
++                                       unsigned long ulCpuPAddr,
++                                       IMG_VOID *pvPrivateData,
++                                       IMG_UINT32 ui32Bytes,
++                                       IMG_CHAR *pszFileName,
++                                       IMG_UINT32 ui32Line);
++static IMG_VOID DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey);
++static IMG_CHAR *DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType);
++
++static off_t printMemoryRecords(char * buffer, size_t size, off_t off);
++#endif
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++typedef struct _DEBUG_LINUX_MEM_AREA_REC
++{
++      LinuxMemArea                *psLinuxMemArea;
++    IMG_UINT32                  ui32Flags;
++      pid_t                                       pid;
++
++      struct _DEBUG_LINUX_MEM_AREA_REC  *psNext;
++}DEBUG_LINUX_MEM_AREA_REC;
++
++static DEBUG_LINUX_MEM_AREA_REC *g_LinuxMemAreaRecords;
++static IMG_UINT32 g_LinuxMemAreaCount;
++static IMG_UINT32 g_LinuxMemAreaWaterMark;
++static IMG_UINT32 g_LinuxMemAreaHighWaterMark;
++
++static off_t printLinuxMemAreaRecords(char * buffer, size_t size, off_t off);
++#endif
++
++static LinuxKMemCache *psLinuxMemAreaCache;
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++static IMG_VOID UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length);
++#endif
++
++static LinuxMemArea *LinuxMemAreaStructAlloc(IMG_VOID);
++static IMG_VOID LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea);
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags);
++static DEBUG_LINUX_MEM_AREA_REC *DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea);
++static IMG_VOID DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea);
++#endif
++
++PVRSRV_ERROR
++LinuxMMInit(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    {
++        int iStatus;
++        iStatus = CreateProcReadEntry("mem_areas", printLinuxMemAreaRecords);
++        if(iStatus!=0)
++        {
++            return PVRSRV_ERROR_OUT_OF_MEMORY;
++        }
++    }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    {
++        int iStatus;
++        iStatus = CreateProcReadEntry("meminfo", printMemoryRecords);
++        if(iStatus!=0)
++        {
++            return PVRSRV_ERROR_OUT_OF_MEMORY;
++        }
++    }
++#endif
++
++    psLinuxMemAreaCache = KMemCacheCreateWrapper("img-mm", sizeof(LinuxMemArea), 0, 0);
++    if(!psLinuxMemAreaCache)
++    {
++        PVR_DPF((PVR_DBG_ERROR,"%s: failed to allocate kmem_cache", __FUNCTION__));
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++
++    return PVRSRV_OK;
++}
++
++
++IMG_VOID
++LinuxMMCleanup(IMG_VOID)
++{
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    {
++        DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord = g_LinuxMemAreaRecords, *psNextRecord;
++
++        if(g_LinuxMemAreaCount)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: There are %d LinuxMemArea allocation unfreed (%ld bytes)",
++                    __FUNCTION__, g_LinuxMemAreaCount, g_LinuxMemAreaWaterMark));
++        }
++
++        while(psCurrentRecord)
++        {
++            LinuxMemArea *psLinuxMemArea;
++
++            psNextRecord = psCurrentRecord->psNext;
++            psLinuxMemArea = psCurrentRecord->psLinuxMemArea;
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up Linux memory area (%p), type=%s, size=%ld bytes",
++                        __FUNCTION__,
++                        psCurrentRecord->psLinuxMemArea,
++                        LinuxMemAreaTypeToString(psCurrentRecord->psLinuxMemArea->eAreaType),
++                        psCurrentRecord->psLinuxMemArea->ui32ByteSize));
++            
++            LinuxMemAreaDeepFree(psLinuxMemArea);
++
++            psCurrentRecord = psNextRecord;
++        }
++        RemoveProcEntry("mem_areas");
++    }
++#endif
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    {
++        DEBUG_MEM_ALLOC_REC *psCurrentRecord = g_MemoryRecords, *psNextRecord;
++        
++        
++        while(psCurrentRecord)
++        {
++            psNextRecord = psCurrentRecord->psNext;
++            PVR_DPF((PVR_DBG_ERROR, "%s: BUG!: Cleaning up memory: "
++                                    "type=%s "
++                                    "CpuVAddr=%p "
++                                    "CpuPAddr=0x%08lx, "
++                                    "allocated @ file=%s,line=%d",
++                    __FUNCTION__,
++                    DebugMemAllocRecordTypeToString(psCurrentRecord->eAllocType),
++                    psCurrentRecord->pvCpuVAddr,
++                    psCurrentRecord->ulCpuPAddr,
++                    psCurrentRecord->pszFileName,
++                    psCurrentRecord->ui32Line));
++            switch(psCurrentRecord->eAllocType)
++            {
++                case DEBUG_MEM_ALLOC_TYPE_KMALLOC:
++                    KFreeWrapper(psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_IOREMAP:
++                    IOUnmapWrapper(psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_IO:
++                    
++                    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO, psCurrentRecord->pvKey);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_VMALLOC:
++                    VFreeWrapper(psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES:
++                    
++                    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, psCurrentRecord->pvKey);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE:
++                    KMemCacheFreeWrapper(psCurrentRecord->pvPrivateData, psCurrentRecord->pvCpuVAddr);
++                    break;
++                case DEBUG_MEM_ALLOC_TYPE_KMAP:
++                    KUnMapWrapper(psCurrentRecord->pvKey);
++                    break;
++                default:
++                    PVR_ASSERT(0);
++            }
++            psCurrentRecord = psNextRecord;
++        }
++        RemoveProcEntry("meminfo");
++    }
++#endif
++
++    if(psLinuxMemAreaCache)
++    {
++        KMemCacheDestroyWrapper(psLinuxMemAreaCache); 
++        psLinuxMemAreaCache=NULL;
++    }
++}
++
++
++IMG_VOID *
++_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++    IMG_VOID *pvRet;
++    pvRet = kmalloc(ui32ByteSize, GFP_KERNEL);
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvRet)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMALLOC,
++                               pvRet,
++                               pvRet,
++                               0,
++                               NULL,
++                               ui32ByteSize,
++                               pszFileName,
++                               ui32Line
++                               );
++    }
++#endif
++    return pvRet;
++}
++
++
++IMG_VOID
++KFreeWrapper(IMG_VOID *pvCpuVAddr)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMALLOC, pvCpuVAddr);
++#endif
++    kfree(pvCpuVAddr);
++}
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static IMG_VOID
++DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE eAllocType,
++                       IMG_VOID *pvKey,
++                       IMG_VOID *pvCpuVAddr,
++                       unsigned long ulCpuPAddr,
++                       IMG_VOID *pvPrivateData,
++                       IMG_UINT32 ui32Bytes,
++                       IMG_CHAR *pszFileName,
++                       IMG_UINT32 ui32Line)
++{
++    DEBUG_MEM_ALLOC_REC *psRecord;
++
++    psRecord = kmalloc(sizeof(DEBUG_MEM_ALLOC_REC), GFP_KERNEL);
++
++    psRecord->eAllocType = eAllocType;
++    psRecord->pvKey = pvKey;
++    psRecord->pvCpuVAddr = pvCpuVAddr;
++    psRecord->ulCpuPAddr = ulCpuPAddr;
++    psRecord->pvPrivateData = pvPrivateData;
++    psRecord->pid = current->pid;
++    psRecord->ui32Bytes = ui32Bytes;
++    psRecord->pszFileName = pszFileName;
++    psRecord->ui32Line = ui32Line;
++    
++    psRecord->psNext = g_MemoryRecords;
++    g_MemoryRecords = psRecord;
++    
++    g_WaterMarkData[eAllocType] += ui32Bytes;
++    if(g_WaterMarkData[eAllocType] > g_HighWaterMarkData[eAllocType])
++    {
++        g_HighWaterMarkData[eAllocType] = g_WaterMarkData[eAllocType];
++    }
++
++    if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++       || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++       || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++       || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++    {
++        g_SysRAMWaterMark += ui32Bytes;
++        if(g_SysRAMWaterMark > g_SysRAMHighWaterMark)
++        {
++            g_SysRAMHighWaterMark = g_SysRAMWaterMark;
++        }
++    }
++    else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++            || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++    {
++        g_IOMemWaterMark += ui32Bytes;
++        if(g_IOMemWaterMark > g_IOMemHighWaterMark)
++        {
++            g_IOMemHighWaterMark = g_IOMemWaterMark;
++        }
++    }
++}
++
++
++
++static IMG_VOID
++DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE eAllocType, IMG_VOID *pvKey)
++{
++    DEBUG_MEM_ALLOC_REC **ppsCurrentRecord;
++
++    
++    for(ppsCurrentRecord = &g_MemoryRecords;
++        *ppsCurrentRecord;
++        ppsCurrentRecord = &((*ppsCurrentRecord)->psNext))
++    {
++        if((*ppsCurrentRecord)->eAllocType == eAllocType
++           && (*ppsCurrentRecord)->pvKey == pvKey)
++        {
++            DEBUG_MEM_ALLOC_REC *psNextRecord;
++            DEBUG_MEM_ALLOC_TYPE eAllocType;
++
++            psNextRecord = (*ppsCurrentRecord)->psNext;
++            eAllocType = (*ppsCurrentRecord)->eAllocType;
++            g_WaterMarkData[eAllocType] -= (*ppsCurrentRecord)->ui32Bytes;
++            
++            if(eAllocType == DEBUG_MEM_ALLOC_TYPE_KMALLOC
++               || eAllocType == DEBUG_MEM_ALLOC_TYPE_VMALLOC
++               || eAllocType == DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES
++               || eAllocType == DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++            {
++                g_SysRAMWaterMark -= (*ppsCurrentRecord)->ui32Bytes;
++            }
++            else if(eAllocType == DEBUG_MEM_ALLOC_TYPE_IOREMAP
++                    || eAllocType == DEBUG_MEM_ALLOC_TYPE_IO)
++            {
++                g_IOMemWaterMark -= (*ppsCurrentRecord)->ui32Bytes;
++            }
++            
++            kfree(*ppsCurrentRecord);
++            *ppsCurrentRecord = psNextRecord;
++            return;
++        }
++    }
++    
++    PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for type=%s with pvKey=%p\n",
++             __FUNCTION__, DebugMemAllocRecordTypeToString(eAllocType), pvKey));
++}
++
++
++static IMG_CHAR *
++DebugMemAllocRecordTypeToString(DEBUG_MEM_ALLOC_TYPE eAllocType)
++{
++    char *apszDebugMemoryRecordTypes[] = {
++        "KMALLOC",
++        "VMALLOC",
++        "ALLOC_PAGES",
++        "IOREMAP",
++        "IO",
++        "KMEM_CACHE_ALLOC",
++        "KMAP"
++    };
++    return apszDebugMemoryRecordTypes[eAllocType];
++}
++#endif
++
++
++
++IMG_VOID *
++_VMallocWrapper(IMG_UINT32 ui32Bytes,
++                IMG_UINT32 ui32AllocFlags,
++                IMG_CHAR *pszFileName,
++                IMG_UINT32 ui32Line)
++{
++    pgprot_t PGProtFlags;
++    IMG_VOID *pvRet;
++    
++    switch(ui32AllocFlags & PVRSRV_HAP_CACHETYPE_MASK)
++    {
++        case PVRSRV_HAP_CACHED:
++            PGProtFlags = PAGE_KERNEL;
++            break;
++        case PVRSRV_HAP_WRITECOMBINE:
++#if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++            PGProtFlags = pgprot_writecombine(PAGE_KERNEL);
++#else
++            PGProtFlags = pgprot_noncached(PAGE_KERNEL);
++#endif
++            break;
++        case PVRSRV_HAP_UNCACHED:
++            PGProtFlags = pgprot_noncached(PAGE_KERNEL);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR,
++                     "VMAllocWrapper: unknown mapping flags=0x%08lx",
++                     ui32AllocFlags));
++            dump_stack();
++            return NULL;
++    }
++
++      
++    pvRet = __vmalloc(ui32Bytes, GFP_KERNEL | __GFP_HIGHMEM, PGProtFlags);
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvRet)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_VMALLOC,
++                               pvRet,
++                               pvRet,
++                               0,
++                               NULL,
++                               PAGE_ALIGN(ui32Bytes),
++                               pszFileName,
++                               ui32Line
++                               );
++    }
++#endif
++    
++    return pvRet;
++}
++
++
++IMG_VOID
++VFreeWrapper(IMG_VOID *pvCpuVAddr)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_VMALLOC, pvCpuVAddr);
++#endif
++    vfree(pvCpuVAddr);
++}
++
++
++LinuxMemArea *
++NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++    LinuxMemArea *psLinuxMemArea;
++    IMG_VOID *pvCpuVAddr;
++
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        goto failed;
++    }
++
++    pvCpuVAddr = VMallocWrapper(ui32Bytes, ui32AreaFlags);
++    if(!pvCpuVAddr)
++    {
++        goto failed;
++    }
++    
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++    
++    ReservePages(pvCpuVAddr, ui32Bytes);
++#endif
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_VMALLOC;
++    psLinuxMemArea->uData.sVmalloc.pvVmallocAddress = pvCpuVAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++
++failed:
++    PVR_DPF((PVR_DBG_ERROR, "%s: failed!", __FUNCTION__));
++    if(psLinuxMemArea)
++        LinuxMemAreaStructFree(psLinuxMemArea);
++    return NULL;
++}
++
++
++IMG_VOID
++FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PVR_ASSERT(psLinuxMemArea);
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_VMALLOC);
++    PVR_ASSERT(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      UnreservePages(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress,
++                    psLinuxMemArea->ui32ByteSize);
++#endif
++
++    PVR_DPF((PVR_DBG_MESSAGE,"%s: pvCpuVAddr: %p",
++             __FUNCTION__, psLinuxMemArea->uData.sVmalloc.pvVmallocAddress));
++    VFreeWrapper(psLinuxMemArea->uData.sVmalloc.pvVmallocAddress);
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++static IMG_VOID
++ReservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++      IMG_VOID *pvPage;
++      IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++      for(pvPage = pvAddress; pvPage < pvEnd;  pvPage += PAGE_SIZE)
++      {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++              SetPageReserved(ConvertKVToPage(pvPage));
++#else
++              mem_map_reserve(ConvertKVToPage(pvPage));
++#endif
++      }
++}
++
++
++static IMG_VOID
++UnreservePages(IMG_VOID *pvAddress, IMG_UINT32 ui32Length)
++{
++      IMG_VOID *pvPage;
++      IMG_VOID *pvEnd = pvAddress + ui32Length;
++
++      for(pvPage = pvAddress; pvPage < pvEnd;  pvPage += PAGE_SIZE)
++      {
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++              ClearPageReserved(ConvertKVToPage(pvPage));
++#else
++              mem_map_unreserve(ConvertKVToPage(pvPage));
++#endif
++      }
++}
++#endif 
++
++
++IMG_VOID *
++_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++               IMG_UINT32 ui32Bytes,
++               IMG_UINT32 ui32MappingFlags,
++               IMG_CHAR *pszFileName,
++               IMG_UINT32 ui32Line)
++{
++    IMG_VOID *pvIORemapCookie = IMG_NULL;
++    
++    switch(ui32MappingFlags & PVRSRV_HAP_CACHETYPE_MASK)
++    {
++        case PVRSRV_HAP_CACHED:
++#if defined(__arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++            pvIORemapCookie = (IMG_VOID *)ioremap_cached(BasePAddr.uiAddr, ui32Bytes);
++#else
++                  pvIORemapCookie = (IMG_VOID *)ioremap(BasePAddr.uiAddr, ui32Bytes);
++#endif
++            break;
++        case PVRSRV_HAP_WRITECOMBINE:
++#if defined(__arm__)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++                      pvIORemapCookie = (IMG_VOID *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
++#else
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17))
++                      pvIORemapCookie = (IMG_VOID *)__ioremap(BasePAddr.uiAddr, ui32Bytes, L_PTE_BUFFERABLE);
++#else
++                      pvIORemapCookie = (IMG_VOID *)__ioremap(BasePAddr.uiAddr, ui32Bytes, L_PTE_BUFFERABLE, 1);
++#endif
++#endif
++#else
++#if defined(__i386__) && defined(SUPPORT_LINUX_X86_WRITECOMBINE)
++                      pvIORemapCookie = (IMG_VOID *)__ioremap(BasePAddr.uiAddr, ui32Bytes, _PAGE_PCD);
++#else
++                      pvIORemapCookie = (IMG_VOID *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
++#endif
++#endif
++            break;
++        case PVRSRV_HAP_UNCACHED:
++            pvIORemapCookie = (IMG_VOID *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "IORemapWrapper: unknown mapping flags"));
++            return NULL;
++    }
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvIORemapCookie)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IOREMAP,
++                               pvIORemapCookie,
++                               pvIORemapCookie,
++                               BasePAddr.uiAddr,
++                               NULL,
++                               ui32Bytes,
++                               pszFileName,
++                               ui32Line
++                               );
++    }
++#endif
++
++    return pvIORemapCookie;
++}
++
++
++IMG_VOID
++IOUnmapWrapper(IMG_VOID *pvIORemapCookie)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IOREMAP, pvIORemapCookie);
++#endif
++    iounmap(pvIORemapCookie);
++}
++
++
++LinuxMemArea *
++NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++                       IMG_UINT32 ui32Bytes,
++                       IMG_UINT32 ui32AreaFlags)
++{
++    LinuxMemArea *psLinuxMemArea;
++    IMG_VOID *pvIORemapCookie;
++
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++
++    pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32AreaFlags);
++    if(!pvIORemapCookie)
++    {
++        LinuxMemAreaStructFree(psLinuxMemArea);
++        return NULL;
++    }
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IOREMAP;
++    psLinuxMemArea->uData.sIORemap.pvIORemapCookie = pvIORemapCookie;
++    psLinuxMemArea->uData.sIORemap.CPUPhysAddr = BasePAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IOREMAP);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    IOUnmapWrapper(psLinuxMemArea->uData.sIORemap.pvIORemapCookie);
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_VOID *pvCPUVAddr, 
++                                                                              IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++    LinuxMemArea *psLinuxMemArea;
++
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_EXTERNAL_KV;
++    psLinuxMemArea->uData.sExternalKV.pvExternalKV = pvCPUVAddr;
++    psLinuxMemArea->uData.sExternalKV.CPUPhysAddr = BasePAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_EXTERNAL_KV);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++LinuxMemArea *
++NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr,
++                  IMG_UINT32 ui32Bytes,
++                  IMG_UINT32 ui32AreaFlags)
++{
++    LinuxMemArea *psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++
++    
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_IO;
++    psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr = BasePAddr.uiAddr;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_IO,
++                           (IMG_VOID *)BasePAddr.uiAddr,
++                           0,
++                           BasePAddr.uiAddr,
++                           NULL,
++                           ui32Bytes,
++                           "unknown",
++                           0
++                           );
++#endif
++   
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO);
++    
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_IO,
++                              (IMG_VOID *)psLinuxMemArea->uData.sIO.CPUPhysAddr.uiAddr);
++#endif
++
++    
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++struct page *
++ConvertKVToPage(IMG_VOID *pvCpuVAddr)
++{
++      struct page *psPage = 0;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20))
++
++      psPage = vmalloc_to_page(pvCpuVAddr);
++
++#else 
++
++      IMG_UINT32 ui32Addr = (IMG_UINT32)pvCpuVAddr;
++      pgd_t *ppgd;
++      pmd_t *ppmd;
++      pte_t *ppte, pte;
++
++      {
++              
++              pvAddr = (IMG_VOID *) VMALLOC_VMADDR(pvAddr);
++
++              
++              ppgd = pgd_offset_k(ui32Addr);
++
++              
++              if (!pgd_none(*ppgd))
++              {
++                      
++                      ppmd = pmd_offset(ppgd, ui32Addr);
++
++                      
++                      if (!pmd_none(*ppmd))
++                      {
++                              
++#ifndef PVR_ATOMIC_PTE
++                              ppte = pte_offset(ppmd, ui32Addr);
++                              pte = *ppte;
++#else
++                              ppte = pte_offset_atomic(ppmd, ui32Addr);
++                              pte = *ppte;
++                              pte_kunmap(ppte);
++#endif
++                              
++                              if (pte_present(pte))
++                              {
++                                      
++                                      psPage = pte_page(pte);
++                              }
++                              else
++                              {
++                                      PVR_DPF((PVR_DBG_ERROR,"ConvertKVToPage: Failed to find a valid page table entry"));
++                              }
++                      }
++                      else
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"ConvertKVToPage: Failed to find a valid mid-level page directory"));
++                      }
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"ConvertKVToPage: Failed to find a valid page directory"));
++              }
++      }
++#endif
++      return psPage;
++}
++
++
++
++LinuxMemArea *
++NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags)
++{
++    LinuxMemArea *psLinuxMemArea;
++    IMG_UINT32 ui32PageCount;
++    struct page **pvPageList;
++    IMG_UINT32 i;
++    
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        goto failed_area_alloc;
++    }
++    
++    ui32PageCount = PAGE_ALIGN(ui32Bytes)>>PAGE_SHIFT;
++    pvPageList = VMallocWrapper(sizeof(void *) * ui32PageCount, PVRSRV_HAP_CACHED);
++    if(!pvPageList)
++    {
++        goto failed_vmalloc;
++    }
++    
++    for(i=0; i<ui32PageCount; i++)
++    {
++        pvPageList[i] = alloc_pages(GFP_KERNEL, 0);
++        if(!pvPageList[i])
++        {
++            goto failed_alloc_pages;
++        }
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
++      
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))              
++      SetPageReserved(pvPageList[i]);
++#else
++              mem_map_reserve(pvPageList[i]);
++#endif
++#endif
++
++    }
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES,
++                           pvPageList,
++                           0,
++                           0,
++                           NULL,
++                           PAGE_ALIGN(ui32Bytes),
++                           "unknown",
++                           0
++                           );
++#endif
++
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_ALLOC_PAGES;
++    psLinuxMemArea->uData.sPageList.pvPageList = pvPageList;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++    
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordAdd(psLinuxMemArea, ui32AreaFlags);
++#endif
++
++    return psLinuxMemArea;
++    
++failed_alloc_pages:
++    for(i--;i>=0;i--)
++    {
++        __free_pages(pvPageList[i], 0);
++    }
++    VFreeWrapper(pvPageList);
++failed_vmalloc:
++    LinuxMemAreaStructFree(psLinuxMemArea);
++failed_area_alloc:
++    PVR_DPF((PVR_DBG_ERROR, "%s: failed", __FUNCTION__));
++    
++    return NULL;
++}
++
++
++IMG_VOID
++FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    IMG_UINT32 ui32PageCount;
++    struct page **pvPageList;
++    IMG_UINT32 i;
++
++    PVR_ASSERT(psLinuxMemArea);
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    ui32PageCount = PAGE_ALIGN(psLinuxMemArea->ui32ByteSize)>>PAGE_SHIFT;
++    pvPageList = psLinuxMemArea->uData.sPageList.pvPageList;
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES, pvPageList);
++#endif
++
++    for(i=0;i<ui32PageCount;i++)
++    {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))             
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))              
++              ClearPageReserved(pvPageList[i]);
++#else
++              mem_map_reserve(pvPageList[i]);
++#endif                
++#endif        
++        __free_pages(pvPageList[i], 0);
++    }
++    VFreeWrapper(psLinuxMemArea->uData.sPageList.pvPageList);
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++struct page*
++LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea,
++                         IMG_UINT32 ui32ByteOffset)
++{
++    IMG_UINT32 ui32PageIndex;
++    IMG_CHAR *pui8Addr;
++
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++            ui32PageIndex = ui32ByteOffset>>PAGE_SHIFT;
++            return psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++            break;
++        case LINUX_MEM_AREA_VMALLOC:
++            pui8Addr = psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++            pui8Addr += ui32ByteOffset;
++            return vmalloc_to_page(pui8Addr);
++            break;
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            return LinuxMemAreaOffsetToPage(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++                                            psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++                                             + ui32ByteOffset);
++        default:
++            PVR_DPF((PVR_DBG_ERROR,
++                    "%s: Unsupported request for struct page from LinuxMemArea with type=%s",
++                    LinuxMemAreaTypeToString(psLinuxMemArea->eAreaType)));
++            return NULL;
++    }
++}
++
++
++IMG_VOID *
++_KMapWrapper(struct page *psPage, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line)
++{
++    IMG_VOID *pvRet;
++
++#if defined(PVR_FLUSH_CACHE_BEFORE_KMAP)
++    
++    flush_cache_all();
++#endif
++
++    pvRet = kmap(psPage);
++    
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    if(pvRet)
++    {
++        DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMAP,
++                               psPage,
++                               pvRet,
++                               0,
++                               NULL,
++                               PAGE_SIZE,
++                               "unknown",
++                               0
++                               );
++    }
++#endif
++    
++    return pvRet;
++}
++
++
++IMG_VOID
++KUnMapWrapper(struct page *psPage)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMAP, psPage);
++#endif
++
++    kunmap(psPage);
++}
++
++
++LinuxKMemCache *
++KMemCacheCreateWrapper(IMG_CHAR *pszName,
++                       size_t Size,
++                       size_t Align,
++                       IMG_UINT32 ui32Flags)
++{
++#if defined(DEBUG_LINUX_SLAB_ALLOCATIONS)
++    ui32Flags |= SLAB_POISON|SLAB_RED_ZONE;
++#endif
++    return kmem_cache_create(pszName, Size, Align, ui32Flags, NULL);
++}
++
++
++IMG_VOID
++KMemCacheDestroyWrapper(LinuxKMemCache *psCache)
++{
++    kmem_cache_destroy(psCache);
++}
++
++
++IMG_VOID *
++_KMemCacheAllocWrapper(LinuxKMemCache *psCache,
++                      gfp_t Flags,
++                      IMG_CHAR *pszFileName,
++                      IMG_UINT32 ui32Line)
++{
++    IMG_VOID *pvRet;
++    
++    pvRet = kmem_cache_alloc(psCache, Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordAdd(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE,
++                           pvRet,
++                           pvRet,
++                           0,
++                           psCache,
++                           kmem_cache_size(psCache),
++                           pszFileName,
++                           ui32Line
++                           );
++#endif
++    
++    return pvRet;
++}
++
++
++IMG_VOID
++KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject)
++{
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    DebugMemAllocRecordRemove(DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE, pvObject);
++#endif
++
++    kmem_cache_free(psCache, pvObject);
++}
++
++
++const IMG_CHAR *
++KMemCacheNameWrapper(LinuxKMemCache *psCache)
++{
++    
++    return "";
++}
++
++
++LinuxMemArea *
++NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++                   IMG_UINT32 ui32ByteOffset,
++                   IMG_UINT32 ui32Bytes)
++{
++    LinuxMemArea *psLinuxMemArea;
++    
++    PVR_ASSERT((ui32ByteOffset+ui32Bytes) <= psParentLinuxMemArea->ui32ByteSize);
++    
++    psLinuxMemArea = LinuxMemAreaStructAlloc();
++    if(!psLinuxMemArea)
++    {
++        return NULL;
++    }
++    
++    psLinuxMemArea->eAreaType = LINUX_MEM_AREA_SUB_ALLOC;
++    psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea = psParentLinuxMemArea;
++    psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset = ui32ByteOffset;
++    psLinuxMemArea->ui32ByteSize = ui32Bytes;
++    
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    {
++        DEBUG_LINUX_MEM_AREA_REC *psParentRecord;
++        psParentRecord = DebugLinuxMemAreaRecordFind(psParentLinuxMemArea);
++        DebugLinuxMemAreaRecordAdd(psLinuxMemArea, psParentRecord->ui32Flags);
++    }
++#endif
++    
++    return psLinuxMemArea;
++}
++
++
++IMG_VOID
++FreeSubLinuxMemArea(LinuxMemArea *psLinuxMemArea)
++{
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++    DebugLinuxMemAreaRecordRemove(psLinuxMemArea);
++#endif
++    
++    
++
++    LinuxMemAreaStructFree(psLinuxMemArea);
++}
++
++
++static LinuxMemArea *
++LinuxMemAreaStructAlloc(IMG_VOID)
++{
++#if 0
++    LinuxMemArea *psLinuxMemArea;
++    psLinuxMemArea = kmem_cache_alloc(psLinuxMemAreaCache, GFP_KERNEL);
++    printk(KERN_ERR "%s: psLinuxMemArea=%p\n", __FUNCTION__, psLinuxMemArea);
++    dump_stack();
++    return psLinuxMemArea;
++#else
++    return KMemCacheAllocWrapper(psLinuxMemAreaCache, GFP_KERNEL);
++#endif
++}
++
++
++static IMG_VOID
++LinuxMemAreaStructFree(LinuxMemArea *psLinuxMemArea)
++{
++    KMemCacheFreeWrapper(psLinuxMemAreaCache, psLinuxMemArea);
++    
++    
++}
++
++
++IMG_VOID
++LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea)
++{
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_VMALLOC:
++            FreeVMallocLinuxMemArea(psLinuxMemArea);
++            break;
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++            FreeAllocPagesLinuxMemArea(psLinuxMemArea);
++            break;
++        case LINUX_MEM_AREA_IOREMAP:
++            FreeIORemapLinuxMemArea(psLinuxMemArea);
++            break;
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++                      FreeExternalKVLinuxMemArea(psLinuxMemArea);
++                      break;
++        case LINUX_MEM_AREA_IO:
++            FreeIOLinuxMemArea(psLinuxMemArea);
++            break;
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            FreeSubLinuxMemArea(psLinuxMemArea);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: Unknown are type (%d)\n",
++                     __FUNCTION__, psLinuxMemArea->eAreaType));
++    }
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static IMG_VOID
++DebugLinuxMemAreaRecordAdd(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32Flags)
++{
++    DEBUG_LINUX_MEM_AREA_REC *psNewRecord;
++    const char *pi8FlagsString;
++    
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        g_LinuxMemAreaWaterMark += psLinuxMemArea->ui32ByteSize;
++        if(g_LinuxMemAreaWaterMark > g_LinuxMemAreaHighWaterMark)
++        {
++            g_LinuxMemAreaHighWaterMark = g_LinuxMemAreaWaterMark;
++        }
++    }
++    g_LinuxMemAreaCount++;
++    
++    
++    psNewRecord = kmalloc(sizeof(DEBUG_LINUX_MEM_AREA_REC), GFP_KERNEL);
++    if(psNewRecord)
++    {
++        
++        psNewRecord->psLinuxMemArea = psLinuxMemArea;
++        psNewRecord->ui32Flags = ui32Flags;
++        psNewRecord->pid = current->pid;
++        psNewRecord->psNext = g_LinuxMemAreaRecords;
++        g_LinuxMemAreaRecords = psNewRecord;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: failed to allocate linux memory area record.",
++                 __FUNCTION__));
++    }
++    
++    
++    pi8FlagsString = HAPFlagsToString(ui32Flags);
++    if(strstr(pi8FlagsString, "UNKNOWN"))
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: Unexpected flags (0x%08lx) associated with psLinuxMemArea @ 0x%08lx",
++                 __FUNCTION__,
++                 ui32Flags,
++                 psLinuxMemArea));
++        
++    }
++}
++
++
++static DEBUG_LINUX_MEM_AREA_REC *
++DebugLinuxMemAreaRecordFind(LinuxMemArea *psLinuxMemArea)
++{
++    DEBUG_LINUX_MEM_AREA_REC *psCurrentRecord;
++
++    for(psCurrentRecord = g_LinuxMemAreaRecords;
++        psCurrentRecord;
++        psCurrentRecord = psCurrentRecord->psNext)
++    {
++        if(psCurrentRecord->psLinuxMemArea == psLinuxMemArea)
++        {
++            return psCurrentRecord;
++        }
++    }
++    return NULL;
++}
++
++
++static IMG_VOID
++DebugLinuxMemAreaRecordRemove(LinuxMemArea *psLinuxMemArea)
++{
++    DEBUG_LINUX_MEM_AREA_REC **ppsCurrentRecord;
++
++    if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        g_LinuxMemAreaWaterMark -= psLinuxMemArea->ui32ByteSize;
++    }
++    g_LinuxMemAreaCount--;
++
++    
++    for(ppsCurrentRecord = &g_LinuxMemAreaRecords;
++        *ppsCurrentRecord;
++        ppsCurrentRecord = &((*ppsCurrentRecord)->psNext))
++    {
++        if((*ppsCurrentRecord)->psLinuxMemArea == psLinuxMemArea)
++        {
++            DEBUG_LINUX_MEM_AREA_REC *psNextRecord;
++            
++            psNextRecord = (*ppsCurrentRecord)->psNext;
++            kfree(*ppsCurrentRecord);
++            *ppsCurrentRecord = psNextRecord;
++            return;
++        }
++    }
++
++    PVR_DPF((PVR_DBG_ERROR, "%s: couldn't find an entry for psLinuxMemArea=%p\n",
++             __FUNCTION__, psLinuxMemArea));
++}
++#endif
++
++
++IMG_VOID *
++LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea)
++{
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_VMALLOC:
++            return psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++        case LINUX_MEM_AREA_IOREMAP:
++            return psLinuxMemArea->uData.sIORemap.pvIORemapCookie;
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++                      return psLinuxMemArea->uData.sExternalKV.pvExternalKV;
++        case LINUX_MEM_AREA_SUB_ALLOC:
++        {
++            IMG_CHAR *pAddr =
++                LinuxMemAreaToCpuVAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++            if(!pAddr)
++            {
++                return NULL;
++            }
++            return pAddr + psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset;
++        }
++        default:
++            return NULL;
++    }
++}
++
++
++IMG_CPU_PHYADDR
++LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset)
++{
++    IMG_CPU_PHYADDR CpuPAddr;
++    
++    CpuPAddr.uiAddr = 0;
++
++    switch(psLinuxMemArea->eAreaType)
++    {
++        case LINUX_MEM_AREA_IOREMAP:
++        {
++            CpuPAddr = psLinuxMemArea->uData.sIORemap.CPUPhysAddr;
++            CpuPAddr.uiAddr += ui32ByteOffset;
++            break;
++        }
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++              {
++            CpuPAddr = psLinuxMemArea->uData.sExternalKV.CPUPhysAddr;
++            CpuPAddr.uiAddr += ui32ByteOffset;
++            break;
++              }
++        case LINUX_MEM_AREA_IO:
++        {
++            CpuPAddr = psLinuxMemArea->uData.sIO.CPUPhysAddr;
++            CpuPAddr.uiAddr += ui32ByteOffset;
++            break;
++        }
++        case LINUX_MEM_AREA_VMALLOC:
++        {
++            IMG_CHAR *pCpuVAddr;
++            struct page *page;
++            pCpuVAddr =
++                (IMG_CHAR *)psLinuxMemArea->uData.sVmalloc.pvVmallocAddress;
++            pCpuVAddr += ui32ByteOffset;
++            page = ConvertKVToPage(pCpuVAddr);
++            CpuPAddr.uiAddr = page_to_phys(page);
++            CpuPAddr.uiAddr += ui32ByteOffset & (PAGE_SIZE - 1);
++            break;
++        }
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++        {
++            struct page *page;
++            IMG_UINT32 ui32PageIndex = ui32ByteOffset >> PAGE_SHIFT;
++            page = psLinuxMemArea->uData.sPageList.pvPageList[ui32PageIndex];
++            CpuPAddr.uiAddr = page_to_phys(page);
++            CpuPAddr.uiAddr += ui32ByteOffset & (PAGE_SIZE - 1);
++            break;
++        }
++        case LINUX_MEM_AREA_SUB_ALLOC:
++        {
++            CpuPAddr =
++                OSMemHandleToCpuPAddr(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea,
++                                      psLinuxMemArea->uData.sSubAlloc.ui32ByteOffset
++                                        + ui32ByteOffset);
++            break;
++        }
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: Unknown LinuxMemArea type (%d)\n",
++                     __FUNCTION__, psLinuxMemArea->eAreaType));
++    }
++    
++    PVR_ASSERT(CpuPAddr.uiAddr);
++    return CpuPAddr;
++}
++
++
++LINUX_MEM_AREA_TYPE
++LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea)
++{
++    if(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC)
++    {
++        return LinuxMemAreaRootType(psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea);
++    }
++    else
++    {
++        return psLinuxMemArea->eAreaType;
++    }
++}
++
++
++const IMG_CHAR *
++LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType)
++{
++    PVR_ASSERT(LINUX_MEM_AREA_TYPE_COUNT == 5);
++    PVR_ASSERT(eMemAreaType < LINUX_MEM_AREA_TYPE_COUNT);
++    
++    
++    switch(eMemAreaType)
++    {
++        case LINUX_MEM_AREA_IOREMAP:
++            return "LINUX_MEM_AREA_IOREMAP";
++              case LINUX_MEM_AREA_EXTERNAL_KV:
++                      return "LINUX_MEM_AREA_EXTERNAL_KV";
++        case LINUX_MEM_AREA_IO:
++            return "LINUX_MEM_AREA_IO";
++        case LINUX_MEM_AREA_VMALLOC:
++            return "LINUX_MEM_AREA_VMALLOC";
++        case LINUX_MEM_AREA_SUB_ALLOC:
++            return "LINUX_MEM_AREA_SUB_ALLOC";
++        case LINUX_MEM_AREA_ALLOC_PAGES:
++            return "LINUX_MEM_AREA_ALLOC_PAGES";
++        default:
++            PVR_ASSERT(0);
++    }
++
++    return "";
++}
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS)
++static off_t
++printLinuxMemAreaRecords(char * buffer, size_t count, off_t off)
++{
++    DEBUG_LINUX_MEM_AREA_REC *psRecord;
++    off_t Ret;
++
++    LinuxLockMutex(&gPVRSRVLock);
++
++    if(!off)
++    {
++        if(count < 500)
++        {
++            Ret = 0;
++            goto unlock_and_return;
++        }
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++        Ret = printAppend(buffer, count, 0,
++                          "Number of Linux Memory Areas: %lu\n"
++                          "At the current water mark these areas correspond to %lu bytes (excluding SUB areas)\n"
++                          "At the highest water mark these areas corresponded to %lu bytes (excluding SUB areas)\n"
++                          "\nDetails for all Linux Memory Areas:\n"
++                          "%s %-24s %s %s %-8s %-5s %s\n",
++                          g_LinuxMemAreaCount,
++                          g_LinuxMemAreaWaterMark,
++                          g_LinuxMemAreaHighWaterMark,
++                          "psLinuxMemArea",
++                          "LinuxMemType",
++                          "CpuVAddr",
++                          "CpuPAddr",
++                          "Bytes",
++                          "Pid",
++                          "Flags"
++                         );
++#else
++        Ret = printAppend(buffer, count, 0,
++                          "<mem_areas_header>\n"
++                          "\t<count>%lu</count>\n"
++                          "\t<watermark key=\"mar0\" description=\"current\" bytes=\"%lu\"/>\n" 
++                          "\t<watermark key=\"mar1\" description=\"high\" bytes=\"%lu\"/>\n" 
++                          "</mem_areas_header>\n",
++                          g_LinuxMemAreaCount,
++                          g_LinuxMemAreaWaterMark,
++                          g_LinuxMemAreaHighWaterMark
++                         );
++#endif
++        goto unlock_and_return;
++    }
++
++    for(psRecord=g_LinuxMemAreaRecords; --off && psRecord; psRecord=psRecord->psNext)
++        ;
++    if(!psRecord)
++    {
++        Ret = END_OF_FILE;
++        goto unlock_and_return;
++    }
++
++    if(count < 500)
++    {
++        Ret = 0;
++        goto unlock_and_return;
++    }
++
++    Ret =  printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                       "%8p       %-24s %8p %08lx %-8ld %-5u %08lx=(%s)\n",
++#else
++                       "<linux_mem_area>\n"
++                       "\t<pointer>%8p</pointer>\n"
++                       "\t<type>%s</type>\n"
++                       "\t<cpu_virtual>%8p</cpu_virtual>\n"
++                       "\t<cpu_physical>%08lx</cpu_physical>\n"
++                       "\t<bytes>%ld</bytes>\n"
++                       "\t<pid>%u</pid>\n"
++                       "\t<flags>%08lx</flags>\n"
++                       "\t<flags_string>%s</flags_string>\n"
++                       "</linux_mem_area>\n",
++#endif
++                       psRecord->psLinuxMemArea,
++                       LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
++                       LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
++                       LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea,0).uiAddr,
++                       psRecord->psLinuxMemArea->ui32ByteSize,
++                       psRecord->pid,
++                       psRecord->ui32Flags,
++                       HAPFlagsToString(psRecord->ui32Flags)
++                      );
++
++unlock_and_return:
++
++    LinuxUnLockMutex(&gPVRSRVLock);
++    return Ret;
++}
++#endif 
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++static off_t
++printMemoryRecords(char * buffer, size_t count, off_t off)
++{
++    DEBUG_MEM_ALLOC_REC *psRecord;
++    off_t Ret;
++
++    LinuxLockMutex(&gPVRSRVLock);
++
++
++    if(!off)
++    {
++        if(count < 1000)
++        {
++            Ret = 0;
++            goto unlock_and_return;
++        }
++
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++        
++        Ret =  printAppend(buffer, count, 0, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via kmalloc",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via kmalloc",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via vmalloc",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via vmalloc",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via alloc_pages",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via alloc_pages",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via ioremap",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via ioremap",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes reserved for \"IO\" memory areas",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated for \"IO\" memory areas",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes allocated via kmem_cache_alloc",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes allocated via kmem_cache_alloc",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Current Water Mark of bytes mapped via kmap",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "Highest Water Mark of bytes mapped via kmap",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++
++        Ret =  printAppend(buffer, count, Ret, "\n");
++
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Current Water Mark for memory allocated from system RAM",
++                           g_SysRAMWaterMark);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Highest Water Mark for memory allocated from system RAM",
++                           g_SysRAMHighWaterMark);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Current Water Mark for memory allocated from IO memory",
++                           g_IOMemWaterMark);
++        Ret =  printAppend(buffer, count, Ret, "%-60s: %ld bytes\n",
++                           "The Highest Water Mark for memory allocated from IO memory",
++                           g_IOMemHighWaterMark);
++
++        Ret =  printAppend(buffer, count, Ret, "\n");
++
++        Ret =  printAppend(buffer, count, Ret, "Details for all known allocations:\n"
++                           "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
++                           "Type",
++                           "CpuVAddr",
++                           "CpuPAddr",
++                           "Bytes",
++                           "PID",
++                           "PrivateData",
++                           "Filename:Line");
++
++#else 
++              
++              
++        Ret =  printAppend(buffer, count, 0, "<meminfo>\n<meminfo_header>\n");
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr0\" description=\"kmalloc_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr1\" description=\"kmalloc_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr2\" description=\"vmalloc_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr3\" description=\"vmalloc_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr4\" description=\"alloc_pages_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr5\" description=\"alloc_pages_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr6\" description=\"ioremap_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr7\" description=\"ioremap_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr8\" description=\"io_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr9\" description=\"io_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr10\" description=\"kmem_cache_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr11\" description=\"kmem_cache_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr12\" description=\"kmap_current\" bytes=\"%ld\"/>\n",
++                           g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr13\" description=\"kmap_high\" bytes=\"%ld\"/>\n",
++                           g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
++
++        Ret =  printAppend(buffer, count, Ret, "\n");
++
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr14\" description=\"system_ram_current\" bytes=\"%ld\"/>\n",
++                           g_SysRAMWaterMark);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr15\" description=\"system_ram_high\" bytes=\"%ld\"/>\n",
++                           g_SysRAMHighWaterMark);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr16\" description=\"system_io_current\" bytes=\"%ld\"/>\n",
++                           g_IOMemWaterMark);
++        Ret =  printAppend(buffer, count, Ret,
++                           "<watermark key=\"mr17\" description=\"system_io_high\" bytes=\"%ld\"/>\n",
++                           g_IOMemHighWaterMark);
++
++        Ret =  printAppend(buffer, count, Ret, "</meminfo_header>\n");
++
++#endif 
++
++        goto unlock_and_return;
++    }
++
++    if(count < 1000)
++    {
++        Ret = 0;
++        goto unlock_and_return;
++    }
++
++    for(psRecord=g_MemoryRecords; --off && psRecord; psRecord=psRecord->psNext)
++        ;
++    if(!psRecord)
++    {
++#if defined(DEBUG_LINUX_XML_PROC_FILES)
++              if(off == 0)
++              {
++                      Ret =  printAppend(buffer, count, 0, "</meminfo>\n");
++                      goto unlock_and_return;
++              }
++#endif
++        Ret = END_OF_FILE;
++        goto unlock_and_return;
++    }
++
++    if(psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
++    {
++        Ret =  printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                           "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++                           "<allocation>\n"
++                           "\t<type>%s</type>\n"
++                           "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++                           "\t<cpu_physical>%08lx</cpu_physical>\n"
++                           "\t<bytes>%ld</bytes>\n"
++                           "\t<pid>%d</pid>\n"
++                           "\t<private>%s</private>\n"
++                           "\t<filename>%s</filename>\n"
++                           "\t<line>%ld</line>\n"
++                           "</allocation>\n",
++#endif
++                           DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++                           psRecord->pvCpuVAddr,
++                           psRecord->ulCpuPAddr,
++                           psRecord->ui32Bytes,
++                           psRecord->pid,
++                           "NULL",
++                           psRecord->pszFileName,
++                           psRecord->ui32Line);
++    }
++    else
++    {
++        Ret =  printAppend(buffer, count, 0,
++#if !defined(DEBUG_LINUX_XML_PROC_FILES)
++                           "%-16s %-8p %08lx %-10ld %-5d %-10s %s:%ld\n",
++#else
++                           "<allocation>\n"
++                           "\t<type>%s</type>\n"
++                           "\t<cpu_virtual>%-8p</cpu_virtual>\n"
++                           "\t<cpu_physical>%08lx</cpu_physical>\n"
++                           "\t<bytes>%ld</bytes>\n"
++                           "\t<pid>%d</pid>\n"
++                           "\t<private>%s</private>\n"
++                           "\t<filename>%s</filename>\n"
++                           "\t<line>%ld</line>\n"
++                           "</allocation>\n",
++#endif
++                           DebugMemAllocRecordTypeToString(psRecord->eAllocType),
++                           psRecord->pvCpuVAddr,
++                           psRecord->ulCpuPAddr,
++                           psRecord->ui32Bytes,
++                           psRecord->pid,
++                           KMemCacheNameWrapper(psRecord->pvPrivateData),
++                           psRecord->pszFileName,
++                           psRecord->ui32Line);
++    }
++
++unlock_and_return:
++
++    LinuxUnLockMutex(&gPVRSRVLock);
++    return Ret; 
++}
++#endif 
++
++
++#if defined(DEBUG_LINUX_MEM_AREAS) || defined(DEBUG_LINUX_MMAP_AREAS)
++const IMG_CHAR *
++HAPFlagsToString(IMG_UINT32 ui32Flags)
++{
++    static IMG_CHAR szFlags[50];
++    IMG_UINT32 ui32Pos = 0;
++    IMG_UINT32 ui32CacheTypeIndex, ui32MapTypeIndex;
++    IMG_CHAR *apszCacheTypes[] = {
++        "UNCACHED",
++        "CACHED",
++        "WRITECOMBINE",
++        "UNKNOWN"
++    };
++    IMG_CHAR *apszMapType[] = {
++        "KERNEL_ONLY",
++        "SINGLE_PROCESS",
++        "MULTI_PROCESS",
++        "FROM_EXISTING_PROCESS",
++        "NO_CPU_VIRTUAL",
++        "UNKNOWN"
++    };
++    
++    
++    if(ui32Flags & PVRSRV_HAP_UNCACHED){
++        ui32CacheTypeIndex=0;
++    }else if(ui32Flags & PVRSRV_HAP_CACHED){
++        ui32CacheTypeIndex=1;
++    }else if(ui32Flags & PVRSRV_HAP_WRITECOMBINE){
++        ui32CacheTypeIndex=2;
++    }else{
++        ui32CacheTypeIndex=3;
++        PVR_DPF((PVR_DBG_ERROR, "%s: unknown cache type (%d)",
++                 __FUNCTION__, (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)));
++    }
++
++    
++    if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY){
++        ui32MapTypeIndex = 0;
++    }else if(ui32Flags & PVRSRV_HAP_SINGLE_PROCESS){
++        ui32MapTypeIndex = 1;
++    }else if(ui32Flags & PVRSRV_HAP_MULTI_PROCESS){
++        ui32MapTypeIndex = 2;
++    }else if(ui32Flags & PVRSRV_HAP_FROM_EXISTING_PROCESS){
++        ui32MapTypeIndex = 3;
++    }else if(ui32Flags & PVRSRV_HAP_NO_CPU_VIRTUAL){
++        ui32MapTypeIndex = 4;
++    }else{
++        ui32MapTypeIndex = 5;
++        PVR_DPF((PVR_DBG_ERROR, "%s: unknown map type (%d)",
++                 __FUNCTION__, (ui32Flags & PVRSRV_HAP_MAPTYPE_MASK)));
++    }
++
++    ui32Pos = sprintf(szFlags, "%s|", apszCacheTypes[ui32CacheTypeIndex]);
++    sprintf(szFlags + ui32Pos, "%s", apszMapType[ui32MapTypeIndex]);
++
++    return szFlags;
++}
++#endif
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mm.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,243 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __IMG_LINUX_MM_H__
++#define __IMG_LINUX_MM_H__
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <linux/slab.h>
++
++typedef enum {
++    LINUX_MEM_AREA_IOREMAP,
++      LINUX_MEM_AREA_EXTERNAL_KV,
++    LINUX_MEM_AREA_IO,
++    LINUX_MEM_AREA_VMALLOC,
++    LINUX_MEM_AREA_ALLOC_PAGES,
++    LINUX_MEM_AREA_SUB_ALLOC,
++    LINUX_MEM_AREA_TYPE_COUNT
++}LINUX_MEM_AREA_TYPE;
++
++typedef struct _LinuxMemArea LinuxMemArea;
++
++
++struct _LinuxMemArea {
++    LINUX_MEM_AREA_TYPE eAreaType;
++    union _uData
++    {
++        struct _sIORemap
++        {
++            
++            IMG_CPU_PHYADDR CPUPhysAddr;
++            IMG_VOID *pvIORemapCookie;
++        }sIORemap;
++        struct _sExternalKV
++        {
++            
++            IMG_CPU_PHYADDR CPUPhysAddr;
++            IMG_VOID *pvExternalKV;
++        }sExternalKV;
++        struct _sIO
++        {
++            
++            IMG_CPU_PHYADDR CPUPhysAddr;
++        }sIO;
++        struct _sVmalloc
++        {
++            
++            IMG_VOID *pvVmallocAddress;
++        }sVmalloc;
++        struct _sPageList
++        {
++            
++            struct page **pvPageList;
++        }sPageList;
++        struct _sSubAlloc
++        {
++            
++            LinuxMemArea *psParentLinuxMemArea;
++            IMG_UINT32 ui32ByteOffset;
++        }sSubAlloc;
++    }uData;
++
++    IMG_UINT32 ui32ByteSize;
++};
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17))
++typedef kmem_cache_t LinuxKMemCache;
++#else
++typedef struct kmem_cache LinuxKMemCache;
++#endif
++
++
++PVRSRV_ERROR LinuxMMInit(IMG_VOID);
++
++
++IMG_VOID LinuxMMCleanup(IMG_VOID);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, __FILE__, __LINE__)
++#else
++#define KMallocWrapper(ui32ByteSize) _KMallocWrapper(ui32ByteSize, NULL, 0)
++#endif
++IMG_VOID *_KMallocWrapper(IMG_UINT32 ui32ByteSize, IMG_CHAR *szFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID KFreeWrapper(IMG_VOID *pvCpuVAddr);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, __FILE__, __LINE__)
++#else
++#define VMallocWrapper(ui32Bytes, ui32AllocFlags) _VMallocWrapper(ui32Bytes, ui32AllocFlags, NULL, 0)
++#endif
++IMG_VOID *_VMallocWrapper(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AllocFlags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID VFreeWrapper(IMG_VOID *pvCpuVAddr);
++
++
++LinuxMemArea *NewVMallocLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeVMallocLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++    _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, __FILE__, __LINE__)
++#else
++#define IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags) \
++    _IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags, NULL, 0)
++#endif
++IMG_VOID *_IORemapWrapper(IMG_CPU_PHYADDR BasePAddr,
++                          IMG_UINT32 ui32Bytes,
++                          IMG_UINT32 ui32MappingFlags,
++                          IMG_CHAR *pszFileName,
++                          IMG_UINT32 ui32Line);
++
++
++LinuxMemArea *NewIORemapLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIORemapLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++LinuxMemArea *NewExternalKVLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_VOID *pvCPUVAddr, IMG_UINT32 ui32Bytes, 
++                                                                              IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeExternalKVLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_VOID IOUnmapWrapper(IMG_VOID *pvIORemapCookie);
++
++
++struct page *LinuxMemAreaOffsetToPage(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMapWrapper(psPage) _KMapWrapper(psPage, __FILE__, __LINE__)
++#else
++#define KMapWrapper(psPage) _KMapWrapper(psPage, NULL, 0)
++#endif
++IMG_VOID *_KMapWrapper(struct page *psPage, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID KUnMapWrapper(struct page *psPage);
++
++
++LinuxKMemCache *KMemCacheCreateWrapper(IMG_CHAR *pszName, size_t Size, size_t Align, IMG_UINT32 ui32Flags);
++
++
++IMG_VOID KMemCacheDestroyWrapper(LinuxKMemCache *psCache);
++
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, __FILE__, __LINE__)
++#else
++#define KMemCacheAllocWrapper(psCache, Flags) _KMemCacheAllocWrapper(psCache, Flags, NULL, 0)
++#endif
++IMG_VOID *_KMemCacheAllocWrapper(LinuxKMemCache *psCache, gfp_t Flags, IMG_CHAR *pszFileName, IMG_UINT32 ui32Line);
++
++
++IMG_VOID KMemCacheFreeWrapper(LinuxKMemCache *psCache, IMG_VOID *pvObject);
++
++
++const IMG_CHAR *KMemCacheNameWrapper(LinuxKMemCache *psCache);
++
++
++struct page *ConvertKVToPage(IMG_VOID *pvAddr);
++
++
++LinuxMemArea *NewIOLinuxMemArea(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeIOLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewAllocPagesLinuxMemArea(IMG_UINT32 ui32Bytes, IMG_UINT32 ui32AreaFlags);
++
++
++IMG_VOID FreeAllocPagesLinuxMemArea(LinuxMemArea *psLinuxMemArea);
++
++
++LinuxMemArea *NewSubLinuxMemArea(LinuxMemArea *psParentLinuxMemArea,
++                                 IMG_UINT32 ui32ByteOffset,
++                                 IMG_UINT32 ui32Bytes);
++
++
++IMG_VOID LinuxMemAreaDeepFree(LinuxMemArea *psLinuxMemArea);
++
++
++#if defined(LINUX_MEM_AREAS_DEBUG)
++IMG_VOID LinuxMemAreaRegister(LinuxMemArea *psLinuxMemArea);
++#else
++#define LinuxMemAreaRegister(X)
++#endif
++
++
++IMG_VOID *LinuxMemAreaToCpuVAddr(LinuxMemArea *psLinuxMemArea);
++
++
++IMG_CPU_PHYADDR LinuxMemAreaToCpuPAddr(LinuxMemArea *psLinuxMemArea, IMG_UINT32 ui32ByteOffset);
++
++
++LINUX_MEM_AREA_TYPE LinuxMemAreaRootType(LinuxMemArea *psLinuxMemArea);
++
++
++const IMG_CHAR *LinuxMemAreaTypeToString(LINUX_MEM_AREA_TYPE eMemAreaType);
++
++
++#if defined(DEBUG) || defined(DEBUG_LINUX_MEM_AREAS)
++const IMG_CHAR *HAPFlagsToString(IMG_UINT32 ui32Flags);
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/module.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/module.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/module.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,407 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++// #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#if defined(LDM_PLATFORM)
++#include <linux/platform_device.h>
++#endif 
++#include "img_defs.h"
++#include "services.h"
++#include "kerneldisplay.h"
++#include "kernelbuffer.h"
++#include "syscommon.h"
++#include "pvrmmap.h"
++#include "mm.h"
++#include "mmap.h"
++#include "mutex.h"
++#include "pvr_debug.h"
++#include "perproc.h"
++#include "handle.h"
++#include "pvr_bridge_km.h"
++#include "proc.h"
++
++
++#define CLASSNAME     "powervr"
++#define DRVNAME               "pvrsrvkm"
++#define DEVNAME               "pvrsrvkm"
++
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE(DEVNAME);
++#ifdef DEBUG
++static int debug = DBGPRIV_WARNING;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++#include <linux/moduleparam.h>
++module_param(debug, int, 0);
++#else
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Sets the level of debug output (default=0x4)");
++#endif
++#endif
++
++
++void PVRDebugSetLevel(IMG_UINT32 uDebugLevel);
++
++extern IMG_BOOL PVRGetDisplayClassJTable(PVRSRV_DC_DISP2SRV_KMJTABLE *psJTable);
++extern IMG_BOOL PVRGetBufferClassJTable(PVRSRV_BC_BUFFER2SRV_KMJTABLE *psJTable);
++EXPORT_SYMBOL(PVRGetDisplayClassJTable);
++EXPORT_SYMBOL(PVRGetBufferClassJTable);
++
++
++static int AssignedMajorNumber;
++
++
++extern int PVRSRV_BridgeDispatchKM(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
++static int PVRSRVOpen(struct inode* pInode, struct file* pFile);
++static int PVRSRVRelease(struct inode* pInode, struct file* pFile);
++
++PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++static struct file_operations pvrsrv_fops = {
++      owner:THIS_MODULE,
++      ioctl:PVRSRV_BridgeDispatchKM,
++      open:PVRSRVOpen,
++      release:PVRSRVRelease,
++      mmap:PVRMMap,
++};
++
++
++#if defined(LDM_PLATFORM)
++static int PVRSRVDriverRemove(struct platform_device *device);
++static int PVRSRVDriverProbe(struct platform_device *device);
++static int PVRSRVDriverSuspend(struct platform_device *device, pm_message_t state);
++static void PVRSRVDriverShutdown(struct platform_device *device);
++static int PVRSRVDriverResume(struct platform_device *device);
++
++static struct platform_driver powervr_driver = {
++      .driver = {
++              .name           = DEVNAME,
++      },
++      .probe          = PVRSRVDriverProbe,
++      .remove         = PVRSRVDriverRemove,
++      .suspend        = PVRSRVDriverSuspend,
++      .resume         = PVRSRVDriverResume,
++      .shutdown       = PVRSRVDriverShutdown,
++};
++
++static void PVRSRVDeviceRelease(struct device *device);
++
++static struct platform_device powervr_device = {
++      .name                   = DEVNAME,
++      .id                             = -1,
++      .dev                    = {
++              .release                = PVRSRVDeviceRelease
++      }
++};
++
++
++
++static int PVRSRVDriverProbe(struct platform_device *pDevice)
++{
++      SYS_DATA *psSysData;
++      PVRSRV_ERROR eError;
++      int error;
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverProbe(pDevice=%p)", pDevice));
++
++      pDevice->dev.driver_data = NULL;
++
++#if 0
++      
++      if (PerDeviceSysInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++#endif        
++      
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              if (SysInitialise() != PVRSRV_OK)
++              {
++                      return -ENODEV;
++              }
++
++              eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"PVRSRVDriverProbe: Failed to connect to resource manager"));
++                      error = -ENODEV;
++              }
++      }
++
++      return 0;
++}
++
++
++static int PVRSRVDriverRemove(struct platform_device *pDevice)
++{
++      SYS_DATA *psSysData;
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverRemove(pDevice=%p)", pDevice));
++
++      if(PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++      
++      if (SysAcquireData(&psSysData) == PVRSRV_OK)
++      {
++              SysDeinitialise(psSysData);
++      }
++
++#if 0
++      if (PerDeviceSysDeInitialise((IMG_PVOID)pDevice) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++#endif
++
++
++      return 0;
++}
++
++
++static void PVRSRVDriverShutdown(struct platform_device *pDevice)
++{
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverShutdown(pDevice=%p)", pDevice));
++
++      (void) PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3);
++}
++
++
++static int PVRSRVDriverSuspend(struct platform_device *pDevice, pm_message_t state)
++{
++
++      PVR_DPF((PVR_DBG_WARNING,
++                      "PVRSRVDriverSuspend(pDevice=%p)",
++                      pDevice));
++
++      if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D3) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++
++static int PVRSRVDriverResume(struct platform_device *pDevice)
++{
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDriverResume(pDevice=%p)", pDevice));
++
++      if (PVRSRVSetPowerStateKM(PVRSRV_POWER_STATE_D0) != PVRSRV_OK)
++      {
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++
++static void PVRSRVDeviceRelease(struct device *pDevice)
++{
++      PVR_DPF((PVR_DBG_WARNING, "PVRSRVDeviceRelease(pDevice=%p)", pDevice));
++}
++#endif 
++
++static int PVRSRVOpen(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++      int Ret = 0;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVOpen"));
++
++    LinuxLockMutex(&gPVRSRVLock);
++
++      if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_TRUE) != PVRSRV_OK)
++      {
++              Ret = -ENOMEM;
++      }
++      
++    LinuxUnLockMutex(&gPVRSRVLock);
++
++      return Ret;
++}
++
++
++static int PVRSRVRelease(struct inode unref__ * pInode, struct file unref__ * pFile)
++{
++      int Ret = 0;
++
++      PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRelease"));
++
++      if (PVRSRVResManConnect(PVRSRVRESMAN_PROCESSID_FIND, IMG_FALSE) != PVRSRV_OK)
++      {
++              Ret = -ENOMEM;
++      }
++
++      return Ret;
++}
++
++
++static int __init PVRCore_Init(void)
++{
++      int error;
++#if !defined(LDM_PLATFORM)
++      PVRSRV_ERROR eError;
++#endif 
++      
++      AssignedMajorNumber = register_chrdev(0, DEVNAME, &pvrsrv_fops);
++
++      if (AssignedMajorNumber <= 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to get major number"));
++
++              return -EBUSY;
++      }
++
++      PVR_DPF((PVR_DBG_WARNING, "PVRCore_Init: major device %d", AssignedMajorNumber));
++
++      
++      if (CreateProcEntries ())
++      {
++              unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++              return -ENOMEM;
++      }
++
++    LinuxInitMutex(&gPVRSRVLock);
++
++#ifdef DEBUG
++      PVRDebugSetLevel(debug);
++#endif
++
++      if(LinuxMMInit() != PVRSRV_OK)
++    {
++        error = -ENOMEM;
++        goto init_failed;
++    }
++
++      LinuxBridgeInit();
++
++      PVRMMapInit();
++
++#if defined(LDM_PLATFORM)
++      if ((error = platform_driver_register(&powervr_driver)) != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform driver (%d)", error));
++
++              goto init_failed;
++      }
++
++      if ((error = platform_device_register(&powervr_device)) != 0)
++      {
++              platform_driver_unregister(&powervr_driver);
++
++              PVR_DPF((PVR_DBG_ERROR, "PVRCore_Init: unable to register platform device (%d)", error));
++
++              goto init_failed;
++      }
++#else 
++      
++      if ((eError = SysInitialise()) != PVRSRV_OK)
++      {
++              error = -ENODEV;
++#if defined(TCF_REV) && (TCF_REV == 110)
++              if(eError == PVRSRV_ERROR_NOT_SUPPORTED)
++              {
++                      printk("\nAtlas wrapper (FPGA image) version mismatch");
++                      error = -ENODEV;
++              }
++#endif
++              goto init_failed;
++      }
++
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_TRUE);
++      if(eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"PVRCore_Init: Failed to connect to resource manager"));
++              error = -ENODEV;
++              goto init_failed;
++      }
++#endif 
++      return 0;
++
++init_failed:
++
++      (void) PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
++      PVRMMapCleanup();
++      LinuxMMCleanup();
++      RemoveProcEntries();
++      unregister_chrdev(AssignedMajorNumber, DRVNAME);
++
++      return error;
++
++} 
++
++
++static void __exit PVRCore_Cleanup(void)
++{
++      SYS_DATA *psSysData;
++#if !defined(LDM_PLATFORM)
++      PVRSRV_ERROR eError;
++#endif 
++
++      SysAcquireData(&psSysData);
++      unregister_chrdev(AssignedMajorNumber, DRVNAME);
++      
++#if defined (LDM_PLATFORM)
++      platform_device_unregister(&powervr_device);
++      platform_driver_unregister(&powervr_driver);
++#else 
++      eError = PVRSRVResManConnect(RESMAN_KERNEL_PROCESSID, IMG_FALSE);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"KernelResManDisconnect: Failed to disconnect"));
++      }
++
++      
++      SysDeinitialise(psSysData);
++#endif 
++
++      PVRMMapCleanup();
++
++      LinuxMMCleanup();
++
++      LinuxBridgeDeInit();
++
++      RemoveProcEntries();
++
++      PVR_DPF((PVR_DBG_WARNING,"unloading"));
++}
++
++module_init(PVRCore_Init);
++module_exit(PVRCore_Cleanup);
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,134 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++#include <linux/module.h>
++
++#include <img_defs.h>
++#include <services.h>
++
++#include "mutex.h"
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    mutex_init(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    mutex_lock(psPVRSRVMutex);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    if(mutex_lock_interruptible(psPVRSRVMutex) == -EINTR)
++    {
++        return PVRSRV_ERROR_GENERIC;
++    }else{
++        return PVRSRV_OK;
++    }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    return mutex_trylock(psPVRSRVMutex);
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    mutex_unlock(psPVRSRVMutex);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    return mutex_is_locked(psPVRSRVMutex);
++}
++
++
++#else 
++
++
++IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    init_MUTEX(&psPVRSRVMutex->sSemaphore);
++    atomic_set(&psPVRSRVMutex->Count, 0);
++}
++
++IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    down(&psPVRSRVMutex->sSemaphore);
++    atomic_dec(&psPVRSRVMutex->Count);
++}
++
++PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    if(down_interruptible(&psPVRSRVMutex->sSemaphore) == -EINTR)
++    {
++        
++        return PVRSRV_ERROR_GENERIC;
++    }else{
++        atomic_dec(&psPVRSRVMutex->Count);
++        return PVRSRV_OK;
++    }
++}
++
++IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    IMG_INT32 Status = down_trylock(&psPVRSRVMutex->sSemaphore);
++    if(Status == 0)
++    {
++        atomic_dec(&psPVRSRVMutex->Count);
++    }
++
++    return Status;
++}
++
++IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    atomic_inc(&psPVRSRVMutex->Count);
++    up(&psPVRSRVMutex->sSemaphore);
++}
++
++IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex)
++{
++    IMG_INT32 iCount;
++    
++    iCount = atomic_read(&psPVRSRVMutex->Count);
++
++    return (IMG_BOOL)iCount;
++}
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/mutex.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,70 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __INCLUDED_LINUX_MUTEX_H_
++#define __INCLUDED_LINUX_MUTEX_H_
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++#include <linux/mutex.h>
++#else
++#include <asm/semaphore.h>
++#endif
++
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
++
++typedef struct mutex PVRSRV_LINUX_MUTEX;
++
++#else 
++
++
++typedef struct {
++    struct semaphore sSemaphore;
++    
++    atomic_t Count;
++}PVRSRV_LINUX_MUTEX;
++
++#endif
++
++
++extern IMG_VOID LinuxInitMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern PVRSRV_ERROR LinuxLockMutexInterruptible(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_INT32 LinuxTryLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_VOID LinuxUnLockMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++extern IMG_BOOL LinuxIsLockedMutex(PVRSRV_LINUX_MUTEX *psPVRSRVMutex);
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/osfunc.c       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1617 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#include <asm/system.h>
++#endif
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/hardirq.h>
++#include <linux/timer.h>
++#include <linux/capability.h>
++#include <asm/uaccess.h>
++
++#include "img_types.h"
++#include "services_headers.h"
++#include "mm.h"
++#include "pvrmmap.h"
++#include "mmap.h"
++#include "env_data.h"
++#include "proc.h"
++#include "mutex.h"
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++#if !defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc)
++#else
++PVRSRV_ERROR _OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvCpuVAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *szFilename, IMG_UINT32 ui32Line)
++#endif
++{
++    PVR_UNREFERENCED_PARAMETER(phBlockAlloc);
++    PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++#if defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++    *ppvCpuVAddr = _KMallocWrapper(ui32Size, szFilename, ui32Line);
++#else
++    *ppvCpuVAddr = KMallocWrapper(ui32Size);
++#endif
++    if(*ppvCpuVAddr)
++    {
++        return PVRSRV_OK;
++    }
++    else
++    {
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++}
++
++
++PVRSRV_ERROR OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvCpuVAddr, IMG_HANDLE hBlockAlloc)
++{     
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      PVR_UNREFERENCED_PARAMETER(ui32Size);
++      PVR_UNREFERENCED_PARAMETER(hBlockAlloc);
++
++    KFreeWrapper(pvCpuVAddr);
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSAllocPages(IMG_UINT32 ui32AllocFlags,
++               IMG_UINT32 ui32Size,
++               IMG_VOID **ppvCpuVAddr,
++               IMG_HANDLE *phOSMemHandle)
++{
++      LinuxMemArea *psLinuxMemArea;
++
++#if 0
++    
++    if(ui32AllocFlags & PVRSRV_HAP_SINGLE_PROCESS)
++    {
++        ui32AllocFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++        ui32AllocFlags |= PVRSRV_HAP_MULTI_PROCESS;
++    }
++#endif
++
++    switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++      {
++        case PVRSRV_HAP_KERNEL_ONLY:
++        {
++            psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_OUT_OF_MEMORY;
++            }
++            break;
++        }
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        {
++            
++            
++            psLinuxMemArea = NewAllocPagesLinuxMemArea(ui32Size, ui32AllocFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_OUT_OF_MEMORY;
++            }
++                      PVRMMapRegisterArea("Import Arena", psLinuxMemArea, ui32AllocFlags);
++            break;
++        }
++
++              case PVRSRV_HAP_MULTI_PROCESS:
++              {
++            
++#if defined(VIVT_CACHE) || defined(__sh__)
++            
++            ui32AllocFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++            psLinuxMemArea = NewVMallocLinuxMemArea(ui32Size, ui32AllocFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_OUT_OF_MEMORY;
++            }
++                      PVRMMapRegisterArea("Import Arena", psLinuxMemArea, ui32AllocFlags);
++            break;
++        }
++        default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSAllocPages: invalid flags 0x%x\n", ui32AllocFlags));
++            *ppvCpuVAddr = NULL;
++            *phOSMemHandle = (IMG_HANDLE)0;
++                      return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++    *phOSMemHandle = psLinuxMemArea;
++    
++    LinuxMemAreaRegister(psLinuxMemArea);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSFreePages(IMG_UINT32 ui32AllocFlags, IMG_UINT32 ui32Bytes, IMG_VOID *pvCpuVAddr, IMG_HANDLE hOSMemHandle)
++{   
++    LinuxMemArea *psLinuxMemArea;
++    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++    
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++    switch(ui32AllocFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++            break;
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        case PVRSRV_HAP_MULTI_PROCESS:
++            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++            {
++                PVR_DPF((PVR_DBG_ERROR,
++                         "OSFreePages(ui32AllocFlags=0x%08X, ui32Bytes=%ld, "
++                                        "pvCpuVAddr=%p, hOSMemHandle=%p) FAILED!",
++                         ui32AllocFlags, ui32Bytes, pvCpuVAddr, hOSMemHandle));
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        default:
++                      PVR_DPF((PVR_DBG_ERROR,"%s: invalid flags 0x%x\n",
++                    __FUNCTION__, ui32AllocFlags));
++            return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++                  IMG_UINT32 ui32ByteOffset,
++                  IMG_UINT32 ui32Bytes,
++                  IMG_UINT32 ui32Flags,
++                  IMG_HANDLE *phOSMemHandleRet)
++{
++    LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++    PVRSRV_ERROR eError = PVRSRV_OK;
++
++    psParentLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++    
++    psLinuxMemArea = NewSubLinuxMemArea(psParentLinuxMemArea, ui32ByteOffset, ui32Bytes);
++    if(!psLinuxMemArea)
++    {
++        *phOSMemHandleRet = NULL;
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++    *phOSMemHandleRet = psLinuxMemArea;
++
++    
++    if(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++    {
++        return PVRSRV_OK;
++    }
++
++    
++    if(psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO)
++    {
++        eError = PVRMMapRegisterArea("Physical",
++                                     psLinuxMemArea,
++                                     0); 
++        if(eError != PVRSRV_OK)
++        {
++            goto failed_register_area;
++        }
++    }
++    else if(psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES)
++    {
++        eError = PVRMMapRegisterArea("Import Arena",
++                                     psLinuxMemArea,
++                                     0); 
++        if(eError != PVRSRV_OK)
++        {
++            goto failed_register_area;
++        }
++    }
++
++    return PVRSRV_OK;
++
++failed_register_area:
++    *phOSMemHandleRet = NULL;
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++    return eError;
++}
++
++PVRSRV_ERROR
++OSReleaseSubMemHandle(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++    LinuxMemArea *psParentLinuxMemArea, *psLinuxMemArea;
++    PVRSRV_ERROR eError;
++    
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++    PVR_ASSERT(psLinuxMemArea->eAreaType == LINUX_MEM_AREA_SUB_ALLOC);
++    
++    psParentLinuxMemArea = psLinuxMemArea->uData.sSubAlloc.psParentLinuxMemArea;
++    
++    if(!(ui32Flags & PVRSRV_HAP_KERNEL_ONLY)
++       && (psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_IO
++           || psParentLinuxMemArea->eAreaType == LINUX_MEM_AREA_ALLOC_PAGES)
++      )
++    {
++        eError = PVRMMapRemoveRegisteredArea(psLinuxMemArea);
++        if(eError != PVRSRV_OK)
++        {
++            return eError;
++        }
++    }
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++IMG_CPU_PHYADDR
++OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++    PVR_ASSERT(hOSMemHandle);
++
++    return LinuxMemAreaToCpuPAddr(hOSMemHandle, ui32ByteOffset);
++}
++
++
++
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMCPY)
++    unsigned char *Src,*Dst;
++    int i;
++
++    Src=(unsigned char *)pvSrc;
++    Dst=(unsigned char *)pvDst;
++    for(i=0;i<ui32Size;i++)
++    {
++        Dst[i]=Src[i];
++    }
++#else
++      memcpy(pvDst, pvSrc, ui32Size);
++#endif
++}
++
++
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
++{
++#if defined(USE_UNOPTIMISED_MEMSET)
++    unsigned char *Buff;
++    int i;
++
++    Buff=(unsigned char *)pvDest;
++    for(i=0;i<ui32Size;i++)
++    {
++        Buff[i]=ui8Value;
++    }
++#else
++      memset(pvDest, (int) ui8Value, (size_t) ui32Size);
++#endif
++}
++
++
++IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
++{
++      return (strcpy(pszDest, pszSrc));
++}
++
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...)
++{
++    va_list argList;
++    IMG_INT32 iCount;
++
++    va_start(argList, pszFormat);
++    iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
++    va_end(argList);
++
++    return iCount;
++}
++
++IMG_VOID OSBreakResourceLock (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++      volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++      if(*pui32Access)
++      {
++              if(psResource->ui32ID == ui32ID)
++              {
++                      psResource->ui32ID = 0;
++                      *pui32Access = 0;
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked for this process.")); 
++              }
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_MESSAGE,"OSBreakResourceLock: Resource is not locked"));
++      }
++}
++
++
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource)
++{
++      psResource->ui32ID = 0;
++      psResource->ui32Lock = 0;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDestroyResource (PVRSRV_RESOURCE *psResource)
++{
++      OSBreakResourceLock (psResource, psResource->ui32ID);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData)
++{
++      ENV_DATA                *psEnvData;
++      
++      
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), (IMG_VOID *)&psEnvData, IMG_NULL) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE, 
++                                      &psEnvData->pvBridgeData, IMG_NULL) != PVRSRV_OK)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), psEnvData, IMG_NULL);
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++
++      
++      psEnvData->bMISRInstalled = IMG_FALSE;
++      psEnvData->bLISRInstalled = IMG_FALSE;
++
++      
++      psEnvData->psPCIDev = NULL;
++
++      
++      *ppvEnvSpecificData = psEnvData;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData)
++{
++      ENV_DATA                *psEnvData = (ENV_DATA*)pvEnvSpecificData;
++
++      PVR_ASSERT(!psEnvData->bMISRInstalled);
++      PVR_ASSERT(!psEnvData->bLISRInstalled);
++      PVR_ASSERT(psEnvData->psPCIDev == NULL);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, 0x1000, psEnvData->pvBridgeData, IMG_NULL);
++
++      OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(ENV_DATA), pvEnvSpecificData, IMG_NULL);
++
++      return PVRSRV_OK;
++}
++
++
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg)
++{
++        struct pci_dev *dev;
++        IMG_UINT32 ui32Value;
++
++        dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++        if (dev)
++        {
++                pci_read_config_dword(dev, (int) ui32Reg, (u32 *) & ui32Value);
++                return (ui32Value);
++        }
++        else
++        {
++                return (0);
++        }
++}
++
++
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value)
++{
++        struct pci_dev *dev;
++
++        dev = pci_find_slot(ui32Bus, PCI_DEVFN(ui32Dev, ui32Func));
++        if (dev)
++        {
++                pci_write_config_dword(dev, (int) ui32Reg, (u32) ui32Value);
++        }
++}
++
++ 
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID)
++{
++      schedule();
++}
++
++
++ 
++IMG_UINT32 OSClockus(IMG_VOID)
++{
++      unsigned long time, j = jiffies;
++
++      time = j * (1000000 / HZ);
++
++      return time;
++}
++
++
++ 
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus)
++{
++      udelay(ui32Timeus);
++}
++
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID)
++{
++      if (in_interrupt())
++      {
++              return KERNEL_ID;
++      }
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
++      return current->pgrp;
++#else
++      return current->signal->pgrp;
++#endif
++}
++
++
++IMG_UINT32 OSGetPageSize(IMG_VOID)
++{
++#if defined(__sh__)
++      IMG_UINT32 ui32ReturnValue = PAGE_SIZE;
++
++      return (ui32ReturnValue);
++#else
++      return PAGE_SIZE;
++#endif
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++static irqreturn_t DeviceISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++              , struct pt_regs *regs
++#endif
++              )
++{
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      IMG_BOOL bStatus = IMG_FALSE;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++      PVR_UNREFERENCED_PARAMETER(regs);
++#endif        
++      psDeviceNode = (PVRSRV_DEVICE_NODE*)dev_id;
++      if(!psDeviceNode)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DeviceISRWrapper: invalid params\n"));
++              goto out;
++      }
++
++      bStatus = PVRSRVDeviceLISR(psDeviceNode);
++
++      if (bStatus)
++      {
++              SYS_DATA *psSysData = psDeviceNode->psSysData;
++              ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++              tasklet_schedule(&psEnvData->sMISRTasklet);
++      }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++      return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++
++
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++              , struct pt_regs *regs
++#endif
++              )
++{
++      SYS_DATA *psSysData;
++      IMG_BOOL bStatus = IMG_FALSE;
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
++      PVR_UNREFERENCED_PARAMETER(regs);
++#endif
++      psSysData = (SYS_DATA *)dev_id;
++      if(!psSysData)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "SystemISRWrapper: invalid params\n"));
++              goto out;
++      }
++
++      bStatus = PVRSRVSystemLISR(psSysData);
++
++      if (bStatus)
++      {
++              ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++              tasklet_schedule(&psEnvData->sMISRTasklet);
++      }
++
++out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
++      return bStatus ? IRQ_HANDLED : IRQ_NONE;
++#endif
++}
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++                                                                      IMG_UINT32 ui32Irq,
++                                                                      IMG_CHAR *pszISRName,
++                                                                      IMG_VOID *pvDeviceNode)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSInstallDeviceLISR: An ISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Installing device LISR %s on IRQ %d with cookie %x", pszISRName, ui32Irq, pvDeviceNode));
++
++      if(request_irq(ui32Irq, DeviceISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++              SA_SHIRQ
++#else
++              IRQF_SHARED
++#endif
++              , pszISRName, pvDeviceNode))
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Couldn't install device LISR on IRQ %d", ui32Irq));
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psEnvData->ui32IRQ = ui32Irq;
++      psEnvData->pvISRCookie = pvDeviceNode;
++      psEnvData->bLISRInstalled = IMG_TRUE;
++
++      return PVRSRV_OK;       
++}
++
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (!psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSUninstallDeviceLISR: No LISR has been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++              
++      PVR_TRACE(("Uninstalling device LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ,  psEnvData->pvISRCookie));
++
++      free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++      psEnvData->bLISRInstalled = IMG_FALSE;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSInstallSystemLISR: An LISR has already been installed: IRQ %d cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Installing system LISR on IRQ %d with cookie %x", ui32Irq, pvSysData));
++
++      if(request_irq(ui32Irq, SystemISRWrapper,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
++              SA_SHIRQ
++#else
++              IRQF_SHARED
++#endif
++              , "PowerVR", pvSysData))
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallSystemLISR: Couldn't install system LISR on IRQ %d", ui32Irq));
++
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psEnvData->ui32IRQ = ui32Irq;
++      psEnvData->pvISRCookie = pvSysData;
++      psEnvData->bLISRInstalled = IMG_TRUE;
++
++      return PVRSRV_OK;       
++}
++
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (!psEnvData->bLISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSUninstallSystemLISR: No LISR has been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Uninstalling system LISR on IRQ %d with cookie %x", psEnvData->ui32IRQ, psEnvData->pvISRCookie));
++
++      free_irq(psEnvData->ui32IRQ, psEnvData->pvISRCookie);
++
++      psEnvData->bLISRInstalled = IMG_FALSE;
++
++      return PVRSRV_OK;
++}
++
++
++static void MISRWrapper(unsigned long data)
++{
++      SYS_DATA *psSysData;
++
++      psSysData = (SYS_DATA *)data;
++      
++      PVRSRVMISR(psSysData);
++}
++
++
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bMISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: An MISR has already been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Installing MISR with cookie %x", pvSysData));
++
++      tasklet_init(&psEnvData->sMISRTasklet, MISRWrapper, (unsigned long)pvSysData);
++
++      psEnvData->bMISRInstalled = IMG_TRUE;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (!psEnvData->bMISRInstalled)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSUninstallMISR: No MISR has been installed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      PVR_TRACE(("Uninstalling MISR"));
++
++      tasklet_kill(&psEnvData->sMISRTasklet);
++
++      psEnvData->bMISRInstalled = IMG_FALSE;
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA*)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA*)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->bMISRInstalled)
++      {
++              tasklet_schedule(&psEnvData->sMISRTasklet);
++      }
++
++      return PVRSRV_OK;       
++}
++
++
++#endif 
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
++#define       OS_TAS(p)       xchg((p), 1)
++#else
++#define       OS_TAS(p)       tas(p)
++#endif
++PVRSRV_ERROR OSLockResource ( PVRSRV_RESOURCE         *psResource,
++                                                              IMG_UINT32                      ui32ID)
++
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(!OS_TAS(&psResource->ui32Lock))
++              psResource->ui32ID = ui32ID;
++      else
++              eError = PVRSRV_ERROR_GENERIC;
++
++      return eError;
++}
++
++
++PVRSRV_ERROR OSUnlockResource (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++      volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(*pui32Access)
++      {
++              if(psResource->ui32ID == ui32ID)
++              {
++                      psResource->ui32ID = 0;
++                      *pui32Access = 0;
++              }
++              else
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked with expected value.", psResource)); 
++                      PVR_DPF((PVR_DBG_MESSAGE,"Should be %x is actually %x", ui32ID, psResource->ui32ID));
++                      eError = PVRSRV_ERROR_GENERIC;
++              }
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSUnlockResource: Resource %p is not locked", psResource));
++              eError = PVRSRV_ERROR_GENERIC;
++      }
++      
++      return eError;
++}
++
++
++IMG_BOOL OSIsResourceLocked (PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID)
++{
++      volatile IMG_UINT32 *pui32Access = (volatile IMG_UINT32 *)&psResource->ui32Lock;
++
++      return  (*(volatile IMG_UINT32 *)pui32Access == 1) && (psResource->ui32ID == ui32ID)
++                      ?       IMG_TRUE
++                      :       IMG_FALSE;
++}
++
++
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID *pvLinAddr)
++{
++    struct page *page;
++    IMG_CPU_PHYADDR CpuPAddr;
++      IMG_UINT32 ui32PageOffset = (IMG_UINT32)pvLinAddr & (PAGE_SIZE - 1);
++
++      page = ConvertKVToPage(pvLinAddr);
++    CpuPAddr.uiAddr = (IMG_UINTPTR_T) page_to_phys(page) + ui32PageOffset;
++
++    return CpuPAddr;
++}
++
++
++IMG_VOID *
++OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
++               IMG_UINT32 ui32Bytes,
++               IMG_UINT32 ui32MappingFlags,
++               IMG_HANDLE *phOSMemHandle)
++{
++    if(phOSMemHandle)
++    {
++        *phOSMemHandle = (IMG_HANDLE)0;
++    }
++
++    if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++    {
++        IMG_VOID *pvIORemapCookie;
++        pvIORemapCookie = IORemapWrapper(BasePAddr, ui32Bytes, ui32MappingFlags);
++        if(pvIORemapCookie == IMG_NULL)
++        {
++            return NULL;
++        }
++        return pvIORemapCookie;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "OSMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++                 " (Use OSReservePhys otherwise)"));
++        *phOSMemHandle = (IMG_HANDLE)0;
++        return NULL;
++    }
++
++    PVR_ASSERT(0);
++    return NULL;
++}
++
++IMG_BOOL
++OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32MappingFlags, IMG_HANDLE hPageAlloc)
++{
++    PVR_TRACE(("%s: unmapping %d bytes from 0x%08x", __FUNCTION__, ui32Bytes, pvLinAddr));
++
++    PVR_UNREFERENCED_PARAMETER(hPageAlloc);   
++
++    if(ui32MappingFlags & PVRSRV_HAP_KERNEL_ONLY)
++    {
++        IOUnmapWrapper(pvLinAddr);
++        return IMG_TRUE;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                     "OSUnMapPhysToLin should only be used with PVRSRV_HAP_KERNEL_ONLY "
++                     " (Use OSUnReservePhys otherwise)"));
++        return IMG_FALSE;
++    }
++
++    PVR_ASSERT(0);
++    return IMG_FALSE;
++}
++
++PVRSRV_ERROR
++OSRegisterMem(IMG_CPU_PHYADDR BasePAddr,
++                        IMG_VOID *pvCPUVAddr,
++              IMG_UINT32 ui32Bytes,
++              IMG_UINT32 ui32MappingFlags,
++              IMG_HANDLE *phOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++
++#if 0
++    
++    if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++    {
++        ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++        ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++    }
++#endif
++
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++        {
++                      psLinuxMemArea = NewExternalKVLinuxMemArea(BasePAddr, pvCPUVAddr, ui32Bytes, ui32MappingFlags);
++              
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        }
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        {
++                      psLinuxMemArea = NewExternalKVLinuxMemArea(BasePAddr, pvCPUVAddr, ui32Bytes, ui32MappingFlags);
++
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            
++#if defined(VIVT_CACHE) || defined(__sh__)
++            
++            ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++                      psLinuxMemArea = NewExternalKVLinuxMemArea(BasePAddr, pvCPUVAddr, ui32Bytes, ui32MappingFlags);
++
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        default:
++            PVR_DPF((PVR_DBG_ERROR,"OSRegisterMem : invalid flags 0x%x\n", ui32MappingFlags));
++            *phOSMemHandle = (IMG_HANDLE)0;
++            return PVRSRV_ERROR_GENERIC;
++    }
++    
++    *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++
++    LinuxMemAreaRegister(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSUnRegisterMem (IMG_VOID *pvCpuVAddr,
++                IMG_UINT32 ui32Bytes,
++                IMG_UINT32 ui32MappingFlags,
++                IMG_HANDLE hOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++            break;
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++            {
++                 PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++                          __FUNCTION__, pvCpuVAddr, ui32Bytes,
++                          ui32MappingFlags, hOSMemHandle));
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        }
++        default:
++        {
++            PVR_DPF((PVR_DBG_ERROR, "OSUnRegisterMem : invalid flags 0x%x", ui32MappingFlags));
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++    }
++
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++OSReservePhys(IMG_CPU_PHYADDR BasePAddr,
++              IMG_UINT32 ui32Bytes,
++              IMG_UINT32 ui32MappingFlags,
++              IMG_VOID **ppvCpuVAddr,
++              IMG_HANDLE *phOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++
++#if 0
++    
++    if(ui32MappingFlags & PVRSRV_HAP_SINGLE_PROCESS)
++    {
++        ui32MappingFlags &= ~PVRSRV_HAP_SINGLE_PROCESS;
++        ui32MappingFlags |= PVRSRV_HAP_MULTI_PROCESS;
++    }
++#endif
++
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++        {
++            
++            psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++                      }
++            break;
++        }
++        case PVRSRV_HAP_SINGLE_PROCESS:
++              {
++            
++            psLinuxMemArea = NewIOLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++                      }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            
++#if defined(VIVT_CACHE) || defined(__sh__)
++            
++            ui32MappingFlags &= ~PVRSRV_HAP_CACHED;
++#endif
++            psLinuxMemArea = NewIORemapLinuxMemArea(BasePAddr, ui32Bytes, ui32MappingFlags);
++            if(!psLinuxMemArea)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            PVRMMapRegisterArea("Physical", psLinuxMemArea, ui32MappingFlags);
++            break;
++        }
++        default:
++            PVR_DPF((PVR_DBG_ERROR,"OSMapPhysToLin : invalid flags 0x%x\n", ui32MappingFlags));
++            *ppvCpuVAddr = NULL;
++            *phOSMemHandle = (IMG_HANDLE)0;
++            return PVRSRV_ERROR_GENERIC;
++      }
++
++    *phOSMemHandle = (IMG_HANDLE)psLinuxMemArea;
++    *ppvCpuVAddr = LinuxMemAreaToCpuVAddr(psLinuxMemArea);
++
++    LinuxMemAreaRegister(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++OSUnReservePhys(IMG_VOID *pvCpuVAddr,
++                IMG_UINT32 ui32Bytes,
++                IMG_UINT32 ui32MappingFlags,
++                IMG_HANDLE hOSMemHandle)
++{
++    LinuxMemArea *psLinuxMemArea;
++    PVR_UNREFERENCED_PARAMETER(pvCpuVAddr);
++
++    psLinuxMemArea = (LinuxMemArea *)hOSMemHandle;
++    
++    switch(ui32MappingFlags & PVRSRV_HAP_MAPTYPE_MASK)
++    {
++        case PVRSRV_HAP_KERNEL_ONLY:
++            break;
++        case PVRSRV_HAP_SINGLE_PROCESS:
++        case PVRSRV_HAP_MULTI_PROCESS:
++        {
++            if(PVRMMapRemoveRegisteredArea(psLinuxMemArea) != PVRSRV_OK)
++            {
++                 PVR_DPF((PVR_DBG_ERROR, "%s(%p, %d, 0x%08X, %p) FAILED!",
++                          __FUNCTION__, pvCpuVAddr, ui32Bytes,
++                          ui32MappingFlags, hOSMemHandle));
++                return PVRSRV_ERROR_GENERIC;
++            }
++            break;
++        }
++        default:
++        {
++            PVR_DPF((PVR_DBG_ERROR, "OSUnMapPhysToLin : invalid flags 0x%x", ui32MappingFlags));
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++    }
++    
++    LinuxMemAreaDeepFree(psLinuxMemArea);
++
++    return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr)
++{
++    PVR_DPF((PVR_DBG_ERROR, "%s: Not available on Linux\n", __FUNCTION__));
++    return PVRSRV_ERROR_OUT_OF_MEMORY;
++
++    
++#if 0
++    unsigned long ulOrder;
++    struct page *psPage;
++    
++    ui32Size = PAGE_ALIGN(ui32Size);
++    
++    ulOrder = get_order(ui32Size);
++    
++    psPage = alloc_pages(GFP_KERNEL, ulOrder);
++    if(!psPage)
++    {
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++    *pLinAddr = page_address(psPage);
++    pPhysAddr->uiAddr = page_to_phys(psPage);
++    
++    
++    while(ui32Size > 0)
++    {
++        SetPageReserved(psPage);
++        psPage++;
++        ui32Size -= PAGE_SIZE;
++    }
++    
++    return PVRSRV_OK;
++#endif
++}
++
++
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr)
++{
++    return PVRSRV_OK;
++#if 0
++    unsigned long ulOrder;
++    struct page *psPage;
++    
++    ui32Size = PAGE_ALIGN(ui32Size);
++    ulOrder = get_order(ui32Size);
++
++    psPage = virt_to_page((IMG_VOID *)LinAddr);
++
++    while(ui32Size > 0)
++    {
++        ClearPageReserved(psPage);
++        psPage++;
++        ui32Size -= PAGE_SIZE;
++    }
++
++    __free_pages(psPage, ulOrder);
++
++    return PVRSRV_OK;
++#endif
++}
++
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++      return (IMG_UINT32) readl(pvLinRegBaseAddr+ui32Offset);
++}
++
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++      writel(ui32Value, pvLinRegBaseAddr+ui32Offset);
++}
++
++#if defined(CONFIG_PCI) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0))
++PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      int err;
++      IMG_UINT32 i;
++
++      if (psEnvData->psPCIDev != NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: A device has already been acquired"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      psEnvData->psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, psEnvData->psPCIDev);
++      if (psEnvData->psPCIDev == NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't acquire device"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      err = pci_enable_device(psEnvData->psPCIDev);
++      if (err != 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAcquireDev: Couldn't enable device (%d)", err));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if (eFlags & HOST_PCI_INIT_FLAG_BUS_MASTER)
++              pci_set_master(psEnvData->psPCIDev);
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++      {
++              psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++      }
++
++      return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->psPCIDev == NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIIRQ: Device hasn't been acquired"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      *pui32IRQ = psEnvData->psPCIDev->irq;
++
++      return PVRSRV_OK;
++}
++
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++      HOST_PCI_ADDR_RANGE_FUNC_LEN,
++      HOST_PCI_ADDR_RANGE_FUNC_START,
++      HOST_PCI_ADDR_RANGE_FUNC_END,
++      HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++      HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
++static IMG_UINT32 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++                                                                       IMG_VOID *pvSysData,
++                                                                       IMG_UINT32 ui32Index
++                                                                       
++)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++
++      if (psEnvData->psPCIDev == NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Device hasn't been acquired"));
++              return 0;
++      }
++
++      if (ui32Index >= DEVICE_COUNT_RESOURCE)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Index out of range"));
++              return 0;
++
++      }
++
++      switch (eFunc)
++      {
++              case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++                      return pci_resource_len(psEnvData->psPCIDev, ui32Index);
++              case HOST_PCI_ADDR_RANGE_FUNC_START:
++                      return pci_resource_start(psEnvData->psPCIDev, ui32Index);
++              case HOST_PCI_ADDR_RANGE_FUNC_END:
++                      return pci_resource_end(psEnvData->psPCIDev, ui32Index);
++              case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++              {
++
++                      
++#ifdef FIXME
++                      int err;
++                      err = pci_request_region(psEnvData->psPCIDev, ui32Index, "PowerVR");
++                      if (err != 0)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: pci_request_region_failed (%d)", err));
++                              return 0;
++                      }
++#endif
++                      psEnvData->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++                      return 1;
++              }
++              case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++                      if (psEnvData->abPCIResourceInUse[ui32Index])
++                      {
++                              pci_release_region(psEnvData->psPCIDev, ui32Index);
++                              psEnvData->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++                      }
++                      return 1;
++              default:
++                      PVR_DPF((PVR_DBG_ERROR, "OSPCIAddrRangeFunc: Unknown function"));
++                      break;
++      }
++
++      return 0;
++}
++
++IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, pvSysData, ui32Index); 
++}
++
++IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, pvSysData, ui32Index); 
++}
++
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, pvSysData, ui32Index); 
++}
++
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData,
++                                                                 IMG_UINT32 ui32Index
++                                                                 
++)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index)
++{
++      return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, pvSysData, ui32Index) == 0 ? PVRSRV_ERROR_GENERIC : PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData)
++{
++      SYS_DATA *psSysData = (SYS_DATA *)pvSysData;
++      ENV_DATA *psEnvData = (ENV_DATA *)psSysData->pvEnvSpecificData;
++      int i;
++
++      if (psEnvData->psPCIDev == NULL)
++      {
++              return PVRSRV_OK;
++      }
++
++      
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++      {
++              if (psEnvData->abPCIResourceInUse[i])
++              {
++                      PVR_TRACE(("OSPCIReleaseDev: Releasing Address range %d", i));
++                      pci_release_region(psEnvData->psPCIDev, i);
++                      psEnvData->abPCIResourceInUse[i] = IMG_FALSE;
++              }
++      }
++
++      pci_disable_device(psEnvData->psPCIDev);
++
++      psEnvData->psPCIDev = NULL;
++
++      return PVRSRV_OK;
++}
++#endif 
++
++typedef struct TIMER_CALLBACK_DATA_TAG
++{
++      PFN_TIMER_FUNC          pfnTimerFunc;
++      IMG_VOID                *pvData;        
++      struct timer_list       sTimer;
++      IMG_UINT32              ui32Delay;
++      IMG_BOOL                bActive;
++}TIMER_CALLBACK_DATA;
++
++static IMG_VOID OSTimerCallbackWrapper(IMG_UINT32 ui32Data)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)ui32Data;
++      
++      if (!psTimerCBData->bActive)
++              return;
++
++      
++      psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
++      
++      
++      mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
++}
++
++
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData;
++      
++      
++      if(!pfnTimerFunc)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));                
++              return IMG_NULL;                
++      }
++      
++      
++      if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                      sizeof(TIMER_CALLBACK_DATA), 
++                                      (IMG_VOID **)&psTimerCBData, IMG_NULL) != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: failed to allocate memory for TIMER_CALLBACK_DATA"));              
++              return IMG_NULL;        
++      }
++
++      psTimerCBData->pfnTimerFunc = pfnTimerFunc;
++      psTimerCBData->pvData = pvData;
++      psTimerCBData->bActive = IMG_TRUE;
++      
++      
++
++
++      psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
++                                                              ?       1
++                                                              :       ((HZ * ui32MsTimeout) / 1000);
++      
++      init_timer(&psTimerCBData->sTimer);
++      
++      
++      psTimerCBData->sTimer.function = OSTimerCallbackWrapper;
++      psTimerCBData->sTimer.data = (IMG_UINT32)psTimerCBData;
++      psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
++      
++      
++      add_timer(&psTimerCBData->sTimer);
++      
++      return (IMG_HANDLE)psTimerCBData;
++}
++
++
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
++{
++      TIMER_CALLBACK_DATA     *psTimerCBData = (TIMER_CALLBACK_DATA*)hTimer;
++      
++      
++      psTimerCBData->bActive = IMG_FALSE;
++
++      
++      del_timer_sync(&psTimerCBData->sTimer); 
++      
++      
++      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(TIMER_CALLBACK_DATA), psTimerCBData, IMG_NULL);
++      
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, PVRSRV_EVENTOBJECT *psEventObject)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(psEventObject)
++      {
++              struct completion *psCompletion;
++
++              if(pszName)
++              {
++                      
++                      strncpy(psEventObject->szName, pszName, EVENTOBJNAME_MAXLENGTH);
++              }
++              else
++              {
++                              
++                      static IMG_UINT16 ui16NameIndex = 0;                    
++                      snprintf(psEventObject->szName, EVENTOBJNAME_MAXLENGTH, "PVRSRV_EVENTOBJECT_%d", ui16NameIndex++);
++              }
++              
++              
++              if(OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, 
++                                      sizeof(struct completion), 
++                                      (IMG_VOID **)&psCompletion, IMG_NULL) != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: failed to allocate memory for completion variable"));             
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;      
++              }
++
++              init_completion(psCompletion);
++      
++              psEventObject->hOSEventKM = (IMG_HANDLE) psCompletion;
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if(psEventObject)
++      {
++              if(psEventObject->hOSEventKM)
++              {
++                      struct completion *psCompletion = (struct completion *) psEventObject->hOSEventKM;
++                      OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(struct completion), psCompletion, IMG_NULL);
++              }
++              else
++              {
++          PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hOSEventKM is not a valid pointer"));
++              eError = PVRSRV_ERROR_INVALID_PARAMS;
++              }
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: psEventObject is not a valid pointer"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(hOSEventKM)
++      {
++              LinuxUnLockMutex(&gPVRSRVLock);         
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10))             
++              wait_for_completion_timeout((struct completion *)hOSEventKM, msecs_to_jiffies(ui32MSTimeout));
++#else
++              wait_for_completion((struct completion *)hOSEventKM);
++#endif        
++              LinuxLockMutex(&gPVRSRVLock);
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: hOSEventKM is not a valid handle"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++      
++      if(hOSEventKM)
++      {
++              complete_all((struct completion *) hOSEventKM);         
++      }
++      else
++      {
++        PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hOSEventKM is not a valid handle"));
++        eError = PVRSRV_ERROR_INVALID_PARAMS;
++      }
++      
++      return eError;
++}
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID)
++{
++      return capable(CAP_SYS_MODULE) != 0;
++}
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, 
++                          IMG_VOID *pvDest, 
++                          IMG_VOID *pvSrc, 
++                          IMG_UINT32 ui32Bytes)
++{
++      PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++      if(copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
++              return PVRSRV_OK;
++      else
++              return PVRSRV_ERROR_GENERIC;
++}
++
++PVRSRV_ERROR OSCopyFromUser( IMG_PVOID pvProcess, 
++                             IMG_VOID *pvDest, 
++                             IMG_VOID *pvSrc, 
++                             IMG_UINT32 ui32Bytes)
++{
++      PVR_UNREFERENCED_PARAMETER(pvProcess);
++
++      if(copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
++              return PVRSRV_OK;
++      else
++              return PVRSRV_ERROR_GENERIC;
++}
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes)
++{
++      int linuxType;
++
++      if(eVerification == PVR_VERIFY_READ)
++              linuxType = VERIFY_READ;
++      else if(eVerification == PVR_VERIFY_WRITE)
++              linuxType = VERIFY_WRITE;
++      else
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: Unknown eVerification", __FUNCTION__));
++              return IMG_FALSE;
++      }
++      return (IMG_BOOL)access_ok(linuxType, pvUserPtr, ui32Bytes);
++}
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pdump.c        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,1388 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined (PDUMP)
++#include "sgxdefs.h"
++#include "services_headers.h"
++
++#include "pvr_debug.h"
++
++#include "dbgdrvif.h"
++#include "sgxmmu.h"
++#include "mm.h"
++#include "pdump_km.h"
++
++#include <linux/tty.h>                        
++
++static IMG_BOOL PDumpWriteString2             (IMG_CHAR * pszString, IMG_UINT32 ui32Flags);
++static IMG_BOOL PDumpWriteILock                       (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags);
++static IMG_VOID DbgSetFrame                           (PDBG_STREAM psStream, IMG_UINT32 ui32Frame);
++static IMG_UINT32 DbgGetFrame                 (PDBG_STREAM psStream);
++static IMG_VOID DbgSetMarker                  (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
++static IMG_UINT32 DbgWrite                            (PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags);
++
++#define PDUMP_DATAMASTER_PIXEL                (1)
++
++#define MIN(a,b)       (((a) > (b)) ? (b) : (a))
++
++#define MAX_FILE_SIZE 0x40000000
++
++static PDBGKM_SERVICE_TABLE gpfnDbgDrv = IMG_NULL;
++
++#define PDUMP_STREAM_PARAM2                   0
++#define PDUMP_STREAM_SCRIPT2          1
++#define PDUMP_STREAM_DRIVERINFO               2
++#define PDUMP_NUM_STREAMS                     3
++
++
++
++IMG_CHAR *pszStreamName[PDUMP_NUM_STREAMS] = {        "ParamStream2",
++                                                                                              "ScriptStream2",
++                                                                                              "DriverInfoStream"};
++
++#define __PDBG_PDUMP_STATE_GET_MSG_STRING(ERROR) \
++      IMG_CHAR *pszMsg = gsDBGPdumpState.pszMsg; \
++      if(!pszMsg) return ERROR
++
++#define __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(ERROR) \
++      IMG_CHAR *pszScript = gsDBGPdumpState.pszScript; \
++      if(!pszScript) return ERROR
++
++#define __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(ERROR) \
++      IMG_CHAR *pszScript = gsDBGPdumpState.pszScript; \
++      IMG_CHAR *pszFile = gsDBGPdumpState.pszFile; \
++      if(!pszScript || !pszFile) return ERROR
++
++typedef struct PDBG_PDUMP_STATE_TAG 
++{
++      PDBG_STREAM psStream[PDUMP_NUM_STREAMS];
++      IMG_UINT32 ui32ParamFileNum;
++
++      IMG_CHAR *pszMsg;
++      IMG_CHAR *pszScript;
++      IMG_CHAR *pszFile;
++
++} PDBG_PDUMP_STATE;
++
++static PDBG_PDUMP_STATE gsDBGPdumpState = {{IMG_NULL}, 0, IMG_NULL, IMG_NULL, IMG_NULL};
++
++#define SZ_MSG_SIZE_MAX                       PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_SCRIPT_SIZE_MAX            PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++#define SZ_FILENAME_SIZE_MAX  PVRSRV_PDUMP_MAX_COMMENT_SIZE-1
++
++
++
++
++void DBGDrvGetServiceTable(IMG_VOID **fn_table);
++
++IMG_VOID PDumpInit(IMG_VOID)
++{     
++      IMG_UINT32 i=0;
++
++      
++      if (!gpfnDbgDrv)
++      {
++              DBGDrvGetServiceTable((IMG_VOID **)&gpfnDbgDrv);
++
++              
++
++              
++              if (gpfnDbgDrv == IMG_NULL)
++              {       
++                      return;
++              }
++      
++              if(!gsDBGPdumpState.pszFile)
++              {
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszFile, 0) != PVRSRV_OK)
++                      {
++                              goto init_failed;
++                      }
++              }       
++              
++              if(!gsDBGPdumpState.pszMsg)
++              {
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszMsg, 0) != PVRSRV_OK)
++                      {
++                              goto init_failed;
++                      }
++              }
++              
++              if(!gsDBGPdumpState.pszScript)
++              {
++                      if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID *)&gsDBGPdumpState.pszScript, 0) != PVRSRV_OK)
++                      {
++                              goto init_failed;               
++                      }
++              }
++              
++              for(i=0; i < PDUMP_NUM_STREAMS; i++)
++              {
++                      gsDBGPdumpState.psStream[i] = gpfnDbgDrv->pfnCreateStream(pszStreamName[i], 
++                                                                                                              DEBUG_CAPMODE_FRAMED, 
++                                                                                                              DEBUG_OUTMODE_STREAMENABLE, 
++                                                                                                              0,
++                                                                                                              10);
++                      
++                      gpfnDbgDrv->pfnSetCaptureMode(gsDBGPdumpState.psStream[i],DEBUG_CAPMODE_FRAMED,0xFFFFFFFF, 0xFFFFFFFF, 1);
++                      gpfnDbgDrv->pfnSetFrame(gsDBGPdumpState.psStream[i],0);
++              }
++
++              PDUMPCOMMENT("Start of Init Phase");
++      }
++
++      return;
++
++init_failed:  
++
++      if(gsDBGPdumpState.pszFile)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++              gsDBGPdumpState.pszFile = IMG_NULL;
++      }
++      
++      if(gsDBGPdumpState.pszScript)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++              gsDBGPdumpState.pszScript = IMG_NULL;
++      }
++
++      if(gsDBGPdumpState.pszMsg)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++              gsDBGPdumpState.pszMsg = IMG_NULL;
++      }
++
++      gpfnDbgDrv = IMG_NULL;
++}
++
++
++IMG_VOID PDumpDeInit(IMG_VOID)
++{     
++      IMG_UINT32 i=0;
++
++      for(i=0; i < PDUMP_NUM_STREAMS; i++)
++      {
++              gpfnDbgDrv->pfnDestroyStream(gsDBGPdumpState.psStream[i]);
++      }
++
++      if(gsDBGPdumpState.pszFile)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_FILENAME_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszFile, 0);
++              gsDBGPdumpState.pszFile = IMG_NULL;
++      }
++      
++      if(gsDBGPdumpState.pszScript)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_SCRIPT_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszScript, 0);
++              gsDBGPdumpState.pszScript = IMG_NULL;
++      }
++
++      if(gsDBGPdumpState.pszMsg)
++      {
++              OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, SZ_MSG_SIZE_MAX, (IMG_PVOID) gsDBGPdumpState.pszMsg, 0);
++              gsDBGPdumpState.pszMsg = IMG_NULL;
++      }
++
++      gpfnDbgDrv = IMG_NULL;
++}
++
++IMG_VOID PDumpEndInitPhase(IMG_VOID)
++{
++      IMG_UINT32 i;
++      
++      PDUMPCOMMENT("End of Init Phase");
++
++      for(i=0; i < PDUMP_NUM_STREAMS; i++)
++      {
++              gpfnDbgDrv->pfnEndInitPhase(gsDBGPdumpState.psStream[i]);
++      }
++}
++
++void PDumpComment(IMG_CHAR *pszFormat, ...)
++{
++      __PDBG_PDUMP_STATE_GET_MSG_STRING();    
++
++      
++      vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, (IMG_CHAR *) (&pszFormat + 1));
++
++      PDumpCommentKM(pszMsg, PDUMP_FLAGS_CONTINUOUS);
++}
++
++void PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
++{     
++      __PDBG_PDUMP_STATE_GET_MSG_STRING();
++
++      
++      vsnprintf(pszMsg, SZ_MSG_SIZE_MAX, pszFormat, (IMG_CHAR *) (&pszFormat + 1));
++
++      PDumpCommentKM(pszMsg, ui32Flags);
++}
++
++IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID)
++{
++      return gpfnDbgDrv->pfnIsLastCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++}
++
++
++IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID)
++{
++      return gpfnDbgDrv->pfnIsCaptureFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2], IMG_FALSE);
++}
++
++PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32Reg, IMG_UINT32 ui32Data, IMG_UINT32 ui32Flags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++void PDumpReg(IMG_UINT32 ui32Reg,IMG_UINT32 ui32Data)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "WRW :SGXREG:0x%8.8lX 0x%8.8lX\r\n", ui32Reg, ui32Data);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++}
++
++PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Flags)
++{
++      #define POLL_DELAY                      1000
++      #define POLL_COUNT_LONG         (2000000000 / POLL_DELAY)
++      #define POLL_COUNT_SHORT        (1000000 / POLL_DELAY)
++
++      IMG_UINT32      ui32PollCount;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++      if (((ui32RegAddr == EUR_CR_EVENT_STATUS) && 
++              (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_TA_FINISHED_MASK)) ||
++              ((ui32RegAddr == EUR_CR_EVENT_STATUS) && 
++              (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK)) ||
++              ((ui32RegAddr == EUR_CR_EVENT_STATUS) && 
++              (ui32RegValue & ui32Mask & EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK)))
++      {
++              ui32PollCount = POLL_COUNT_LONG;
++      }
++      else
++      {
++              ui32PollCount = POLL_COUNT_SHORT;
++      }
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "POL :SGXREG:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %lu %d\r\n", ui32RegAddr, ui32RegValue, ui32Mask, 0, ui32PollCount, POLL_DELAY);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue, IMG_UINT32 ui32Mask)
++{
++      return PDumpRegPolWithFlagsKM(ui32RegAddr, ui32RegValue, ui32Mask, PDUMP_FLAGS_CONTINUOUS);
++}
++
++IMG_VOID PDumpMallocPages (PVRSRV_DEVICE_TYPE eDeviceType,
++                           IMG_UINT32         ui32DevVAddr,
++                           IMG_CPU_VIRTADDR   pvLinAddr,
++                           IMG_HANDLE         hOSMemHandle,
++                           IMG_UINT32         ui32NumBytes,
++                           IMG_HANDLE         hUniqueTag)
++{
++    IMG_UINT32      ui32Offset;
++      IMG_UINT32              ui32NumPages;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_UINT32              ui32Page;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++      PVR_UNREFERENCED_PARAMETER(pvLinAddr);
++
++
++      PVR_ASSERT(((IMG_UINT32) ui32DevVAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(hOSMemHandle);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- MALLOC :SGXMEM:VA_%8.8lX 0x%8.8lX %d\r\n", ui32DevVAddr, ui32NumBytes, SGX_MMU_PAGE_SIZE);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      
++
++      ui32Offset = 0;
++      ui32NumPages    = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      while (ui32NumPages--)
++      {
++              sCpuPAddr   = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
++              PVR_ASSERT((sCpuPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++              ui32Offset  += SGX_MMU_PAGE_SIZE;
++              sDevPAddr       = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++              ui32Page        = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++
++              snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "MALLOC :SGXMEM:PA_%p%8.8lX %d %d 0x%8.8lX\r\n", hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, ui32Page * SGX_MMU_PAGE_SIZE);
++              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++      }
++}
++
++IMG_VOID PDumpMallocPageTable (PVRSRV_DEVICE_TYPE eDeviceType,
++                               IMG_CPU_VIRTADDR   pvLinAddr,
++                               IMG_UINT32         ui32NumBytes,
++                               IMG_HANDLE         hUniqueTag)
++{
++      IMG_PUINT8              pui8LinAddr;
++      IMG_UINT32              ui32NumPages;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_UINT32              ui32Page;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- MALLOC :SGXMEM:PAGE_TABLE 0x%8.8lX %d\r\n", ui32NumBytes, SGX_MMU_PAGE_SIZE);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      
++
++      pui8LinAddr             = (IMG_PUINT8) pvLinAddr;
++      ui32NumPages    = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      while (ui32NumPages--)
++      {
++              sCpuPAddr       = OSMapLinToCPUPhys(pui8LinAddr);
++              sDevPAddr       = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++              ui32Page        = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++              pui8LinAddr     += SGX_MMU_PAGE_SIZE;
++
++              snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "MALLOC :SGXMEM:PA_%p%8.8lX 0x%x %d 0x%8.8lX\r\n", hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, ui32Page * SGX_MMU_PAGE_SIZE);
++              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++      }
++}
++
++/* PDumpFreePages:
++ * Emit a PDump "FREE" record for each device page backing the virtual
++ * range [sDevVAddr, sDevVAddr + ui32NumBytes) of heap psBMHeap.
++ * When bInterleaved is set, only even-numbered pages are freed; odd
++ * pages are skipped (presumably they were never dumped as MALLOC by
++ * the allocator -- TODO confirm against the matching alloc path).
++ * Both sDevVAddr and ui32NumBytes must be MMU-page aligned (asserted).
++ */
++IMG_VOID PDumpFreePages       (BM_HEAP                        *psBMHeap,
++                         IMG_DEV_VIRTADDR  sDevVAddr,
++                         IMG_UINT32        ui32NumBytes,
++                         IMG_HANDLE        hUniqueTag,
++                                               IMG_BOOL                  bInterleaved)
++{
++      IMG_UINT32 ui32NumPages, ui32PageCounter;
++      IMG_DEV_PHYADDR sDevPAddr;
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      PVR_ASSERT(((IMG_UINT32) sDevVAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      /* Script comment naming the virtual allocation being freed. */
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- FREE :SGXMEM:VA_%8.8lX\r\n", sDevVAddr.uiAddr);
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      /* Walk the range a page at a time, resolving each device-virtual
++       * page to its physical address through the MMU heap before emitting
++       * the per-page FREE record. */
++
++      ui32NumPages = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      psDeviceNode = psBMHeap->pBMContext->psDeviceNode;      
++      for (ui32PageCounter = 0; ui32PageCounter < ui32NumPages; ui32PageCounter++)
++      {
++              if (!bInterleaved || (ui32PageCounter % 2) == 0)
++              {
++                      sDevPAddr = psDeviceNode->pfnMMUGetPhysPageAddr(psBMHeap->pMMUHeap, sDevVAddr);
++
++                      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "FREE :SGXMEM:PA_%p%8.8lX\r\n", hUniqueTag, sDevPAddr.uiAddr);
++                      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++              }
++              else
++              {
++                      /* Interleaved layout: skip the odd page. */
++              }
++
++              sDevVAddr.uiAddr += SGX_MMU_PAGE_SIZE;
++      }
++}
++
++/* PDumpFreePageTable:
++ * Emit PDump "FREE" records for the physical pages backing a page table
++ * mapped at CPU-virtual address pvLinAddr.  Each page's CPU physical
++ * address is translated to a device physical address for the record.
++ * pvLinAddr and ui32NumBytes must be MMU-page aligned (asserted).
++ */
++IMG_VOID PDumpFreePageTable   (PVRSRV_DEVICE_TYPE eDeviceType,
++                                                       IMG_CPU_VIRTADDR   pvLinAddr,
++                                                       IMG_UINT32         ui32NumBytes,
++                                                       IMG_HANDLE         hUniqueTag)
++{
++      IMG_PUINT8              pui8LinAddr;
++      IMG_UINT32              ui32NumPages;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_UINT32              ui32Page;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      PVR_ASSERT(((IMG_UINT32) pvLinAddr & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++      PVR_ASSERT(((IMG_UINT32) ui32NumBytes & (SGX_MMU_PAGE_SIZE - 1)) == 0);
++
++      /* Script comment marking the start of the page-table free. */
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "-- FREE :SGXMEM:PAGE_TABLE\r\n");
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      /* One FREE record per physical page of the table; the page frame
++       * number is recovered from the device physical address. */
++
++      pui8LinAddr             = (IMG_PUINT8) pvLinAddr;
++      ui32NumPages    = ui32NumBytes >> SGX_MMU_PAGE_SHIFT;
++      while (ui32NumPages--)
++      {
++              sCpuPAddr       = OSMapLinToCPUPhys(pui8LinAddr);
++              sDevPAddr       = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++              ui32Page        = sDevPAddr.uiAddr >> SGX_MMU_PAGE_SHIFT;
++              pui8LinAddr     += SGX_MMU_PAGE_SIZE;
++
++              snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "FREE :SGXMEM:PA_%p%8.8lX\r\n", hUniqueTag, ui32Page * SGX_MMU_PAGE_SIZE);
++              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++      }
++}
++
++/* PDumpPDReg:
++ * Emit a continuous "WRW" record that writes the page-directory value
++ * ui32Data (expressed as a tagged physical page plus intra-page offset)
++ * into SGX register ui32Reg.
++ */
++IMG_VOID PDumpPDReg   (IMG_UINT32 ui32Reg,
++                                       IMG_UINT32 ui32Data,
++                                       IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32 ui32PageAddr;
++      IMG_UINT32 ui32ByteOffset;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      /* Split the value into its page-aligned part and in-page offset. */
++      ui32PageAddr   = ui32Data & ~(SGX_MMU_PAGE_SIZE - 1);
++      ui32ByteOffset = ui32Data & (SGX_MMU_PAGE_SIZE - 1);
++
++      snprintf(pszScript,
++                      SZ_SCRIPT_SIZE_MAX,
++                      "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                      ui32Reg, hUniqueTag, ui32PageAddr, ui32ByteOffset);
++
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++}
++
++/* PDumpPDRegWithFlags:
++ * As PDumpPDReg, but the caller supplies the PDump flags for the write
++ * instead of the record being unconditionally continuous.
++ */
++IMG_VOID PDumpPDRegWithFlags(IMG_UINT32 ui32Reg,
++                                                       IMG_UINT32 ui32Data,
++                                                       IMG_UINT32     ui32Flags,
++                                                       IMG_HANDLE hUniqueTag)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      /* WRW of the tagged page address plus in-page offset to the register. */
++
++      snprintf        (pszScript,
++                      SZ_SCRIPT_SIZE_MAX,
++                       "WRW :SGXREG:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                       ui32Reg,
++                       hUniqueTag,
++                       ui32Data & ~(SGX_MMU_PAGE_SIZE - 1),
++                       ui32Data & (SGX_MMU_PAGE_SIZE - 1));
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
++/* PDumpMemPolKM:
++ * Emit a "POL" (poll) record against a 32-bit word at ui32Offset inside
++ * the allocation described by psMemInfo.  The replay tool polls the
++ * location until (value & ui32Mask) compares to ui32Value under
++ * eOperator, retrying up to MEMPOLL_COUNT times at MEMPOLL_DELAY
++ * intervals.  bLastFrame / bOverwrite map to the LASTFRAME /
++ * RESETLFBUFFER write flags.  Always returns PVRSRV_OK.
++ */
++PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO             *psMemInfo,
++                                                 IMG_UINT32                   ui32Offset,
++                                                 IMG_UINT32                   ui32Value,
++                                                 IMG_UINT32                   ui32Mask,
++                                                 PDUMP_POLL_OPERATOR  eOperator,
++                                                 IMG_BOOL                             bLastFrame,
++                                                 IMG_BOOL                             bOverwrite,
++                                                 IMG_HANDLE                   hUniqueTag)
++{
++      #define MEMPOLL_DELAY           (1000)
++      #define MEMPOLL_COUNT           (2000000000 / MEMPOLL_DELAY)
++      
++      IMG_UINT32                      ui32PageOffset;
++      IMG_DEV_PHYADDR         sDevPAddr;
++      IMG_DEV_VIRTADDR        sDevVPageAddr;
++    IMG_CPU_PHYADDR     CpuPAddr;
++      IMG_UINT32                      ui32Flags;
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++      
++      /* The polled word must lie wholly inside the allocation. */
++      PVR_ASSERT((ui32Offset + sizeof(IMG_UINT32)) <= psMemInfo->ui32AllocSize);
++      
++      /* Parameter-file name; a non-zero file number selects a split file. */
++      if (gsDBGPdumpState.ui32ParamFileNum == 0)
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++      }
++      else
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++      }
++
++      /* Translate the boolean options into PDump write flags. */
++      ui32Flags = 0;
++      
++      if (bLastFrame)
++      {
++              ui32Flags |= PDUMP_FLAGS_LASTFRAME;
++      }
++
++      if (bOverwrite)
++      {
++              ui32Flags |= PDUMP_FLAGS_RESETLFBUFFER;
++      }
++
++      /* Resolve the device physical address of the polled word:
++       * take the CPU-physical in-page offset, back up to the start of the
++       * device-virtual page, look up its physical page, then re-apply the
++       * offset. */
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++    ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++      
++      
++      sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32Offset - ui32PageOffset;
++      
++      
++      BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++      
++      
++      sDevPAddr.uiAddr += ui32PageOffset;
++      
++      snprintf(pszScript,
++                       SZ_SCRIPT_SIZE_MAX,
++                       "POL :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %d %d %d\r\n",
++                       hUniqueTag,
++                       sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                       sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                       ui32Value,
++                       ui32Mask,
++                       eOperator,
++                       MEMPOLL_COUNT,
++                       MEMPOLL_DELAY);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++/* PDumpMemKM:
++ * Capture ui32Bytes of an allocation into the PDump parameter stream and
++ * emit the matching "LDB" (load-binary) script records, one per physical
++ * page spanned, so the replay tool can reload the data.
++ *
++ * Data source selection (first match wins):
++ *   - pvAltLinAddr          : caller-supplied linear copy of the data;
++ *   - psMemInfo->pvLinAddrKM: kernel linear mapping of the allocation;
++ *   - otherwise the backing LinuxMemArea is walked page by page, using a
++ *     transient ioremap (IO areas) or kmap (ALLOC_PAGES areas).
++ *
++ * Returns PVRSRV_OK, or PVRSRV_ERROR_GENERIC if a stream write or a page
++ * mapping fails.
++ *
++ * Fix vs. previous revision: the NULL check on KMapWrapper()'s result is
++ * now performed BEFORE the intra-page offset is added; previously a NULL
++ * mapping plus a non-zero offset produced a bogus non-NULL pointer that
++ * defeated the check.
++ */
++PVRSRV_ERROR PDumpMemKM(IMG_PVOID pvAltLinAddr,
++                                              PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                              IMG_UINT32 ui32Offset,
++                                              IMG_UINT32 ui32Bytes,
++                                              IMG_UINT32 ui32Flags,
++                                              IMG_HANDLE hUniqueTag)
++{
++      IMG_UINT32 ui32PageByteOffset;
++      IMG_UINT8* pui8DataLinAddr;
++      IMG_DEV_VIRTADDR sDevVPageAddr;
++      IMG_DEV_VIRTADDR sDevVAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_CPU_PHYADDR CpuPAddr;
++      IMG_UINT32 ui32ParamOutPos;
++      IMG_UINT32 ui32CurrentOffset;
++      IMG_UINT32 ui32BytesRemaining;
++      LinuxMemArea *psLinuxMemArea;
++      LINUX_MEM_AREA_TYPE eRootAreaType;
++      IMG_CHAR *pui8TransientCpuVAddr;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++      
++      /* The requested span must lie inside the allocation. */
++      PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);
++      
++      /* Pick the data source; psLinuxMemArea/eRootAreaType are only valid
++       * (and only read) on the no-linear-address path. */
++      if(pvAltLinAddr)
++      {
++              pui8DataLinAddr = pvAltLinAddr;
++      }
++    else if(psMemInfo->pvLinAddrKM)
++    {
++        pui8DataLinAddr = (IMG_UINT8 *)psMemInfo->pvLinAddrKM + ui32Offset;
++    }
++    else
++    {
++        pui8DataLinAddr = 0;
++        psLinuxMemArea = (LinuxMemArea *)psMemInfo->sMemBlk.hOSMemHandle;
++        eRootAreaType = LinuxMemAreaRootType(psLinuxMemArea);
++    }
++    
++      /* Record where in the parameter stream this payload will start. */
++      ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++      /* --- Write the raw data into the parameter stream --- */
++
++    if(pui8DataLinAddr)
++    {
++        /* Contiguous linear source: one write covers everything. */
++        if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                            pui8DataLinAddr,
++                            ui32Bytes,
++                            ui32Flags))
++        {
++            return PVRSRV_ERROR_GENERIC;
++        }
++    }
++    else if(eRootAreaType == LINUX_MEM_AREA_IO)
++    {
++        /* IO memory: map it transiently for the duration of the write. */
++        CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++        pui8TransientCpuVAddr = IORemapWrapper(CpuPAddr, ui32Bytes, PVRSRV_HAP_CACHED);
++        if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                            pui8TransientCpuVAddr,
++                            ui32Bytes,
++                            ui32Flags))
++        {
++            IOUnmapWrapper(pui8TransientCpuVAddr);
++            return PVRSRV_ERROR_GENERIC;
++        }
++        IOUnmapWrapper(pui8TransientCpuVAddr);
++    }
++    else
++    {
++        /* Page-list area: kmap each page and write it separately. */
++        PVR_ASSERT(eRootAreaType == LINUX_MEM_AREA_ALLOC_PAGES);
++        
++        ui32BytesRemaining = ui32Bytes;
++        ui32CurrentOffset = ui32Offset;
++
++        while(ui32BytesRemaining > 0)
++        {
++            IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++            struct page *psCurrentPage=NULL;
++
++            CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++                                             ui32CurrentOffset);
++            
++            /* First block may start mid-page: clamp to the page end. */
++            if(CpuPAddr.uiAddr & (PAGE_SIZE -1))
++            {
++                ui32BlockBytes =
++                    MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++            }
++            
++            psCurrentPage = LinuxMemAreaOffsetToPage(psLinuxMemArea, ui32CurrentOffset);
++            pui8TransientCpuVAddr = KMapWrapper(psCurrentPage);
++            if(!pui8TransientCpuVAddr)
++            {
++                return PVRSRV_ERROR_GENERIC;
++            }
++            /* Apply the in-page offset only after the NULL check above. */
++            pui8TransientCpuVAddr += (CpuPAddr.uiAddr & ~PAGE_MASK);
++
++            if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                                pui8TransientCpuVAddr,
++                                ui32BlockBytes,
++                                ui32Flags))
++            {
++                KUnMapWrapper(psCurrentPage);
++                return PVRSRV_ERROR_GENERIC;
++            }
++
++            KUnMapWrapper(psCurrentPage);
++
++            ui32BytesRemaining -= ui32BlockBytes;
++            ui32CurrentOffset += ui32BlockBytes;
++        }
++        PVR_ASSERT(ui32BytesRemaining == 0);
++    }
++
++      /* Parameter-file name; non-zero file number selects a split file. */
++      if (gsDBGPdumpState.ui32ParamFileNum == 0)
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++      }
++      else
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++      }
++
++      /* Script comment describing the whole load by virtual address. */
++      snprintf(pszScript,
++                       SZ_SCRIPT_SIZE_MAX,
++                       "-- LDB :SGXMEM:VA_%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++                       psMemInfo->sDevVAddr.uiAddr,
++                       ui32Offset,
++                       ui32Bytes,
++                       ui32ParamOutPos,
++                       pszFile);
++      PDumpWriteString2(pszScript, ui32Flags);
++
++      /* --- Emit one LDB record per physical page spanned --- */
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++      ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++    
++      sDevVAddr = psMemInfo->sDevVAddr;
++      sDevVAddr.uiAddr += ui32Offset;
++
++    ui32BytesRemaining = ui32Bytes;
++    ui32CurrentOffset = ui32Offset;
++
++    while(ui32BytesRemaining > 0)
++    {
++        IMG_UINT32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
++        CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
++                                         ui32CurrentOffset);
++
++        /* Resolve the physical page for this block, then re-apply the
++         * intra-page offset (non-zero for the first block only). */
++        sDevVPageAddr.uiAddr = psMemInfo->sDevVAddr.uiAddr + ui32CurrentOffset - ui32PageByteOffset;
++        
++        BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++
++        sDevPAddr.uiAddr += ui32PageByteOffset;
++
++        if(ui32PageByteOffset)
++        {
++            ui32BlockBytes =
++                MIN(ui32BytesRemaining, PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);
++            
++            ui32PageByteOffset = 0;
++        }
++
++        snprintf(pszScript,
++                               SZ_SCRIPT_SIZE_MAX,
++                               "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++                               hUniqueTag,
++                               sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                               sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                               ui32BlockBytes,
++                               ui32ParamOutPos,
++                               pszFile);
++              PDumpWriteString2(pszScript, ui32Flags);
++
++        ui32BytesRemaining -= ui32BlockBytes;
++        ui32CurrentOffset += ui32BlockBytes;
++        ui32ParamOutPos += ui32BlockBytes;
++    }
++    PVR_ASSERT(ui32BytesRemaining == 0);
++
++      return PVRSRV_OK;
++}
++
++/* PDumpMem2KM:
++ * Dump a CPU-linear buffer (typically page-directory / page-table
++ * content) to PDump, page by page.
++ *
++ * If bInitialisePages is set, the raw bytes are written to the parameter
++ * stream and an "LDB" record is emitted per page block.  Otherwise each
++ * 32-bit entry is emitted individually as a "WRW" record: entries with a
++ * non-zero address field reference the tagged target page (hUniqueTag2),
++ * zero-address entries (asserted not VALID) are written literally.
++ *
++ * hUniqueTag1 tags the pages being written; hUniqueTag2 tags the pages
++ * the entries point at.  Returns PVRSRV_ERROR_GENERIC on NULL input or
++ * stream-write failure, PVRSRV_OK otherwise.
++ *
++ * Fix vs. previous revision: the no-op "if (ui32Flags);" statement is
++ * replaced by an explicit unreferenced-parameter cast.
++ */
++PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
++                                               IMG_CPU_VIRTADDR pvLinAddr,
++                                               IMG_UINT32 ui32Bytes,
++                                               IMG_UINT32 ui32Flags,
++                                               IMG_BOOL bInitialisePages,
++                                               IMG_HANDLE hUniqueTag1,
++                                               IMG_HANDLE hUniqueTag2)
++{
++      IMG_UINT32 ui32NumPages;
++      IMG_UINT32 ui32PageOffset;
++      IMG_UINT32 ui32BlockBytes;
++      IMG_UINT8* pui8LinAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++      IMG_CPU_PHYADDR sCpuPAddr;
++      IMG_UINT32 ui32Offset;
++      IMG_UINT32 ui32ParamOutPos;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++      (void)ui32Flags;        /* unreferenced: writes here are always CONTINUOUS */
++
++      if (!pvLinAddr)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++    
++      if (bInitialisePages)
++      {
++              /* Raw page content goes into the parameter stream; the LDB
++               * records emitted below will refer back to it by offset. */
++              if (!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                                                      pvLinAddr,
++                                                      ui32Bytes,
++                                                      PDUMP_FLAGS_CONTINUOUS))
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      
++              /* Parameter-file name; non-zero file number selects a split file. */
++              if (gsDBGPdumpState.ui32ParamFileNum == 0)
++              {
++                      snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++              }
++              else
++              {
++                      snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++              }
++      }
++
++      /* Walk the buffer one host page at a time. */
++      ui32PageOffset  = (IMG_UINT32) pvLinAddr & (HOST_PAGESIZE() - 1);
++      ui32NumPages    = (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() - 1) / HOST_PAGESIZE();
++      pui8LinAddr             = (IMG_UINT8*) pvLinAddr;
++      
++      while (ui32NumPages--)
++      {
++              sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
++              sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);
++
++              /* Bytes processed this iteration: up to the end of the current
++               * page, or whatever remains. */
++              if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())
++              {
++                      ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
++              }
++              else
++              {
++                      ui32BlockBytes = ui32Bytes;
++              }
++
++              if (bInitialisePages)
++              {
++                      snprintf(pszScript,
++                                       SZ_SCRIPT_SIZE_MAX,
++                                       "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
++                                       hUniqueTag1,
++                                       sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                                       sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                                       ui32BlockBytes,
++                                       ui32ParamOutPos,
++                                       pszFile);
++                      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++              }
++              else
++              {
++                      /* Emit each 32-bit entry as an individual WRW. */
++                      for (ui32Offset = 0; ui32Offset < ui32BlockBytes; ui32Offset += sizeof(IMG_UINT32))
++                      {
++                              IMG_UINT32              ui32PTE = *((IMG_UINT32 *) (pui8LinAddr + ui32Offset));
++
++                              if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0)
++                              {
++                                      /* Entry points at a real page: reference it
++                                       * symbolically via hUniqueTag2. */
++                                      snprintf(pszScript,
++                                                      SZ_SCRIPT_SIZE_MAX,
++                                                       "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                                                       hUniqueTag1,
++                                                       (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_SIZE - 1),
++                                                       (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_SIZE - 1),
++                                                       hUniqueTag2,
++                                                       ui32PTE & SGX_MMU_PDE_ADDR_MASK,
++                                                       ui32PTE & ~SGX_MMU_PDE_ADDR_MASK);
++                              }
++                              else
++                              {
++                                      /* Zero-address entry: must not be marked valid. */
++                                      PVR_ASSERT(!(ui32PTE & SGX_MMU_PTE_VALID));
++                                      snprintf(pszScript,
++                                                       SZ_SCRIPT_SIZE_MAX,
++                                                       "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX%p\r\n",
++                                                       hUniqueTag1,
++                                                       (sDevPAddr.uiAddr + ui32Offset) & ~(SGX_MMU_PAGE_SIZE - 1),
++                                                       (sDevPAddr.uiAddr + ui32Offset) & (SGX_MMU_PAGE_SIZE - 1),
++                                                       ui32PTE,
++                                                       hUniqueTag2);
++                              }
++                              PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++                      }
++              }
++
++              /* Only the first page can start at a non-zero offset. */
++              ui32PageOffset = 0;
++              ui32Bytes -= ui32BlockBytes;
++              pui8LinAddr += ui32BlockBytes;
++              ui32ParamOutPos += ui32BlockBytes;
++      }
++
++      return PVRSRV_OK;
++}
++
++/* PDumpPDDevPAddrKM:
++ * Dump the write of a page-directory device physical address
++ * (sPDDevPAddr) into a word at ui32Offset inside psMemInfo.  The raw
++ * value is stored in the parameter stream; the script side emits a
++ * "WRW" that either references the directory page symbolically
++ * (hUniqueTag2) when its address field is non-zero, or writes the
++ * literal value otherwise.  hUniqueTag1 tags the page being written.
++ * Returns PVRSRV_ERROR_GENERIC on stream-write failure, else PVRSRV_OK.
++ */
++PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                         IMG_UINT32 ui32Offset,
++                                                         IMG_DEV_PHYADDR sPDDevPAddr,
++                                                         IMG_HANDLE hUniqueTag1,
++                                                         IMG_HANDLE hUniqueTag2)
++{
++      IMG_UINT32 ui32ParamOutPos;
++    IMG_CPU_PHYADDR CpuPAddr;
++      IMG_UINT32 ui32PageByteOffset;
++      IMG_DEV_VIRTADDR sDevVAddr;
++      IMG_DEV_VIRTADDR sDevVPageAddr;
++      IMG_DEV_PHYADDR sDevPAddr;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);
++
++      ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++      /* Store the raw address value in the parameter stream. */
++      if(!PDumpWriteILock(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2],
++                                              (IMG_UINT8 *)&sPDDevPAddr,
++                                              sizeof(IMG_DEV_PHYADDR),
++                                              PDUMP_FLAGS_CONTINUOUS))
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++        
++      /* Parameter-file name; non-zero file number selects a split file. */
++      if (gsDBGPdumpState.ui32ParamFileNum == 0)
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
++      }
++      else
++      {
++              snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm", gsDBGPdumpState.ui32ParamFileNum);
++      }
++
++      /* Resolve the device physical address of the destination word. */
++    CpuPAddr = OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle, ui32Offset);
++      ui32PageByteOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++    
++      sDevVAddr = psMemInfo->sDevVAddr;
++      sDevVAddr.uiAddr += ui32Offset;
++
++      sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;
++      BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);
++      sDevPAddr.uiAddr += ui32PageByteOffset;
++
++      if ((sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK) != 0)
++      {
++              /* Non-zero address field: reference the PD page via its tag. */
++              snprintf(pszScript,
++                               SZ_SCRIPT_SIZE_MAX,
++                               "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX :SGXMEM:PA_%p%8.8lX:0x%8.8lX\r\n",
++                               hUniqueTag1,
++                               sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                               sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                               hUniqueTag2,
++                               sPDDevPAddr.uiAddr & SGX_MMU_PDE_ADDR_MASK,
++                               sPDDevPAddr.uiAddr & ~SGX_MMU_PDE_ADDR_MASK);
++      }
++      else
++      {
++              /* Zero address field: write the literal value. */
++              PVR_ASSERT(!(sDevPAddr.uiAddr & SGX_MMU_PTE_VALID));
++              snprintf(pszScript,
++                               SZ_SCRIPT_SIZE_MAX,
++                               "WRW :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX\r\n",
++                               hUniqueTag1,
++                               sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                               sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                               sPDDevPAddr.uiAddr);
++      }
++      PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
++
++      return PVRSRV_OK;
++}
++
++/* PDumpSetFrameKM:
++ * Propagate a new capture-frame number to every live PDump stream.
++ * Streams that were never opened (NULL slots) are skipped.
++ * Always returns PVRSRV_OK.
++ */
++PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame)
++{
++      IMG_UINT32 ui32Idx;
++
++      for (ui32Idx = 0; ui32Idx < PDUMP_NUM_STREAMS; ui32Idx++)
++      {
++              PDBG_STREAM psStream = gsDBGPdumpState.psStream[ui32Idx];
++
++              if (psStream)
++              {
++                      DbgSetFrame(psStream, ui32Frame);
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++/* PDumpGetFrameKM:
++ * Report the current capture-frame number of the script stream through
++ * *pui32Frame.  Always returns PVRSRV_OK.
++ */
++PVRSRV_ERROR PDumpGetFrameKM(IMG_PUINT32 pui32Frame)
++{
++      *pui32Frame = DbgGetFrame(gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2]);
++
++      return PVRSRV_OK;
++}
++
++/* PDumpCommentKM:
++ * Write pszComment to the script stream as a PDump comment ("-- ..."),
++ * guaranteeing the line is terminated with "\r\n".
++ * Returns PVRSRV_OK on success; if the "-- " prefix write is rejected,
++ * returns PVRSRV_ERROR_GENERIC for continuous writes and
++ * PVRSRV_ERROR_CMD_NOT_PROCESSED otherwise.
++ *
++ * Fix vs. previous revision: the terminator logic could read
++ * pszMsg[-1] for an empty comment, read pszMsg[ui32Count-2] for a
++ * one-character comment, and write one byte past the buffer when the
++ * message filled it exactly.  The message is now truncated so that the
++ * CRLF terminator and NUL always fit, and the index reads are bounded.
++ */
++PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32 ui32Count;
++      PVRSRV_ERROR eError;
++      __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC);
++
++      /* Error to report if the prefix write is rejected. */
++      if(ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++      {
++              eError = PVRSRV_ERROR_GENERIC;
++      }
++      else
++      {
++              eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
++      }
++      
++      if (!PDumpWriteString2("-- ", ui32Flags))
++      {
++              return eError;
++      }
++
++      /* Copy (and NUL-terminate) the comment into the message buffer. */
++      snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszComment);
++      ui32Count = strlen(pszMsg);
++
++      /* Leave room for "\r\n" plus the NUL terminator. */
++      if (ui32Count > SZ_MSG_SIZE_MAX - 3)
++      {
++              ui32Count = SZ_MSG_SIZE_MAX - 3;
++      }
++
++      if ((ui32Count == 0) || (pszMsg[ui32Count - 1] != '\n'))
++      {
++              /* No trailing newline: append CRLF. */
++              pszMsg[ui32Count++] = '\r';
++              pszMsg[ui32Count++] = '\n';
++              pszMsg[ui32Count] = '\0';
++      }
++      else if ((ui32Count < 2) || (pszMsg[ui32Count - 2] != '\r'))
++      {
++              /* "\n" without a preceding "\r": rewrite as CRLF. */
++              pszMsg[ui32Count - 1] = '\r';
++              pszMsg[ui32Count++] = '\n';
++              pszMsg[ui32Count] = '\0';
++      }
++
++      PDumpWriteString2(pszMsg, ui32Flags);
++
++      return PVRSRV_OK;
++}
++
++/* PDumpDriverInfoKM:
++ * Write pszString, terminated with "\r\n", to the driver-info stream.
++ * On write failure returns PVRSRV_ERROR_GENERIC for continuous writes
++ * and PVRSRV_ERROR_CMD_NOT_PROCESSED otherwise; else PVRSRV_OK.
++ *
++ * Fix vs. previous revision: same terminator-logic hardening as
++ * PDumpCommentKM -- the old code could read pszMsg[-1] for an empty
++ * string, read pszMsg[ui32Count-2] for a one-character string, and
++ * write one byte past the buffer when the message filled it exactly.
++ */
++PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32 ui32Count;
++      __PDBG_PDUMP_STATE_GET_MSG_STRING(PVRSRV_ERROR_GENERIC);
++
++      /* Copy (and NUL-terminate) the string into the message buffer. */
++      snprintf(pszMsg, SZ_MSG_SIZE_MAX, "%s", pszString);
++      ui32Count = strlen(pszMsg);
++
++      /* Leave room for "\r\n" plus the NUL terminator. */
++      if (ui32Count > SZ_MSG_SIZE_MAX - 3)
++      {
++              ui32Count = SZ_MSG_SIZE_MAX - 3;
++      }
++
++      if ((ui32Count == 0) || (pszMsg[ui32Count - 1] != '\n'))
++      {
++              /* No trailing newline: append CRLF. */
++              pszMsg[ui32Count++] = '\r';
++              pszMsg[ui32Count++] = '\n';
++              pszMsg[ui32Count] = '\0';
++      }
++      else if ((ui32Count < 2) || (pszMsg[ui32Count - 2] != '\r'))
++      {
++              /* "\n" without a preceding "\r": rewrite as CRLF. */
++              pszMsg[ui32Count - 1] = '\r';
++              pszMsg[ui32Count++] = '\n';
++              pszMsg[ui32Count] = '\0';
++      }
++
++      /* ui32Count now equals strlen(pszMsg): write exactly that many bytes. */
++      if      (!PDumpWriteILock(gsDBGPdumpState.
++                                                psStream[PDUMP_STREAM_DRIVERINFO],
++                                                (IMG_UINT8 *)pszMsg,
++                                                ui32Count,
++                                                ui32Flags))
++      {
++              if      (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++              {
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              else
++              {
++                      return PVRSRV_ERROR_CMD_NOT_PROCESSED;
++              }
++      }
++
++      return PVRSRV_OK;
++}
++
++/* PDumpBitmapKM:
++ * Emit an "SII" record instructing the replay tool to save a rendered
++ * bitmap (ui32Width x ui32Height, ui32StrideInBytes, ePixelFormat /
++ * eMemFormat) from device virtual address sDevBaseAddr into
++ * "<pszFileName>.bin".  With multiple memory contexts the address is
++ * qualified by the pixel data master ("v%x"); otherwise plain "v" is
++ * used.  Always returns PVRSRV_OK.
++ */
++PVRSRV_ERROR PDumpBitmapKM(   IMG_CHAR *pszFileName,
++                                                      IMG_UINT32 ui32FileOffset,
++                                                      IMG_UINT32 ui32Width,
++                                                      IMG_UINT32 ui32Height,
++                                                      IMG_UINT32 ui32StrideInBytes,
++                                                      IMG_DEV_VIRTADDR sDevBaseAddr,
++                                                      IMG_UINT32 ui32Size,
++                                                      PDUMP_PIXEL_FORMAT ePixelFormat,
++                                                      PDUMP_MEM_FORMAT eMemFormat,
++                                                      IMG_UINT32 ui32PDumpFlags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++      PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");
++
++#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
++      /* Address qualified by the pixel data-master's memory context. */
++      snprintf(pszScript,
++                              SZ_SCRIPT_SIZE_MAX,
++                              "SII %s %s.bin :SGXMEM:v%x:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++                              pszFileName,
++                              pszFileName,
++                              PDUMP_DATAMASTER_PIXEL,
++                              sDevBaseAddr.uiAddr,
++                              ui32Size,
++                              ui32FileOffset,
++                              ePixelFormat,
++                              ui32Width,
++                              ui32Height,
++                              ui32StrideInBytes,
++                              eMemFormat);
++#else
++      snprintf(pszScript,
++                              SZ_SCRIPT_SIZE_MAX,
++                              "SII %s %s.bin :SGXMEM:v:0x%08lX 0x%08lX 0x%08lX 0x%08X 0x%08lX 0x%08lX 0x%08lX 0x%08X\r\n",
++                              pszFileName,
++                              pszFileName,
++                              sDevBaseAddr.uiAddr,
++                              ui32Size,
++                              ui32FileOffset,
++                              ePixelFormat,
++                              ui32Width,
++                              ui32Height,
++                              ui32StrideInBytes,
++                              eMemFormat);
++#endif
++
++      PDumpWriteString2( pszScript, ui32PDumpFlags);
++      return PVRSRV_OK;
++}
++
++/* PDumpReadRegKM:
++ * Emit an "SAB" record instructing the replay tool to save the value of
++ * SGX register ui32Address into pszFileName at ui32FileOffset.
++ * Always returns PVRSRV_OK.  (ui32Size is currently not part of the
++ * emitted record.)
++ */
++PVRSRV_ERROR PDumpReadRegKM           (       IMG_CHAR *pszFileName,
++                                                                      IMG_UINT32 ui32FileOffset,
++                                                                      IMG_UINT32 ui32Address,
++                                                                      IMG_UINT32 ui32Size,
++                                                                      IMG_UINT32 ui32PDumpFlags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
++                      "SAB :SGXREG:0x%08lX 0x%08lX %s\r\n",
++                      ui32Address, ui32FileOffset, pszFileName);
++      PDumpWriteString2(pszScript, ui32PDumpFlags);
++
++      return PVRSRV_OK;
++}
++
++
++/* PDumpWriteString2:
++ * Route a NUL-terminated script line into the script stream.
++ * Returns the underlying PDumpWriteILock result.
++ */
++static IMG_BOOL PDumpWriteString2(IMG_CHAR * pszString, IMG_UINT32 ui32Flags)
++{
++      PDBG_STREAM psScriptStream = gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2];
++
++      return PDumpWriteILock(psScriptStream,
++                                                 (IMG_UINT8 *) pszString,
++                                                 strlen(pszString),
++                                                 ui32Flags);
++}
++
++
++/* PDumpWriteILock:
++ * Write ui32Count bytes from pui8Data to psStream, looping until all
++ * bytes are accepted or DbgWrite reports failure (0xFFFFFFFF).
++ * A NULL stream is treated as success.  For the parameter stream, when
++ * the write would push the file past MAX_FILE_SIZE a "Splitting pdump
++ * output file" comment is emitted, a marker is set at the current
++ * offset, and the parameter file number is bumped so subsequent records
++ * reference the next .prm file.
++ * Returns IMG_TRUE on success, IMG_FALSE on write failure.
++ */
++static IMG_BOOL PDumpWriteILock(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32Count, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32 ui32Written = 0;
++      IMG_UINT32 ui32Off = 0;
++
++      if (!psStream)
++      {
++              return IMG_TRUE;
++      }
++      
++
++      /* Parameter stream: split the output file if this write would
++       * exceed the per-file size limit. */
++
++      if (psStream == gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2])
++      {
++              IMG_UINT32 ui32ParamOutPos = gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2]);
++
++              if (ui32ParamOutPos + ui32Count > MAX_FILE_SIZE)
++              {
++                      if ((gsDBGPdumpState.psStream[PDUMP_STREAM_SCRIPT2] && PDumpWriteString2("\r\n-- Splitting pdump output file\r\n\r\n", ui32Flags)))
++                      {
++                              DbgSetMarker(gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], ui32ParamOutPos);
++                              gsDBGPdumpState.ui32ParamFileNum++;
++                      }
++              }
++      }
++      
++      /* Partial-write loop: keep writing the remainder until done or
++       * DbgWrite signals an error with 0xFFFFFFFF. */
++      while (((IMG_UINT32) ui32Count > 0) && (ui32Written != 0xFFFFFFFF))
++      {
++              ui32Written = DbgWrite(psStream, &pui8Data[ui32Off], ui32Count, ui32Flags);
++
++#if 0
++              /* Disabled: yield when the stream accepts nothing (non-Linux
++               * heritage -- ZwYieldExecution is an NT call). */
++              if (ui32Written == 0)
++              {
++                      ZwYieldExecution();
++              }
++#endif
++              if (ui32Written != 0xFFFFFFFF)
++              {
++                      ui32Off += ui32Written;
++                      ui32Count -= ui32Written;
++              }
++      }
++
++      if (ui32Written == 0xFFFFFFFF)
++      {
++              return IMG_FALSE;
++      }
++
++      return IMG_TRUE;
++}
++
++/* DbgSetFrame: forward a frame-number change for one stream to the
++ * debug driver. */
++static IMG_VOID DbgSetFrame(PDBG_STREAM psStream, IMG_UINT32 ui32Frame)
++{     
++      gpfnDbgDrv->pfnSetFrame(psStream, ui32Frame);
++}
++
++
++static IMG_UINT32 DbgGetFrame(PDBG_STREAM psStream)
++{     
++      return gpfnDbgDrv->pfnGetFrame(psStream);
++}
++
++static IMG_VOID DbgSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
++{     
++      gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
++}
++
++static IMG_UINT32 DbgWrite(PDBG_STREAM psStream, IMG_UINT8 *pui8Data, IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
++{
++      IMG_UINT32      ui32BytesWritten;
++
++      if (ui32Flags & PDUMP_FLAGS_CONTINUOUS)
++      {
++              
++
++              if ((psStream->ui32CapMode & DEBUG_CAPMODE_FRAMED) && 
++                      (psStream->ui32Start == 0xFFFFFFFF) &&
++                      (psStream->ui32End == 0xFFFFFFFF) &&
++                      psStream->bInitPhaseComplete)
++              {
++                      ui32BytesWritten = ui32BCount;
++              }
++              else
++              {
++                      ui32BytesWritten = gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount, 1);
++              }
++      }
++      else
++      {
++              if (ui32Flags & PDUMP_FLAGS_LASTFRAME)
++              {
++                      IMG_UINT32      ui32DbgFlags;
++
++                      ui32DbgFlags = 0;
++                      if (ui32Flags & PDUMP_FLAGS_RESETLFBUFFER)
++                      {
++                              ui32DbgFlags |= WRITELF_FLAGS_RESETBUF;
++                      }
++
++                      ui32BytesWritten = gpfnDbgDrv->pfnWriteLF(psStream, pui8Data, ui32BCount, 1, ui32DbgFlags);
++              }
++              else
++              {
++                      ui32BytesWritten = gpfnDbgDrv->pfnWriteBINCM(psStream, pui8Data, ui32BCount, 1);
++              }
++      }
++
++      return ui32BytesWritten;
++}
++
++IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame)
++{
++      IMG_BOOL        bFrameDumped;
++
++      
++
++      bFrameDumped = IMG_FALSE;
++      PDumpSetFrameKM(ui32CurrentFrame + 1);
++      bFrameDumped = PDumpIsCaptureFrameKM();
++      PDumpSetFrameKM(ui32CurrentFrame);
++
++      return bFrameDumped;
++}
++
++IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 ui32RegOffset, IMG_BOOL bLastFrame)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      snprintf(pszScript, SZ_SCRIPT_SIZE_MAX, "RDW :SGXREG:0x%lX\r\n", ui32RegOffset);
++      PDumpWriteString2(pszScript, bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0);
++}
++
++void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO         psROffMemInfo,
++                        IMG_UINT32                                    ui32ROffOffset,
++                        IMG_UINT32                                    ui32WPosVal,
++                        IMG_UINT32                                    ui32PacketSize,
++                        IMG_UINT32                                    ui32BufferSize,
++                        IMG_UINT32                                    ui32Flags,
++                        IMG_HANDLE                                    hUniqueTag)
++{
++      IMG_UINT32                      ui32PageOffset;
++      IMG_DEV_VIRTADDR        sDevVAddr;
++      IMG_DEV_PHYADDR         sDevPAddr;
++      IMG_DEV_VIRTADDR        sDevVPageAddr;
++    IMG_CPU_PHYADDR     CpuPAddr;
++
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      
++      PVR_ASSERT((ui32ROffOffset + sizeof(IMG_UINT32)) <= psROffMemInfo->ui32AllocSize);
++      
++      sDevVAddr = psROffMemInfo->sDevVAddr;
++      
++      
++      sDevVAddr.uiAddr += ui32ROffOffset;
++
++      
++
++
++    CpuPAddr = OSMemHandleToCpuPAddr(psROffMemInfo->sMemBlk.hOSMemHandle, ui32ROffOffset);
++    ui32PageOffset = CpuPAddr.uiAddr & (PAGE_SIZE -1);
++
++      
++      sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageOffset;
++      
++      
++      BM_GetPhysPageAddr(psROffMemInfo, sDevVPageAddr, &sDevPAddr);
++      
++      
++      sDevPAddr.uiAddr += ui32PageOffset;
++      
++      snprintf(pszScript,
++                       SZ_SCRIPT_SIZE_MAX,
++                       "CBP :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX 0x%8.8lX\r\n",
++                       hUniqueTag,
++                       sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
++                       sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
++                       ui32WPosVal,
++                       ui32PacketSize,
++                       ui32BufferSize);
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
++
++IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
++{
++      __PDBG_PDUMP_STATE_GET_SCRIPT_STRING();
++
++      sprintf(pszScript, "IDL %lu\r\n", ui32Clocks);
++      PDumpWriteString2(pszScript, ui32Flags);
++}
++
++
++IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks)
++{
++      PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
++}
++
++
++IMG_VOID PDumpSuspendKM(IMG_VOID)
++{
++}
++
++IMG_VOID PDumpResumeKM(IMG_VOID)
++{
++}
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,369 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++
++#include "services_headers.h"
++
++#include "queue.h"
++#include "resman.h"
++#include "pvrmmap.h"
++#include "pvr_debug.h"
++#include "pvrversion.h"
++#include "proc.h"
++
++#ifdef DEBUG
++int PVRDebugProcSetLevel(struct file *file, const char *buffer, unsigned long count, void *data);
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data);
++#endif
++
++static struct proc_dir_entry * dir;
++
++static off_t procDumpSysNodes(char *buf, size_t size, off_t off);
++static off_t procDumpVersion(char *buf, size_t size, off_t off);
++
++off_t printAppend(char * buffer, size_t size, off_t off, const char * format, ...)
++{
++    int n;
++    int space = size - off;
++    va_list ap;
++
++    va_start (ap, format);
++
++    n = vsnprintf (buffer+off, space, format, ap);
++
++    va_end (ap);
++    
++
++    if (n > space || n < 0)
++    {
++        return size;
++    }
++    else
++    {
++        return off+n;
++    }
++}
++
++
++static int pvr_read_proc(char *page, char **start, off_t off,
++                         int count, int *eof, void *data)
++{
++      pvr_read_proc_t *pprn = data;
++
++    off_t len = pprn (page, count, off);
++
++    if (len == END_OF_FILE)
++    {
++        len  = 0;
++        *eof = 1;
++    }
++    else if (!len)             
++    {
++        *start = (char *) 0;   
++    }
++    else
++    {
++        *start = (char *) 1;
++    }
++
++    return len;
++}
++
++
++int CreateProcEntry(const char * name, read_proc_t rhandler, write_proc_t whandler, void *data)
++{
++    struct proc_dir_entry * file;
++      mode_t mode;
++
++      if (!dir)
++      {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot make proc entry /proc/pvr/%s: no parent", name));
++        return -ENOMEM;
++      }
++
++      mode = S_IFREG;
++
++      if (rhandler)
++      {
++              mode |= S_IRUGO;
++      }
++
++      if (whandler)
++      {
++              mode |= S_IWUSR;
++      }
++
++      file = create_proc_entry(name, mode, dir);
++
++    if (file)
++    {
++        file->owner = THIS_MODULE;
++              file->read_proc = rhandler;
++              file->write_proc = whandler;
++              file->data = data;
++
++              PVR_DPF((PVR_DBG_MESSAGE, "Created /proc/pvr/%s", name));
++
++        return 0;
++    }
++
++    PVR_DPF((PVR_DBG_ERROR, "CreateProcEntry: cannot make proc entry /proc/pvr/%s: no memory", name));
++
++    return -ENOMEM;
++}
++
++
++int CreateProcReadEntry(const char * name, pvr_read_proc_t handler)
++{
++    struct proc_dir_entry * file;
++
++      if (!dir)
++      {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/pvr/%s: no parent", name));
++
++        return -ENOMEM;
++      }
++
++      file = create_proc_read_entry (name, S_IFREG | S_IRUGO, dir, pvr_read_proc, (void *)handler);
++
++    if (file)
++    {
++        file->owner = THIS_MODULE;
++
++        return 0;
++    }
++
++    PVR_DPF((PVR_DBG_ERROR, "CreateProcReadEntry: cannot make proc entry /proc/pvr/%s: no memory", name));
++
++    return -ENOMEM;
++}
++
++
++int CreateProcEntries(void)
++{
++    dir = proc_mkdir ("pvr", NULL);
++
++    if (!dir)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: cannot make /proc/pvr directory"));
++
++        return -ENOMEM;
++    }
++
++    if (CreateProcReadEntry("queue", QueuePrintQueues) ||
++              CreateProcReadEntry("version", procDumpVersion) ||
++              CreateProcReadEntry("nodes", procDumpSysNodes))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/pvr files"));
++
++        return -ENOMEM;
++    }
++
++#ifdef DEBUG
++      if (CreateProcEntry ("debug_level", PVRDebugProcGetLevel, PVRDebugProcSetLevel, 0))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "CreateProcEntries: couldn't make /proc/pvr/debug_level"));
++
++        return -ENOMEM;
++    }
++#endif
++
++    return 0;
++}
++
++
++void RemoveProcEntry(const char *name)
++{
++      if (dir)
++      {
++      remove_proc_entry(name, dir);
++      }
++
++      PVR_DPF((PVR_DBG_MESSAGE, "Removing /proc/pvr/%s", name));
++}
++
++
++void RemoveProcEntries(void)
++{
++#ifdef DEBUG
++    RemoveProcEntry("debug_level");
++#endif
++    RemoveProcEntry("queue");
++    RemoveProcEntry("nodes");
++    RemoveProcEntry("version");
++
++      while (dir->subdir)
++      {
++              PVR_DPF((PVR_DBG_WARNING, "Belatedly removing /proc/pvr/%s", dir->subdir->name));
++
++              RemoveProcEntry(dir->subdir->name);
++      }
++
++    remove_proc_entry("pvr", NULL);
++}
++
++
++static off_t procDumpVersion(char *buf, size_t size, off_t off)
++{
++    SYS_DATA *psSysData;
++    
++      if (off == 0)
++      {
++              return printAppend(buf, size, 0,
++                                                      "Version %s (%s) %s\n",
++                                                      PVRVERSION_STRING,
++                                                      PVR_BUILD_TYPE, PVR_BUILD_DIR);
++      }
++
++    if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++    
++    if (off == 1)
++    {
++        IMG_CHAR *pszSystemVersionString = "None";
++
++        if(psSysData->pszVersionString)
++        {
++            pszSystemVersionString = psSysData->pszVersionString;
++        }
++            
++        if(strlen(pszSystemVersionString) 
++            + strlen("System Version String: \n") 
++            + 1 > size)
++        {
++            return 0;
++        }
++        return printAppend(buf, size, 0,
++                            "System Version String: %s\n",
++                            pszSystemVersionString);
++    }
++    
++      return END_OF_FILE;
++}
++
++
++static const char *deviceTypeToString(PVRSRV_DEVICE_TYPE deviceType)
++{
++      switch (deviceType)
++      {
++              default:
++              {
++                      static char text[10];
++
++                      sprintf(text, "?%x", deviceType);
++
++                      return text;
++              }
++      }
++}
++
++
++static const char *deviceClassToString(PVRSRV_DEVICE_CLASS deviceClass)
++{
++      switch (deviceClass) 
++      {
++              case PVRSRV_DEVICE_CLASS_3D:
++              {
++                      return "3D";
++              }
++              case PVRSRV_DEVICE_CLASS_DISPLAY:
++              {
++                      return "display";
++              }
++              case PVRSRV_DEVICE_CLASS_BUFFER:
++              {
++                      return "buffer";
++              }
++              default:
++              {
++                      static char text[10];
++
++                      sprintf(text, "?%x", deviceClass);
++                      return text;
++              }
++      }
++}
++
++static
++off_t procDumpSysNodes(char *buf, size_t size, off_t off)
++{
++      SYS_DATA                        *psSysData;
++      PVRSRV_DEVICE_NODE      *psDevNode;
++      off_t                           len;
++      
++      
++      if (size < 80)
++      {
++              return 0;
++      }
++
++      if (off == 0)
++      {
++              return printAppend(buf, size, 0, 
++                                                      "Registered nodes\n"
++                                                      "Addr     Type     Class    Index Ref pvDev     Size Res\n");
++      }
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      
++      for(psDevNode = psSysData->psDeviceNodeList;
++                      --off && psDevNode;
++                      psDevNode = psDevNode->psNext)
++              ;
++
++      if (!psDevNode)
++      {
++              return END_OF_FILE;
++      }
++
++      len = printAppend(buf, size, 0,
++                                        "%p %-8s %-8s %4d  %2lu  %p  %3lu  %p\n",
++                                        psDevNode,
++                                        deviceTypeToString(psDevNode->sDevId.eDeviceType),
++                                        deviceClassToString(psDevNode->sDevId.eDeviceClass),
++                                        psDevNode->sDevId.eDeviceClass,
++                                        psDevNode->ui32RefCount,
++                                        psDevNode->pvDevice,
++                                        psDevNode->ui32pvDeviceSize,
++                                        psDevNode->psResItem);
++      return (len);
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/proc.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,50 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __SERVICES_PROC_H__
++#define __SERVICES_PROC_H__
++
++#include <asm/system.h>               
++#include <linux/proc_fs.h>    
++
++#define END_OF_FILE (off_t) -1
++
++typedef off_t (pvr_read_proc_t)(char *, size_t, off_t);
++
++off_t printAppend(char * buffer, size_t size, off_t off, const char * format, ...)
++      __attribute__((format(printf, 4, 5)));
++
++int CreateProcEntries(void);
++
++int CreateProcReadEntry (const char * name, pvr_read_proc_t handler);
++
++int CreateProcEntry(const char * name, read_proc_t rhandler, write_proc_t whandler, void *data);
++
++void RemoveProcEntry(const char * name);
++
++void RemoveProcEntries(void);
++
++#endif
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_bridge_k.c 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,215 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "img_defs.h"
++#include "services.h"
++#include "pvr_bridge.h"
++#include "perproc.h"
++#include "mutex.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "proc.h"
++
++#include "sgx_bridge.h"
++
++#include "bridged_pvr_bridge.h"
++
++
++#if defined(DEBUG_BRIDGE_KM)
++static off_t printLinuxBridgeStats(char * buffer, size_t size, off_t off);
++#endif
++
++extern PVRSRV_LINUX_MUTEX gPVRSRVLock;
++
++
++PVRSRV_ERROR
++LinuxBridgeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++      {
++              int iStatus;
++              iStatus = CreateProcReadEntry("bridge_stats", printLinuxBridgeStats);
++              if(iStatus!=0)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++      }
++#endif
++      return CommonBridgeInit();
++}
++
++IMG_VOID
++LinuxBridgeDeInit(IMG_VOID)
++{
++#if defined(DEBUG_BRIDGE_KM)
++      RemoveProcEntry("bridge_stats");
++#endif
++}
++
++#if defined(DEBUG_BRIDGE_KM)
++static off_t
++printLinuxBridgeStats(char * buffer, size_t count, off_t off)
++{
++      PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
++      off_t Ret;
++
++      LinuxLockMutex(&gPVRSRVLock);
++
++      if(!off)
++      {
++              if(count < 500)
++              {
++                      Ret = 0;
++                      goto unlock_and_return;
++              }
++              Ret = printAppend(buffer, count, 0,
++                                                "Total ioctl call count = %lu\n"
++                                                "Total number of bytes copied via copy_from_user = %lu\n"
++                                                "Total number of bytes copied via copy_to_user = %lu\n"
++                                                "Total number of bytes copied via copy_*_user = %lu\n\n"
++                                                "%-45s | %-40s | %10s | %20s | %10s\n",
++                                                g_BridgeGlobalStats.ui32IOCTLCount,
++                                                g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
++                                                g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++                                                g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
++                                                "Bridge Name",
++                                                "Wrapper Function",
++                                                "Call Count",
++                                                "copy_from_user Bytes",
++                                                "copy_to_user Bytes"
++                                               );
++              goto unlock_and_return;
++      }
++
++      if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++      {
++              Ret = END_OF_FILE;
++              goto unlock_and_return;
++      }
++
++      if(count < 300)
++      {
++              Ret = 0;
++              goto unlock_and_return;
++      }
++
++      psEntry = &g_BridgeDispatchTable[off-1];
++      Ret =  printAppend(buffer, count, 0,
++                                         "%-45s   %-40s   %-10lu   %-20lu   %-10lu\n",
++                                         psEntry->pszIOCName,
++                                         psEntry->pszFunctionName,
++                                         psEntry->ui32CallCount,
++                                         psEntry->ui32CopyFromUserTotalBytes,
++                                         psEntry->ui32CopyToUserTotalBytes);
++
++unlock_and_return:
++      LinuxUnLockMutex(&gPVRSRVLock);
++      return Ret;
++}
++#endif 
++
++
++
++int
++PVRSRV_BridgeDispatchKM(struct inode *inode,
++                                              struct file *file,
++                                              unsigned int cmd,
++                                              unsigned long arg)
++{
++      IMG_UINT32 ui32BridgeID = PVRSRV_GET_BRIDGE_ID(cmd);
++      PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
++      PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
++      IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
++      PVRSRV_PER_PROCESS_DATA *psPerProc;
++      int err = -EFAULT;
++
++      LinuxLockMutex(&gPVRSRVLock);
++
++
++      if(!OSAccessOK(PVR_VERIFY_WRITE,
++                                 psBridgePackageUM,
++                                 sizeof(PVRSRV_BRIDGE_PACKAGE)))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
++                               __FUNCTION__));
++
++              goto unlock_and_return;
++      }
++      
++      
++      if(OSCopyFromUser(IMG_NULL,
++                                        &sBridgePackageKM,
++                                        psBridgePackageUM,
++                                        sizeof(PVRSRV_BRIDGE_PACKAGE))
++        != PVRSRV_OK)
++      {
++              goto unlock_and_return;
++      }
++      
++      if(ui32BridgeID != PVRSRV_GET_BRIDGE_ID(PVRSRV_BRIDGE_CONNECT_SERVICES))
++      {
++              PVRSRV_ERROR eError;
++
++              eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
++                                                                      (IMG_PVOID *)&psPerProc,
++                                                                      sBridgePackageKM.hKernelServices,
++                                                                      PVRSRV_HANDLE_TYPE_PERPROC_DATA);
++              if(eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
++                                       __FUNCTION__, eError));
++                      goto unlock_and_return;
++              }
++
++              if(psPerProc->ui32PID != ui32PID)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
++                                       "belonging to process %d", __FUNCTION__, ui32PID,
++                                       psPerProc->ui32PID));
++                      goto unlock_and_return;
++              }
++      }
++      else
++      {
++              
++              psPerProc = PVRSRVPerProcessData(ui32PID, IMG_TRUE);
++              if(psPerProc == IMG_NULL)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
++                                       "Couldn't create per-process data area"));
++                      goto unlock_and_return;
++              }
++      }
++
++      sBridgePackageKM.ui32BridgeID = PVRSRV_GET_BRIDGE_ID(sBridgePackageKM.ui32BridgeID);
++      
++      err = BridgedDispatchKM(psPerProc, &sBridgePackageKM);
++      
++unlock_and_return:
++      LinuxUnLockMutex(&gPVRSRVLock);
++      return err;
++}
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c
+--- git/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/env/linux/pvr_debug.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,199 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++  
++#ifndef AUTOCONF_INCLUDED
++ #include <linux/config.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/tty.h>                        
++#include <stdarg.h>
++#include "img_types.h"
++#include "pvr_debug.h"
++#include "proc.h"
++
++#if defined(DEBUG) || defined(TIMING)
++
++IMG_UINT32    gPVRDebugLevel = DBGPRIV_WARNING;
++
++#define PVR_STRING_TERMINATOR         '\0'
++#define PVR_IS_FILE_SEPARATOR(character) ( ((character) == '\\') || ((character) == '/') )
++
++void PVRSRVDebugPrintf        (
++                                              IMG_UINT32      ui32DebugLevel,
++                                              const IMG_CHAR* pszFileName,
++                                              IMG_UINT32      ui32Line,
++                                              const IMG_CHAR* pszFormat,
++                                              ...
++                                      )
++{
++      IMG_BOOL bTrace, bDebug;
++#if !defined(__sh__)
++      IMG_CHAR *pszLeafName;
++      
++      pszLeafName = (char *)strrchr (pszFileName, '\\');
++      
++      if (pszLeafName)
++      {
++              pszFileName = pszLeafName;
++      }
++#endif 
++              
++      bTrace = gPVRDebugLevel & ui32DebugLevel & DBGPRIV_CALLTRACE;
++      bDebug = ((gPVRDebugLevel & DBGPRIV_ALLLEVELS) >= ui32DebugLevel);
++
++      if (bTrace || bDebug)
++      {
++              va_list vaArgs;
++              static char szBuffer[256];
++
++              va_start (vaArgs, pszFormat);
++
++              
++              if (bDebug)
++              {
++                      switch(ui32DebugLevel)
++                      {
++                              case DBGPRIV_FATAL:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Fatal): ");
++                                      break;
++                              }
++                              case DBGPRIV_ERROR:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Error): ");
++                                      break;
++                              }
++                              case DBGPRIV_WARNING:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Warning): ");
++                                      break;
++                              }
++                              case DBGPRIV_MESSAGE:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Message): ");
++                                      break;
++                              }
++                              case DBGPRIV_VERBOSE:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Verbose): ");
++                                      break;
++                              }
++                              default:
++                              {
++                                      strcpy (szBuffer, "PVR_K:(Unknown message level)");
++                                      break;
++                              }
++                      }
++              }
++              else
++              {
++                      strcpy (szBuffer, "PVR_K: ");
++              }
++
++              vsprintf (&szBuffer[strlen(szBuffer)], pszFormat, vaArgs);
++
++              
++
++              if (!bTrace)
++              {
++                      sprintf (&szBuffer[strlen(szBuffer)], " [%d, %s]", (int)ui32Line, pszFileName);
++              }
++
++              printk(KERN_INFO "%s\r\n", szBuffer);
++
++              va_end (vaArgs);
++      }
++}
++
++void PVRSRVDebugAssertFail(const IMG_CHAR* pszFile, IMG_UINT32 uLine)
++{
++      PVRSRVDebugPrintf(DBGPRIV_FATAL, pszFile, uLine, "Debug assertion failed!");
++      BUG();
++}
++
++void PVRSRVTrace(const IMG_CHAR* pszFormat, ...)
++{
++      static IMG_CHAR szMessage[PVR_MAX_DEBUG_MESSAGE_LEN+1];
++      IMG_CHAR* pszEndOfMessage = IMG_NULL;
++      va_list ArgList;
++
++      strncpy(szMessage, "PVR: ", PVR_MAX_DEBUG_MESSAGE_LEN);
++
++      pszEndOfMessage = &szMessage[strlen(szMessage)];
++
++      va_start(ArgList, pszFormat);
++      vsprintf(pszEndOfMessage, pszFormat, ArgList);
++      va_end(ArgList);
++
++      strcat(szMessage,"\r\n");
++
++      printk(KERN_INFO "%s", szMessage);
++}
++
++
++void PVRDebugSetLevel(IMG_UINT32 uDebugLevel)
++{
++      printk(KERN_INFO "PVR: Setting Debug Level = 0x%x",(unsigned int)uDebugLevel);
++
++      gPVRDebugLevel = uDebugLevel;
++}
++
++int PVRDebugProcSetLevel(struct file *file, const char *buffer, unsigned long count, void *data)
++{
++#define       _PROC_SET_BUFFER_SZ             2
++      char data_buffer[_PROC_SET_BUFFER_SZ];
++
++      if (count != _PROC_SET_BUFFER_SZ)
++      {
++              return -EINVAL;
++      }
++      else
++      {
++              if (copy_from_user(data_buffer, buffer, count))
++                      return -EINVAL;
++              if (data_buffer[count - 1] != '\n')
++                      return -EINVAL;
++              PVRDebugSetLevel(data_buffer[0] - '0');
++      }
++      return (count);
++}
++
++int PVRDebugProcGetLevel(char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++      if (off == 0) {
++              *start = (char *)1;
++              return printAppend(page, count, 0, "%lu\n", gPVRDebugLevel);
++      }
++      *eof = 1;
++      return 0;
++}
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgx530defs.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,423 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGX530DEFS_KM_H_
++#define _SGX530DEFS_KM_H_
++
++#define EUR_CR_CLKGATECTL                   0x0000
++#define EUR_CR_CLKGATECTL_2D_CLKG_MASK      0x00000003
++#define EUR_CR_CLKGATECTL_2D_CLKG_SHIFT     0
++#define EUR_CR_CLKGATECTL_ISP_CLKG_MASK     0x00000030
++#define EUR_CR_CLKGATECTL_ISP_CLKG_SHIFT    4
++#define EUR_CR_CLKGATECTL_TSP_CLKG_MASK     0x00000300
++#define EUR_CR_CLKGATECTL_TSP_CLKG_SHIFT    8
++#define EUR_CR_CLKGATECTL_TA_CLKG_MASK      0x00003000
++#define EUR_CR_CLKGATECTL_TA_CLKG_SHIFT     12
++#define EUR_CR_CLKGATECTL_DPM_CLKG_MASK     0x00030000
++#define EUR_CR_CLKGATECTL_DPM_CLKG_SHIFT    16
++#define EUR_CR_CLKGATECTL_USE_CLKG_MASK     0x00300000
++#define EUR_CR_CLKGATECTL_USE_CLKG_SHIFT    20
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_MASK 0x01000000
++#define EUR_CR_CLKGATECTL_AUTO_MAN_REG_SHIFT 24
++#define EUR_CR_CLKGATESTATUS                0x0004
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_MASK   0x00000001
++#define EUR_CR_CLKGATESTATUS_2D_CLKS_SHIFT  0
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_MASK  0x00000010
++#define EUR_CR_CLKGATESTATUS_ISP_CLKS_SHIFT 4
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_MASK  0x00000100
++#define EUR_CR_CLKGATESTATUS_TSP_CLKS_SHIFT 8
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_MASK   0x00001000
++#define EUR_CR_CLKGATESTATUS_TA_CLKS_SHIFT  12
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_MASK  0x00010000
++#define EUR_CR_CLKGATESTATUS_DPM_CLKS_SHIFT 16
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_MASK  0x00100000
++#define EUR_CR_CLKGATESTATUS_USE_CLKS_SHIFT 20
++#define EUR_CR_CLKGATECTLOVR                0x0008
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_MASK   0x00000003
++#define EUR_CR_CLKGATECTLOVR_2D_CLKO_SHIFT  0
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_MASK  0x00000030
++#define EUR_CR_CLKGATECTLOVR_ISP_CLKO_SHIFT 4
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_MASK  0x00000300
++#define EUR_CR_CLKGATECTLOVR_TSP_CLKO_SHIFT 8
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_MASK   0x00003000
++#define EUR_CR_CLKGATECTLOVR_TA_CLKO_SHIFT  12
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_MASK  0x00030000
++#define EUR_CR_CLKGATECTLOVR_DPM_CLKO_SHIFT 16
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_MASK  0x00300000
++#define EUR_CR_CLKGATECTLOVR_USE_CLKO_SHIFT 20
++#define EUR_CR_CORE_ID                      0x0010
++#define EUR_CR_CORE_ID_CONFIG_MASK          0x0000FFFF
++#define EUR_CR_CORE_ID_CONFIG_SHIFT         0
++#define EUR_CR_CORE_ID_ID_MASK              0xFFFF0000
++#define EUR_CR_CORE_ID_ID_SHIFT             16
++#define EUR_CR_CORE_REVISION                0x0014
++#define EUR_CR_CORE_REVISION_MAINTENANCE_MASK 0x000000FF
++#define EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT 0
++#define EUR_CR_CORE_REVISION_MINOR_MASK     0x0000FF00
++#define EUR_CR_CORE_REVISION_MINOR_SHIFT    8
++#define EUR_CR_CORE_REVISION_MAJOR_MASK     0x00FF0000
++#define EUR_CR_CORE_REVISION_MAJOR_SHIFT    16
++#define EUR_CR_CORE_REVISION_DESIGNER_MASK  0xFF000000
++#define EUR_CR_CORE_REVISION_DESIGNER_SHIFT 24
++#define EUR_CR_DESIGNER_REV_FIELD1          0x0018
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT 0
++#define EUR_CR_DESIGNER_REV_FIELD2          0x001C
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_MASK 0xFFFFFFFF
++#define EUR_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT 0
++#define EUR_CR_SOFT_RESET                   0x0080
++#define EUR_CR_SOFT_RESET_BIF_RESET_MASK    0x00000001
++#define EUR_CR_SOFT_RESET_BIF_RESET_SHIFT   0
++#define EUR_CR_SOFT_RESET_TWOD_RESET_MASK   0x00000002
++#define EUR_CR_SOFT_RESET_TWOD_RESET_SHIFT  1
++#define EUR_CR_SOFT_RESET_DPM_RESET_MASK    0x00000004
++#define EUR_CR_SOFT_RESET_DPM_RESET_SHIFT   2
++#define EUR_CR_SOFT_RESET_TA_RESET_MASK     0x00000008
++#define EUR_CR_SOFT_RESET_TA_RESET_SHIFT    3
++#define EUR_CR_SOFT_RESET_USE_RESET_MASK    0x00000010
++#define EUR_CR_SOFT_RESET_USE_RESET_SHIFT   4
++#define EUR_CR_SOFT_RESET_ISP_RESET_MASK    0x00000020
++#define EUR_CR_SOFT_RESET_ISP_RESET_SHIFT   5
++#define EUR_CR_SOFT_RESET_TSP_RESET_MASK    0x00000040
++#define EUR_CR_SOFT_RESET_TSP_RESET_SHIFT   6
++#define EUR_CR_EVENT_HOST_ENABLE2           0x0110
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR2            0x0114
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS2                0x0118
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS2_DPM_3D_FREE_LOAD_SHIFT 1
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS2_DPM_TA_FREE_LOAD_SHIFT 0
++#define EUR_CR_EVENT_STATUS                 0x012C
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_STATUS_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_STATUS_TIMER_MASK      0x20000000
++#define EUR_CR_EVENT_STATUS_TIMER_SHIFT     29
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_STATUS_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_STATUS_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_STATUS_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_STATUS_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_STATUS_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_STATUS_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_STATUS_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_STATUS_OTPM_INV_MASK   0x00100000
++#define EUR_CR_EVENT_STATUS_OTPM_INV_SHIFT  20
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_STATUS_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_STATUS_ISP_HALT_MASK   0x00020000
++#define EUR_CR_EVENT_STATUS_ISP_HALT_SHIFT  17
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_STATUS_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_STATUS_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_STATUS_SW_EVENT_MASK   0x00004000
++#define EUR_CR_EVENT_STATUS_SW_EVENT_SHIFT  14
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_STATUS_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_STATUS_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_MASK  0x00000800
++#define EUR_CR_EVENT_STATUS_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_MASK  0x00000400
++#define EUR_CR_EVENT_STATUS_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_STATUS_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_STATUS_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_STATUS_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_STATUS_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_STATUS_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_STATUS_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_STATUS_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_ENABLE            0x0130
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_ENABLE_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_MASK 0x20000000
++#define EUR_CR_EVENT_HOST_ENABLE_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_ENABLE_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_ENABLE_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_ENABLE_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_ENABLE_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_ENABLE_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_ENABLE_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_ENABLE_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_ENABLE_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_ENABLE_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_ENABLE_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_EVENT_HOST_CLEAR             0x0134
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_MASK 0x80000000
++#define EUR_CR_EVENT_HOST_CLEAR_MASTER_INTERRUPT_SHIFT 31
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_MASK  0x20000000
++#define EUR_CR_EVENT_HOST_CLEAR_TIMER_SHIFT 29
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_MASK 0x10000000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_DPM_FAULT_SHIFT 28
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_MASK 0x08000000
++#define EUR_CR_EVENT_HOST_CLEAR_TWOD_COMPLETE_SHIFT 27
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_MASK 0x04000000
++#define EUR_CR_EVENT_HOST_CLEAR_MADD_CACHE_INVALCOMPLETE_SHIFT 26
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_MASK 0x02000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_ZLS_SHIFT 25
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_MASK 0x01000000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_TA_MEM_FREE_SHIFT 24
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_MASK 0x00800000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_END_TILE_SHIFT 23
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_MASK 0x00400000
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_INITEND_SHIFT 22
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_MASK 0x00200000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_LOADED_SHIFT 21
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_MASK 0x00100000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_INV_SHIFT 20
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_MASK 0x00080000
++#define EUR_CR_EVENT_HOST_CLEAR_OTPM_FLUSHED_SHIFT 19
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_MASK 0x00040000
++#define EUR_CR_EVENT_HOST_CLEAR_PIXELBE_END_RENDER_SHIFT 18
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_MASK 0x00020000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_HALT_SHIFT 17
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_MASK 0x00010000
++#define EUR_CR_EVENT_HOST_CLEAR_ISP_VISIBILITY_FAIL_SHIFT 16
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_MASK 0x00008000
++#define EUR_CR_EVENT_HOST_CLEAR_BREAKPOINT_SHIFT 15
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_MASK 0x00004000
++#define EUR_CR_EVENT_HOST_CLEAR_SW_EVENT_SHIFT 14
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_MASK 0x00002000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_FINISHED_SHIFT 13
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_MASK 0x00001000
++#define EUR_CR_EVENT_HOST_CLEAR_TA_TERMINATE_SHIFT 12
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_MASK 0x00000800
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_CLEAR_SHIFT 11
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_MASK 0x00000400
++#define EUR_CR_EVENT_HOST_CLEAR_TPC_FLUSH_SHIFT 10
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_MASK 0x00000200
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_CLEAR_SHIFT 9
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_MASK 0x00000100
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_LOAD_SHIFT 8
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_MASK 0x00000080
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_CONTROL_STORE_SHIFT 7
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_MASK 0x00000040
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_CLEAR_SHIFT 6
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_MASK 0x00000020
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_LOAD_SHIFT 5
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_MASK 0x00000010
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_STATE_STORE_SHIFT 4
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_MASK 0x00000008
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_REACHED_MEM_THRESH_SHIFT 3
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_MASK 0x00000004
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_GBL_SHIFT 2
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_MASK 0x00000002
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_OUT_OF_MEMORY_MT_SHIFT 1
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_MASK 0x00000001
++#define EUR_CR_EVENT_HOST_CLEAR_DPM_3D_MEM_FREE_SHIFT 0
++#define EUR_CR_PDS                          0x0ABC
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_MASK 0x00000040
++#define EUR_CR_PDS_DOUT_TIMEOUT_DISABLE_SHIFT 6
++#define EUR_CR_PDS_EXEC_BASE                0x0AB8
++#define EUR_CR_PDS_EXEC_BASE_ADDR_MASK      0x0FF00000
++#define EUR_CR_PDS_EXEC_BASE_ADDR_SHIFT     20
++#define EUR_CR_EVENT_KICKER                 0x0AC4
++#define EUR_CR_EVENT_KICKER_ADDRESS_MASK    0x0FFFFFF0
++#define EUR_CR_EVENT_KICKER_ADDRESS_SHIFT   4
++#define EUR_CR_EVENT_KICK                   0x0AC8
++#define EUR_CR_EVENT_KICK_NOW_MASK          0x00000001
++#define EUR_CR_EVENT_KICK_NOW_SHIFT         0
++#define EUR_CR_PDS_INV0                     0x0AD0
++#define EUR_CR_PDS_INV0_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV0_DSC_SHIFT           0
++#define EUR_CR_PDS_INV1                     0x0AD4
++#define EUR_CR_PDS_INV1_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV1_DSC_SHIFT           0
++#define EUR_CR_PDS_INV2                     0x0AD8
++#define EUR_CR_PDS_INV2_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV2_DSC_SHIFT           0
++#define EUR_CR_PDS_INV3                     0x0ADC
++#define EUR_CR_PDS_INV3_DSC_MASK            0x00000001
++#define EUR_CR_PDS_INV3_DSC_SHIFT           0
++#define EUR_CR_PDS_INV_CSC                  0x0AE0
++#define EUR_CR_PDS_INV_CSC_KICK_MASK        0x00000001
++#define EUR_CR_PDS_INV_CSC_KICK_SHIFT       0
++#define EUR_CR_PDS_PC_BASE                  0x0B2C
++#define EUR_CR_PDS_PC_BASE_ADDRESS_MASK     0x3FFFFFFF
++#define EUR_CR_PDS_PC_BASE_ADDRESS_SHIFT    0
++#define EUR_CR_BIF_CTRL                     0x0C00
++#define EUR_CR_BIF_CTRL_NOREORDER_MASK      0x00000001
++#define EUR_CR_BIF_CTRL_NOREORDER_SHIFT     0
++#define EUR_CR_BIF_CTRL_PAUSE_MASK          0x00000002
++#define EUR_CR_BIF_CTRL_PAUSE_SHIFT         1
++#define EUR_CR_BIF_CTRL_FLUSH_MASK          0x00000004
++#define EUR_CR_BIF_CTRL_FLUSH_SHIFT         2
++#define EUR_CR_BIF_CTRL_INVALDC_MASK        0x00000008
++#define EUR_CR_BIF_CTRL_INVALDC_SHIFT       3
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK    0x00000010
++#define EUR_CR_BIF_CTRL_CLEAR_FAULT_SHIFT   4
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_MASK 0x00000100
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_CACHE_SHIFT 8
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_MASK 0x00000200
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_VDM_SHIFT 9
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_MASK  0x00000400
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TE_SHIFT 10
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_MASK 0x00000800
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TWOD_SHIFT 11
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_MASK 0x00001000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_PBE_SHIFT 12
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_MASK 0x00002000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_TSPP_SHIFT 13
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_MASK 0x00004000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_ISP_SHIFT 14
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_MASK 0x00008000
++#define EUR_CR_BIF_CTRL_MMU_BYPASS_USE_SHIFT 15
++#define EUR_CR_BIF_INT_STAT                 0x0C04
++#define EUR_CR_BIF_INT_STAT_FAULT_MASK      0x00003FFF
++#define EUR_CR_BIF_INT_STAT_FAULT_SHIFT     0
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_MASK    0x00004000
++#define EUR_CR_BIF_INT_STAT_PF_N_RW_SHIFT   14
++#define EUR_CR_BIF_FAULT                    0x0C08
++#define EUR_CR_BIF_FAULT_ADDR_MASK          0x0FFFF000
++#define EUR_CR_BIF_FAULT_ADDR_SHIFT         12
++#define EUR_CR_BIF_DIR_LIST_BASE0           0x0C84
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_MASK 0xFFFFF000
++#define EUR_CR_BIF_DIR_LIST_BASE0_ADDR_SHIFT 12
++#define EUR_CR_BIF_TWOD_REQ_BASE            0x0C88
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK  0x0FF00000
++#define EUR_CR_BIF_TWOD_REQ_BASE_ADDR_SHIFT 20
++#define EUR_CR_BIF_TA_REQ_BASE              0x0C90
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_MASK    0x0FF00000
++#define EUR_CR_BIF_TA_REQ_BASE_ADDR_SHIFT   20
++#define EUR_CR_BIF_MEM_REQ_STAT             0x0CA8
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_MASK  0x000000FF
++#define EUR_CR_BIF_MEM_REQ_STAT_READS_SHIFT 0
++#define EUR_CR_BIF_3D_REQ_BASE              0x0CAC
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_MASK    0x0FF00000
++#define EUR_CR_BIF_3D_REQ_BASE_ADDR_SHIFT   20
++#define EUR_CR_BIF_ZLS_REQ_BASE             0x0CB0
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_MASK   0x0FF00000
++#define EUR_CR_BIF_ZLS_REQ_BASE_ADDR_SHIFT  20
++#define EUR_CR_2D_BLIT_STATUS               0x0E04
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_MASK 0x00FFFFFF
++#define EUR_CR_2D_BLIT_STATUS_COMPLETE_SHIFT 0
++#define EUR_CR_2D_BLIT_STATUS_BUSY_MASK     0x01000000
++#define EUR_CR_2D_BLIT_STATUS_BUSY_SHIFT    24
++#define EUR_CR_2D_VIRTUAL_FIFO_0            0x0E10
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_MASK 0x00000001
++#define EUR_CR_2D_VIRTUAL_FIFO_0_ENABLE_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MASK 0x0000000E
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_SHIFT 1
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_MASK 0x00000FF0
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_DIV_SHIFT 4
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_MASK 0x0000F000
++#define EUR_CR_2D_VIRTUAL_FIFO_0_FLOWRATE_MUL_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1            0x0E14
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_MASK 0x00000FFF
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_ACC_SHIFT 0
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_MASK 0x00FFF000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MAX_ACC_SHIFT 12
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_MASK 0xFF000000
++#define EUR_CR_2D_VIRTUAL_FIFO_1_MIN_METRIC_SHIFT 24
++#define EUR_CR_USE_CODE_BASE(X)     (0x0A0C + (4 * (X)))
++#define EUR_CR_USE_CODE_BASE_ADDR_MASK      0x00FFFFFF
++#define EUR_CR_USE_CODE_BASE_ADDR_SHIFT     0
++#define EUR_CR_USE_CODE_BASE_DM_MASK        0x03000000
++#define EUR_CR_USE_CODE_BASE_DM_SHIFT       24
++#define EUR_CR_USE_CODE_BASE_SIZE_UINT32 16
++#define EUR_CR_USE_CODE_BASE_NUM_ENTRIES 16
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxcoretypes.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,44 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGXCORETYPES_KM_H_
++#define _SGXCORETYPES_KM_H_
++
++typedef enum
++{
++      SGX_CORE_ID_INVALID = 0,
++      SGX_CORE_ID_530 = 2,
++      SGX_CORE_ID_535 = 3,
++} SGX_CORE_ID_TYPE;
++
++typedef struct _SGX_CORE_INFO
++{
++      SGX_CORE_ID_TYPE        eID;
++      IMG_UINT32                      uiRev;
++} SGX_CORE_INFO, *PSGX_CORE_INFO;
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxdefs.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,47 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGXDEFS_H_
++#define       _SGXDEFS_H_
++
++#if defined(SGX530)
++#include "sgx530defs.h"
++#else
++#if defined(SGX535)
++#include "sgx535defs.h"
++#else
++#if defined(SGX535_V1_1)
++#include "sgx535defs.h"
++#else
++#endif
++#endif
++#endif
++
++#include "sgxerrata.h"
++#include "sgxfeaturedefs.h"
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxerrata.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,108 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SGXERRATA_KM_H_
++#define _SGXERRATA_KM_H_
++
++#if defined(SGX530) && !defined(SGX_CORE_DEFINED)
++      
++      #define SGX_CORE_REV_HEAD       0
++      #if defined(USE_SGX_CORE_REV_HEAD)
++              
++              #define SGX_CORE_REV    SGX_CORE_REV_HEAD
++      #endif
++
++      #if SGX_CORE_REV == 103
++      #else
++      #if SGX_CORE_REV == 110
++      #else
++      #if SGX_CORE_REV == 111
++      #else
++      #if SGX_CORE_REV == 120
++      #else
++      #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++              
++      #else
++              #error "sgxerrata.h: SGX530 Core Revision unspecified"
++      #endif
++      #endif
++      #endif
++      #endif
++        #endif
++      
++      #define SGX_CORE_DEFINED
++#endif
++
++
++#if (defined(SGX535) || defined(SGX535_V1_1)) && !defined(SGX_CORE_DEFINED)
++      
++      #define SGX_CORE_REV_HEAD       0
++      #if defined(USE_SGX_CORE_REV_HEAD)
++              
++              #define SGX_CORE_REV    SGX_CORE_REV_HEAD
++      #endif
++
++      #if SGX_CORE_REV == 111
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23410
++              #define FIX_HW_BRN_22693
++      #else
++      #if SGX_CORE_REV == 1111
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23410
++              #define FIX_HW_BRN_22693
++      #else
++      #if SGX_CORE_REV == 112
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23410
++              #define FIX_HW_BRN_22693
++      #else
++      #if SGX_CORE_REV == 113
++              #define FIX_HW_BRN_23281
++              #define FIX_HW_BRN_23944
++              #define FIX_HW_BRN_23410
++      #else
++      #if SGX_CORE_REV == 121
++              #define FIX_HW_BRN_23944
++              #define FIX_HW_BRN_23410
++      #else
++      #if SGX_CORE_REV == SGX_CORE_REV_HEAD
++              
++      #else
++              #error "sgxerrata.h: SGX535 Core Revision unspecified"
++
++      #endif
++      #endif
++      #endif
++      #endif
++      #endif
++      #endif
++      
++      #define SGX_CORE_DEFINED
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxfeaturedefs.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,55 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined(SGX530)
++      #define SGX_CORE_FRIENDLY_NAME                                                  "SGX530"
++      #define SGX_CORE_ID                                                                             SGX_CORE_ID_530
++      #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (28)
++      #define SGX_FEATURE_AUTOCLOCKGATING
++#else
++#if defined(SGX535)
++      #define SGX_CORE_FRIENDLY_NAME                                                  "SGX535"
++      #define SGX_CORE_ID                                                                             SGX_CORE_ID_535
++      #define SGX_FEATURE_ADDRESS_SPACE_SIZE                                  (32)
++      #define SGX_FEATURE_MULTIPLE_MEM_CONTEXTS
++      #define SGX_FEATURE_2D_HARDWARE
++              #define SGX_FEATURE_AUTOCLOCKGATING
++
++#endif
++#endif
++
++#if !defined(SGX_DONT_SWITCH_OFF_FEATURES)
++
++#if defined(FIX_HW_BRN_22693) 
++#undef SGX_FEATURE_AUTOCLOCKGATING
++#endif
++
++#endif 
++
++#include "img_types.h"
++
++#include "sgxcoretypes.h"
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h
+--- git/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/hwdefs/sgxmmu.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SGXMMU_KM_H__)
++#define __SGXMMU_KM_H__
++
++#define SGX_MMU_PAGE_SHIFT                            (12)
++#define SGX_MMU_PAGE_SIZE                             (1<<SGX_MMU_PAGE_SHIFT)
++
++#define SGX_MMU_PD_SHIFT                              (10)
++#define SGX_MMU_PD_SIZE                                       (1<<SGX_MMU_PD_SHIFT)
++#define SGX_MMU_PD_MASK                                       (0xFFC00000)
++
++#define SGX_MMU_PDE_ADDR_MASK                 (0xFFFFF000)
++#define SGX_MMU_PDE_VALID                             (0x00000001)
++#define SGX_MMU_PDE_WRITEONLY                 (0x00000002)
++#define SGX_MMU_PDE_READONLY                  (0x00000004)
++#define SGX_MMU_PDE_CACHECONSISTENT           (0x00000008)
++#define SGX_MMU_PDE_EDMPROTECT                        (0x00000010)
++
++#define SGX_MMU_PT_SHIFT                              (10)
++#define SGX_MMU_PT_SIZE                                       (1<<SGX_MMU_PT_SHIFT)
++#define SGX_MMU_PT_MASK                                       (0x003FF000)
++
++#define SGX_MMU_PTE_ADDR_MASK                 (0xFFFFF000)
++#define SGX_MMU_PTE_VALID                             (0x00000001)
++#define SGX_MMU_PTE_WRITEONLY                 (0x00000002)
++#define SGX_MMU_PTE_READONLY                  (0x00000004)
++#define SGX_MMU_PTE_CACHECONSISTENT           (0x00000008)
++#define SGX_MMU_PTE_EDMPROTECT                        (0x00000010)
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/buffer_manager.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,210 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _BUFFER_MANAGER_H_
++#define _BUFFER_MANAGER_H_
++
++#include "img_types.h"
++#include "ra.h"
++
++#if defined(__cplusplus)
++extern "C"{
++#endif        
++      
++typedef struct _BM_HEAP_ BM_HEAP;
++
++struct _BM_MAPPING_
++{
++      enum
++      {
++              hm_wrapped = 1,         
++              hm_wrapped_scatter,     
++              hm_wrapped_virtaddr, 
++              hm_env,                         
++              hm_contiguous           
++      } eCpuMemoryOrigin;
++
++      BM_HEAP                         *pBMHeap;       
++      RA_ARENA                        *pArena;        
++
++      IMG_CPU_VIRTADDR        CpuVAddr;
++      IMG_CPU_PHYADDR         CpuPAddr;
++      IMG_DEV_VIRTADDR        DevVAddr;
++      IMG_SYS_PHYADDR         *psSysAddr;
++      IMG_SIZE_T                      uSize;
++    IMG_HANDLE          hOSMemHandle;
++      IMG_UINT32                      ui32Flags;
++};
++
++typedef struct _BM_BUF_
++{
++      IMG_CPU_VIRTADDR        *CpuVAddr;
++    IMG_VOID            *hOSMemHandle;
++      IMG_CPU_PHYADDR         CpuPAddr;
++      IMG_DEV_VIRTADDR        DevVAddr;
++
++      BM_MAPPING                      *pMapping;
++      IMG_UINT32                      ui32RefCount;
++} BM_BUF;
++
++struct _BM_HEAP_
++{
++      IMG_UINT32                              ui32Attribs;
++      BM_CONTEXT                              *pBMContext;
++      RA_ARENA                                *pImportArena;
++      RA_ARENA                                *pLocalDevMemArena;
++      RA_ARENA                                *pVMArena;
++      DEV_ARENA_DESCRIPTOR    sDevArena;
++      MMU_HEAP                                *pMMUHeap;
++      
++      struct _BM_HEAP_                *psNext;
++};
++
++struct _BM_CONTEXT_
++{
++      MMU_CONTEXT     *psMMUContext;
++
++      
++       BM_HEAP *psBMHeap;
++       
++      
++       BM_HEAP *psBMSharedHeap;
++
++      PVRSRV_DEVICE_NODE *psDeviceNode;
++
++      
++      HASH_TABLE *pBufferHash;
++
++      
++      IMG_HANDLE hResItem;
++
++      IMG_UINT32 ui32RefCount;
++
++      
++
++      struct _BM_CONTEXT_ *psNext;
++};
++
++
++
++typedef void *BM_HANDLE;
++
++#define BP_POOL_MASK         0x7 
++
++#define BP_CONTIGUOUS                 (1 << 3)
++#define BP_PARAMBUFFER                        (1 << 4)
++
++#define BM_MAX_DEVMEM_ARENAS  2
++
++IMG_HANDLE
++BM_CreateContext (PVRSRV_DEVICE_NODE *psDeviceNode,
++                                      IMG_DEV_PHYADDR *psPDDevPAddr,
++                                      IMG_BOOL bKernelContext,
++                                      IMG_BOOL *pbCreated);
++
++PVRSRV_ERROR
++BM_DestroyContext (IMG_HANDLE hBMContext,
++                                      IMG_BOOL bKernelContext,
++                                      IMG_BOOL bResManCallback,
++                                      IMG_BOOL *pbCreated);
++
++
++IMG_HANDLE 
++BM_CreateHeap (IMG_HANDLE hBMContext,
++                              DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo);
++
++IMG_VOID 
++BM_DestroyHeap (IMG_HANDLE hDevMemHeap);
++
++
++IMG_BOOL 
++BM_Reinitialise (PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_BOOL
++BM_Alloc (IMG_HANDLE                  hDevMemHeap,
++                      IMG_DEV_VIRTADDR        *psDevVAddr,
++                      IMG_SIZE_T                      uSize,
++                      IMG_UINT32                      *pui32Flags,
++                      IMG_UINT32                      uDevVAddrAlignment,
++                      BM_HANDLE                       *phBuf);
++
++IMG_BOOL
++BM_Wrap (     IMG_HANDLE hDevMemHeap,
++                  IMG_UINT32 ui32Size,
++                      IMG_UINT32 ui32Offset,
++                      IMG_BOOL bPhysContig,
++                      IMG_SYS_PHYADDR *psSysAddr,
++                      IMG_VOID *pvCPUVAddr,
++                      IMG_UINT32 *pui32Flags,
++                      BM_HANDLE *phBuf);
++
++void
++BM_Free (BM_HANDLE hBuf, 
++              IMG_UINT32 ui32Flags);
++
++
++IMG_CPU_VIRTADDR
++BM_HandleToCpuVaddr (BM_HANDLE hBuf);
++
++IMG_DEV_VIRTADDR
++BM_HandleToDevVaddr (BM_HANDLE hBuf);
++
++IMG_SYS_PHYADDR
++BM_HandleToSysPaddr (BM_HANDLE hBuf);
++
++IMG_HANDLE
++BM_HandleToOSMemHandle (BM_HANDLE hBuf);
++
++IMG_BOOL
++BM_ContiguousStatistics (IMG_UINT32 uFlags,
++                         IMG_UINT32 *pTotalBytes,
++                         IMG_UINT32 *pAvailableBytes);
++
++
++PVRSRV_ERROR BM_GetPhysPageAddr(PVRSRV_KERNEL_MEM_INFO *psMemInfo, 
++                                                              IMG_DEV_VIRTADDR sDevVPageAddr,  
++                                                              IMG_DEV_PHYADDR *psDevPAddr);
++
++PVRSRV_ERROR BM_GetHeapInfo(IMG_HANDLE hDevMemHeap, 
++                                                      PVRSRV_HEAP_INFO *psHeapInfo);
++
++MMU_CONTEXT* BM_GetMMUContext(IMG_HANDLE hDevMemHeap);
++
++MMU_CONTEXT* BM_GetMMUContextFromMemContext(IMG_HANDLE hDevMemContext);
++
++IMG_HANDLE BM_GetMMUHeap(IMG_HANDLE hDevMemHeap);
++
++PVRSRV_DEVICE_NODE* BM_GetDeviceNode(IMG_HANDLE hDevMemContext);
++
++
++IMG_HANDLE BM_GetMappingHandle(PVRSRV_KERNEL_MEM_INFO *psMemInfo);
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/device.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/device.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/device.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/device.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,267 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __DEVICE_H__
++#define __DEVICE_H__
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++      
++#include "ra.h"               
++#include "resman.h"           
++
++typedef struct _BM_CONTEXT_ BM_CONTEXT;
++
++typedef struct _MMU_HEAP_ MMU_HEAP;
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++#define PVRSRV_BACKINGSTORE_SYSMEM_CONTIG             (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+0))
++#define PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG  (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+1))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG           (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+2))
++#define PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG        (1<<(PVRSRV_MEM_BACKINGSTORE_FIELD_SHIFT+3))
++
++typedef IMG_UINT32 DEVICE_MEMORY_HEAP_TYPE;
++#define DEVICE_MEMORY_HEAP_PERCONTEXT         0
++#define DEVICE_MEMORY_HEAP_KERNEL                     1
++#define DEVICE_MEMORY_HEAP_SHARED                     2
++#define DEVICE_MEMORY_HEAP_SHARED_EXPORTED    3
++
++#define PVRSRV_DEVICE_NODE_FLAGS_PORT80DISPLAY        1
++#define PVRSRV_DEVICE_NODE_FLAGS_MMU_OPT_INV  2       
++
++typedef struct _DEVICE_MEMORY_HEAP_INFO_
++{
++      
++      IMG_UINT32                              ui32HeapID;
++
++      
++      IMG_CHAR                                *pszName;
++
++      
++      IMG_CHAR                                *pszBSName;
++      
++      
++      IMG_DEV_VIRTADDR                sDevVAddrBase;
++
++      
++      IMG_UINT32                              ui32HeapSize;
++
++      
++      IMG_UINT32                              ui32Attribs;
++
++      
++      DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++      
++      
++      IMG_HANDLE                              hDevMemHeap;
++      
++      
++      RA_ARENA                                *psLocalDevMemArena;
++
++} DEVICE_MEMORY_HEAP_INFO;
++
++typedef struct _DEVICE_MEMORY_INFO_
++{
++      
++      IMG_UINT32                              ui32AddressSpaceSizeLog2;
++
++      
++
++
++      IMG_UINT32                              ui32Flags;
++
++      
++      IMG_UINT32                              ui32HeapCount;
++      
++      
++      IMG_UINT32                              ui32SyncHeapID;
++      
++      
++      IMG_UINT32                              ui32MappingHeapID;
++
++      
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++      
++    BM_CONTEXT                                *pBMKernelContext;
++
++      
++    BM_CONTEXT                                *pBMContext;
++
++} DEVICE_MEMORY_INFO;
++
++
++typedef struct DEV_ARENA_DESCRIPTOR_TAG
++{
++      IMG_UINT32                              ui32HeapID;             
++
++      IMG_CHAR                                *pszName;               
++
++      IMG_DEV_VIRTADDR                BaseDevVAddr;   
++
++      IMG_UINT32                              ui32Size;               
++
++      DEVICE_MEMORY_HEAP_TYPE DevMemHeapType;
++
++      DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeapInfo;
++
++} DEV_ARENA_DESCRIPTOR;
++
++typedef struct _SYS_DATA_TAG_ *PSYS_DATA;
++
++typedef struct _PVRSRV_DEVICE_NODE_
++{
++      PVRSRV_DEVICE_IDENTIFIER        sDevId;
++      IMG_UINT32                                      ui32RefCount;
++
++      
++
++      
++      PVRSRV_ERROR                    (*pfnInitDevice) (IMG_VOID*);
++      
++      PVRSRV_ERROR                    (*pfnDeInitDevice) (IMG_VOID*);
++
++      
++      PVRSRV_ERROR                    (*pfnMMUInitialise)(struct _PVRSRV_DEVICE_NODE_*, MMU_CONTEXT**, IMG_DEV_PHYADDR*);
++      IMG_VOID                                (*pfnMMUFinalise)(MMU_CONTEXT*);
++      IMG_VOID                                (*pfnMMUInsertHeap)(MMU_CONTEXT*, MMU_HEAP*);
++      MMU_HEAP*                               (*pfnMMUCreate)(MMU_CONTEXT*,DEV_ARENA_DESCRIPTOR*,RA_ARENA**);
++      IMG_VOID                                (*pfnMMUDelete)(MMU_HEAP*);
++      IMG_BOOL                                (*pfnMMUAlloc)(MMU_HEAP*pMMU,
++                                                                                 IMG_SIZE_T uSize,
++                                                                                 IMG_SIZE_T *pActualSize,
++                                                                                 IMG_UINT32 uFlags,
++                                                                                 IMG_UINT32 uDevVAddrAlignment,
++                                                                                 IMG_DEV_VIRTADDR *pDevVAddr);
++      IMG_VOID                                (*pfnMMUFree)(MMU_HEAP*,IMG_DEV_VIRTADDR,IMG_UINT32);
++      IMG_VOID                                (*pfnMMUEnable)(MMU_HEAP*);
++      IMG_VOID                                (*pfnMMUDisable)(MMU_HEAP*);
++      IMG_VOID                                (*pfnMMUMapPages)(MMU_HEAP *pMMU,
++                                                                                        IMG_DEV_VIRTADDR devVAddr,
++                                                                                        IMG_SYS_PHYADDR SysPAddr,
++                                                                                        IMG_SIZE_T uSize,
++                                                                                        IMG_UINT32 ui32MemFlags,
++                                                                                        IMG_HANDLE hUniqueTag);
++      IMG_VOID                                (*pfnMMUMapShadow)(MMU_HEAP            *pMMU,
++                                                                                         IMG_DEV_VIRTADDR    MapBaseDevVAddr,
++                                                                                         IMG_SIZE_T          uSize, 
++                                                                                         IMG_CPU_VIRTADDR    CpuVAddr,
++                                                                                         IMG_HANDLE          hOSMemHandle,
++                                                                                         IMG_DEV_VIRTADDR    *pDevVAddr,
++                                                                                         IMG_UINT32 ui32MemFlags,
++                                                                                         IMG_HANDLE hUniqueTag);
++      IMG_VOID                                (*pfnMMUUnmapPages)(MMU_HEAP *pMMU,
++                                                                                              IMG_DEV_VIRTADDR dev_vaddr,
++                                                                                              IMG_UINT32 ui32PageCount,
++                                                                                              IMG_HANDLE hUniqueTag);
++
++      IMG_VOID                                (*pfnMMUMapScatter)(MMU_HEAP *pMMU,
++                                                                                              IMG_DEV_VIRTADDR DevVAddr,
++                                                                                              IMG_SYS_PHYADDR *psSysAddr,
++                                                                                              IMG_SIZE_T uSize,
++                                                                                              IMG_UINT32 ui32MemFlags,
++                                                                                              IMG_HANDLE hUniqueTag);
++
++      IMG_DEV_PHYADDR                 (*pfnMMUGetPhysPageAddr)(MMU_HEAP *pMMUHeap, IMG_DEV_VIRTADDR sDevVPageAddr);
++      IMG_DEV_PHYADDR                 (*pfnMMUGetPDDevPAddr)(MMU_CONTEXT *pMMUContext);
++
++      
++      IMG_BOOL                                (*pfnDeviceISR)(IMG_VOID*);
++      
++      IMG_VOID                                *pvISRData;
++      
++      IMG_UINT32                              ui32SOCInterruptBit;
++      
++      IMG_VOID                                (*pfnDeviceMISR)(IMG_VOID*);
++
++      
++      IMG_VOID                                (*pfnDeviceCommandComplete)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
++      
++      IMG_BOOL                                bReProcessDeviceCommandComplete;
++      
++      
++      DEVICE_MEMORY_INFO              sDevMemoryInfo;
++
++      
++      IMG_VOID                                *pvDevice;
++      IMG_UINT32                              ui32pvDeviceSize; 
++      IMG_VOID                                *hDeviceOSMemHandle;
++              
++      
++      PRESMAN_ITEM                    psResItem;
++      
++      
++      PSYS_DATA                               psSysData;
++      
++      
++      RA_ARENA                                *psLocalDevMemArena;
++      
++      IMG_UINT32                              ui32Flags;
++      
++      struct _PVRSRV_DEVICE_NODE_     *psNext;
++} PVRSRV_DEVICE_NODE;
++
++PVRSRV_ERROR PVRSRVRegisterDevice(PSYS_DATA psSysData,
++                                                                PVRSRV_ERROR (*pfnRegisterDevice)(PVRSRV_DEVICE_NODE*),
++                                                                IMG_UINT32 ui32SOCInterruptBit,
++                                                                IMG_UINT32 *pui32DeviceIndex );
++
++PVRSRV_ERROR PVRSRVInitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++PVRSRV_ERROR PVRSRVDeinitialiseDevice(IMG_UINT32 ui32DevIndex);
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT PVRSRV_ERROR PollForValueKM(volatile IMG_UINT32* pui32LinMemAddr,
++                                                                         IMG_UINT32 ui32Value,
++                                                                         IMG_UINT32 ui32Mask,
++                                                                         IMG_UINT32 ui32Waitus,
++                                                                         IMG_UINT32 ui32Tries);
++
++#endif 
++
++
++#if defined (USING_ISR_INTERRUPTS)
++PVRSRV_ERROR PollForInterruptKM(IMG_UINT32 ui32Value,
++                                                              IMG_UINT32 ui32Mask,
++                                                              IMG_UINT32 ui32Waitus,
++                                                              IMG_UINT32 ui32Tries);
++#endif 
++
++
++PVRSRV_ERROR PVRSRVInit(PSYS_DATA psSysData);
++IMG_VOID PVRSRVDeInit(PSYS_DATA psSysData);
++IMG_BOOL PVRSRVDeviceLISR(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL PVRSRVSystemLISR(IMG_VOID *pvSysData);
++IMG_VOID PVRSRVMISR(IMG_VOID *pvSysData);
++
++#if defined(__cplusplus)
++}
++#endif
++      
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/handle.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/handle.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/handle.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/handle.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,339 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __HANDLE_H__
++#define __HANDLE_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "hash.h"
++#include "resman.h"
++
++typedef enum
++{
++      PVRSRV_HANDLE_TYPE_NONE = 0,
++      PVRSRV_HANDLE_TYPE_PERPROC_DATA,
++      PVRSRV_HANDLE_TYPE_DEV_NODE,
++      PVRSRV_HANDLE_TYPE_DEV_MEM_CONTEXT,
++      PVRSRV_HANDLE_TYPE_DEV_MEM_HEAP,
++      PVRSRV_HANDLE_TYPE_MEM_INFO,
++      PVRSRV_HANDLE_TYPE_SYNC_INFO,
++      PVRSRV_HANDLE_TYPE_DISP_INFO,
++      PVRSRV_HANDLE_TYPE_DISP_SWAP_CHAIN,
++      PVRSRV_HANDLE_TYPE_BUF_INFO,
++      PVRSRV_HANDLE_TYPE_DISP_BUFFER,
++      PVRSRV_HANDLE_TYPE_BUF_BUFFER,
++      PVRSRV_HANDLE_TYPE_SGX_HW_RENDER_CONTEXT,
++      PVRSRV_HANDLE_TYPE_SHARED_PB_DESC,
++      PVRSRV_HANDLE_TYPE_MEM_INFO_REF,
++      PVRSRV_HANDLE_TYPE_SHARED_SYS_MEM_INFO,
++      PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT
++} PVRSRV_HANDLE_TYPE;
++
++typedef enum
++{
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_SHARED = 1,
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 2,
++      
++      PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 4
++} PVRSRV_HANDLE_ALLOC_FLAG;
++
++struct sHandleList
++{
++      IMG_UINT32 ui32Prev;
++      IMG_UINT32 ui32Next;
++      IMG_HANDLE hParent;
++};
++
++struct sHandle
++{
++      
++      PVRSRV_HANDLE_TYPE eType;
++      
++      IMG_VOID *pvData;
++      
++      IMG_UINT32 ui32NextIndexPlusOne;
++      
++      PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++      
++      IMG_UINT32 ui32PID;
++      
++      IMG_UINT32 ui32Index;
++      
++      struct sHandleList sChildren;
++      
++      struct sHandleList sSiblings;
++};
++
++typedef struct _PVRSRV_HANDLE_BASE_
++{
++      
++      IMG_HANDLE hBaseBlockAlloc;
++
++      
++      IMG_UINT32 ui32PID;
++
++      
++      IMG_HANDLE hHandBlockAlloc;
++
++      
++      PRESMAN_ITEM psResManItem;
++
++      
++      struct sHandle *psHandleArray;
++
++      
++      HASH_TABLE *psHashTab;
++
++      
++      IMG_UINT32 ui32FreeHandCount;
++
++      
++      IMG_UINT32 ui32FirstFreeIndex;
++
++      
++      IMG_UINT32 ui32TotalHandCount;
++
++      
++      IMG_UINT32 ui32LastFreeIndexPlusOne;
++} PVRSRV_HANDLE_BASE;
++
++#ifdef        PVR_SECURE_HANDLES
++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
++
++#define       KERNEL_HANDLE_BASE (gpsKernelHandleBase)
++
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag);
++
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
++
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle);
++
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
++
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
++
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, IMG_UINT32 ui32PID);
++
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase);
++
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID);
++
++#else 
++
++#define KERNEL_HANDLE_BASE IMG_NULL
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(eFlag);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *phHandle = pvData;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(eFlag);
++      PVR_UNREFERENCED_PARAMETER(hParent);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *phHandle = pvData;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFindHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, IMG_VOID *pvData, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *phHandle = pvData;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandleAnyType)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandleAnyType(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, PVRSRV_HANDLE_TYPE *peType, IMG_HANDLE hHandle)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      
++      *peType = PVRSRV_HANDLE_TYPE_NONE;
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      PVR_UNREFERENCED_PARAMETER(eType);
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupSubHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(hAncestor);
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVGetParentHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(hHandle);
++
++      *phParent = IMG_NULL;
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVLookupAndReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVLookupAndReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_PVOID *ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      *ppvData = hHandle;
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVReleaseHandle)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType)
++{
++      PVR_UNREFERENCED_PARAMETER(hHandle);
++      PVR_UNREFERENCED_PARAMETER(eType);
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVAllocHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, IMG_UINT32 ui32PID)
++{
++      PVR_UNREFERENCED_PARAMETER(ui32PID);
++
++      *ppsBase = IMG_NULL;
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVFreeHandleBase)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase)
++{
++      PVR_UNREFERENCED_PARAMETER(psBase);
++
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleInit(IMG_VOID)
++{
++      return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PVRSRVHandleDeInit)
++#endif
++static INLINE
++PVRSRV_ERROR PVRSRVHandleDeInit(IMG_VOID)
++{
++      return PVRSRV_OK;
++}
++
++#endif        
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/hash.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/hash.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/hash.h 1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/hash.h   2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,73 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _HASH_H_
++#define _HASH_H_
++
++#include "img_types.h"
++#include "osfunc.h"
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32 HASH_FUNC(IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++typedef IMG_BOOL HASH_KEY_COMP(IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++typedef struct _HASH_TABLE_ HASH_TABLE;
++
++IMG_UINT32 HASH_Func_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey, IMG_UINT32 uHashTabLen);
++
++IMG_BOOL HASH_Key_Comp_Default (IMG_SIZE_T uKeySize, IMG_VOID *pKey1, IMG_VOID *pKey2);
++
++HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, IMG_SIZE_T uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
++
++HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
++
++IMG_VOID HASH_Delete (HASH_TABLE *pHash);
++
++IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, IMG_VOID *pKey, IMG_UINTPTR_T v);
++
++IMG_BOOL HASH_Insert (HASH_TABLE *pHash, IMG_UINTPTR_T k, IMG_UINTPTR_T v);
++
++IMG_UINTPTR_T HASH_Remove_Extended(HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Remove (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++IMG_UINTPTR_T HASH_Retrieve_Extended (HASH_TABLE *pHash, IMG_VOID *pKey);
++
++IMG_UINTPTR_T HASH_Retrieve (HASH_TABLE *pHash, IMG_UINTPTR_T k);
++
++#ifdef HASH_TRACE
++void HASH_Dump (HASH_TABLE *pHash);
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/metrics.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/metrics.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/metrics.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/metrics.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,130 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _METRICS_
++#define _METRICS_
++
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++
++#if defined(DEBUG) || defined(TIMING)
++
++
++typedef struct 
++{
++      IMG_UINT32 ui32Start;
++      IMG_UINT32 ui32Stop;
++      IMG_UINT32 ui32Total;
++      IMG_UINT32 ui32Count;
++} Temporal_Data;
++
++extern Temporal_Data asTimers[]; 
++
++extern IMG_UINT32 PVRSRVTimeNow(IMG_VOID);
++extern IMG_VOID   PVRSRVSetupMetricTimers(IMG_VOID *pvDevInfo);
++extern IMG_VOID   PVRSRVOutputMetricTotals(IMG_VOID);
++
++
++#define PVRSRV_TIMER_DUMMY                            0
++
++#define PVRSRV_TIMER_EXAMPLE_1                        1
++#define PVRSRV_TIMER_EXAMPLE_2                        2
++
++
++#define PVRSRV_NUM_TIMERS             (PVRSRV_TIMER_EXAMPLE_2 + 1)
++
++#define PVRSRV_TIME_START(X)  { \
++                                                                      asTimers[X].ui32Count += 1; \
++                                                                      asTimers[X].ui32Count |= 0x80000000L; \
++                                                                      asTimers[X].ui32Start = PVRSRVTimeNow(); \
++                                                                      asTimers[X].ui32Stop  = 0; \
++                                                              }
++
++#define PVRSRV_TIME_SUSPEND(X)        { \
++                                                                      asTimers[X].ui32Stop += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++                                                              }
++
++#define PVRSRV_TIME_RESUME(X) { \
++                                                                      asTimers[X].ui32Start = PVRSRVTimeNow(); \
++                                                              }
++
++#define PVRSRV_TIME_STOP(X)           { \
++                                                                      asTimers[X].ui32Stop  += PVRSRVTimeNow() - asTimers[X].ui32Start; \
++                                                                      asTimers[X].ui32Total += asTimers[X].ui32Stop; \
++                                                                      asTimers[X].ui32Count &= 0x7FFFFFFFL; \
++                                                              }
++
++#define PVRSRV_TIME_RESET(X)  { \
++                                                                      asTimers[X].ui32Start = 0; \
++                                                                      asTimers[X].ui32Stop  = 0; \
++                                                                      asTimers[X].ui32Total = 0; \
++                                                                      asTimers[X].ui32Count = 0; \
++                                                              }
++
++
++#if defined(__sh__)
++
++#define TST_REG   ((volatile unsigned char *) (psDevInfo->pvSOCRegsBaseKM))   
++
++#define TCOR_2    ((volatile unsigned int *)  (psDevInfo->pvSOCRegsBaseKM+28))        
++#define TCNT_2    ((volatile unsigned int *)  (psDevInfo->pvSOCRegsBaseKM+32))        
++#define TCR_2     ((volatile unsigned short *)(psDevInfo->pvSOCRegsBaseKM+36))        
++
++#define TIMER_DIVISOR  4
++
++#endif 
++
++
++
++
++
++#else 
++
++
++
++#define PVRSRV_TIME_START(X)
++#define PVRSRV_TIME_SUSPEND(X)
++#define PVRSRV_TIME_RESUME(X)
++#define PVRSRV_TIME_STOP(X)
++#define PVRSRV_TIME_RESET(X)
++
++#define PVRSRVSetupMetricTimers(X)
++#define PVRSRVOutputMetricTotals()
++
++
++
++#endif 
++
++#if defined(__cplusplus)
++}
++#endif
++
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/osfunc.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/osfunc.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/osfunc.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,246 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG         1
++#endif
++
++#ifndef __OSFUNC_H__
++#define __OSFUNC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#ifdef        __linux__
++#ifdef        __KERNEL__
++#include <linux/string.h>
++#endif
++#endif
++
++
++#define KERNEL_ID                     0xffffffffL
++#define POWER_MANAGER_ID      0xfffffffeL
++#define ISR_ID                                0xfffffffdL
++#define TIMER_ID                      0xfffffffcL
++
++
++#define HOST_PAGESIZE                 OSGetPageSize
++#define HOST_PAGEMASK                 (~(HOST_PAGESIZE()-1))
++#define HOST_PAGEALIGN(addr)  (((addr)+HOST_PAGESIZE()-1)&HOST_PAGEMASK)
++
++#define PVRSRV_OS_HEAP_MASK                   0xf 
++#define PVRSRV_OS_PAGEABLE_HEAP               0x1 
++#define PVRSRV_OS_NON_PAGEABLE_HEAP   0x2 
++
++
++IMG_UINT32 OSClockus(IMG_VOID);
++IMG_UINT32 OSGetPageSize(IMG_VOID);
++PVRSRV_ERROR OSInstallDeviceLISR(IMG_VOID *pvSysData,
++                                                               IMG_UINT32 ui32Irq,
++                                                               IMG_CHAR *pszISRName,
++                                                               IMG_VOID *pvDeviceNode);
++PVRSRV_ERROR OSUninstallDeviceLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallSystemLISR(IMG_VOID *pvSysData, IMG_UINT32 ui32Irq);
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSInstallMISR(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSUninstallMISR(IMG_VOID *pvSysData);
++IMG_CPU_PHYADDR OSMapLinToCPUPhys(IMG_VOID* pvLinAddr);
++IMG_VOID OSMemCopy(IMG_VOID *pvDst, IMG_VOID *pvSrc, IMG_UINT32 ui32Size);
++IMG_VOID *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++IMG_BOOL OSUnMapPhysToLin(IMG_VOID *pvLinAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSReservePhys(IMG_CPU_PHYADDR BasePAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_VOID **ppvCpuVAddr, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnReservePhys(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++PVRSRV_ERROR OSRegisterMem(IMG_CPU_PHYADDR BasePAddr, IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, 
++                                                 IMG_UINT32 ui32Flags, IMG_HANDLE *phOSMemHandle);
++PVRSRV_ERROR OSUnRegisterMem(IMG_VOID *pvCpuVAddr, IMG_UINT32 ui32Bytes, IMG_UINT32 ui32Flags, IMG_HANDLE hOSMemHandle);
++
++#if defined(__linux__)
++PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++                                                         IMG_UINT32 ui32ByteOffset,
++                                                         IMG_UINT32 ui32Bytes,
++                                                         IMG_UINT32 ui32Flags,
++                                                         IMG_HANDLE *phOSMemHandleRet);
++PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSGetSubMemHandle)
++#endif
++static INLINE PVRSRV_ERROR OSGetSubMemHandle(IMG_HANDLE hOSMemHandle,
++                                                                                       IMG_UINT32 ui32ByteOffset,
++                                                                                       IMG_UINT32 ui32Bytes,
++                                                                                       IMG_UINT32 ui32Flags,
++                                                                                       IMG_HANDLE *phOSMemHandleRet)
++{
++      PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++      PVR_UNREFERENCED_PARAMETER(ui32Bytes);
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++
++      *phOSMemHandleRet = hOSMemHandle;
++      return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR OSReleaseSubMemHandle(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32Flags)
++{
++      PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++      PVR_UNREFERENCED_PARAMETER(ui32Flags);
++      return PVRSRV_OK;
++}
++#endif
++
++IMG_UINT32 OSGetCurrentProcessIDKM(IMG_VOID);
++IMG_UINT32 OSGetCurrentThreadID( IMG_VOID );
++IMG_VOID OSMemSet(IMG_VOID *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size);
++
++
++#if defined(__linux__) && defined(DEBUG_LINUX_MEMORY_ALLOCATIONS)
++PVRSRV_ERROR _OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc, IMG_CHAR *szFilename, IMG_UINT32 ui32Line);
++#define OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc) _OSAllocMem(ui32Flags, ui32Size, ppvLinAddr, phBlockAlloc, __FILE__, __LINE__)
++#else
++PVRSRV_ERROR OSAllocMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phBlockAlloc);
++#endif
++PVRSRV_ERROR OSFreeMem(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hBlockAlloc);
++PVRSRV_ERROR OSAllocPages(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID *ppvLinAddr, IMG_HANDLE *phPageAlloc);
++PVRSRV_ERROR OSFreePages(IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_PVOID pvLinAddr, IMG_HANDLE hPageAlloc);
++#if defined(__linux__)
++IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_VOID *hOSMemHandle, IMG_UINT32 ui32ByteOffset);
++#else
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(OSMemHandleToCpuPAddr)
++#endif
++static INLINE IMG_CPU_PHYADDR OSMemHandleToCpuPAddr(IMG_HANDLE hOSMemHandle, IMG_UINT32 ui32ByteOffset)
++{
++      IMG_CPU_PHYADDR sCpuPAddr;
++      PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
++      PVR_UNREFERENCED_PARAMETER(ui32ByteOffset);
++      sCpuPAddr.uiAddr = 0;
++      return sCpuPAddr;
++}
++#endif
++PVRSRV_ERROR OSInitEnvData(IMG_PVOID *ppvEnvSpecificData);
++PVRSRV_ERROR OSDeInitEnvData(IMG_PVOID pvEnvSpecificData);
++IMG_CHAR* OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc);
++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, IMG_UINT32 ui32Size, const IMG_CHAR *pszFormat, ...);
++#define OSStringLength(pszString) strlen(pszString)
++PVRSRV_ERROR OSPowerManagerConnect(IMG_VOID);
++PVRSRV_ERROR OSPowerManagerDisconnect(IMG_VOID);
++
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
++                                                               PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectDestroy(PVRSRV_EVENTOBJECT *psEventObject);
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hOSEventKM);
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM, IMG_UINT32 ui32MSTimeout);
++
++
++PVRSRV_ERROR OSBaseAllocContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR *pLinAddr, IMG_CPU_PHYADDR *pPhysAddr);
++PVRSRV_ERROR OSBaseFreeContigMemory(IMG_UINT32 ui32Size, IMG_CPU_VIRTADDR LinAddr, IMG_CPU_PHYADDR PhysAddr);
++
++IMG_PVOID MapUserFromKernel(IMG_PVOID pvLinAddrKM,IMG_UINT32 ui32Size,IMG_HANDLE *phMemBlock);
++IMG_VOID  UnmapUserFromKernel(IMG_PVOID pvLinAddrUM, IMG_UINT32 ui32Size, IMG_HANDLE hMemBlock);
++
++IMG_PVOID OSMapHWRegsIntoUserSpace(IMG_HANDLE hDevCookie, IMG_SYS_PHYADDR sRegAddr, IMG_UINT32 ulSize, IMG_PVOID *ppvProcess);
++IMG_VOID  OSUnmapHWRegsFromUserSpace(IMG_HANDLE hDevCookie, IMG_PVOID pvUserAddr, IMG_PVOID pvProcess);
++
++IMG_SYS_PHYADDR OSMapLinToPhys(IMG_PVOID pvLinAddr);
++
++
++PVRSRV_ERROR OSMapPhysToUserSpace(IMG_HANDLE hDevCookie,
++                                                                IMG_SYS_PHYADDR sCPUPhysAddr,
++                                                                IMG_UINT32 uiSizeInBytes,
++                                                                IMG_UINT32 ui32CacheFlags,
++                                                                IMG_PVOID *ppvUserAddr,
++                                                                IMG_UINT32 *puiActualSize,
++                                                                IMG_HANDLE hMappingHandle);
++
++PVRSRV_ERROR OSUnmapPhysToUserSpace(IMG_HANDLE hDevCookie,
++                                                                      IMG_PVOID pvUserAddr,
++                                                                      IMG_PVOID pvProcess);
++
++PVRSRV_ERROR OSLockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSUnlockResource(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_BOOL OSIsResourceLocked(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++PVRSRV_ERROR OSCreateResource(PVRSRV_RESOURCE *psResource);
++PVRSRV_ERROR OSDestroyResource(PVRSRV_RESOURCE *psResource);
++IMG_VOID OSBreakResourceLock(PVRSRV_RESOURCE *psResource, IMG_UINT32 ui32ID);
++IMG_VOID OSWaitus(IMG_UINT32 ui32Timeus);
++IMG_VOID OSReleaseThreadQuanta(IMG_VOID);
++IMG_UINT32 OSPCIReadDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg);
++IMG_VOID OSPCIWriteDword(IMG_UINT32 ui32Bus, IMG_UINT32 ui32Dev, IMG_UINT32 ui32Func, IMG_UINT32 ui32Reg, IMG_UINT32 ui32Value);
++
++#ifndef OSReadHWReg
++IMG_UINT32 OSReadHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
++#endif
++#ifndef OSWriteHWReg
++IMG_VOID OSWriteHWReg(IMG_PVOID pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++#endif
++
++typedef IMG_VOID (*PFN_TIMER_FUNC)(IMG_VOID*);
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, IMG_VOID *pvData, IMG_UINT32 ui32MsTimeout);
++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer);
++
++PVRSRV_ERROR OSGetSysMemSize(IMG_UINT32 *pui32Bytes);
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++      HOST_PCI_INIT_FLAG_BUS_MASTER = 0x1,
++      HOST_PCI_INIT_FLAG_FORCE_I32 = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++PVRSRV_ERROR OSPCIAcquireDev(IMG_VOID *pvSysData, IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCISetDev(IMG_VOID *pvSysData, IMG_VOID *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSPCIIRQ(IMG_VOID *pvSysData, IMG_UINT32 *pui32IRQ);
++IMG_UINT32 OSPCIAddrRangeLen(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeStart(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++IMG_UINT32 OSPCIAddrRangeEnd(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(IMG_VOID *pvSysData, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCISuspendDev(IMG_VOID *pvSysData);
++PVRSRV_ERROR OSPCIResumeDev(IMG_VOID *pvSysData);
++
++PVRSRV_ERROR OSScheduleMISR(IMG_VOID *pvSysData);
++
++IMG_BOOL OSProcHasPrivSrvInit(IMG_VOID);
++
++typedef enum _img_verify_test
++{
++      PVR_VERIFY_WRITE = 0,
++      PVR_VERIFY_READ
++} IMG_VERIFY_TEST;
++
++IMG_BOOL OSAccessOK(IMG_VERIFY_TEST eVerification, IMG_VOID *pvUserPtr, IMG_UINT32 ui32Bytes);
++
++PVRSRV_ERROR OSCopyToUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Bytes);
++PVRSRV_ERROR OSCopyFromUser(IMG_PVOID pvProcess, IMG_VOID *pvDest, IMG_VOID *pvSrc, IMG_UINT32 ui32Bytes);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/pdump_km.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,288 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _PDUMP_KM_H_
++#define _PDUMP_KM_H_
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define PDUMP_FLAGS_NEVER                     0x08000000
++#define PDUMP_FLAGS_TOOUT2MEM         0x10000000
++#define PDUMP_FLAGS_LASTFRAME         0x20000000
++#define PDUMP_FLAGS_RESETLFBUFFER     0x40000000
++#define PDUMP_FLAGS_CONTINUOUS                0x80000000
++
++#define PDUMP_PD_UNIQUETAG                    (IMG_HANDLE)0
++#define PDUMP_PT_UNIQUETAG                    (IMG_HANDLE)0
++
++#ifdef PDUMP
++
++#define MAKEUNIQUETAG(hMemInfo)       (((BM_BUF *)(((PVRSRV_KERNEL_MEM_INFO *)hMemInfo)->sMemBlk.hBuffer))->pMapping)
++
++      #define PDUMP_REG_FUNC_NAME PDumpReg
++
++      IMG_IMPORT PVRSRV_ERROR PDumpMemPolKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                                IMG_UINT32                    ui32Offset,
++                                                                                IMG_UINT32                    ui32Value,
++                                                                                IMG_UINT32                    ui32Mask,
++                                                                                PDUMP_POLL_OPERATOR   eOperator,
++                                                                                IMG_BOOL                              bLastFrame,
++                                                                                IMG_BOOL                              bOverwrite,
++                                                                                IMG_HANDLE                    hUniqueTag);
++
++      IMG_IMPORT PVRSRV_ERROR PDumpMemKM(IMG_PVOID                    pvAltLinAddr,
++                                                                         PVRSRV_KERNEL_MEM_INFO       *psMemInfo,
++                                                                         IMG_UINT32                   ui32Offset,
++                                                                         IMG_UINT32                   ui32Bytes,
++                                                                         IMG_UINT32                   ui32Flags,
++                                                                         IMG_HANDLE                   hUniqueTag);
++      PVRSRV_ERROR PDumpMemPagesKM(PVRSRV_DEVICE_TYPE eDeviceType,
++                                                               IMG_DEV_PHYADDR                *pPages,
++                                                               IMG_UINT32                     ui32NumPages,
++                                                               IMG_DEV_VIRTADDR       sDevAddr,
++                                                               IMG_UINT32                     ui32Start,
++                                                               IMG_UINT32                     ui32Length,
++                                                               IMG_UINT32                     ui32Flags);
++
++      PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE     eDeviceType,
++                                                       IMG_CPU_VIRTADDR       pvLinAddr,
++                                                       IMG_UINT32                     ui32Bytes,
++                                                       IMG_UINT32                     ui32Flags,
++                                                       IMG_BOOL                       bInitialisePages,
++                                                       IMG_HANDLE                     hUniqueTag1,
++                                                       IMG_HANDLE                     hUniqueTag2);
++      IMG_VOID PDumpInit(IMG_VOID);
++      IMG_VOID PDumpDeInit(IMG_VOID);
++      IMG_IMPORT PVRSRV_ERROR PDumpSetFrameKM(IMG_UINT32 ui32Frame);
++      IMG_IMPORT PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
++      IMG_IMPORT PVRSRV_ERROR PDumpDriverInfoKM(IMG_CHAR *pszString, IMG_UINT32 ui32Flags);
++      PVRSRV_ERROR PDumpRegWithFlagsKM(IMG_UINT32 ui32RegAddr,
++                                                                       IMG_UINT32 ui32RegValue,
++                                                                       IMG_UINT32 ui32Flags);
++      IMG_IMPORT PVRSRV_ERROR PDumpBitmapKM(IMG_CHAR *pszFileName,
++                                                                                IMG_UINT32 ui32FileOffset,
++                                                                                IMG_UINT32 ui32Width,
++                                                                                IMG_UINT32 ui32Height,
++                                                                                IMG_UINT32 ui32StrideInBytes,
++                                                                                IMG_DEV_VIRTADDR sDevBaseAddr,
++                                                                                IMG_UINT32 ui32Size,
++                                                                                PDUMP_PIXEL_FORMAT ePixelFormat,
++                                                                                PDUMP_MEM_FORMAT eMemFormat,
++                                                                                IMG_UINT32 ui32PDumpFlags);
++      IMG_IMPORT PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszFileName,
++                                                                                 IMG_UINT32 ui32FileOffset,
++                                                                                 IMG_UINT32 ui32Address,
++                                                                                 IMG_UINT32 ui32Size,
++                                                                                 IMG_UINT32 ui32PDumpFlags);
++      IMG_VOID PDUMP_REG_FUNC_NAME(IMG_UINT32         dwReg,
++                                                               IMG_UINT32             dwData);
++
++      IMG_VOID PDumpMsvdxRegRead(const IMG_CHAR* const        pRegRegion,
++                                                         const IMG_UINT32             dwRegOffset);
++
++      IMG_VOID PDumpMsvdxRegWrite(const IMG_CHAR* const       pRegRegion,
++                                                              const IMG_UINT32                dwRegOffset,
++                                                              const IMG_UINT32                dwData);
++
++      PVRSRV_ERROR PDumpMsvdxRegPol(const IMG_CHAR* const     pRegRegion,
++                                                                const IMG_UINT32              ui32Offset,
++                                                                const IMG_UINT32              ui32CheckFuncIdExt,
++                                                                const IMG_UINT32              ui32RequValue,
++                                                                const IMG_UINT32              ui32Enable,
++                                                                const IMG_UINT32              ui32PollCount,
++                                                                const IMG_UINT32              ui32TimeOut);
++
++      PVRSRV_ERROR  PDumpMsvdxWriteRef(const IMG_CHAR* const  pRegRegion,
++                                                                       const IMG_UINT32               ui32VLROffset,
++                                                                       const IMG_UINT32               ui32Physical );
++
++      IMG_VOID PDumpComment(IMG_CHAR* pszFormat, ...);
++      IMG_VOID PDumpCommentWithFlags(IMG_UINT32       ui32Flags,
++                                                                 IMG_CHAR*    pszFormat,
++                                                                 ...);
++      PVRSRV_ERROR PDumpRegPolKM(IMG_UINT32 ui32RegAddr,
++                                                         IMG_UINT32 ui32RegValue,
++                                                         IMG_UINT32 ui32Mask);
++      PVRSRV_ERROR PDumpRegPolWithFlagsKM(IMG_UINT32 ui32RegAddr,
++                                                                              IMG_UINT32 ui32RegValue,
++                                                                              IMG_UINT32 ui32Mask,
++                                                                              IMG_UINT32 ui32Flags);
++      IMG_BOOL PDumpIsLastCaptureFrameKM(IMG_VOID);
++      IMG_IMPORT IMG_BOOL PDumpIsCaptureFrameKM(IMG_VOID);
++
++      IMG_VOID PDumpMallocPages(PVRSRV_DEVICE_TYPE    eDeviceType,
++                                                        IMG_UINT32                    ui32DevVAddr,
++                                                        IMG_CPU_VIRTADDR              pvLinAddr,
++                                                        IMG_HANDLE                    hOSMemHandle,
++                                                        IMG_UINT32                    ui32NumBytes,
++                                                        IMG_HANDLE                    hUniqueTag);
++      IMG_VOID PDumpMallocPagesPhys(PVRSRV_DEVICE_TYPE        eDeviceType,
++                                                                IMG_UINT32                    ui32DevVAddr,
++                                                                IMG_PUINT32                   pui32PhysPages,
++                                                                IMG_UINT32                    ui32NumPages);
++      IMG_VOID PDumpMallocPageTable(PVRSRV_DEVICE_TYPE        eDeviceType,
++                                                                IMG_CPU_VIRTADDR              pvLinAddr,
++                                                                IMG_UINT32                    ui32NumBytes,
++                                                                IMG_HANDLE                    hUniqueTag);
++      IMG_VOID PDumpFreePages(struct _BM_HEAP_        *psBMHeap,
++                                                      IMG_DEV_VIRTADDR        sDevVAddr,
++                                                      IMG_UINT32                      ui32NumBytes,
++                                                      IMG_HANDLE              hUniqueTag,
++                                                      IMG_BOOL                        bInterleaved);
++      IMG_VOID PDumpFreePageTable(PVRSRV_DEVICE_TYPE  eDeviceType,
++                                                              IMG_CPU_VIRTADDR        pvLinAddr,
++                                                              IMG_UINT32                      ui32NumBytes,
++                                                              IMG_HANDLE                      hUniqueTag);
++      IMG_VOID PDumpPDReg(IMG_UINT32  ui32Reg,
++                                              IMG_UINT32      ui32dwData,
++                                              IMG_HANDLE      hUniqueTag);
++      IMG_VOID PDumpPDRegWithFlags(IMG_UINT32         ui32Reg,
++                                                               IMG_UINT32             ui32Data,
++                                                               IMG_UINT32             ui32Flags,
++                                                               IMG_HANDLE             hUniqueTag);
++
++      PVRSRV_ERROR PDumpPDDevPAddrKM(PVRSRV_KERNEL_MEM_INFO *psMemInfo,
++                                                                 IMG_UINT32 ui32Offset,
++                                                                 IMG_DEV_PHYADDR sPDDevPAddr,
++                                                                 IMG_HANDLE hUniqueTag1,
++                                                                 IMG_HANDLE hUniqueTag2);
++
++      IMG_BOOL PDumpTestNextFrame(IMG_UINT32 ui32CurrentFrame);
++
++      void PDumpTASignatureRegisters(IMG_UINT32       ui32DumpFrameNum,
++                                                                 IMG_UINT32   ui32TAKickCount,
++                                                                 IMG_BOOL             bLastFrame);
++      void PDump3DSignatureRegisters(IMG_UINT32       ui32DumpFrameNum,
++                                                                 IMG_BOOL             bLastFrame);
++
++      IMG_VOID PDumpCycleCountRegRead(const IMG_UINT32 dwRegOffset, IMG_BOOL bLastFrame);
++
++      void PDumpPerformanceCounterRegisters(IMG_UINT32        ui32DumpFrameNum,
++                                                                                IMG_BOOL              bLastFrame);
++
++      IMG_VOID PDumpEndInitPhase(IMG_VOID);
++
++      void PDumpCBP(PPVRSRV_KERNEL_MEM_INFO   psROffMemInfo,
++                                IMG_UINT32                            ui32ROffOffset,
++                                IMG_UINT32                            ui32WPosVal,
++                                IMG_UINT32                            ui32PacketSize,
++                                IMG_UINT32                            ui32BufferSize,
++                                IMG_UINT32                            ui32Flags,
++                                IMG_HANDLE                            hUniqueTag);
++
++      IMG_VOID PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
++      IMG_VOID PDumpIDL(IMG_UINT32 ui32Clocks);
++
++      IMG_VOID PDumpSuspendKM(IMG_VOID);
++      IMG_VOID PDumpResumeKM(IMG_VOID);
++
++      #define PDUMPMEMPOL                             PDumpMemPolKM
++      #define PDUMPMEM                                PDumpMemKM
++      #define PDUMPMEM2                               PDumpMem2KM
++      #define PDUMPINIT                               PDumpInit
++      #define PDUMPDEINIT                             PDumpDeInit
++      #define PDUMPISLASTFRAME                PDumpIsLastCaptureFrameKM
++      #define PDUMPTESTFRAME                  PDumpIsCaptureFrameKM
++      #define PDUMPTESTNEXTFRAME              PDumpTestNextFrame
++      #define PDUMPREGWITHFLAGS               PDumpRegWithFlagsKM
++      #define PDUMPREG                                PDUMP_REG_FUNC_NAME
++      #define PDUMPCOMMENT                    PDumpComment
++      #define PDUMPCOMMENTWITHFLAGS   PDumpCommentWithFlags
++      #define PDUMPREGPOL                             PDumpRegPolKM
++      #define PDUMPREGPOLWITHFLAGS    PDumpRegPolWithFlagsKM
++      #define PDUMPMALLOCPAGES                PDumpMallocPages
++      #define PDUMPMALLOCPAGETABLE    PDumpMallocPageTable
++      #define PDUMPFREEPAGES                  PDumpFreePages
++      #define PDUMPFREEPAGETABLE              PDumpFreePageTable
++      #define PDUMPPDREG                              PDumpPDReg
++      #define PDUMPPDREGWITHFLAGS             PDumpPDRegWithFlags
++      #define PDUMPCBP                                PDumpCBP
++      #define PDUMPMALLOCPAGESPHYS    PDumpMallocPagesPhys
++      #define PDUMPENDINITPHASE               PDumpEndInitPhase
++      #define PDUMPMSVDXREGWRITE              PDumpMsvdxRegWrite
++      #define PDUMPMSVDXREGREAD               PDumpMsvdxRegRead
++      #define PDUMPMSVDXPOL                   PDumpMsvdxRegPol
++      #define PDUMPMSVDXWRITEREF              PDumpMsvdxWriteRef
++      #define PDUMPBITMAPKM                   PDumpBitmapKM
++      #define PDUMPDRIVERINFO                 PDumpDriverInfoKM
++      #define PDUMPIDLWITHFLAGS               PDumpIDLWithFlags
++      #define PDUMPIDL                                PDumpIDL
++      #define PDUMPSUSPEND                    PDumpSuspendKM
++      #define PDUMPRESUME                             PDumpResumeKM
++
++#else
++              #if ((defined(LINUX) || defined(GCC_IA32)) || defined(GCC_ARM))
++                      #define PDUMPMEMPOL(args...)
++                      #define PDUMPMEM(args...)
++                      #define PDUMPMEM2(args...)
++                      #define PDUMPINIT(args...)
++                      #define PDUMPDEINIT(args...)
++                      #define PDUMPISLASTFRAME(args...)
++                      #define PDUMPTESTFRAME(args...)
++                      #define PDUMPTESTNEXTFRAME(args...)
++                      #define PDUMPREGWITHFLAGS(args...)
++                      #define PDUMPREG(args...)
++                      #define PDUMPCOMMENT(args...)
++                      #define PDUMPREGPOL(args...)
++                      #define PDUMPREGPOLWITHFLAGS(args...)
++                      #define PDUMPMALLOCPAGES(args...)
++                      #define PDUMPMALLOCPAGETABLE(args...)
++                      #define PDUMPFREEPAGES(args...)
++                      #define PDUMPFREEPAGETABLE(args...)
++                      #define PDUMPPDREG(args...)
++                      #define PDUMPPDREGWITHFLAGS(args...)
++                      #define PDUMPSYNC(args...)
++                      #define PDUMPCOPYTOMEM(args...)
++                      #define PDUMPWRITE(args...)
++                      #define PDUMPCBP(args...)
++                      #define PDUMPCOMMENTWITHFLAGS(args...)
++                      #define PDUMPMALLOCPAGESPHYS(args...)
++                      #define PDUMPENDINITPHASE(args...)
++                      #define PDUMPMSVDXREG(args...)
++                      #define PDUMPMSVDXREGWRITE(args...)
++                      #define PDUMPMSVDXREGREAD(args...)
++                      #define PDUMPMSVDXPOLEQ(args...)
++                      #define PDUMPMSVDXPOL(args...)
++                      #define PDUMPBITMAPKM(args...)
++                      #define PDUMPDRIVERINFO(args...)
++                      #define PDUMPIDLWITHFLAGS(args...)
++                      #define PDUMPIDL(args...)
++                      #define PDUMPSUSPEND(args...)
++                      #define PDUMPRESUME(args...)
++                      #define PDUMPMSVDXWRITEREF(args...)
++              #else
++                      #error Compiler not specified
++              #endif
++#endif
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/perproc.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/perproc.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/perproc.h      1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/perproc.h        2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,65 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __PERPROC_H__
++#define __PERPROC_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++#include "img_types.h"
++#include "resman.h"
++
++#include "handle.h"
++
++typedef struct _PVRSRV_PER_PROCESS_DATA_
++{
++      IMG_UINT32 ui32PID;
++      IMG_HANDLE hBlockAlloc;
++      PRESMAN_ITEM psResManItem;
++      IMG_HANDLE hPerProcData;
++      PVRSRV_HANDLE_BASE *psHandleBase;
++
++      
++      IMG_BOOL bInitProcess;
++
++      
++      IMG_HANDLE hOsPrivateData;
++} PVRSRV_PER_PROCESS_DATA;
++
++PVRSRV_PER_PROCESS_DATA *PVRSRVPerProcessData(IMG_UINT32 ui32PID, IMG_BOOL bAlloc);
++
++PVRSRV_ERROR PVRSRVPerProcessDataInit(IMG_VOID);
++
++PVRSRV_ERROR PVRSRVPerProcessDataDeInit(IMG_VOID);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/power.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/power.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/power.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/power.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,90 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef POWER_H
++#define POWER_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++ 
++typedef struct _PVRSRV_POWER_DEV_TAG_
++{
++      PFN_PRE_POWER                                   pfnPrePower;
++      PFN_POST_POWER                                  pfnPostPower;
++      IMG_HANDLE                                              hDevCookie;
++      IMG_UINT32                                              ui32DeviceIndex;
++      PVR_POWER_STATE                                 eDefaultPowerState;
++      PVR_POWER_STATE                                 eCurrentPowerState;
++      struct _PVRSRV_POWER_DEV_TAG_   *psNext;
++
++} PVRSRV_POWER_DEV;
++
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32       ui32CallerID,
++                                                       IMG_BOOL       bSystemPowerEvent);
++IMG_IMPORT
++IMG_VOID PVRSRVPowerUnlock(IMG_UINT32 ui32CallerID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(IMG_UINT32                   ui32DeviceIndex,
++                                                                               PVR_POWER_STATE        eNewPowerState,
++                                                                               IMG_UINT32                     ui32CallerID,
++                                                                               IMG_BOOL                       bRetainMutex);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPrePowerStateKM(PVR_POWER_STATE eNewPowerState);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSystemPostPowerStateKM(PVR_POWER_STATE eNewPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVSetPowerStateKM (PVR_POWER_STATE ePVRState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVPowerControlKM(PVR_POWER_CONTROL   ePowerControl,
++                                                                PVR_POWER_STATE       *pePVRPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(IMG_UINT32             ui32DeviceIndex,
++                                                                         PFN_PRE_POWER        pfnPrePower,
++                                                                         PFN_POST_POWER       pfnPostPower,
++                                                                         IMG_HANDLE           hDevCookie,
++                                                                         PVR_POWER_STATE      eCurrentPowerState,
++                                                                         PVR_POWER_STATE      eDefaultPowerState);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemovePowerDevice (IMG_UINT32 ui32DeviceIndex);
++
++IMG_IMPORT
++IMG_BOOL PVRSRVIsDevicePowered(IMG_UINT32 ui32DeviceIndex);
++
++#if defined (__cplusplus)
++}
++#endif
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/queue.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/queue.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/queue.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/queue.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,110 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef QUEUE_H
++#define QUEUE_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#define UPDATE_QUEUE_ROFF(psQueue, ui32Size)                                          \
++      psQueue->ui32ReadOffset = (psQueue->ui32ReadOffset + ui32Size)  \
++      & (psQueue->ui32QueueSize - 1);
++
++ typedef struct _COMMAND_COMPLETE_DATA_
++ {
++      IMG_BOOL                        bInUse;
++              
++      IMG_UINT32                      ui32DstSyncCount;       
++      IMG_UINT32                      ui32SrcSyncCount;       
++      PVRSRV_SYNC_OBJECT      *psDstSync;                     
++      PVRSRV_SYNC_OBJECT      *psSrcSync;                     
++ }COMMAND_COMPLETE_DATA, *PCOMMAND_COMPLETE_DATA;
++
++#if !defined(USE_CODE)
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVProcessQueues (IMG_UINT32  ui32CallerID,
++                                                                IMG_BOOL              bFlush);
++
++#if defined(__linux__) && defined(__KERNEL__) 
++#include <linux/types.h>
++off_t
++QueuePrintQueues (char * buffer, size_t size, off_t off);
++#endif
++
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_UINT32 ui32QueueSize,
++                                                                                                       PVRSRV_QUEUE_INFO **ppsQueueInfo);
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO     *psQueue,
++                                                                                              PVRSRV_COMMAND          **ppsCommand,
++                                                                                              IMG_UINT32                      ui32DevIndex,
++                                                                                              IMG_UINT16                      CommandType,
++                                                                                              IMG_UINT32                      ui32DstSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
++                                                                                              IMG_UINT32                      ui32SrcSyncCount,
++                                                                                              PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
++                                                                                              IMG_UINT32                      ui32DataByteSize );
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              IMG_UINT32 ui32ParamSize,
++                                                                                              IMG_VOID **ppvSpace);
++
++IMG_IMPORT
++PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
++                                                                                              PVRSRV_COMMAND *psCommand);
++
++IMG_IMPORT
++IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie, IMG_BOOL bScheduleMISR);
++
++IMG_VOID PVRSRVCommandCompleteCallbacks(IMG_VOID);
++
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32           ui32DevIndex,
++                                                                               PFN_CMD_PROC   *ppfnCmdProcList,
++                                                                               IMG_UINT32             ui32MaxSyncsPerCmd[][2],
++                                                                               IMG_UINT32             ui32CmdCount);
++IMG_IMPORT
++PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32     ui32DevIndex,
++                                                                         IMG_UINT32   ui32CmdCount);
++
++#endif 
++
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/ra.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/ra.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/ra.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/ra.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,152 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _RA_H_
++#define _RA_H_
++
++#include "img_types.h"
++#include "hash.h"
++#include "osfunc.h"
++
++typedef struct _RA_ARENA_ RA_ARENA;
++typedef struct _BM_MAPPING_ BM_MAPPING;
++
++
++
++#define RA_STATS 
++
++
++struct _RA_STATISTICS_
++{
++    
++    IMG_UINT32 uSpanCount;
++
++    
++    IMG_UINT32 uLiveSegmentCount;
++
++    
++    IMG_UINT32 uFreeSegmentCount;
++
++    
++    IMG_UINT32 uTotalResourceCount;
++    
++    
++    IMG_UINT32 uFreeResourceCount;
++
++    
++    IMG_UINT32 uCumulativeAllocs;
++
++    
++    IMG_UINT32 uCumulativeFrees;
++
++    
++    IMG_UINT32 uImportCount;
++
++    
++    IMG_UINT32 uExportCount;
++};
++typedef struct _RA_STATISTICS_ RA_STATISTICS;
++
++struct _RA_SEGMENT_DETAILS_
++{
++      IMG_UINT32      uiSize;
++      IMG_CPU_PHYADDR sCpuPhyAddr;
++      IMG_HANDLE      hSegment;
++};
++typedef struct _RA_SEGMENT_DETAILS_ RA_SEGMENT_DETAILS;
++
++RA_ARENA *
++RA_Create (IMG_CHAR *name,
++           IMG_UINTPTR_T base,
++           IMG_SIZE_T uSize,
++           BM_MAPPING *psMapping,
++           IMG_SIZE_T uQuantum, 
++           IMG_BOOL (*alloc)(IMG_VOID *_h,
++                             IMG_SIZE_T uSize,
++                             IMG_SIZE_T *pActualSize,
++                             BM_MAPPING **ppsMapping,
++                             IMG_UINT32 uFlags,
++                                                       IMG_UINTPTR_T *pBase),
++           IMG_VOID (*free) (IMG_VOID *,
++                                                      IMG_UINTPTR_T,
++                                                      BM_MAPPING *psMapping),
++                 IMG_VOID (*backingstore_free) (IMG_VOID *,
++                                                                                IMG_UINT32,
++                                          IMG_UINT32,
++                                          IMG_HANDLE),
++           IMG_VOID *import_handle);
++
++void
++RA_Delete (RA_ARENA *pArena);
++
++IMG_BOOL
++RA_Add (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_SIZE_T uSize);
++
++IMG_BOOL
++RA_Alloc (RA_ARENA *pArena, 
++          IMG_SIZE_T uSize,
++          IMG_SIZE_T *pActualSize,
++          BM_MAPPING **ppsMapping, 
++          IMG_UINT32 uFlags,
++          IMG_UINT32 uAlignment,
++                IMG_UINT32 uAlignmentOffset,
++          IMG_UINTPTR_T *pBase);
++
++void 
++RA_Free (RA_ARENA *pArena, IMG_UINTPTR_T base, IMG_BOOL bFreeBackingStore);
++
++
++#ifdef RA_STATS
++
++#define CHECK_SPACE(total)                                    \
++{                                                                                     \
++      if(total<100)                                                   \
++              return PVRSRV_ERROR_INVALID_PARAMS;     \
++}
++
++#define UPDATE_SPACE(str, count, total)               \
++{                                                                                     \
++      if(count == -1)                                                 \
++              return PVRSRV_ERROR_INVALID_PARAMS;     \
++      else                                                                    \
++      {                                                                               \
++              str += count;                                           \
++              total -= count;                                         \
++      }                                                                               \
++}
++
++
++IMG_BOOL RA_GetNextLiveSegment(IMG_HANDLE hArena, RA_SEGMENT_DETAILS *psSegDetails);
++
++
++PVRSRV_ERROR RA_GetStats(RA_ARENA *pArena,
++                                                      IMG_CHAR **ppszStr, 
++                                                      IMG_UINT32 *pui32StrLen);
++
++#endif 
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/resman.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/resman.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/resman.h       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/resman.h 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,115 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef __RESMAN_H__
++#define __RESMAN_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++enum {
++      
++      RESMAN_TYPE_SHARED_PB_DESC = 1,                                 
++      RESMAN_TYPE_HW_RENDER_CONTEXT,                                          
++      RESMAN_TYPE_TRANSFER_CONTEXT,                                   
++
++      
++      
++      
++      
++      RESMAN_TYPE_DISPLAYCLASS_SWAPCHAIN,                             
++      RESMAN_TYPE_DISPLAYCLASS_DEVICE,                                
++
++      
++      RESMAN_TYPE_BUFFERCLASS_DEVICE,                                 
++      
++      
++      RESMAN_TYPE_OS_USERMODE_MAPPING,                                
++      
++      
++      RESMAN_TYPE_DEVICEMEM_CONTEXT,                                  
++      RESMAN_TYPE_DEVICECLASSMEM_MAPPING,                             
++      RESMAN_TYPE_DEVICEMEM_MAPPING,                                  
++      RESMAN_TYPE_DEVICEMEM_WRAP,                                             
++      RESMAN_TYPE_DEVICEMEM_ALLOCATION,                               
++      RESMAN_TYPE_RESOURCE_PERPROC_DATA,                              
++    RESMAN_TYPE_SHARED_MEM_INFO,                    
++      
++      
++      RESMAN_TYPE_KERNEL_DEVICEMEM_ALLOCATION                 
++};
++
++#define RESMAN_TYPE_USE_PROCESSID             0x80000000      
++
++#define RESMAN_CRITERIA_ALL                           0x00000000      
++#define RESMAN_CRITERIA_RESTYPE                       0x00000001      
++#define RESMAN_CRITERIA_PVOID_PARAM           0x00000002      
++#define RESMAN_CRITERIA_UI32_PARAM            0x00000004      
++
++#define RESMAN_PROCESSID_FIND                 0xffffffff      
++
++#define RESMAN_SRVINIT_PROCESSID              0xfffffff1      
++
++#define RESMAN_KERNEL_PROCESSID                       0                       
++
++typedef PVRSRV_ERROR (*RESMAN_FREE_FN)(IMG_UINT32 ui32ProcessID, IMG_PVOID pvParam, IMG_UINT32 ui32Param); 
++                                                                                                                                              
++
++typedef struct _RESMAN_ITEM_ *PRESMAN_ITEM;
++
++
++PVRSRV_ERROR ResManInit(IMG_VOID);
++IMG_VOID ResManDeInit(IMG_VOID);
++
++PRESMAN_ITEM ResManRegisterRes(IMG_UINT32             ui32ResType, 
++                                                         IMG_PVOID            pvParam, 
++                                                         IMG_UINT32           ui32Param, 
++                                                         RESMAN_FREE_FN       pfnFreeResource, 
++                                                         IMG_UINT32           ui32ProcessID);
++PVRSRV_ERROR ResManFreeResByPtr(PRESMAN_ITEM  psResItem,
++                                                              IMG_BOOL                bExecuteCallback);
++PVRSRV_ERROR ResManFreeResByCriteria(IMG_UINT32       ui32SearchCriteria, 
++                                                                       IMG_UINT32     ui32ResType, 
++                                                                       IMG_PVOID      pvParam, 
++                                                                       IMG_UINT32     ui32Param, 
++                                                                       IMG_BOOL       bExecuteCallback);
++
++PVRSRV_ERROR ResManFindResourceByPtr(PRESMAN_ITEM psItem);
++
++PVRSRV_ERROR ResManPrePower(PVR_POWER_STATE eNewPowerState,
++                                                      PVR_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR ResManPostPower(PVR_POWER_STATE eNewPowerState, 
++                                                      PVR_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR PVRSRVResManConnect(IMG_UINT32 ui32ProcID, IMG_BOOL bConnect);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/services_headers.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/services_headers.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/services_headers.h     1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/services_headers.h       2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,49 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef SERVICES_HEADERS_H
++#define SERVICES_HEADERS_H
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG         1
++#endif
++
++#include "img_defs.h"
++#include "services.h"
++#include "servicesint.h"
++#include "power.h"
++#include "resman.h"
++#include "queue.h"
++#include "srvkm.h"
++#include "kerneldisplay.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "metrics.h"
++#include "osfunc.h"
++
++#endif 
++
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h git-nokia/drivers/gpu/pvr/services4/srvkm/include/srvkm.h
+--- git/drivers/gpu/pvr/services4/srvkm/include/srvkm.h        1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/include/srvkm.h  2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,44 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef SRVKM_H
++#define SRVKM_H
++
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++
++IMG_VOID PVRSRVSetDCState(IMG_UINT32 ui32State);
++
++PVRSRV_ERROR PVRSRVSaveRestoreLiveSegments(IMG_HANDLE hArena, IMG_PBYTE pbyBuffer, IMG_UINT32 *puiBufSize, IMG_BOOL bSave);
++
++#if defined (__cplusplus)
++}
++#endif
++
++#endif 
+diff -Nurd git/drivers/gpu/pvr/services4/srvkm/Makefile git-nokia/drivers/gpu/pvr/services4/srvkm/Makefile
+--- git/drivers/gpu/pvr/services4/srvkm/Makefile       1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/srvkm/Makefile 2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,68 @@
++#
++# Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++#
++# This program is free software; you can redistribute it and/or modify it
++# under the terms and conditions of the GNU General Public License,
++# version 2, as published by the Free Software Foundation.
++#
++# This program is distributed in the hope it will be useful but, except
++# as otherwise stated in writing, without any warranty; without even the
++# implied warranty of merchantability or fitness for a particular purpose.
++# See the GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License along with
++# this program; if not, write to the Free Software Foundation, Inc.,
++# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++#
++# The full GNU General Public License is included in this distribution in
++# the file called "COPYING".
++#
++# Contact Information:
++# Imagination Technologies Ltd. <gpl-support@imgtec.com>
++# Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
++#
++#
++
++obj-y +=      env/linux/osfunc.o              \
++              env/linux/mmap.o                \
++              env/linux/mod.o                 \
++              env/linux/pdump.o               \
++              env/linux/proc.o                \
++              env/linux/pvr_bridge_k.o        \
++              env/linux/pvr_debug.o           \
++              env/linux/mm.o                  \
++              env/linux/mutex.o
++
++obj-y +=      common/buffer_manager.o         \
++              common/devicemem.o              \
++              common/deviceclass.o            \
++              common/handle.o                 \
++              common/hash.o                   \
++              common/metrics.o                \
++              common/pvrsrv.o                 \
++              common/queue.o                  \
++              common/ra.o                     \
++              common/resman.o                 \
++              common/power.o                  \
++              common/mem.o                    \
++              bridged/bridged_pvr_bridge.o    \
++              devices/sgx/sgxinit.o           \
++              devices/sgx/sgxutils.o          \
++              devices/sgx/sgxkick.o           \
++              devices/sgx/sgxtransfer.o       \
++              devices/sgx/mmu.o               \
++              devices/sgx/pb.o                \
++              common/perproc.o                \
++              ../system/$(CONFIG_PVR_SYSTEM)/sysconfig.o      \
++              ../system/$(CONFIG_PVR_SYSTEM)/sysutils.o       \
++              devices/sgx/sgx2dcore.o
++
++INCLUDES =    -I$(src)/env/linux      \
++              -I$(src)/include        \
++              -I$(src)/bridged        \
++              -I$(src)/devices/sgx    \
++              -I$(src)/include        \
++              -I$(src)/hwdefs
++
++ccflags-y += $(CONFIG_PVR_OPTS) $(INCLUDES)
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/include/syscommon.h git-nokia/drivers/gpu/pvr/services4/system/include/syscommon.h
+--- git/drivers/gpu/pvr/services4/system/include/syscommon.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/include/syscommon.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,189 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#ifndef _SYSCOMMON_H
++#define _SYSCOMMON_H
++
++#include "sysconfig.h"      
++#include "sysinfo.h"          
++#include "servicesint.h"
++#include "queue.h"
++#include "power.h"
++#include "resman.h"
++#include "ra.h"
++#include "device.h"
++#include "buffer_manager.h"
++ 
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef struct _SYS_DEVICE_ID_TAG
++{
++      IMG_UINT32      uiID;
++      IMG_BOOL        bInUse;
++
++} SYS_DEVICE_ID;
++
++
++#define SYS_MAX_LOCAL_DEVMEM_ARENAS   4
++
++typedef struct _SYS_DATA_TAG_
++{
++    IMG_UINT32                  ui32NumDevices;               
++      SYS_DEVICE_ID                           sDeviceID[SYS_DEVICE_COUNT];
++    PVRSRV_DEVICE_NODE                        *psDeviceNodeList;                      
++    PVRSRV_POWER_DEV                  *psPowerDeviceList;                     
++      PVRSRV_RESOURCE                         sPowerStateChangeResource;      
++      PVR_POWER_STATE                         eCurrentPowerState;                     
++      PVR_POWER_STATE                         eFailedPowerState;                      
++      IMG_UINT32                                      ui32CurrentOSPowerState;        
++    PVRSRV_QUEUE_INFO           *psQueueList;                 
++      PVRSRV_KERNEL_SYNC_INFO         *psSharedSyncInfoList;          
++    IMG_PVOID                   pvEnvSpecificData;            
++    IMG_PVOID                   pvSysSpecificData;                    
++      PVRSRV_RESOURCE                         sQProcessResource;                      
++      IMG_VOID                                        *pvSOCRegsBase;                         
++    IMG_HANDLE                  hSOCTimerRegisterOSMemHandle; 
++      IMG_UINT32                                      *pvSOCTimerRegisterKM;          
++      IMG_VOID                                        *pvSOCClockGateRegsBase;        
++      IMG_UINT32                                      ui32SOCClockGateRegsSize;
++      PFN_CMD_PROC                            *ppfnCmdProcList[SYS_DEVICE_COUNT];
++                                                                                                                      
++
++
++      PCOMMAND_COMPLETE_DATA          *ppsCmdCompleteData[SYS_DEVICE_COUNT];
++                                                                                                                      
++
++      IMG_BOOL                    bReProcessQueues;                   
++
++      RA_ARENA                                        *apsLocalDevMemArena[SYS_MAX_LOCAL_DEVMEM_ARENAS]; 
++
++    IMG_CHAR                    *pszVersionString;          
++} SYS_DATA;
++
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID);
++
++IMG_UINT32 GetCPUTranslatedAddress(IMG_VOID);
++
++PVRSRV_ERROR SysDeinitialise(SYS_DATA *psSysData);
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++                                                                      IMG_VOID **ppvDeviceMap);
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA                     *psSysData,
++                                                               PVRSRV_DEVICE_NODE *psDeviceNode);
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits);
++
++PVRSRV_ERROR SysResetDevice(IMG_UINT32 ui32DeviceIndex);
++
++PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState);
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32 ui32DeviceIndex,
++                                                                      PVR_POWER_STATE eNewPowerState,
++                                                                      PVR_POWER_STATE eCurrentPowerState);
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32 ui32DeviceIndex,
++                                                                       PVR_POWER_STATE eNewPowerState,
++                                                                       PVR_POWER_STATE eCurrentPowerState);
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32      ui32ID, 
++                                                              IMG_VOID        *pvIn,
++                                                              IMG_UINT32  ulInSize,
++                                                              IMG_VOID        *pvOut,
++                                                              IMG_UINT32      ulOutSize);
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_CPU_PHYADDR cpu_paddr);
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr (PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR SysPAddr);
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR SysPAddr);
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr);
++
++
++
++extern SYS_DATA* gpsSysData;
++
++#if !defined(USE_CODE)
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysAcquireData)
++#endif
++static INLINE PVRSRV_ERROR SysAcquireData(SYS_DATA **ppsSysData)
++{
++      
++      *ppsSysData = gpsSysData;
++
++      
++
++
++
++      if (!gpsSysData)
++      {
++              return PVRSRV_ERROR_GENERIC;    
++      }
++              
++      return PVRSRV_OK;
++}
++
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysInitialiseCommon)
++#endif
++static INLINE PVRSRV_ERROR SysInitialiseCommon(SYS_DATA *psSysData)
++{
++      PVRSRV_ERROR    eError;
++
++      
++      eError = PVRSRVInit(psSysData);
++
++      return eError;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(SysDeinitialiseCommon)
++#endif
++static INLINE IMG_VOID SysDeinitialiseCommon(SYS_DATA *psSysData)
++{
++      
++      PVRSRVDeInit(psSysData);
++
++      OSDestroyResource(&psSysData->sPowerStateChangeResource);
++}
++#endif 
++
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/oemfuncs.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,56 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__OEMFUNCS_H__)
++#define __OEMFUNCS_H__
++
++#if defined (__cplusplus)
++extern "C" {
++#endif
++
++typedef IMG_UINT32   (*PFN_SRV_BRIDGEDISPATCH)( IMG_UINT32  Ioctl,
++                                                                                              IMG_BYTE   *pInBuf,
++                                                                                              IMG_UINT32  InBufLen, 
++                                                                                          IMG_BYTE   *pOutBuf,
++                                                                                              IMG_UINT32  OutBufLen,
++                                                                                              IMG_UINT32 *pdwBytesTransferred);
++typedef struct PVRSRV_DC_OEM_JTABLE_TAG
++{
++      PFN_SRV_BRIDGEDISPATCH                  pfnOEMBridgeDispatch;
++      IMG_PVOID                                               pvDummy1;
++      IMG_PVOID                                               pvDummy2;
++      IMG_PVOID                                               pvDummy3;
++
++} PVRSRV_DC_OEM_JTABLE;
++
++#define OEM_GET_EXT_FUNCS                     (1<<1)
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif        
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.c    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,764 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#include "services_headers.h"
++#include "kerneldisplay.h"
++#include "oemfuncs.h"
++#include "sgxinfo.h"
++#include "pdump_km.h"
++#include "sgxinfokm.h"
++#include "syslocal.h"
++#include "sysconfig.h"
++
++#define SYS_SGX_HWRECOVERY_TIMEOUT_FREQ               (100) 
++#define SYS_SGX_PDS_TIMER_FREQ                                (1000) 
++#define SYS_SGX_ACTIVE_POWER_LATENCY_MS               (500)
++
++SYS_DATA* gpsSysData = (SYS_DATA*)IMG_NULL;
++SYS_DATA  gsSysData;
++
++static SYS_SPECIFIC_DATA gsSysSpecificData;
++
++static IMG_UINT32     gui32SGXDeviceID;
++static SGX_DEVICE_MAP gsSGXDeviceMap;
++static PVRSRV_DEVICE_NODE *gpsSGXDevNode;
++
++#define DEVICE_SGX_INTERRUPT (1 << 0)
++
++#if defined(NO_HARDWARE)
++
++#if defined(__linux__)
++#include "mm.h"
++#else
++IMG_CPU_VIRTADDR gsSGXRegsCPUVAddr;
++#endif
++#endif
++
++IMG_UINT32 PVRSRV_BridgeDispatchKM(IMG_UINT32 Ioctl,
++                                                                 IMG_BYTE             *pInBuf,
++                                                                 IMG_UINT32   InBufLen,
++                                                                 IMG_BYTE             *pOutBuf,
++                                                                 IMG_UINT32   OutBufLen,
++                                                                 IMG_UINT32   *pdwBytesTransferred);
++
++static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
++{
++#if !defined(__linux__) && defined(NO_HARDWARE)
++      PVRSRV_ERROR eError;
++      IMG_CPU_PHYADDR sCpuPAddr;
++#endif
++
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++
++      
++      gsSGXDeviceMap.ui32Flags = 0x0;
++      
++#if defined(NO_HARDWARE)
++      
++
++
++      
++#if defined(__linux__)
++      gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_SGX_DUMMY_REGS_SYS_PHYS_BASE;
++      gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++      gsSGXDeviceMap.sRegsDevPBase.uiAddr = 0;
++      gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++      
++      {
++              IMG_VOID *pvRegisters;
++              pvRegisters = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
++                              gsSGXDeviceMap.ui32RegsSize,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              IMG_NULL);
++              if (!pvRegisters)
++              {
++                      return PVRSRV_ERROR_OUT_OF_MEMORY;
++              }
++              OSMemSet(pvRegisters, 0, gsSGXDeviceMap.ui32RegsSize);
++              OSUnMapPhysToLin(pvRegisters,
++                              gsSGXDeviceMap.ui32RegsSize,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              IMG_NULL);
++      }
++#else
++      eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, 
++                                                                       &gsSGXRegsCPUVAddr,
++                                                                       &sCpuPAddr);
++      if(eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++      gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
++      gsSGXDeviceMap.sRegsDevPBase = SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, gsSGXDeviceMap.sRegsCpuPBase);
++      gsSGXDeviceMap.sRegsSysPBase = SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);;
++      gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++      OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
++#endif
++
++      
++
++
++      gsSGXDeviceMap.ui32IRQ = 0;
++
++#else 
++
++      gsSGXDeviceMap.sRegsSysPBase.uiAddr = SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
++      gsSGXDeviceMap.sRegsCpuPBase = SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
++      gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
++
++      gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
++
++#endif 
++
++
++      
++
++
++      return PVRSRV_OK;
++}
++
++
++IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion)
++{
++      static IMG_CHAR aszVersionString[100];
++      IMG_VOID        *pvRegsLinAddr;
++      SYS_DATA        *psSysData;
++      IMG_UINT32      ui32SGXRevision;
++      IMG_INT32       i32Count;
++
++      pvRegsLinAddr = OSMapPhysToLin(sRegRegion,
++                                                                 SYS_OMAP3430_SGX_REGS_SIZE,
++                                                                 PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
++                                                                 IMG_NULL);
++      if(!pvRegsLinAddr)
++      {
++              return IMG_NULL;
++      }
++
++      ui32SGXRevision = OSReadHWReg((IMG_PVOID)((IMG_PBYTE)pvRegsLinAddr),
++                                                                EUR_CR_CORE_REVISION);
++
++      if (SysAcquireData(&psSysData) != PVRSRV_OK)
++      {
++              return IMG_NULL;
++      }
++
++      i32Count = OSSNPrintf(aszVersionString, 100,
++                                                "SGX revision = %u.%u.%u",
++                                                (unsigned int)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAJOR_MASK)
++                                                      >> EUR_CR_CORE_REVISION_MAJOR_SHIFT),
++                                                (unsigned int)((ui32SGXRevision & EUR_CR_CORE_REVISION_MINOR_MASK)
++                                                      >> EUR_CR_CORE_REVISION_MINOR_SHIFT),
++                                                (unsigned int)((ui32SGXRevision & EUR_CR_CORE_REVISION_MAINTENANCE_MASK)
++                                                      >> EUR_CR_CORE_REVISION_MAINTENANCE_SHIFT)
++                                               );
++
++      OSUnMapPhysToLin(pvRegsLinAddr,
++                                       SYS_OMAP3430_SGX_REGS_SIZE,
++                                       PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
++                                       IMG_NULL);
++
++      if(i32Count == -1)
++      {
++              return IMG_NULL;
++      }
++
++      return aszVersionString;
++}
++
++
++PVRSRV_ERROR SysInitialise(IMG_VOID)
++{
++      IMG_UINT32                      i;
++      PVRSRV_ERROR            eError;
++      PVRSRV_DEVICE_NODE      *psDeviceNode;
++      IMG_CPU_PHYADDR         TimerRegPhysBase;
++      SGX_TIMING_INFORMATION* psTimingInfo;
++
++      gpsSysData = &gsSysData;
++      OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));
++
++      gpsSysData->pvSysSpecificData = &gsSysSpecificData;
++      gsSysSpecificData.ui32SysSpecificData = 0;
++
++      eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_ENVDATA;
++
++      gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;
++
++      
++      for(i=0; i<SYS_DEVICE_COUNT; i++)
++      {
++              gpsSysData->sDeviceID[i].uiID = i;
++              gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
++      }
++
++      gpsSysData->psDeviceNodeList = IMG_NULL;
++      gpsSysData->psQueueList = IMG_NULL;
++
++      eError = SysInitialiseCommon(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE;
++      gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
++      gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
++      OSReservePhys(TimerRegPhysBase,
++                                4,
++                                PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
++                                (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
++                                &gpsSysData->hSOCTimerRegisterOSMemHandle);
++
++      
++      psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
++      psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
++      psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; 
++      psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; 
++      psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; 
++
++      
++
++
++
++      eError = SysLocateDevices(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV;
++
++      
++
++
++      eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
++                                                                DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_REGDEV;
++
++      
++
++
++      
++      psDeviceNode = gpsSysData->psDeviceNodeList;
++      while(psDeviceNode)
++      {
++              
++              switch(psDeviceNode->sDevId.eDeviceType)
++              {
++                      case PVRSRV_DEVICE_TYPE_SGX:
++                      {
++                              DEVICE_MEMORY_INFO *psDevMemoryInfo;
++                              DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
++
++                              
++
++
++                              psDeviceNode->psLocalDevMemArena = IMG_NULL;
++
++                              
++                              psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
++                              psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
++
++                              
++                              for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
++                              {
++                                      psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
++                              }
++
++                              gpsSGXDevNode = psDeviceNode;
++
++                              break;
++                      }
++                      default:
++                              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
++                              return PVRSRV_ERROR_INIT_FAILURE;
++              }
++
++              
++              psDeviceNode = psDeviceNode->psNext;
++      }
++
++      PDUMPINIT();
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT;
++
++      
++
++
++      eError = EnableSystemClocks(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS;
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      eError = EnableSGXClocks(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++#endif        
++
++      
++
++      eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_INITDEV;
++
++
++#if defined(SYS_USING_INTERRUPTS)
++      eError = OSInstallMISR(gpsSysData);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallMISR: Failed to install MISR"));
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_MISR;
++
++      
++      eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
++      if (eError != PVRSRV_OK)
++      {
++              PVR_DPF((PVR_DBG_ERROR,"OSInstallDeviceLISR: Failed to install ISR"));
++              OSUninstallMISR(gpsSysData);
++              SysDeinitialise(gpsSysData);
++              gpsSysData = IMG_NULL;
++              return eError;
++      }
++      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LISR;
++#endif 
++
++      
++      gpsSysData->pszVersionString = SysCreateVersionString(gsSGXDeviceMap.sRegsCpuPBase);
++      if (!gpsSysData->pszVersionString)
++      { 
++              PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create a system version string"));
++      }
++      else
++      {
++              PVR_DPF((PVR_DBG_WARNING, "SysInitialise: Version string: %s", gpsSysData->pszVersionString));
++      }
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      DisableSGXClocks(gpsSysData);
++#endif        
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
++{
++      PVRSRV_ERROR eError;
++      
++      SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++
++#if defined(SYS_USING_INTERRUPTS)
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR)
++      {
++              eError = OSUninstallDeviceLISR(psSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
++                      return eError;
++              }
++      }
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_MISR)
++      {
++              eError = OSUninstallMISR(psSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
++                      return eError;
++              }
++      }
++#endif 
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_INITDEV)      
++      {
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++              PVR_ASSERT(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
++              
++              eError = EnableSGXClocks(gpsSysData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
++                      return eError;
++              }
++#endif        
++
++              
++              eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
++                      return eError;
++              }
++      }
++      
++      
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)
++      {
++              DisableSystemClocks(gpsSysData);
++      }
++
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_ENVDATA)      
++      {       
++              eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
++              if (eError != PVRSRV_OK)
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
++                      return eError;
++              }
++      }
++
++      if(gpsSysData->pvSOCTimerRegisterKM)
++      {
++              OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
++                                              4,
++                                              PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
++                                              gpsSysData->hSOCTimerRegisterOSMemHandle);
++      }
++
++      SysDeinitialiseCommon(gpsSysData);
++
++#if defined(NO_HARDWARE)
++
++#if !defined(__linux__)
++      if(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV)
++      {
++              
++              OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
++      }
++#endif
++#endif
++
++      
++      if(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT)
++      {
++              PDUMPDEINIT();
++      }
++
++      psSysSpecData->ui32SysSpecificData = 0;
++      gpsSysData = IMG_NULL;
++
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysGetDeviceMemoryMap(PVRSRV_DEVICE_TYPE eDeviceType,
++                                                                 IMG_VOID                             **ppvDeviceMap)
++{
++
++      switch(eDeviceType)
++      {
++              case PVRSRV_DEVICE_TYPE_SGX:
++              {
++                      
++                      *ppvDeviceMap = (IMG_VOID*)&gsSGXDeviceMap;
++
++                      break;
++              }
++              default:
++              {
++                      PVR_DPF((PVR_DBG_ERROR,"SysGetDeviceMemoryMap: unsupported device type"));
++              }
++      }
++      return PVRSRV_OK;
++}
++
++
++IMG_DEV_PHYADDR SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE      eDeviceType,
++                                                                        IMG_CPU_PHYADDR               CpuPAddr)
++{
++      IMG_DEV_PHYADDR DevPAddr;
++
++      PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++      
++      DevPAddr.uiAddr = CpuPAddr.uiAddr;
++      
++      return DevPAddr;
++}
++
++IMG_CPU_PHYADDR SysSysPAddrToCpuPAddr (IMG_SYS_PHYADDR sys_paddr)
++{
++      IMG_CPU_PHYADDR cpu_paddr;
++
++      
++      cpu_paddr.uiAddr = sys_paddr.uiAddr;
++      return cpu_paddr;
++}
++
++IMG_SYS_PHYADDR SysCpuPAddrToSysPAddr (IMG_CPU_PHYADDR cpu_paddr)
++{
++      IMG_SYS_PHYADDR sys_paddr;
++
++      
++      sys_paddr.uiAddr = cpu_paddr.uiAddr;
++      return sys_paddr;
++}
++
++
++IMG_DEV_PHYADDR SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_SYS_PHYADDR SysPAddr)
++{
++      IMG_DEV_PHYADDR DevPAddr;
++
++      PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++      
++      DevPAddr.uiAddr = SysPAddr.uiAddr;
++
++      return DevPAddr;
++}
++
++
++IMG_SYS_PHYADDR SysDevPAddrToSysPAddr(PVRSRV_DEVICE_TYPE eDeviceType, IMG_DEV_PHYADDR DevPAddr)
++{
++      IMG_SYS_PHYADDR SysPAddr;
++
++      PVR_UNREFERENCED_PARAMETER(eDeviceType);
++
++      
++      SysPAddr.uiAddr = DevPAddr.uiAddr;
++
++      return SysPAddr;
++}
++
++
++IMG_VOID SysRegisterExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++
++IMG_VOID SysRemoveExternalDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++      PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++
++IMG_UINT32 SysGetInterruptSource(SYS_DATA                     *psSysData,
++                                                               PVRSRV_DEVICE_NODE     *psDeviceNode)
++{
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++#if defined(NO_HARDWARE)
++      
++      return 0xFFFFFFFF;
++#else
++      
++      return psDeviceNode->ui32SOCInterruptBit;
++#endif
++}
++
++
++IMG_VOID SysClearInterrupts(SYS_DATA* psSysData, IMG_UINT32 ui32ClearBits)
++{
++      PVR_UNREFERENCED_PARAMETER(psSysData);
++      PVR_UNREFERENCED_PARAMETER(ui32ClearBits);
++
++      
++}
++
++
++PVRSRV_ERROR SysSystemPrePowerState(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (eNewPowerState == PVRSRV_POWER_STATE_D3)
++      {
++              PVR_TRACE(("SysSystemPrePowerState: Entering state D3"));
++
++#if defined(SYS_USING_INTERRUPTS)
++              if (gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR)
++              {
++                      eError = OSUninstallDeviceLISR(gpsSysData);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SysSystemPrePowerState: OSUninstallDeviceLISR failed (%d)", eError));
++                              return eError;
++                      }
++                      gsSysSpecificData.ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_LISR;
++              }
++#endif        
++              if (gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS)
++              {
++                      DisableSystemClocks(gpsSysData);
++                      gsSysSpecificData.ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS;
++              }
++      }
++      return eError;
++}
++
++
++PVRSRV_ERROR SysSystemPostPowerState(PVR_POWER_STATE eNewPowerState)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (eNewPowerState == PVRSRV_POWER_STATE_D0)
++      {
++              PVR_TRACE(("SysSystemPostPowerState: Entering state D0"));
++
++              if (!(gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
++              {
++                      eError = EnableSystemClocks(gpsSysData);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: EnableSystemClocks failed (%d)", eError));
++                              return eError;
++                      }
++                      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS;
++              }
++
++#if defined(SYS_USING_INTERRUPTS)
++              if (!(gsSysSpecificData.ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_LISR))
++              {
++                      eError = OSInstallDeviceLISR(gpsSysData, gsSGXDeviceMap.ui32IRQ, "SGX ISR", gpsSGXDevNode);
++                      if (eError != PVRSRV_OK)
++                      {
++                              PVR_DPF((PVR_DBG_ERROR,"SysSystemPostPowerState: OSInstallDeviceLISR failed to install ISR (%d)", eError));
++                              return eError;
++                      }
++                      gsSysSpecificData.ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_LISR;
++              }
++#endif        
++      }
++      return eError;
++}
++
++
++PVRSRV_ERROR SysDevicePrePowerState(IMG_UINT32                        ui32DeviceIndex,
++                                                                      PVR_POWER_STATE         eNewPowerState,
++                                                                      PVR_POWER_STATE         eCurrentPowerState)
++{
++      PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++
++      if (ui32DeviceIndex != gui32SGXDeviceID)
++      {
++              return PVRSRV_OK;
++      }
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (eNewPowerState == PVRSRV_POWER_STATE_D3)
++      {
++              PVR_TRACE(("SysDevicePrePowerState: SGX Entering state D3"));
++              DisableSGXClocks(gpsSysData);
++      }
++#else 
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++#endif 
++      return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR SysDevicePostPowerState(IMG_UINT32                       ui32DeviceIndex,
++                                                                       PVR_POWER_STATE        eNewPowerState,
++                                                                       PVR_POWER_STATE        eCurrentPowerState)
++{
++      PVRSRV_ERROR eError = PVRSRV_OK;
++
++      if (ui32DeviceIndex != gui32SGXDeviceID)
++      {
++              return eError;
++      }
++
++      PVR_UNREFERENCED_PARAMETER(eNewPowerState);
++
++#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      if (eCurrentPowerState == PVRSRV_POWER_STATE_D3)
++      {
++              PVR_TRACE(("SysDevicePostPowerState: SGX Leaving state D3"));
++              eError = EnableSGXClocks(gpsSysData);
++      }
++#else 
++      PVR_UNREFERENCED_PARAMETER(eCurrentPowerState);
++#endif        
++      
++      return eError;
++}
++
++
++PVRSRV_ERROR SysOEMFunction ( IMG_UINT32      ui32ID,
++                                                              IMG_VOID        *pvIn,
++                                                              IMG_UINT32      ulInSize,
++                                                              IMG_VOID        *pvOut,
++                                                              IMG_UINT32      ulOutSize)
++{
++      PVR_UNREFERENCED_PARAMETER(ui32ID);
++      PVR_UNREFERENCED_PARAMETER(pvIn);
++      PVR_UNREFERENCED_PARAMETER(ulInSize);
++      PVR_UNREFERENCED_PARAMETER(pvOut);
++      PVR_UNREFERENCED_PARAMETER(ulOutSize);
++
++      if ((ui32ID == OEM_GET_EXT_FUNCS) &&
++              (ulOutSize == sizeof(PVRSRV_DC_OEM_JTABLE)))
++      {
++              
++              PVRSRV_DC_OEM_JTABLE *psOEMJTable = (PVRSRV_DC_OEM_JTABLE*) pvOut;
++              psOEMJTable->pfnOEMBridgeDispatch = &PVRSRV_BridgeDispatchKM;
++              return PVRSRV_OK;
++      }
++
++      return PVRSRV_ERROR_INVALID_PARAMS;
++}
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h  1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysconfig.h    2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,60 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SOCCONFIG_H__)
++#define __SOCCONFIG_H__
++
++#include "syscommon.h"
++
++#define VS_PRODUCT_NAME       "OMAP3430"
++
++#define SYS_SGX_CLOCK_SPEED                                   (110000000)
++
++#define SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE  0x50000000
++#define SYS_OMAP3430_SGX_REGS_SIZE           0x4000
++
++#define SYS_OMAP3430_SGX_IRQ                           21
++
++#define SYS_OMAP3430_PM_REGS_SYS_PHYS_BASE     0x48306000
++#define SYS_OMAP3430_PM_REGS_SIZE                      0x1000
++
++#define SYS_OMAP3430_CM_REGS_SYS_PHYS_BASE     0x48004000
++#define SYS_OMAP3430_CM_REGS_SIZE                      0x1000
++
++
++#define SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE  0x48088024
++#define SYS_OMAP3430_GP11TIMER_REGS_SYS_PHYS_BASE      0x48088028
++#define SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE     0x48088040
++
++#if defined(NO_HARDWARE)
++
++#if defined(__linux__)
++#define SYS_SGX_DUMMY_REGS_SYS_PHYS_BASE  ((127*1024*1024) + 0x80000000)
++#endif
++#endif
++
++ 
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h    1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysinfo.h      2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,98 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSINFO_H__)
++#define __SYSINFO_H__
++
++#define MAX_HW_TIME_US                                (500000)
++#define WAIT_TRY_COUNT                                (10000)
++
++typedef enum _SYS_DEVICE_TYPE_
++{
++      SYS_DEVICE_SGX                                          = 0,
++
++      SYS_DEVICE_FORCE_I16                            = 0x7fff
++
++} SYS_DEVICE_TYPE;
++
++#define SYS_DEVICE_COUNT 3 
++
++#define PRM_REG32(offset)       (offset)
++#define CM_REG32(offset)        (offset)
++
++#define CM_FCLKEN_SGX         CM_REG32(0xB00)
++#define               CM_FCLKEN_SGX_EN_3D                                     0x00000002
++
++#define CM_ICLKEN_SGX         CM_REG32(0xB10)
++#define               CM_ICLKEN_SGX_EN_SGX                            0x00000001
++
++#define CM_IDLEST_SGX         CM_REG32(0xB20)
++#define               CM_IDLEST_SGX_ST_SGX                            0x00000001
++
++#define CM_CLKSEL_SGX         CM_REG32(0xB40)
++#define               CM_CLKSEL_SGX_MASK                                      0x0000000f
++#define               CM_CLKSEL_SGX_L3DIV3                            0x00000000
++#define               CM_CLKSEL_SGX_L3DIV4                            0x00000001
++#define               CM_CLKSEL_SGX_L3DIV6                            0x00000002
++#define               CM_CLKSEL_SGX_96M                                       0x00000003
++
++#define CM_SLEEPDEP_SGX               CM_REG32(0xB44)
++#define CM_CLKSTCTRL_SGX      CM_REG32(0xB48)
++#define       CM_CLKSTCTRL_SGX_AUTOSTATE                      0x00008001
++
++#define CM_CLKSTST_SGX                CM_REG32(0xB4C)
++#define       CM_CLKSTST_SGX_STATUS_VALID                     0x00000001
++
++#define RM_RSTST_SGX          PRM_REG32(0xB58)
++#define       RM_RSTST_SGX_RST_MASK                           0x0000000F
++#define       RM_RSTST_SGX_COREDOMAINWKUP_RST         0x00000008
++#define       RM_RSTST_SGX_DOMAINWKUP_RST                     0x00000004
++#define       RM_RSTST_SGX_GLOBALWARM_RST                     0x00000002
++#define       RM_RSTST_SGX_GLOBALCOLD_RST                     0x00000001
++
++#define PM_WKDEP_SGX          PRM_REG32(0xBC8)
++#define       PM_WKDEP_SGX_EN_WAKEUP                          0x00000010
++#define       PM_WKDEP_SGX_EN_MPU                                     0x00000002
++#define       PM_WKDEP_SGX_EN_CORE                            0x00000001
++
++#define PM_PWSTCTRL_SGX               PRM_REG32(0xBE0)
++#define               PM_PWSTCTRL_SGX_POWERSTATE_MASK         0x00000003
++#define                       PM_PWSTCTRL_SGX_OFF                             0x00000000
++#define                       PM_PWSTCTRL_SGX_RETENTION               0x00000001
++#define                       PM_PWSTCTRL_SGX_ON                              0x00000003
++
++#define PM_PWSTST_SGX         PRM_REG32(0xBE4)
++#define               PM_PWSTST_SGX_INTRANSITION                      0x00100000
++#define               PM_PWSTST_SGX_CLKACTIVITY                       0x00080000
++#define               PM_PWSTST_SGX_POWERSTATE_MASK           0x00000003
++#define                       PM_PWSTST_SGX_OFF                               0x00000003
++#define                       PM_PWSTST_SGX_RETENTION                 0x00000001
++#define                       PM_PWSTST_SGX_ON                                0x00000000
++
++#define PM_PREPWSTST_SGX      PRM_REG32(0xBE8)
++
++
++#endif        
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/syslocal.h git-nokia/drivers/gpu/pvr/services4/system/omap3430/syslocal.h
+--- git/drivers/gpu/pvr/services4/system/omap3430/syslocal.h   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/syslocal.h     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,63 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if !defined(__SYSLOCAL_H__)
++#define __SYSLOCAL_H__
++
++ 
++ 
++IMG_CHAR *SysCreateVersionString(IMG_CPU_PHYADDR sRegRegion);
++
++IMG_VOID DisableSystemClocks(SYS_DATA *psSysData);
++PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData);
++
++IMG_VOID DisableSGXClocks(SYS_DATA *psSysData);
++PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData);
++
++#define SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS    0x00000001
++#define SYS_SPECIFIC_DATA_ENABLE_LISR         0x00000002
++#define SYS_SPECIFIC_DATA_ENABLE_MISR         0x00000004
++#define SYS_SPECIFIC_DATA_ENABLE_ENVDATA      0x00000008
++#define SYS_SPECIFIC_DATA_ENABLE_LOCDEV               0x00000010
++#define SYS_SPECIFIC_DATA_ENABLE_REGDEV               0x00000020
++#define SYS_SPECIFIC_DATA_ENABLE_PDUMPINIT    0x00000040
++#define SYS_SPECIFIC_DATA_ENABLE_INITDEV      0x00000080
++#define SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV    0x00000100
++#define SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS    0x00000200
++
++typedef struct _SYS_SPECIFIC_DATA_TAG_
++{
++      IMG_UINT32 ui32SysSpecificData;
++#if defined(__linux__)
++      struct clk *psSGX_FCK;
++      struct clk *psSGX_ICK;
++      struct clk *psMPU_CK;
++#endif
++} SYS_SPECIFIC_DATA;
++
++#endif        
++
++
+diff -Nurd git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysutils.c
+--- git/drivers/gpu/pvr/services4/system/omap3430/sysutils.c   1970-01-01 01:00:00.000000000 +0100
++++ git-nokia/drivers/gpu/pvr/services4/system/omap3430/sysutils.c     2008-12-08 14:52:52.000000000 +0100
+@@ -0,0 +1,377 @@
++/**********************************************************************
++ *
++ * Copyright(c) 2008 Imagination Technologies Ltd. All rights reserved.
++ * 
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ * 
++ * This program is distributed in the hope it will be useful but, except 
++ * as otherwise stated in writing, without any warranty; without even the 
++ * implied warranty of merchantability or fitness for a particular purpose. 
++ * See the GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ * 
++ * The full GNU General Public License is included in this distribution in
++ * the file called "COPYING".
++ *
++ * Contact Information:
++ * Imagination Technologies Ltd. <gpl-support@imgtec.com>
++ * Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK 
++ *
++ ******************************************************************************/
++
++#if defined(__linux__)
++#include <linux/clk.h>
++#include <linux/err.h>
++#endif
++
++#include "sgxdefs.h"
++#include "services_headers.h"
++#include "sysinfo.h"
++#include "syslocal.h"
++#include "sgxapi_km.h"
++
++
++#if defined(__linux__)
++PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
++{
++      SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++#if defined(__linux__)
++      int rate;
++      int res;
++      struct clk *psCLK;
++#endif
++
++      
++      if (psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS)
++      {
++              return PVRSRV_OK;
++      }
++
++      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Enabling SGX Clocks"));
++
++#if defined(__linux__)
++      if (psSysSpecData->psSGX_FCK == IMG_NULL)
++      {
++              psCLK = clk_get(NULL, "sgx_fck");
++              if (IS_ERR(psCLK))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't get SGX functional clock"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              psSysSpecData->psSGX_FCK = psCLK;
++      }
++
++      if (psSysSpecData->psSGX_ICK == IMG_NULL)
++      {
++              psCLK = clk_get(NULL, "sgx_ick");
++              if (IS_ERR(psCLK))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't get SGX interface clock"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              psSysSpecData->psSGX_ICK = psCLK;
++      }
++
++      if (psSysSpecData->psMPU_CK == IMG_NULL)
++      {
++              psCLK = clk_get(NULL, "mpu_ck");
++              if (IS_ERR(psCLK))
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't get MPU clock"));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++              psSysSpecData->psMPU_CK = psCLK;
++      }
++
++      rate = clk_get_rate(psSysSpecData->psMPU_CK);
++      PVR_TRACE(("CPU Clock is %dMhz", rate/1000000));
++
++      res = clk_enable(psSysSpecData->psSGX_FCK);
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      res = clk_enable(psSysSpecData->psSGX_ICK); 
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX interface clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      rate = clk_get_rate(psSysSpecData->psSGX_FCK);
++      if(rate < 110666666)
++      {
++              PVR_TRACE(("SGX FClock is %dMhz. Setting to 110Mhz now", rate/1000000));
++              clk_set_rate(psSysSpecData->psSGX_FCK, 110666666);
++              rate = clk_get_rate(psSysSpecData->psSGX_FCK);
++              if (rate < 0)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't set SGX functional clock speed (%d)", rate));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      PVR_TRACE(("SGX FClock is %dMhz", rate/1000000));
++
++#else 
++#error "SGX dynamic clock control not supported for this environment"
++#endif        
++
++      
++      psSysSpecData->ui32SysSpecificData |= SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS;
++
++      return PVRSRV_OK;
++}
++
++
++IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
++{
++      SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
++
++      
++      if (!(psSysSpecData->ui32SysSpecificData & SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS))
++      {
++              return;
++      }
++
++      PVR_TRACE(("DisableSGXClocks: Disabling SGX Clocks"));
++
++#if defined(__linux__)
++      if (psSysSpecData->psSGX_ICK)
++      {
++              clk_disable(psSysSpecData->psSGX_ICK); 
++      }
++
++      if (psSysSpecData->psSGX_FCK)
++      {
++              clk_disable(psSysSpecData->psSGX_FCK);
++      }
++
++#else 
++#error "SGX dynamic clock control not supported for this environment"
++#endif        
++
++      
++      psSysSpecData->ui32SysSpecificData &= ~SYS_SPECIFIC_DATA_ENABLE_SGXCLOCKS;
++
++      return;
++}
++#else 
++#endif 
++
++PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
++{
++#if defined(__linux__)
++      int res;
++      int rate;
++#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      PVRSRV_ERROR eError;
++#endif
++#if defined(DEBUG) || defined(TIMING)
++      struct clk *pGpt11_fck;
++      struct clk *pGpt11_ick;
++      struct clk *sys_ck;
++      IMG_CPU_PHYADDR     TimerRegPhysBase;
++      IMG_HANDLE hTimerEnable;
++      IMG_UINT32 *pui32TimerEnable;
++#endif        
++
++      PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));
++
++#if !defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
++      
++      eError = EnableSGXClocks(psSysData);
++      if (eError != PVRSRV_OK)
++      {
++              return eError;
++      }
++#endif
++
++#if defined(DEBUG) || defined(TIMING)
++      
++      pGpt11_fck = clk_get(NULL, "gpt11_fck");
++      if (IS_ERR(pGpt11_fck))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      pGpt11_ick = clk_get(NULL, "gpt11_ick");
++      if (IS_ERR(pGpt11_ick))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      sys_ck = clk_get(NULL, "sys_ck");
++      if (IS_ERR(sys_ck))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      if(clk_get_parent(pGpt11_fck) != sys_ck)
++      {
++              PVR_TRACE(("Setting GPTIMER11 parent to System Clock (13Mhz)"));
++              res = clk_set_parent(pGpt11_fck, sys_ck);
++              if (res < 0)
++              {
++                      PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
++                      return PVRSRV_ERROR_GENERIC;
++              }
++      }
++
++      rate = clk_get_rate(pGpt11_fck);
++      PVR_TRACE(("GPTIMER11 clock is %dHz", rate));
++      
++      res = clk_enable(pGpt11_fck);
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++
++      res = clk_enable(pGpt11_ick);
++      if (res < 0)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_TSICR_SYS_PHYS_BASE;
++
++      
++      pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
++                  4,
++                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                  &hTimerEnable);
++
++      if (pui32TimerEnable == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      else
++      {
++              rate = *pui32TimerEnable;
++              if(!(rate & 4))
++              {
++                      PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));
++                      
++                      
++                      *pui32TimerEnable = rate | 4;
++              }
++
++              OSUnMapPhysToLin(pui32TimerEnable,
++                          4,
++                          PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                          hTimerEnable);
++      }
++
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
++
++      
++      pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
++                  4,
++                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                  &hTimerEnable);
++
++      if (pui32TimerEnable == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
++              return PVRSRV_ERROR_GENERIC;
++      }
++      else
++      {
++              
++              *pui32TimerEnable = 3;
++
++              OSUnMapPhysToLin(pui32TimerEnable,
++                          4,
++                          PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                          hTimerEnable);
++
++      }
++#endif 
++
++#else 
++#error "OMAP graphics clock initialisation not supported for this environment"
++#endif        
++
++      return PVRSRV_OK;
++}
++
++
++IMG_VOID DisableSystemClocks(SYS_DATA *psSysData)
++{
++#if defined(__linux__)
++#if defined(DEBUG) || defined(TIMING)
++      struct clk *pGpt11_fck;
++      struct clk *pGpt11_ick;
++      IMG_CPU_PHYADDR     TimerRegPhysBase;
++      IMG_HANDLE hTimerDisable;
++      IMG_UINT32 *pui32TimerDisable;
++#endif        
++
++      PVR_TRACE(("DisableSystemClocks: Disabling System Clocks"));
++
++#if defined(DEBUG) || defined(TIMING)
++      TimerRegPhysBase.uiAddr = SYS_OMAP3430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
++
++      
++      pui32TimerDisable = OSMapPhysToLin(TimerRegPhysBase,
++                              4,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              &hTimerDisable);
++      
++      if (pui32TimerDisable == IMG_NULL)
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: OSMapPhysToLin failed"));
++      }
++      else
++      {
++              *pui32TimerDisable = 0;
++              
++              OSUnMapPhysToLin(pui32TimerDisable,
++                              4,
++                              PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
++                              hTimerDisable);
++      }
++
++      
++      pGpt11_ick = clk_get(NULL, "gpt11_ick");
++      if (IS_ERR(pGpt11_ick))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: Couldn't get GPTIMER11 interface clock"));
++      }
++      else
++      {
++              clk_disable(pGpt11_ick);
++      }
++
++      pGpt11_fck = clk_get(NULL, "gpt11_fck");
++      if (IS_ERR(pGpt11_fck))
++      {
++              PVR_DPF((PVR_DBG_ERROR, "DisableSystemClocks: Couldn't get GPTIMER11 functional clock"));
++      }
++      else
++      {
++              clk_disable(pGpt11_fck);
++      }
++
++#endif 
++
++      
++      DisableSGXClocks(psSysData);
++
++#else 
++#error "Disabling of OMAP graphics clock not supported for this environment"
++#endif 
++}
+--- /tmp/Makefile      2008-12-09 15:25:43.000000000 +0100
++++ git/drivers/gpu/Makefile   2008-12-09 15:25:53.000000000 +0100
+@@ -1 +1 @@
+-obj-y                 += drm/
++obj-y                 += drm-tungsten/ pvr/
diff --git a/packages/linux/omap3-pandora-kernel/read_die_ids.patch b/packages/linux/omap3-pandora-kernel/read_die_ids.patch
new file mode 100755 (executable)
index 0000000..3f6c930
--- /dev/null
@@ -0,0 +1,23 @@
+OMAP2/3 TAP: enable debug messages
+
+From: Paul Walmsley <paul@pwsan.com>
+
+This patch causes the OMAP2/3 chip ID code to display the full DIE_ID registers at boot.
+
+---
+
+ arch/arm/mach-omap2/id.c |    1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
+index c7f9ab7..a154b5e 100644
+--- a/arch/arm/mach-omap2/id.c
++++ b/arch/arm/mach-omap2/id.c
+@@ -10,6 +10,7 @@
+  * it under the terms of the GNU General Public License version 2 as
+  * published by the Free Software Foundation.
+  */
++#define DEBUG
+ #include <linux/module.h>
+ #include <linux/kernel.h>
diff --git a/packages/linux/omap3-pandora-kernel/sitecomwl168-support.diff b/packages/linux/omap3-pandora-kernel/sitecomwl168-support.diff
new file mode 100755 (executable)
index 0000000..8a9a2f5
--- /dev/null
@@ -0,0 +1,10 @@
+--- /tmp/rtl8187_dev.c 2008-12-20 19:41:30.000000000 +0100
++++ git/drivers/net/wireless/rtl8187_dev.c     2008-12-20 19:42:01.000000000 +0100
+@@ -45,6 +45,7 @@
+       {USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
+       /* Sitecom */
+       {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
++      {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+       {}
+ };
diff --git a/packages/linux/omap3-pandora-kernel/strongly-ordered-memory.diff b/packages/linux/omap3-pandora-kernel/strongly-ordered-memory.diff
new file mode 100755 (executable)
index 0000000..b60e4f4
--- /dev/null
@@ -0,0 +1,18 @@
+--- /tmp/irq.c 2008-09-16 10:43:30.000000000 +0200
++++ git/arch/arm/mach-omap2/irq.c      2008-09-16 10:46:18.463198000 +0200
+@@ -64,6 +64,7 @@
+ static void omap_ack_irq(unsigned int irq)
+ {
+       intc_bank_write_reg(0x1, &irq_banks[0], INTC_CONTROL);
++      intc_bank_read_reg(&irq_banks[0],INTC_REVISION);
+ }
+ static void omap_mask_irq(unsigned int irq)
+@@ -73,6 +74,7 @@
+       irq &= (IRQ_BITS_PER_REG - 1);
+       intc_bank_write_reg(1 << irq, &irq_banks[0], INTC_MIR_SET0 + offset);
++      intc_bank_read_reg(&irq_banks[0],INTC_REVISION);
+ }
+ static void omap_unmask_irq(unsigned int irq)
diff --git a/packages/linux/omap3-pandora-kernel/timer-suppression.patch b/packages/linux/omap3-pandora-kernel/timer-suppression.patch
new file mode 100755 (executable)
index 0000000..04362c9
--- /dev/null
@@ -0,0 +1,43 @@
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index b854a89..26f5569 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -253,6 +253,16 @@ void tick_nohz_stop_sched_tick(void)
+       /* Schedule the tick, if we are at least one jiffie off */
+       if ((long)delta_jiffies >= 1) {
++              /*
++               * calculate the expiry time for the next timer wheel
++               * timer
++               */
++              expires = ktime_add_ns(last_update, tick_period.tv64 *
++                                      delta_jiffies);
++
++              /* Skip reprogram of event if its not changed */
++              if(ts->tick_stopped && ktime_equal(expires, dev->next_event))
++              goto out2;
+               if (delta_jiffies > 1)
+                       cpu_set(cpu, nohz_cpu_mask);
+@@ -304,12 +314,7 @@ void tick_nohz_stop_sched_tick(void)
+                       goto out;
+               }
+-              /*
+-               * calculate the expiry time for the next timer wheel
+-               * timer
+-               */
+-              expires = ktime_add_ns(last_update, tick_period.tv64 *
+-                                     delta_jiffies);
++              /* Mark expiries */
+               ts->idle_expires = expires;
+               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+@@ -328,6 +333,7 @@ void tick_nohz_stop_sched_tick(void)
+               tick_do_update_jiffies64(ktime_get());
+               cpu_clear(cpu, nohz_cpu_mask);
+       }
++out2: 
+       raise_softirq_irqoff(TIMER_SOFTIRQ);
+ out:
+       ts->next_jiffies = next_jiffies;
diff --git a/packages/linux/omap3-pandora-kernel/touchscreen.patch b/packages/linux/omap3-pandora-kernel/touchscreen.patch
new file mode 100755 (executable)
index 0000000..2325c40
--- /dev/null
@@ -0,0 +1,22 @@
+diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
+index d8109ae..f8ce669 100644
+--- a/arch/arm/mach-omap2/board-omap3evm.c
++++ b/arch/arm/mach-omap2/board-omap3evm.c
+@@ -128,8 +128,16 @@ static int ads7846_get_pendown_state(void)
+ }
+ struct ads7846_platform_data ads7846_config = {
++      .x_max                  = 0x0fff,
++      .y_max                  = 0x0fff,
++      .x_plate_ohms           = 180,
++      .pressure_max           = 255,
++      .debounce_max           = 10,
++      .debounce_tol           = 3,
++      .debounce_rep           = 1,
+       .get_pendown_state      = ads7846_get_pendown_state,
+       .keep_vref_on           = 1,
++      .settle_delay_usecs     = 150,
+ };
+ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+
diff --git a/packages/linux/omap3-pandora-kernel_2.6.27-pandora.bb b/packages/linux/omap3-pandora-kernel_2.6.27-pandora.bb
new file mode 100755 (executable)
index 0000000..441c59e
--- /dev/null
@@ -0,0 +1,46 @@
+require linux.inc
+
+DESCRIPTION = "2.6.27 Linux kernel for the Pandora handheld console"
+KERNEL_IMAGETYPE = "uImage"
+
+COMPATIBLE_MACHINE = "omap3-pandora"
+
+SRCREV = "33280e83ef2260f8f6ba01345c5d75a9d97a49c0"
+
+#PV = "2.6.27-pandora+git${SRCREV}"
+PR = "r15"
+
+SRC_URI = " \
+       git://openpandora.org/pandora-kernel.git;protocol=git;branch=pandora-27-omap1 \
+       file://defconfig \
+       file://no-empty-flash-warnings.patch;patch=1 \
+       file://oprofile-0.9.3.armv7.diff;patch=1 \
+       file://no-cortex-deadlock.patch;patch=1 \
+       file://read_die_ids.patch;patch=1 \
+       file://fix-install.patch;patch=1 \
+       file://musb-dma-iso-in.eml;patch=1 \
+       file://musb-support-high-bandwidth.patch.eml;patch=1 \
+       file://mru-fix-timings.diff;patch=1 \
+       file://mru-fix-display-panning.diff;patch=1 \
+       file://mru-make-dpll4-m4-ck-programmable.diff;patch=1 \
+       file://mru-add-clk-get-parent.diff;patch=1 \
+       file://mru-improve-pixclock-config.diff;patch=1 \
+       file://mru-make-video-timings-selectable.diff;patch=1 \
+       file://mru-enable-overlay-optimalization.diff;patch=1 \
+       file://musb-fix-ISO-in-unlink.diff;patch=1 \
+       file://musb-fix-multiple-bulk-transfers.diff;patch=1 \
+       file://musb-fix-endpoints.diff;patch=1 \
+       file://dvb-fix-dma.diff;patch=1 \
+       file://0001-Removed-resolution-check-that-prevents-scaling-when.patch;patch=1 \
+       file://0001-Implement-downsampling-with-debugs.patch;patch=1 \
+       file://sitecomwl168-support.diff;patch=1 \
+#      file://pvr/pvr-add.patch;patch=1 \
+       file://pvr/dispc.patch;patch=1 \
+#      file://pvr/nokia-TI.diff;patch=1 \
+"
+       
+S = "${WORKDIR}/git"
+
+#do_configure_prepend() {
+#      install -m 0644 ${S}/arch/arm/configs/omap3_pandora_defconfig ${WORKDIR}/defconfig
+#}
diff --git a/packages/omap3-deviceid/files/Makefile b/packages/omap3-deviceid/files/Makefile
new file mode 100755 (executable)
index 0000000..535afa8
--- /dev/null
@@ -0,0 +1,12 @@
+OBJECTS=mem
+
+CFLAGS=-g -O0 -Wall 
+LIBS= -lm
+
+all: $(OBJECTS)
+
+%: %.c
+       $(CC) -o $@ $^ $(CFLAGS) $(LIBS)
+
+clean:
+       -rm $(OBJECTS)
diff --git a/packages/omap3-deviceid/files/mem.c b/packages/omap3-deviceid/files/mem.c
new file mode 100755 (executable)
index 0000000..a5243c2
--- /dev/null
@@ -0,0 +1,140 @@
+/*\r
+  Copyright (C) 2009 Mans Rullgard\r
+\r
+  Permission is hereby granted, free of charge, to any person\r
+  obtaining a copy of this software and associated documentation files\r
+  (the "Software"), to deal in the Software without restriction,\r
+  including without limitation the rights to use, copy, modify, merge,\r
+  publish, distribute, sublicense, and/or sell copies of the Software,\r
+  and to permit persons to whom the Software is furnished to do so,\r
+  subject to the following conditions:\r
+\r
+  The above copyright notice and this permission notice shall be\r
+  included in all copies or substantial portions of the Software.\r
+\r
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+*/\r
+\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <sys/mman.h>\r
+#include <unistd.h>\r
+#include <fcntl.h>\r
+\r
+static void die(void)\r
+{\r
+    fprintf(stderr, "usage: memdump {-b|-h|-w} addr count\n");\r
+    exit(1);\r
+}\r
+\r
+static void dump_word(void *p, long count, unsigned long offset)\r
+{\r
+    unsigned *d = p;\r
+    int i;\r
+\r
+    while (count > 0) {\r
+        printf("%08lx:", offset);\r
+        for (i = 0; i < 4 && count--; i++)\r
+            printf(" %08x", *d++);\r
+        printf("\n");\r
+        offset += 16;\r
+    }\r
+}\r
+\r
+static void dump_half(void *p, long count, unsigned long offset)\r
+{\r
+    unsigned short *d = p;\r
+    int i;\r
+\r
+    while (count > 0) {\r
+        printf("%08lx:", offset);\r
+        for (i = 0; i < 8 && count--; i++)\r
+            printf(" %04x", *d++);\r
+        printf("\n");\r
+        offset += 16;\r
+    }\r
+}\r
+\r
+static void dump_byte(void *p, long count, unsigned long offset)\r
+{\r
+    unsigned char *d = p;\r
+    int i;\r
+\r
+    while (count > 0) {\r
+        printf("%08lx:", offset);\r
+        for (i = 0; i < 16 && count--; i++)\r
+            printf(" %02x", *d++);\r
+        printf("\n");\r
+        offset += 16;\r
+    }\r
+}\r
+\r
+int main(int argc, char **argv)\r
+{\r
+    void (*dump_fun)(void *p, long count, unsigned long offset) = NULL;\r
+    unsigned long offset, map_offset, map_size;\r
+    long count;\r
+    long pagesize;\r
+    void *mem, *data;\r
+    int type = 4;\r
+    int fd;\r
+\r
+    if (argc < 3) {\r
+        die();\r
+    }\r
+\r
+    if (*argv[1] == '-') {\r
+        if (argv[1][1] && argv[1][2])\r
+            die();\r
+\r
+        switch (argv[1][1]) {\r
+        case 'b': type = 1; dump_fun = dump_byte; break;\r
+        case 'h': type = 2; dump_fun = dump_half; break;\r
+        case 'w': type = 4; dump_fun = dump_word; break;\r
+        default:  die();\r
+        }\r
+\r
+        argc--;\r
+        argv++;\r
+    }\r
+\r
+    if (argc < 3) {\r
+        die();\r
+    }\r
+\r
+    offset = strtoul(argv[1], NULL, 0);\r
+    count  = strtoul(argv[2], NULL, 0);\r
+\r
+    fd = open("/dev/mem", O_RDONLY);\r
+    if (fd == -1) {\r
+        perror("/dev/mem");\r
+        return 1;\r
+    }\r
+\r
+    pagesize = sysconf(_SC_PAGESIZE);\r
+    map_offset = offset & ~(pagesize - 1);\r
+    map_size = count * type + (offset & (pagesize - 1));\r
+    map_size = (map_size + pagesize - 1) & ~(pagesize - 1);\r
+\r
+    mem = mmap(NULL, map_size, PROT_READ, MAP_SHARED, fd, map_offset);\r
+    if (mem == MAP_FAILED) {\r
+        perror("mmap");\r
+        exit(1);\r
+    }\r
+\r
+    data = (char*)mem + (offset - map_offset);\r
+\r
+    dump_fun(data, count, offset);\r
+\r
+    munmap(mem, map_size);\r
+    close(fd);\r
+\r
+    return 0;\r
+}\r
+\r
diff --git a/packages/omap3-deviceid/files/omap3-deviceid.sh b/packages/omap3-deviceid/files/omap3-deviceid.sh
new file mode 100755 (executable)
index 0000000..9f7bed0
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+/usr/bin/mem -h 0x4830a218 2 | cut -d' ' -f 2,3
diff --git a/packages/omap3-deviceid/omap3-deviceid_1.0.bb b/packages/omap3-deviceid/omap3-deviceid_1.0.bb
new file mode 100755 (executable)
index 0000000..27f9f38
--- /dev/null
@@ -0,0 +1,21 @@
+DESCRIPTION = "Report the OMAP3 SoC unique ID"
+LICENSE = "GPL"
+PR ="r2"
+
+inherit autotools
+
+PACKAGE_ARCH_omap3-pandora = "${MACHINE_ARCH}"
+
+SRC_URI = "file://mem.c \
+           file://omap3-deviceid.sh \
+           file://Makefile"
+
+S="${WORKDIR}"
+
+do_install() {
+       install -d ${D}${bindir}
+       install -m 0755 ${WORKDIR}/mem ${D}${bindir}
+       install -m 0755 ${WORKDIR}/omap3-deviceid.sh ${D}${bindir}
+}
+
+FILES_${PN} = "/usr/bin/mem /usr/bin/omap3-deviceid.sh"
diff --git a/packages/pandora-system/pandora-firmware.bb b/packages/pandora-system/pandora-firmware.bb
new file mode 100755 (executable)
index 0000000..9faf707
--- /dev/null
@@ -0,0 +1,23 @@
+DESCRIPTION = "Install binary firmware for Bluetooth and WiFi into the image."
+SUMMARY = "This is required to support the Bluetooth and WiFi modules on the Pandora"
+LICENSE = "proprietary-binary"
+
+COMPATIBLE_MACHINE = "omap3-pandora"
+PR = "r1"
+
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+RRECOMMENDS = "kernel-module-firmware-class"
+
+SRC_URI = "file://brf6300.bin \
+       file://Fw1251r1c.bin \
+"
+
+S = "${WORKDIR}"
+
+FILES_${PN} = "/lib"
+
+do_install() {
+       install -d ${D}/lib/firmware/
+       install -m 0644 ${S}/brf6300.bin ${S}/Fw1251r1c.bin ${D}/lib/firmware/
+}
diff --git a/packages/pandora-system/pandora-firmware/Fw1251r1c.bin b/packages/pandora-system/pandora-firmware/Fw1251r1c.bin
new file mode 100755 (executable)
index 0000000..1d7a033
Binary files /dev/null and b/packages/pandora-system/pandora-firmware/Fw1251r1c.bin differ
diff --git a/packages/pandora-system/pandora-firmware/README.txt b/packages/pandora-system/pandora-firmware/README.txt
new file mode 100755 (executable)
index 0000000..8442800
--- /dev/null
@@ -0,0 +1,3 @@
+Place Pandora firmware files in this dir. to build the pandora-firmware package (needed for WiFi and BT support).\r
+\r
+brf6300.bin and Fw1251r1c.bin
\ No newline at end of file
diff --git a/packages/pandora-system/pandora-firmware/brf6300.bin b/packages/pandora-system/pandora-firmware/brf6300.bin
new file mode 100755 (executable)
index 0000000..161feda
Binary files /dev/null and b/packages/pandora-system/pandora-firmware/brf6300.bin differ
diff --git a/packages/pandora-system/pandora-matchbox-gtk-theme.bb b/packages/pandora-system/pandora-matchbox-gtk-theme.bb
new file mode 100755 (executable)
index 0000000..97b7ad2
--- /dev/null
@@ -0,0 +1,20 @@
+DESCRIPTION = "Matchbox window manager theme for the Pandora"
+LICENSE = "GPL"
+DEPENDS = "matchbox-wm"
+SECTION = "x11/wm"
+
+DEFAULT_PREFERENCE = "-1"
+
+PV = "0.1"
+PR = "r0"
+
+PACKAGE_ARCH = "all"
+
+SRC_URI = "svn://svn.o-hand.com/repos/sato/trunk;module=matchbox-sato;proto=http"
+S = "${WORKDIR}/matchbox-sato"
+
+inherit autotools pkgconfig
+
+FILES_${PN} = "${datadir}/themes/Sato"
+
+
diff --git a/packages/pandora-system/pandora-set-root-password.bb b/packages/pandora-system/pandora-set-root-password.bb
new file mode 100755 (executable)
index 0000000..6176d4a
--- /dev/null
@@ -0,0 +1,27 @@
+DESCRIPTION = "Set the root password."
+SUMMARY = "On installation you will be prompted to set a root password. With \
+this password you can then log into the machine."
+LICENSE = "GPL"
+DEPENDS = "gtk+"
+PKG_TAGS_${PN} = "group::programming alias::Root_Password"
+
+SRC_URI = "svn://svn.openmoko.org/developers/zecke/;module=root-password;proto=http"
+S = "${WORKDIR}/root-password"
+PV = "1.0+svnr${SRCREV}"
+PE = "1"
+PR = "r1"
+
+do_compile () {
+    cd ${S}
+    oe_runmake
+}
+
+do_install() {
+    install -d ${D}/${sbindir}
+    install -m 0755 ${S}/root-password ${D}/${sbindir}/${PN}
+}
+
+pkg_postinst_${PN} () {
+    # assume we use display 0
+    DISPLAY=:0 ${sbindir}/${PN}
+}
diff --git a/packages/pandora-system/pandora-wifi-tools/wlan_cu_makefile.patch b/packages/pandora-system/pandora-wifi-tools/wlan_cu_makefile.patch
new file mode 100755 (executable)
index 0000000..93de941
--- /dev/null
@@ -0,0 +1,19 @@
+1e76adf502f842190416d5d7e3a8bc4f8f7a2c2a
+ sta_dk_4_0_4_32/CUDK/CLI/Makefile |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/sta_dk_4_0_4_32/CUDK/CLI/Makefile b/sta_dk_4_0_4_32/CUDK/CLI/Makefile
+old mode 100644
+new mode 100755
+index 93bd1e5..97d07a7
+--- a/sta_dk_4_0_4_32/CUDK/CLI/Makefile
++++ b/sta_dk_4_0_4_32/CUDK/CLI/Makefile
+@@ -136,7 +136,7 @@ endif
+ $(TARGET): $(OBJS) $(TI_UTIL_ADAPTER_LIB)
+ #     echo MAKECMDGOALS: $(MAKECMDGOALS) : $(findstring $(MAKECMDGOALS), "clean cleanall")
+-      $(CROSS_COMPILE)gcc $(OBJS) -L $(CUDK_ROOT)/Output -l $(TI_UTIL_ADAPTER_LIB) $(LDFLAGS) -lpthread -lc -lstdc++ -static -o $@
++      $(CROSS_COMPILE)gcc $(OBJS) -L $(OUTPUT_DIR) -l $(TI_UTIL_ADAPTER_LIB) $(LDFLAGS) -lpthread -lc -lstdc++ -static -o $@
+ ifeq ($(DEBUG), y)
+       cp -a $(TARGET) $(TARGET)_debug
+       $(CROSS_COMPILE)strip -s $(TARGET)
diff --git a/packages/pandora-system/pandora-wifi-tools_git.bb b/packages/pandora-system/pandora-wifi-tools_git.bb
new file mode 100755 (executable)
index 0000000..ff968e7
--- /dev/null
@@ -0,0 +1,29 @@
+DESCRIPTION = "Tools to support the TI1251 WiFi chip found on the Pandora - Connected via SDIO"
+LICENSE = "GPLv2"
+
+DEPENDS = "pandora-wifi"
+
+PR = "r3"
+
+# Check the include for the source location/GIT SRCREV etc.
+require pandora-wifi.inc
+
+SRC_URI += " \
+       file://wlan_cu_makefile.patch;patch=1 \
+"
+
+#
+#make  CROSS_COMPILE=${KERNEL_PREFIX} CROSS_COMPILE=arm-none-linux-gnueabi- V=1 ARCH=arm KERNEL_DIR=/storage/file-store/Projects/Pandora/pandora-kernel.git OUTPUT_DIR=/storage/file-store/Projects/Pandora/pandora-wifi.git
+
+do_compile_prepend() {
+       cd ${S}/sta_dk_4_0_4_32/CUDK/CLI/
+}
+
+do_install() {
+       install -d ${D}${bindir}
+       install -m 0755 ${S}/wlan_cu ${D}${bindir}
+#      install -m 0755 ${S}/tiwlan_loader ${D}${bindir}
+}
+
+FILES_${PN} = "/usr/bin/wlan_cu /usr/bin/tiwlan_loader"
+
diff --git a/packages/pandora-system/pandora-wifi.inc b/packages/pandora-system/pandora-wifi.inc
new file mode 100755 (executable)
index 0000000..0b9f585
--- /dev/null
@@ -0,0 +1,23 @@
+inherit module
+
+COMPATIBLE_MACHINE = "omap3-pandora"
+
+SRCREV = "f768d7ee7914d13a4277270774417b5e0f367701"
+
+SRC_URI = " \
+       git://openpandora.org/pandora-wifi.git;protocol=git;branch=pandora \
+"
+
+S = "${WORKDIR}/git"
+
+EXTRA_OEMAKE = " \
+               'KERNELVER=${KERNEL_VERSION}' \
+               'LINUXSRC=${STAGING_KERNEL_DIR}' \
+               'CC=${TARGET_PREFIX}' \
+               'OS=${TARGET_OS}'"
+
+KCFLAGS = "-D__KERNEL__ -DMODULE \
+          -I. -I${STAGING_KERNEL_DIR}/include \
+          ${CFLAGS}"
+
+MAKE_TARGETS = "BUILD=debug ARCH=arm CROSS_COMPILE=${TARGET_PREFIX} KERNEL_DIR=${STAGING_KERNEL_DIR} OUTPUT_DIR=${S} AR=ar"
diff --git a/packages/pandora-system/pandora-wifi_git.bb b/packages/pandora-system/pandora-wifi_git.bb
new file mode 100755 (executable)
index 0000000..406df0c
--- /dev/null
@@ -0,0 +1,17 @@
+DESCRIPTION = "Kernel drivers for the TI1251 WiFi chip found on the Pandora - Connected via SDIO"
+LICENSE = "GPLv2"
+
+PR = "r8"
+
+# Check the include for the source location/GIT SRCREV etc.
+require pandora-wifi.inc
+
+do_compile_prepend() {
+       cd ${S}/sta_dk_4_0_4_32/
+}
+
+do_install() {
+       cd ${S}/sta_dk_4_0_4_32/
+       mkdir -p ${D}/lib/modules/${KERNEL_VERSION}/kernel/drivers/net
+       cp ${S}/sta_dk_4_0_4_32/*.ko ${D}/lib/modules/${KERNEL_VERSION}/kernel/drivers/net
+}
diff --git a/packages/tasks/task-pandora-core.bb b/packages/tasks/task-pandora-core.bb
new file mode 100755 (executable)
index 0000000..3b1ea75
--- /dev/null
@@ -0,0 +1,54 @@
+DESCRIPTION = "Task file for default core/console apps in the Pandora image"
+
+# Don't forget to bump the PR if you change it.
+
+PR = "r12"
+
+inherit task 
+
+RDEPENDS_${PN} = "\
+       task-base-extended \
+       task-proper-tools \
+       pandora-wifi \
+       pandora-firmware \
+       libgles-omap3 \
+       libwiimote \
+       nfs-utils \
+       nfs-utils-client \
+#      unionfs-modules \
+       unionfs-utils \
+       tslib \
+       tslib-tests \
+       tslib-calibrate \
+       pointercal \
+       bash \
+       bzip2 \
+       psplash \
+       mkfs-jffs2 \
+       fbgrab \
+       fbset \
+       portmap \
+       fbset-modes \
+       fuse \
+       socat \
+       strace \
+       python-pygame \
+       ksymoops \
+       kexec-tools \
+       minicom \
+       nano \
+#      mono \
+       alsa-utils \
+       alsa-utils-alsactl \
+       alsa-utils-alsamixer \
+       alsa-utils-aplay \
+       openssh-scp \
+       openssh-ssh \
+       bluez-hcidump \
+       bluez-utils \
+       wireless-tools \
+       rdesktop \
+       zip \
+       openssh-scp openssh-ssh \
+#      networkmanager \
+"
diff --git a/packages/tasks/task-pandora-desktop.bb b/packages/tasks/task-pandora-desktop.bb
new file mode 100755 (executable)
index 0000000..c8ca4f6
--- /dev/null
@@ -0,0 +1,39 @@
+DESCRIPTION = "Task file for default GUI apps in the Pandora image"
+
+# Don't forget to bump the PR if you change it.
+
+PR = "r3"
+
+inherit task
+
+ECONFIG ?= "e-wm-config-angstrom e-wm-config-default"
+
+RDEPENDS_${PN} = "\
+       task-pandora-core \
+       ${XSERVER} \
+       angstrom-x11-base-depends \
+       angstrom-gpe-task-base \
+       angstrom-gpe-task-settings \
+       angstrom-zeroconf-audio \
+       angstrom-led-config \
+       gpe-scap \
+       mime-support e-wm ${ECONFIG} exhibit \
+       xterm xmms \
+       epiphany firefox midori \
+       swfdec-mozilla \
+       hicolor-icon-theme gnome-icon-theme \
+       jaaa nmap iperf gnuplot \
+       abiword \
+       gnumeric \
+       gimp \
+       powertop oprofile \
+       pidgin \
+       mplayer \
+       omapfbplay \
+       gnome-games \
+       stalonetray \
+       synergy \
+       x11vnc \
+       angstrom-gnome-icon-theme-enable \
+#      network-manager-applet \
+"
diff --git a/packages/tasks/task-pandora-gui.bb b/packages/tasks/task-pandora-gui.bb
new file mode 100755 (executable)
index 0000000..e804ed2
--- /dev/null
@@ -0,0 +1,53 @@
+DESCRIPTION = "Task file for default GUI apps in the Pandora image"
+
+# Don't forget to bump the PR if you change it.
+
+PR = "r6"
+
+inherit task
+
+RDEPENDS_${PN} = "\
+       task-pandora-core \
+       ${XSERVER} \
+       angstrom-x11-base-depends \
+       angstrom-gpe-task-base \
+       angstrom-gpe-task-settings \
+       abiword \
+       claws-mail \
+       evince \
+       exhibit \
+       epiphany firefox midori \
+       swfdec-mozilla \
+       omapfbplay \
+       pidgin \
+       synergy \
+       vnc \
+       x11vnc \
+       xmms \
+       xterm \
+       xtscal \
+       alsa-utils \
+       alsa-utils-alsactl \
+       alsa-utils-alsamixer \
+       alsa-utils-aplay \
+       pointercal \
+       matchbox-wm \
+       matchbox-keyboard matchbox-keyboard-applet matchbox-keyboard-im \
+       matchbox-desktop \
+       matchbox-common \
+       matchbox-config-gtk \
+       matchbox-themes-gtk \
+       matchbox-panel-manager \
+       matchbox-panel-hacks \
+       ttf-liberation-sans ttf-liberation-serif ttf-liberation-mono \
+       xauth xhost xset xrandr \
+       xcursor-transparent-theme \
+       settings-daemon \
+       mplayer \
+       omapfbplay \
+       matchbox-applet-cards \
+       matchbox-applet-inputmanager \
+       matchbox-applet-volume \
+       matchbox-applet-startup-monitor \
+#      network-manager-applet \
+"
diff --git a/packages/tasks/task-pandora-satogui.bb b/packages/tasks/task-pandora-satogui.bb
new file mode 100755 (executable)
index 0000000..b6d1054
--- /dev/null
@@ -0,0 +1,49 @@
+DESCRIPTION = "Task file for default GUI apps in the Pandora image"
+
+# Don't forget to bump the PR if you change it.
+
+PR = "r6"
+
+inherit task
+
+ECONFIG ?= "e-wm-config-standard e-wm-config-default"
+
+RDEPENDS_${PN} = "\
+       task-pandora-core \
+       ${XSERVER} \
+       angstrom-x11-base-depends \
+       pointercal \
+       matchbox-wm \
+       matchbox-keyboard matchbox-keyboard-applet matchbox-keyboard-im \
+       matchbox-desktop \
+       ttf-liberation-sans ttf-liberation-serif ttf-liberation-mono \
+       xauth xhost xset xrandr \
+       matchbox-sato \
+       matchbox-config-gtk \
+       matchbox-themes-gtk \
+       matchbox-applet-startup-monitor \
+       matchbox-panel-manager \
+       xcursor-transparent-theme \
+       sato-icon-theme \
+       settings-daemon \
+       abiword \
+       claws-mail \
+       evince \
+       exhibit \
+       epiphany firefox midori \
+       swfdec-mozilla \
+       omapfbplay \
+       pidgin \
+       synergy \
+       vnc \
+       x11vnc \
+       xmms \
+       xterm \
+       xtscal \
+       alsa-utils \
+       alsa-utils-alsactl \
+       alsa-utils-alsamixer \
+       alsa-utils-aplay \
+       mplayer omapfbplay \
+       gnumeric \
+"